1 //====- X86InstrSSE.td - Describe the X86 Instruction Set --*- tablegen -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file describes the X86 SSE instruction set, defining the instructions,
11 // and properties of the instructions which are needed for code generation,
12 // machine code emission, and analysis.
14 //===----------------------------------------------------------------------===//
17 //===----------------------------------------------------------------------===//
18 // SSE specific DAG Nodes.
19 //===----------------------------------------------------------------------===//
21 def SDTX86FPShiftOp : SDTypeProfile<1, 2, [ SDTCisSameAs<0, 1>,
22 SDTCisFP<0>, SDTCisInt<2> ]>;
23 def SDTX86VFCMP : SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisSameAs<1, 2>,
24 SDTCisFP<1>, SDTCisVT<3, i8>]>;
26 def X86fmin : SDNode<"X86ISD::FMIN", SDTFPBinOp>;
27 def X86fmax : SDNode<"X86ISD::FMAX", SDTFPBinOp>;
28 def X86fand : SDNode<"X86ISD::FAND", SDTFPBinOp,
29 [SDNPCommutative, SDNPAssociative]>;
30 def X86for : SDNode<"X86ISD::FOR", SDTFPBinOp,
31 [SDNPCommutative, SDNPAssociative]>;
32 def X86fxor : SDNode<"X86ISD::FXOR", SDTFPBinOp,
33 [SDNPCommutative, SDNPAssociative]>;
34 def X86frsqrt : SDNode<"X86ISD::FRSQRT", SDTFPUnaryOp>;
35 def X86frcp : SDNode<"X86ISD::FRCP", SDTFPUnaryOp>;
36 def X86fsrl : SDNode<"X86ISD::FSRL", SDTX86FPShiftOp>;
37 def X86comi : SDNode<"X86ISD::COMI", SDTX86CmpTest>;
38 def X86ucomi : SDNode<"X86ISD::UCOMI", SDTX86CmpTest>;
def X86pshufb  : SDNode<"X86ISD::PSHUFB",
                 SDTypeProfile<1, 2, [SDTCisVT<0, v16i8>, SDTCisSameAs<0,1>,
                                      SDTCisSameAs<0,2>]>>;
42 def X86pextrb : SDNode<"X86ISD::PEXTRB",
43 SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<2>]>>;
44 def X86pextrw : SDNode<"X86ISD::PEXTRW",
45 SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<2>]>>;
46 def X86pinsrb : SDNode<"X86ISD::PINSRB",
47 SDTypeProfile<1, 3, [SDTCisVT<0, v16i8>, SDTCisSameAs<0,1>,
48 SDTCisVT<2, i32>, SDTCisPtrTy<3>]>>;
49 def X86pinsrw : SDNode<"X86ISD::PINSRW",
50 SDTypeProfile<1, 3, [SDTCisVT<0, v8i16>, SDTCisSameAs<0,1>,
51 SDTCisVT<2, i32>, SDTCisPtrTy<3>]>>;
52 def X86insrtps : SDNode<"X86ISD::INSERTPS",
53 SDTypeProfile<1, 3, [SDTCisVT<0, v4f32>, SDTCisSameAs<0,1>,
54 SDTCisVT<2, f32>, SDTCisPtrTy<3>]>>;
55 def X86vzmovl : SDNode<"X86ISD::VZEXT_MOVL",
56 SDTypeProfile<1, 1, [SDTCisSameAs<0,1>]>>;
57 def X86vzload : SDNode<"X86ISD::VZEXT_LOAD", SDTLoad,
58 [SDNPHasChain, SDNPMayLoad]>;
59 def X86vshl : SDNode<"X86ISD::VSHL", SDTIntShiftOp>;
60 def X86vshr : SDNode<"X86ISD::VSRL", SDTIntShiftOp>;
61 def X86cmpps : SDNode<"X86ISD::CMPPS", SDTX86VFCMP>;
62 def X86cmppd : SDNode<"X86ISD::CMPPD", SDTX86VFCMP>;
63 def X86pcmpeqb : SDNode<"X86ISD::PCMPEQB", SDTIntBinOp, [SDNPCommutative]>;
64 def X86pcmpeqw : SDNode<"X86ISD::PCMPEQW", SDTIntBinOp, [SDNPCommutative]>;
65 def X86pcmpeqd : SDNode<"X86ISD::PCMPEQD", SDTIntBinOp, [SDNPCommutative]>;
66 def X86pcmpeqq : SDNode<"X86ISD::PCMPEQQ", SDTIntBinOp, [SDNPCommutative]>;
67 def X86pcmpgtb : SDNode<"X86ISD::PCMPGTB", SDTIntBinOp>;
68 def X86pcmpgtw : SDNode<"X86ISD::PCMPGTW", SDTIntBinOp>;
69 def X86pcmpgtd : SDNode<"X86ISD::PCMPGTD", SDTIntBinOp>;
70 def X86pcmpgtq : SDNode<"X86ISD::PCMPGTQ", SDTIntBinOp>;
72 //===----------------------------------------------------------------------===//
73 // SSE Complex Patterns
74 //===----------------------------------------------------------------------===//
// These are 'extloads' from a scalar to the low element of a vector, zeroing
// the top elements. These are used for the SSE 'ss' and 'sd' instruction
// forms.
79 def sse_load_f32 : ComplexPattern<v4f32, 4, "SelectScalarSSELoad", [],
80 [SDNPHasChain, SDNPMayLoad]>;
81 def sse_load_f64 : ComplexPattern<v2f64, 4, "SelectScalarSSELoad", [],
82 [SDNPHasChain, SDNPMayLoad]>;
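// Illustration (not part of the original comments): these complex patterns are
// what the scalar-intrinsic memory forms below match, e.g. the instantiated
// ADDSSrm_Int pattern is
//   (set VR128:$dst, (int_x86_sse_add_ss VR128:$src1, sse_load_f32:$src2))
// so a scalar load feeding the intrinsic is folded into the memory operand.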
def ssmem : Operand<v4f32> {
  let PrintMethod = "printf32mem";
  let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc, i32imm);
}
def sdmem : Operand<v2f64> {
  let PrintMethod = "printf64mem";
  let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc, i32imm);
}
93 //===----------------------------------------------------------------------===//
94 // SSE pattern fragments
95 //===----------------------------------------------------------------------===//
97 def loadv4f32 : PatFrag<(ops node:$ptr), (v4f32 (load node:$ptr))>;
98 def loadv2f64 : PatFrag<(ops node:$ptr), (v2f64 (load node:$ptr))>;
99 def loadv4i32 : PatFrag<(ops node:$ptr), (v4i32 (load node:$ptr))>;
100 def loadv2i64 : PatFrag<(ops node:$ptr), (v2i64 (load node:$ptr))>;
// Like 'store', but always requires vector alignment.
def alignedstore : PatFrag<(ops node:$val, node:$ptr),
                           (store node:$val, node:$ptr), [{
  return cast<StoreSDNode>(N)->getAlignment() >= 16;
}]>;

// Like 'load', but always requires vector alignment.
def alignedload : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return cast<LoadSDNode>(N)->getAlignment() >= 16;
}]>;
113 def alignedloadfsf32 : PatFrag<(ops node:$ptr), (f32 (alignedload node:$ptr))>;
114 def alignedloadfsf64 : PatFrag<(ops node:$ptr), (f64 (alignedload node:$ptr))>;
115 def alignedloadv4f32 : PatFrag<(ops node:$ptr), (v4f32 (alignedload node:$ptr))>;
116 def alignedloadv2f64 : PatFrag<(ops node:$ptr), (v2f64 (alignedload node:$ptr))>;
117 def alignedloadv4i32 : PatFrag<(ops node:$ptr), (v4i32 (alignedload node:$ptr))>;
118 def alignedloadv2i64 : PatFrag<(ops node:$ptr), (v2i64 (alignedload node:$ptr))>;
120 // Like 'load', but uses special alignment checks suitable for use in
121 // memory operands in most SSE instructions, which are required to
122 // be naturally aligned on some targets but not on others.
123 // FIXME: Actually implement support for targets that don't require the
124 // alignment. This probably wants a subtarget predicate.
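// A hypothetical shape for that fix (the predicate name below is made up, not
// an existing API):
//   return !Subtarget->requiresVectorAlignment() ||
//          cast<LoadSDNode>(N)->getAlignment() >= 16;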
def memop : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return cast<LoadSDNode>(N)->getAlignment() >= 16;
}]>;
129 def memopfsf32 : PatFrag<(ops node:$ptr), (f32 (memop node:$ptr))>;
130 def memopfsf64 : PatFrag<(ops node:$ptr), (f64 (memop node:$ptr))>;
131 def memopv4f32 : PatFrag<(ops node:$ptr), (v4f32 (memop node:$ptr))>;
132 def memopv2f64 : PatFrag<(ops node:$ptr), (v2f64 (memop node:$ptr))>;
133 def memopv4i32 : PatFrag<(ops node:$ptr), (v4i32 (memop node:$ptr))>;
134 def memopv2i64 : PatFrag<(ops node:$ptr), (v2i64 (memop node:$ptr))>;
135 def memopv16i8 : PatFrag<(ops node:$ptr), (v16i8 (memop node:$ptr))>;
// SSSE3 uses MMX registers for some instructions. They aren't aligned on a
// 16-byte boundary.
// FIXME: 8 byte alignment for mmx reads is not required
def memop64 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return cast<LoadSDNode>(N)->getAlignment() >= 8;
}]>;
144 def memopv8i8 : PatFrag<(ops node:$ptr), (v8i8 (memop64 node:$ptr))>;
145 def memopv4i16 : PatFrag<(ops node:$ptr), (v4i16 (memop64 node:$ptr))>;
146 def memopv8i16 : PatFrag<(ops node:$ptr), (v8i16 (memop64 node:$ptr))>;
147 def memopv2i32 : PatFrag<(ops node:$ptr), (v2i32 (memop64 node:$ptr))>;
149 def bc_v4f32 : PatFrag<(ops node:$in), (v4f32 (bitconvert node:$in))>;
150 def bc_v2f64 : PatFrag<(ops node:$in), (v2f64 (bitconvert node:$in))>;
151 def bc_v16i8 : PatFrag<(ops node:$in), (v16i8 (bitconvert node:$in))>;
152 def bc_v8i16 : PatFrag<(ops node:$in), (v8i16 (bitconvert node:$in))>;
153 def bc_v4i32 : PatFrag<(ops node:$in), (v4i32 (bitconvert node:$in))>;
154 def bc_v2i64 : PatFrag<(ops node:$in), (v2i64 (bitconvert node:$in))>;
156 def vzmovl_v2i64 : PatFrag<(ops node:$src),
157 (bitconvert (v2i64 (X86vzmovl
158 (v2i64 (scalar_to_vector (loadi64 node:$src))))))>;
159 def vzmovl_v4i32 : PatFrag<(ops node:$src),
160 (bitconvert (v4i32 (X86vzmovl
161 (v4i32 (scalar_to_vector (loadi32 node:$src))))))>;
163 def vzload_v2i64 : PatFrag<(ops node:$src),
164 (bitconvert (v2i64 (X86vzload node:$src)))>;
def fp32imm0 : PatLeaf<(f32 fpimm), [{
  return N->isExactlyValue(+0.0);
}]>;

def PSxLDQ_imm : SDNodeXForm<imm, [{
  // Transformation function: imm >> 3
  return getI32Imm(N->getZExtValue() >> 3);
}]>;
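// Note (an assumption about intent, not from the original source): the DAG
// shift amount is expressed in bits while PSLLDQ/PSRLDQ take a byte count,
// so e.g. an immediate of 64 bits becomes 64 >> 3 = 8 bytes.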
// SHUFFLE_get_shuf_imm xform function: convert vector_shuffle mask to PSHUF*,
// SHUFP* etc. imm.
def SHUFFLE_get_shuf_imm : SDNodeXForm<build_vector, [{
  return getI8Imm(X86::getShuffleSHUFImmediate(N));
}]>;

// SHUFFLE_get_pshufhw_imm xform function: convert vector_shuffle mask to
// PSHUFHW imm.
def SHUFFLE_get_pshufhw_imm : SDNodeXForm<build_vector, [{
  return getI8Imm(X86::getShufflePSHUFHWImmediate(N));
}]>;

// SHUFFLE_get_pshuflw_imm xform function: convert vector_shuffle mask to
// PSHUFLW imm.
def SHUFFLE_get_pshuflw_imm : SDNodeXForm<build_vector, [{
  return getI8Imm(X86::getShufflePSHUFLWImmediate(N));
}]>;
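// Worked example, for reference: the SHUF/PSHUF immediate packs one 2-bit
// source index per destination element, element 0 in the low bits, so a
// <2,3,0,1> v4i32 shuffle encodes as binary 01 00 11 10 = 0x4E.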
194 def SSE_splat_mask : PatLeaf<(build_vector), [{
195 return X86::isSplatMask(N);
196 }], SHUFFLE_get_shuf_imm>;
def SSE_splat_lo_mask : PatLeaf<(build_vector), [{
  return X86::isSplatLoMask(N);
}]>;

def MOVDDUP_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isMOVDDUPMask(N);
}]>;

def MOVHLPS_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isMOVHLPSMask(N);
}]>;

def MOVHLPS_v_undef_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isMOVHLPS_v_undef_Mask(N);
}]>;

def MOVHP_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isMOVHPMask(N);
}]>;

def MOVLP_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isMOVLPMask(N);
}]>;

def MOVL_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isMOVLMask(N);
}]>;

def MOVSHDUP_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isMOVSHDUPMask(N);
}]>;

def MOVSLDUP_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isMOVSLDUPMask(N);
}]>;

def UNPCKL_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isUNPCKLMask(N);
}]>;

def UNPCKH_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isUNPCKHMask(N);
}]>;

def UNPCKL_v_undef_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isUNPCKL_v_undef_Mask(N);
}]>;

def UNPCKH_v_undef_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isUNPCKH_v_undef_Mask(N);
}]>;
250 def PSHUFD_shuffle_mask : PatLeaf<(build_vector), [{
251 return X86::isPSHUFDMask(N);
252 }], SHUFFLE_get_shuf_imm>;
254 def PSHUFHW_shuffle_mask : PatLeaf<(build_vector), [{
255 return X86::isPSHUFHWMask(N);
256 }], SHUFFLE_get_pshufhw_imm>;
258 def PSHUFLW_shuffle_mask : PatLeaf<(build_vector), [{
259 return X86::isPSHUFLWMask(N);
260 }], SHUFFLE_get_pshuflw_imm>;
262 def SHUFP_unary_shuffle_mask : PatLeaf<(build_vector), [{
263 return X86::isPSHUFDMask(N);
264 }], SHUFFLE_get_shuf_imm>;
266 def SHUFP_shuffle_mask : PatLeaf<(build_vector), [{
267 return X86::isSHUFPMask(N);
268 }], SHUFFLE_get_shuf_imm>;
270 def PSHUFD_binary_shuffle_mask : PatLeaf<(build_vector), [{
271 return X86::isSHUFPMask(N);
272 }], SHUFFLE_get_shuf_imm>;
275 //===----------------------------------------------------------------------===//
276 // SSE scalar FP Instructions
277 //===----------------------------------------------------------------------===//
// CMOV* - Used to implement the SSE SELECT DAG operation.  These are expanded
// by the scheduler into a branch sequence.
let Uses = [EFLAGS], usesCustomDAGSchedInserter = 1 in {
  def CMOV_FR32 : I<0, Pseudo,
                    (outs FR32:$dst), (ins FR32:$t, FR32:$f, i8imm:$cond),
                    "#CMOV_FR32 PSEUDO!",
                    [(set FR32:$dst, (X86cmov FR32:$t, FR32:$f, imm:$cond,
                                              EFLAGS))]>;
  def CMOV_FR64 : I<0, Pseudo,
                    (outs FR64:$dst), (ins FR64:$t, FR64:$f, i8imm:$cond),
                    "#CMOV_FR64 PSEUDO!",
                    [(set FR64:$dst, (X86cmov FR64:$t, FR64:$f, imm:$cond,
                                              EFLAGS))]>;
  def CMOV_V4F32 : I<0, Pseudo,
                    (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
                    "#CMOV_V4F32 PSEUDO!",
                    [(set VR128:$dst,
                      (v4f32 (X86cmov VR128:$t, VR128:$f, imm:$cond,
                                      EFLAGS)))]>;
  def CMOV_V2F64 : I<0, Pseudo,
                    (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
                    "#CMOV_V2F64 PSEUDO!",
                    [(set VR128:$dst,
                      (v2f64 (X86cmov VR128:$t, VR128:$f, imm:$cond,
                                      EFLAGS)))]>;
  def CMOV_V2I64 : I<0, Pseudo,
                    (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
                    "#CMOV_V2I64 PSEUDO!",
                    [(set VR128:$dst,
                      (v2i64 (X86cmov VR128:$t, VR128:$f, imm:$cond,
                                      EFLAGS)))]>;
}
//===----------------------------------------------------------------------===//
// SSE1 Instructions
//===----------------------------------------------------------------------===//

// Move Instructions
318 let neverHasSideEffects = 1 in
319 def MOVSSrr : SSI<0x10, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
320 "movss\t{$src, $dst|$dst, $src}", []>;
321 let canFoldAsLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in
322 def MOVSSrm : SSI<0x10, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
323 "movss\t{$src, $dst|$dst, $src}",
324 [(set FR32:$dst, (loadf32 addr:$src))]>;
325 def MOVSSmr : SSI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
326 "movss\t{$src, $dst|$dst, $src}",
327 [(store FR32:$src, addr:$dst)]>;
329 // Conversion instructions
330 def CVTTSS2SIrr : SSI<0x2C, MRMSrcReg, (outs GR32:$dst), (ins FR32:$src),
331 "cvttss2si\t{$src, $dst|$dst, $src}",
332 [(set GR32:$dst, (fp_to_sint FR32:$src))]>;
333 def CVTTSS2SIrm : SSI<0x2C, MRMSrcMem, (outs GR32:$dst), (ins f32mem:$src),
334 "cvttss2si\t{$src, $dst|$dst, $src}",
335 [(set GR32:$dst, (fp_to_sint (loadf32 addr:$src)))]>;
336 def CVTSI2SSrr : SSI<0x2A, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
337 "cvtsi2ss\t{$src, $dst|$dst, $src}",
338 [(set FR32:$dst, (sint_to_fp GR32:$src))]>;
339 def CVTSI2SSrm : SSI<0x2A, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
340 "cvtsi2ss\t{$src, $dst|$dst, $src}",
341 [(set FR32:$dst, (sint_to_fp (loadi32 addr:$src)))]>;
343 // Match intrinsics which expect XMM operand(s).
344 def Int_CVTSS2SIrr : SSI<0x2D, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
345 "cvtss2si\t{$src, $dst|$dst, $src}",
346 [(set GR32:$dst, (int_x86_sse_cvtss2si VR128:$src))]>;
347 def Int_CVTSS2SIrm : SSI<0x2D, MRMSrcMem, (outs GR32:$dst), (ins f32mem:$src),
348 "cvtss2si\t{$src, $dst|$dst, $src}",
349 [(set GR32:$dst, (int_x86_sse_cvtss2si
350 (load addr:$src)))]>;
// Match intrinsics which expect MM and XMM operand(s).
353 def Int_CVTPS2PIrr : PSI<0x2D, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
354 "cvtps2pi\t{$src, $dst|$dst, $src}",
355 [(set VR64:$dst, (int_x86_sse_cvtps2pi VR128:$src))]>;
356 def Int_CVTPS2PIrm : PSI<0x2D, MRMSrcMem, (outs VR64:$dst), (ins f64mem:$src),
357 "cvtps2pi\t{$src, $dst|$dst, $src}",
358 [(set VR64:$dst, (int_x86_sse_cvtps2pi
359 (load addr:$src)))]>;
360 def Int_CVTTPS2PIrr: PSI<0x2C, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
361 "cvttps2pi\t{$src, $dst|$dst, $src}",
362 [(set VR64:$dst, (int_x86_sse_cvttps2pi VR128:$src))]>;
363 def Int_CVTTPS2PIrm: PSI<0x2C, MRMSrcMem, (outs VR64:$dst), (ins f64mem:$src),
364 "cvttps2pi\t{$src, $dst|$dst, $src}",
365 [(set VR64:$dst, (int_x86_sse_cvttps2pi
366 (load addr:$src)))]>;
let Constraints = "$src1 = $dst" in {
  def Int_CVTPI2PSrr : PSI<0x2A, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1, VR64:$src2),
                        "cvtpi2ps\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst, (int_x86_sse_cvtpi2ps VR128:$src1,
                                           VR64:$src2))]>;
  def Int_CVTPI2PSrm : PSI<0x2A, MRMSrcMem,
                        (outs VR128:$dst), (ins VR128:$src1, i64mem:$src2),
                        "cvtpi2ps\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst, (int_x86_sse_cvtpi2ps VR128:$src1,
                                           (load addr:$src2)))]>;
}
// Aliases for intrinsics
def Int_CVTTSS2SIrr : SSI<0x2C, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
                          "cvttss2si\t{$src, $dst|$dst, $src}",
                          [(set GR32:$dst,
                            (int_x86_sse_cvttss2si VR128:$src))]>;
def Int_CVTTSS2SIrm : SSI<0x2C, MRMSrcMem, (outs GR32:$dst), (ins f32mem:$src),
                          "cvttss2si\t{$src, $dst|$dst, $src}",
                          [(set GR32:$dst,
                            (int_x86_sse_cvttss2si (load addr:$src)))]>;
let Constraints = "$src1 = $dst" in {
  def Int_CVTSI2SSrr : SSI<0x2A, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1, GR32:$src2),
                        "cvtsi2ss\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst, (int_x86_sse_cvtsi2ss VR128:$src1,
                                           GR32:$src2))]>;
  def Int_CVTSI2SSrm : SSI<0x2A, MRMSrcMem,
                        (outs VR128:$dst), (ins VR128:$src1, i32mem:$src2),
                        "cvtsi2ss\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst, (int_x86_sse_cvtsi2ss VR128:$src1,
                                           (loadi32 addr:$src2)))]>;
}
403 // Comparison instructions
404 let Constraints = "$src1 = $dst", neverHasSideEffects = 1 in {
405 def CMPSSrr : SSIi8<0xC2, MRMSrcReg,
406 (outs FR32:$dst), (ins FR32:$src1, FR32:$src, SSECC:$cc),
407 "cmp${cc}ss\t{$src, $dst|$dst, $src}", []>;
409 def CMPSSrm : SSIi8<0xC2, MRMSrcMem,
410 (outs FR32:$dst), (ins FR32:$src1, f32mem:$src, SSECC:$cc),
411 "cmp${cc}ss\t{$src, $dst|$dst, $src}", []>;
414 let Defs = [EFLAGS] in {
415 def UCOMISSrr: PSI<0x2E, MRMSrcReg, (outs), (ins FR32:$src1, FR32:$src2),
416 "ucomiss\t{$src2, $src1|$src1, $src2}",
417 [(X86cmp FR32:$src1, FR32:$src2), (implicit EFLAGS)]>;
418 def UCOMISSrm: PSI<0x2E, MRMSrcMem, (outs), (ins FR32:$src1, f32mem:$src2),
419 "ucomiss\t{$src2, $src1|$src1, $src2}",
                    [(X86cmp FR32:$src1, (loadf32 addr:$src2)),
                     (implicit EFLAGS)]>;
} // Defs = [EFLAGS]
424 // Aliases to match intrinsics which expect XMM operand(s).
425 let Constraints = "$src1 = $dst" in {
426 def Int_CMPSSrr : SSIi8<0xC2, MRMSrcReg,
427 (outs VR128:$dst), (ins VR128:$src1, VR128:$src, SSECC:$cc),
428 "cmp${cc}ss\t{$src, $dst|$dst, $src}",
429 [(set VR128:$dst, (int_x86_sse_cmp_ss VR128:$src1,
430 VR128:$src, imm:$cc))]>;
431 def Int_CMPSSrm : SSIi8<0xC2, MRMSrcMem,
432 (outs VR128:$dst), (ins VR128:$src1, f32mem:$src, SSECC:$cc),
433 "cmp${cc}ss\t{$src, $dst|$dst, $src}",
434 [(set VR128:$dst, (int_x86_sse_cmp_ss VR128:$src1,
                                           (load addr:$src), imm:$cc))]>;
}
let Defs = [EFLAGS] in {
def Int_UCOMISSrr: PSI<0x2E, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
                       "ucomiss\t{$src2, $src1|$src1, $src2}",
                       [(X86ucomi (v4f32 VR128:$src1), VR128:$src2),
                        (implicit EFLAGS)]>;
def Int_UCOMISSrm: PSI<0x2E, MRMSrcMem, (outs),(ins VR128:$src1, f128mem:$src2),
                       "ucomiss\t{$src2, $src1|$src1, $src2}",
                       [(X86ucomi (v4f32 VR128:$src1), (load addr:$src2)),
                        (implicit EFLAGS)]>;

def Int_COMISSrr: PSI<0x2F, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
                      "comiss\t{$src2, $src1|$src1, $src2}",
                      [(X86comi (v4f32 VR128:$src1), VR128:$src2),
                       (implicit EFLAGS)]>;
def Int_COMISSrm: PSI<0x2F, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
                      "comiss\t{$src2, $src1|$src1, $src2}",
                      [(X86comi (v4f32 VR128:$src1), (load addr:$src2)),
                       (implicit EFLAGS)]>;
} // Defs = [EFLAGS]
// Aliases of packed SSE1 instructions for scalar use. These all have names
// that start with 'Fs'.
461 // Alias instructions that map fld0 to pxor for sse.
462 let isReMaterializable = 1, isAsCheapAsAMove = 1 in
463 def FsFLD0SS : I<0xEF, MRMInitReg, (outs FR32:$dst), (ins),
464 "pxor\t$dst, $dst", [(set FR32:$dst, fp32imm0)]>,
465 Requires<[HasSSE1]>, TB, OpSize;
// Alias instruction to do FR32 reg-to-reg copy using movaps. Upper bits are
// disregarded.
469 let neverHasSideEffects = 1 in
470 def FsMOVAPSrr : PSI<0x28, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
471 "movaps\t{$src, $dst|$dst, $src}", []>;
// Alias instruction to load FR32 from f128mem using movaps. Upper bits are
// disregarded.
475 let canFoldAsLoad = 1 in
476 def FsMOVAPSrm : PSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
477 "movaps\t{$src, $dst|$dst, $src}",
478 [(set FR32:$dst, (alignedloadfsf32 addr:$src))]>;
480 // Alias bitwise logical operations using SSE logical ops on packed FP values.
481 let Constraints = "$src1 = $dst" in {
482 let isCommutable = 1 in {
483 def FsANDPSrr : PSI<0x54, MRMSrcReg, (outs FR32:$dst),
484 (ins FR32:$src1, FR32:$src2),
485 "andps\t{$src2, $dst|$dst, $src2}",
486 [(set FR32:$dst, (X86fand FR32:$src1, FR32:$src2))]>;
487 def FsORPSrr : PSI<0x56, MRMSrcReg, (outs FR32:$dst),
488 (ins FR32:$src1, FR32:$src2),
489 "orps\t{$src2, $dst|$dst, $src2}",
490 [(set FR32:$dst, (X86for FR32:$src1, FR32:$src2))]>;
491 def FsXORPSrr : PSI<0x57, MRMSrcReg, (outs FR32:$dst),
492 (ins FR32:$src1, FR32:$src2),
493 "xorps\t{$src2, $dst|$dst, $src2}",
                     [(set FR32:$dst, (X86fxor FR32:$src1, FR32:$src2))]>;
}
497 def FsANDPSrm : PSI<0x54, MRMSrcMem, (outs FR32:$dst),
498 (ins FR32:$src1, f128mem:$src2),
499 "andps\t{$src2, $dst|$dst, $src2}",
500 [(set FR32:$dst, (X86fand FR32:$src1,
501 (memopfsf32 addr:$src2)))]>;
502 def FsORPSrm : PSI<0x56, MRMSrcMem, (outs FR32:$dst),
503 (ins FR32:$src1, f128mem:$src2),
504 "orps\t{$src2, $dst|$dst, $src2}",
505 [(set FR32:$dst, (X86for FR32:$src1,
506 (memopfsf32 addr:$src2)))]>;
507 def FsXORPSrm : PSI<0x57, MRMSrcMem, (outs FR32:$dst),
508 (ins FR32:$src1, f128mem:$src2),
509 "xorps\t{$src2, $dst|$dst, $src2}",
510 [(set FR32:$dst, (X86fxor FR32:$src1,
511 (memopfsf32 addr:$src2)))]>;
513 let neverHasSideEffects = 1 in {
514 def FsANDNPSrr : PSI<0x55, MRMSrcReg,
515 (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
516 "andnps\t{$src2, $dst|$dst, $src2}", []>;
518 def FsANDNPSrm : PSI<0x55, MRMSrcMem,
519 (outs FR32:$dst), (ins FR32:$src1, f128mem:$src2),
520 "andnps\t{$src2, $dst|$dst, $src2}", []>;
524 /// basic_sse1_fp_binop_rm - SSE1 binops come in both scalar and vector forms.
526 /// In addition, we also have a special variant of the scalar form here to
527 /// represent the associated intrinsic operation. This form is unlike the
528 /// plain scalar form, in that it takes an entire vector (instead of a scalar)
529 /// and leaves the top elements undefined.
531 /// These three forms can each be reg+reg or reg+mem, so there are a total of
532 /// six "instructions".
534 let Constraints = "$src1 = $dst" in {
535 multiclass basic_sse1_fp_binop_rm<bits<8> opc, string OpcodeStr,
536 SDNode OpNode, Intrinsic F32Int,
537 bit Commutable = 0> {
538 // Scalar operation, reg+reg.
539 def SSrr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
540 !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
541 [(set FR32:$dst, (OpNode FR32:$src1, FR32:$src2))]> {
542 let isCommutable = Commutable;
545 // Scalar operation, reg+mem.
546 def SSrm : SSI<opc, MRMSrcMem, (outs FR32:$dst),
547 (ins FR32:$src1, f32mem:$src2),
548 !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
549 [(set FR32:$dst, (OpNode FR32:$src1, (load addr:$src2)))]>;
551 // Vector operation, reg+reg.
552 def PSrr : PSI<opc, MRMSrcReg, (outs VR128:$dst),
553 (ins VR128:$src1, VR128:$src2),
554 !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
555 [(set VR128:$dst, (v4f32 (OpNode VR128:$src1, VR128:$src2)))]> {
556 let isCommutable = Commutable;
559 // Vector operation, reg+mem.
560 def PSrm : PSI<opc, MRMSrcMem, (outs VR128:$dst),
561 (ins VR128:$src1, f128mem:$src2),
562 !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
563 [(set VR128:$dst, (OpNode VR128:$src1, (memopv4f32 addr:$src2)))]>;
565 // Intrinsic operation, reg+reg.
566 def SSrr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst),
567 (ins VR128:$src1, VR128:$src2),
568 !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
569 [(set VR128:$dst, (F32Int VR128:$src1, VR128:$src2))]> {
570 let isCommutable = Commutable;
573 // Intrinsic operation, reg+mem.
574 def SSrm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst),
575 (ins VR128:$src1, ssmem:$src2),
576 !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
577 [(set VR128:$dst, (F32Int VR128:$src1,
578 sse_load_f32:$src2))]>;
582 // Arithmetic instructions
583 defm ADD : basic_sse1_fp_binop_rm<0x58, "add", fadd, int_x86_sse_add_ss, 1>;
584 defm MUL : basic_sse1_fp_binop_rm<0x59, "mul", fmul, int_x86_sse_mul_ss, 1>;
585 defm SUB : basic_sse1_fp_binop_rm<0x5C, "sub", fsub, int_x86_sse_sub_ss>;
586 defm DIV : basic_sse1_fp_binop_rm<0x5E, "div", fdiv, int_x86_sse_div_ss>;
588 /// sse1_fp_binop_rm - Other SSE1 binops
590 /// This multiclass is like basic_sse1_fp_binop_rm, with the addition of
591 /// instructions for a full-vector intrinsic form. Operations that map
592 /// onto C operators don't use this form since they just use the plain
593 /// vector form instead of having a separate vector intrinsic form.
595 /// This provides a total of eight "instructions".
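/// For example, "defm MAX : sse1_fp_binop_rm<0x5F, ...>" below yields the six
/// forms described above plus the packed-intrinsic pair MAXPSrr_Int /
/// MAXPSrm_Int for int_x86_sse_max_ps, eight in total (illustrative only).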
597 let Constraints = "$src1 = $dst" in {
multiclass sse1_fp_binop_rm<bits<8> opc, string OpcodeStr,
                            SDNode OpNode, Intrinsic F32Int,
                            Intrinsic V4F32Int,
                            bit Commutable = 0> {
604 // Scalar operation, reg+reg.
605 def SSrr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
606 !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
607 [(set FR32:$dst, (OpNode FR32:$src1, FR32:$src2))]> {
608 let isCommutable = Commutable;
611 // Scalar operation, reg+mem.
612 def SSrm : SSI<opc, MRMSrcMem, (outs FR32:$dst),
613 (ins FR32:$src1, f32mem:$src2),
614 !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
615 [(set FR32:$dst, (OpNode FR32:$src1, (load addr:$src2)))]>;
617 // Vector operation, reg+reg.
618 def PSrr : PSI<opc, MRMSrcReg, (outs VR128:$dst),
619 (ins VR128:$src1, VR128:$src2),
620 !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
621 [(set VR128:$dst, (v4f32 (OpNode VR128:$src1, VR128:$src2)))]> {
622 let isCommutable = Commutable;
625 // Vector operation, reg+mem.
626 def PSrm : PSI<opc, MRMSrcMem, (outs VR128:$dst),
627 (ins VR128:$src1, f128mem:$src2),
628 !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
629 [(set VR128:$dst, (OpNode VR128:$src1, (memopv4f32 addr:$src2)))]>;
631 // Intrinsic operation, reg+reg.
632 def SSrr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst),
633 (ins VR128:$src1, VR128:$src2),
634 !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
635 [(set VR128:$dst, (F32Int VR128:$src1, VR128:$src2))]> {
636 let isCommutable = Commutable;
639 // Intrinsic operation, reg+mem.
640 def SSrm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst),
641 (ins VR128:$src1, ssmem:$src2),
642 !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
643 [(set VR128:$dst, (F32Int VR128:$src1,
644 sse_load_f32:$src2))]>;
646 // Vector intrinsic operation, reg+reg.
647 def PSrr_Int : PSI<opc, MRMSrcReg, (outs VR128:$dst),
648 (ins VR128:$src1, VR128:$src2),
649 !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
650 [(set VR128:$dst, (V4F32Int VR128:$src1, VR128:$src2))]> {
651 let isCommutable = Commutable;
654 // Vector intrinsic operation, reg+mem.
655 def PSrm_Int : PSI<opc, MRMSrcMem, (outs VR128:$dst),
656 (ins VR128:$src1, f128mem:$src2),
657 !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
658 [(set VR128:$dst, (V4F32Int VR128:$src1, (memopv4f32 addr:$src2)))]>;
662 defm MAX : sse1_fp_binop_rm<0x5F, "max", X86fmax,
663 int_x86_sse_max_ss, int_x86_sse_max_ps>;
664 defm MIN : sse1_fp_binop_rm<0x5D, "min", X86fmin,
665 int_x86_sse_min_ss, int_x86_sse_min_ps>;
667 //===----------------------------------------------------------------------===//
// SSE packed FP Instructions
//===----------------------------------------------------------------------===//
671 let neverHasSideEffects = 1 in
672 def MOVAPSrr : PSI<0x28, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
673 "movaps\t{$src, $dst|$dst, $src}", []>;
674 let canFoldAsLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in
675 def MOVAPSrm : PSI<0x28, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
676 "movaps\t{$src, $dst|$dst, $src}",
677 [(set VR128:$dst, (alignedloadv4f32 addr:$src))]>;
679 def MOVAPSmr : PSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
680 "movaps\t{$src, $dst|$dst, $src}",
681 [(alignedstore (v4f32 VR128:$src), addr:$dst)]>;
683 let neverHasSideEffects = 1 in
684 def MOVUPSrr : PSI<0x10, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
685 "movups\t{$src, $dst|$dst, $src}", []>;
686 let canFoldAsLoad = 1 in
687 def MOVUPSrm : PSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
688 "movups\t{$src, $dst|$dst, $src}",
689 [(set VR128:$dst, (loadv4f32 addr:$src))]>;
690 def MOVUPSmr : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
691 "movups\t{$src, $dst|$dst, $src}",
692 [(store (v4f32 VR128:$src), addr:$dst)]>;
694 // Intrinsic forms of MOVUPS load and store
695 let canFoldAsLoad = 1 in
696 def MOVUPSrm_Int : PSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
697 "movups\t{$src, $dst|$dst, $src}",
698 [(set VR128:$dst, (int_x86_sse_loadu_ps addr:$src))]>;
699 def MOVUPSmr_Int : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
700 "movups\t{$src, $dst|$dst, $src}",
701 [(int_x86_sse_storeu_ps addr:$dst, VR128:$src)]>;
703 let Constraints = "$src1 = $dst" in {
704 let AddedComplexity = 20 in {
705 def MOVLPSrm : PSI<0x12, MRMSrcMem,
706 (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
707 "movlps\t{$src2, $dst|$dst, $src2}",
709 (v4f32 (vector_shuffle VR128:$src1,
710 (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2)))),
711 MOVLP_shuffle_mask)))]>;
712 def MOVHPSrm : PSI<0x16, MRMSrcMem,
713 (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
714 "movhps\t{$src2, $dst|$dst, $src2}",
716 (v4f32 (vector_shuffle VR128:$src1,
717 (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2)))),
718 MOVHP_shuffle_mask)))]>;
720 } // Constraints = "$src1 = $dst"
723 def MOVLPSmr : PSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
724 "movlps\t{$src, $dst|$dst, $src}",
725 [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
726 (iPTR 0))), addr:$dst)]>;
728 // v2f64 extract element 1 is always custom lowered to unpack high to low
729 // and extract element 0 so the non-store version isn't too horrible.
730 def MOVHPSmr : PSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
731 "movhps\t{$src, $dst|$dst, $src}",
732 [(store (f64 (vector_extract
733 (v2f64 (vector_shuffle
734 (bc_v2f64 (v4f32 VR128:$src)), (undef),
                                            UNPCKH_shuffle_mask)), (iPTR 0))),
                             addr:$dst)]>;
738 let Constraints = "$src1 = $dst" in {
739 let AddedComplexity = 20 in {
740 def MOVLHPSrr : PSI<0x16, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
741 "movlhps\t{$src2, $dst|$dst, $src2}",
743 (v4f32 (vector_shuffle VR128:$src1, VR128:$src2,
744 MOVHP_shuffle_mask)))]>;
746 def MOVHLPSrr : PSI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
747 "movhlps\t{$src2, $dst|$dst, $src2}",
749 (v4f32 (vector_shuffle VR128:$src1, VR128:$src2,
750 MOVHLPS_shuffle_mask)))]>;
752 } // Constraints = "$src1 = $dst"
754 let AddedComplexity = 20 in
755 def : Pat<(v4f32 (vector_shuffle VR128:$src, (undef), MOVDDUP_shuffle_mask)),
756 (MOVLHPSrr VR128:$src, VR128:$src)>, Requires<[HasSSE1]>;
763 /// sse1_fp_unop_rm - SSE1 unops come in both scalar and vector forms.
765 /// In addition, we also have a special variant of the scalar form here to
766 /// represent the associated intrinsic operation. This form is unlike the
767 /// plain scalar form, in that it takes an entire vector (instead of a
768 /// scalar) and leaves the top elements undefined.
770 /// And, we have a special variant form for a full-vector intrinsic form.
772 /// These four forms can each have a reg or a mem operand, so there are a
773 /// total of eight "instructions".
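/// For example, "defm SQRT : sse1_fp_unop_rm<0x51, ...>" below produces
/// SQRTSSr, SQRTSSm, SQRTPSr, SQRTPSm, SQRTSSr_Int, SQRTSSm_Int, SQRTPSr_Int
/// and SQRTPSm_Int (names follow from defm prefixing; shown as an
/// illustration).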
multiclass sse1_fp_unop_rm<bits<8> opc, string OpcodeStr,
                           SDNode OpNode, Intrinsic F32Int,
                           Intrinsic V4F32Int,
                           bit Commutable = 0> {
780 // Scalar operation, reg.
781 def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
782 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
783 [(set FR32:$dst, (OpNode FR32:$src))]> {
784 let isCommutable = Commutable;
787 // Scalar operation, mem.
788 def SSm : SSI<opc, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
789 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
790 [(set FR32:$dst, (OpNode (load addr:$src)))]>;
792 // Vector operation, reg.
793 def PSr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
794 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
795 [(set VR128:$dst, (v4f32 (OpNode VR128:$src)))]> {
796 let isCommutable = Commutable;
799 // Vector operation, mem.
800 def PSm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
801 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
802 [(set VR128:$dst, (OpNode (memopv4f32 addr:$src)))]>;
804 // Intrinsic operation, reg.
805 def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
806 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
807 [(set VR128:$dst, (F32Int VR128:$src))]> {
808 let isCommutable = Commutable;
811 // Intrinsic operation, mem.
812 def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins ssmem:$src),
813 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
814 [(set VR128:$dst, (F32Int sse_load_f32:$src))]>;
816 // Vector intrinsic operation, reg
817 def PSr_Int : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
818 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
819 [(set VR128:$dst, (V4F32Int VR128:$src))]> {
820 let isCommutable = Commutable;
823 // Vector intrinsic operation, mem
824 def PSm_Int : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
825 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
826 [(set VR128:$dst, (V4F32Int (memopv4f32 addr:$src)))]>;
830 defm SQRT : sse1_fp_unop_rm<0x51, "sqrt", fsqrt,
831 int_x86_sse_sqrt_ss, int_x86_sse_sqrt_ps>;
833 // Reciprocal approximations. Note that these typically require refinement
834 // in order to obtain suitable precision.
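// For reference (not something these patterns emit): one Newton-Raphson step
// refines the hardware estimate x0 for input d as
//   rcp:   x1 = x0 * (2.0 - d * x0)
//   rsqrt: x1 = x0 * (1.5 - 0.5 * d * x0 * x0)
// roughly doubling the number of accurate bits.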
835 defm RSQRT : sse1_fp_unop_rm<0x52, "rsqrt", X86frsqrt,
836 int_x86_sse_rsqrt_ss, int_x86_sse_rsqrt_ps>;
837 defm RCP : sse1_fp_unop_rm<0x53, "rcp", X86frcp,
838 int_x86_sse_rcp_ss, int_x86_sse_rcp_ps>;
841 let Constraints = "$src1 = $dst" in {
842 let isCommutable = 1 in {
843 def ANDPSrr : PSI<0x54, MRMSrcReg,
844 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
845 "andps\t{$src2, $dst|$dst, $src2}",
846 [(set VR128:$dst, (v2i64
847 (and VR128:$src1, VR128:$src2)))]>;
848 def ORPSrr : PSI<0x56, MRMSrcReg,
849 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
850 "orps\t{$src2, $dst|$dst, $src2}",
851 [(set VR128:$dst, (v2i64
852 (or VR128:$src1, VR128:$src2)))]>;
853 def XORPSrr : PSI<0x57, MRMSrcReg,
854 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
855 "xorps\t{$src2, $dst|$dst, $src2}",
856 [(set VR128:$dst, (v2i64
                                         (xor VR128:$src1, VR128:$src2)))]>;
}
860 def ANDPSrm : PSI<0x54, MRMSrcMem,
861 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
862 "andps\t{$src2, $dst|$dst, $src2}",
863 [(set VR128:$dst, (and (bc_v2i64 (v4f32 VR128:$src1)),
864 (memopv2i64 addr:$src2)))]>;
865 def ORPSrm : PSI<0x56, MRMSrcMem,
866 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
867 "orps\t{$src2, $dst|$dst, $src2}",
868 [(set VR128:$dst, (or (bc_v2i64 (v4f32 VR128:$src1)),
869 (memopv2i64 addr:$src2)))]>;
870 def XORPSrm : PSI<0x57, MRMSrcMem,
871 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
872 "xorps\t{$src2, $dst|$dst, $src2}",
873 [(set VR128:$dst, (xor (bc_v2i64 (v4f32 VR128:$src1)),
874 (memopv2i64 addr:$src2)))]>;
875 def ANDNPSrr : PSI<0x55, MRMSrcReg,
876 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
877 "andnps\t{$src2, $dst|$dst, $src2}",
879 (v2i64 (and (xor VR128:$src1,
880 (bc_v2i64 (v4i32 immAllOnesV))),
882 def ANDNPSrm : PSI<0x55, MRMSrcMem,
883 (outs VR128:$dst), (ins VR128:$src1,f128mem:$src2),
884 "andnps\t{$src2, $dst|$dst, $src2}",
886 (v2i64 (and (xor (bc_v2i64 (v4f32 VR128:$src1)),
887 (bc_v2i64 (v4i32 immAllOnesV))),
888 (memopv2i64 addr:$src2))))]>;
891 let Constraints = "$src1 = $dst" in {
892 def CMPPSrri : PSIi8<0xC2, MRMSrcReg,
893 (outs VR128:$dst), (ins VR128:$src1, VR128:$src, SSECC:$cc),
894 "cmp${cc}ps\t{$src, $dst|$dst, $src}",
895 [(set VR128:$dst, (int_x86_sse_cmp_ps VR128:$src1,
896 VR128:$src, imm:$cc))]>;
897 def CMPPSrmi : PSIi8<0xC2, MRMSrcMem,
898 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src, SSECC:$cc),
899 "cmp${cc}ps\t{$src, $dst|$dst, $src}",
900 [(set VR128:$dst, (int_x86_sse_cmp_ps VR128:$src1,
                                            (memop addr:$src), imm:$cc))]>;
}
903 def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
904 (CMPPSrri VR128:$src1, VR128:$src2, imm:$cc)>;
905 def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), (memop addr:$src2), imm:$cc)),
906 (CMPPSrmi VR128:$src1, addr:$src2, imm:$cc)>;
908 // Shuffle and unpack instructions
909 let Constraints = "$src1 = $dst" in {
910 let isConvertibleToThreeAddress = 1 in // Convert to pshufd
911 def SHUFPSrri : PSIi8<0xC6, MRMSrcReg,
912 (outs VR128:$dst), (ins VR128:$src1,
913 VR128:$src2, i32i8imm:$src3),
914 "shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}",
916 (v4f32 (vector_shuffle
917 VR128:$src1, VR128:$src2,
918 SHUFP_shuffle_mask:$src3)))]>;
919 def SHUFPSrmi : PSIi8<0xC6, MRMSrcMem,
920 (outs VR128:$dst), (ins VR128:$src1,
921 f128mem:$src2, i32i8imm:$src3),
922 "shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}",
924 (v4f32 (vector_shuffle
925 VR128:$src1, (memopv4f32 addr:$src2),
926 SHUFP_shuffle_mask:$src3)))]>;
928 let AddedComplexity = 10 in {
929 def UNPCKHPSrr : PSI<0x15, MRMSrcReg,
930 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
931 "unpckhps\t{$src2, $dst|$dst, $src2}",
933 (v4f32 (vector_shuffle
934 VR128:$src1, VR128:$src2,
935 UNPCKH_shuffle_mask)))]>;
936 def UNPCKHPSrm : PSI<0x15, MRMSrcMem,
937 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
938 "unpckhps\t{$src2, $dst|$dst, $src2}",
940 (v4f32 (vector_shuffle
941 VR128:$src1, (memopv4f32 addr:$src2),
942 UNPCKH_shuffle_mask)))]>;
944 def UNPCKLPSrr : PSI<0x14, MRMSrcReg,
945 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
946 "unpcklps\t{$src2, $dst|$dst, $src2}",
948 (v4f32 (vector_shuffle
949 VR128:$src1, VR128:$src2,
950 UNPCKL_shuffle_mask)))]>;
951 def UNPCKLPSrm : PSI<0x14, MRMSrcMem,
952 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
953 "unpcklps\t{$src2, $dst|$dst, $src2}",
955 (v4f32 (vector_shuffle
956 VR128:$src1, (memopv4f32 addr:$src2),
957 UNPCKL_shuffle_mask)))]>;
959 } // Constraints = "$src1 = $dst"
962 def MOVMSKPSrr : PSI<0x50, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
963 "movmskps\t{$src, $dst|$dst, $src}",
964 [(set GR32:$dst, (int_x86_sse_movmsk_ps VR128:$src))]>;
965 def MOVMSKPDrr : PSI<0x50, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
966 "movmskpd\t{$src, $dst|$dst, $src}",
967 [(set GR32:$dst, (int_x86_sse2_movmsk_pd VR128:$src))]>;
969 // Prefetch intrinsic.
970 def PREFETCHT0 : PSI<0x18, MRM1m, (outs), (ins i8mem:$src),
971 "prefetcht0\t$src", [(prefetch addr:$src, imm, (i32 3))]>;
972 def PREFETCHT1 : PSI<0x18, MRM2m, (outs), (ins i8mem:$src),
973 "prefetcht1\t$src", [(prefetch addr:$src, imm, (i32 2))]>;
974 def PREFETCHT2 : PSI<0x18, MRM3m, (outs), (ins i8mem:$src),
975 "prefetcht2\t$src", [(prefetch addr:$src, imm, (i32 1))]>;
976 def PREFETCHNTA : PSI<0x18, MRM0m, (outs), (ins i8mem:$src),
977 "prefetchnta\t$src", [(prefetch addr:$src, imm, (i32 0))]>;
979 // Non-temporal stores
980 def MOVNTPSmr : PSI<0x2B, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
981 "movntps\t{$src, $dst|$dst, $src}",
982 [(int_x86_sse_movnt_ps addr:$dst, VR128:$src)]>;
984 // Load, store, and memory fence
985 def SFENCE : PSI<0xAE, MRM7m, (outs), (ins), "sfence", [(int_x86_sse_sfence)]>;
988 def LDMXCSR : PSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
989 "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>;
990 def STMXCSR : PSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
991 "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>;
993 // Alias instructions that map zero vector to pxor / xorp* for sse.
994 // We set canFoldAsLoad because this can be converted to a constant-pool
995 // load of an all-zeros value if folding it would be beneficial.
996 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1 in
def V_SET0 : PSI<0x57, MRMInitReg, (outs VR128:$dst), (ins),
                 "xorps\t$dst, $dst",
                 [(set VR128:$dst, (v4i32 immAllZerosV))]>;
1001 let Predicates = [HasSSE1] in {
1002 def : Pat<(v2i64 immAllZerosV), (V_SET0)>;
1003 def : Pat<(v8i16 immAllZerosV), (V_SET0)>;
1004 def : Pat<(v16i8 immAllZerosV), (V_SET0)>;
1005 def : Pat<(v2f64 immAllZerosV), (V_SET0)>;
  def : Pat<(v4f32 immAllZerosV), (V_SET0)>;
}
1009 // FR32 to 128-bit vector conversion.
1010 let isAsCheapAsAMove = 1 in
1011 def MOVSS2PSrr : SSI<0x10, MRMSrcReg, (outs VR128:$dst), (ins FR32:$src),
1012 "movss\t{$src, $dst|$dst, $src}",
1014 (v4f32 (scalar_to_vector FR32:$src)))]>;
1015 def MOVSS2PSrm : SSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f32mem:$src),
1016 "movss\t{$src, $dst|$dst, $src}",
1018 (v4f32 (scalar_to_vector (loadf32 addr:$src))))]>;
// FIXME: may not be able to eliminate this movss with coalescing, the src and
// dest register classes are different. We really want to write this pattern
// like this:
// def : Pat<(f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
//           (f32 FR32:$src)>;
1025 let isAsCheapAsAMove = 1 in
1026 def MOVPS2SSrr : SSI<0x10, MRMSrcReg, (outs FR32:$dst), (ins VR128:$src),
1027 "movss\t{$src, $dst|$dst, $src}",
1028 [(set FR32:$dst, (vector_extract (v4f32 VR128:$src),
1030 def MOVPS2SSmr : SSI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, VR128:$src),
1031 "movss\t{$src, $dst|$dst, $src}",
1032 [(store (f32 (vector_extract (v4f32 VR128:$src),
1033 (iPTR 0))), addr:$dst)]>;
1036 // Move to lower bits of a VR128, leaving upper bits alone.
1037 // Three operand (but two address) aliases.
1038 let Constraints = "$src1 = $dst" in {
1039 let neverHasSideEffects = 1 in
1040 def MOVLSS2PSrr : SSI<0x10, MRMSrcReg,
1041 (outs VR128:$dst), (ins VR128:$src1, FR32:$src2),
1042 "movss\t{$src2, $dst|$dst, $src2}", []>;
1044 let AddedComplexity = 15 in
1045 def MOVLPSrr : SSI<0x10, MRMSrcReg,
1046 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1047 "movss\t{$src2, $dst|$dst, $src2}",
1049 (v4f32 (vector_shuffle VR128:$src1, VR128:$src2,
1050 MOVL_shuffle_mask)))]>;
1053 // Move to lower bits of a VR128 and zeroing upper bits.
1054 // Loading from memory automatically zeroing upper bits.
1055 let AddedComplexity = 20 in
1056 def MOVZSS2PSrm : SSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f32mem:$src),
1057 "movss\t{$src, $dst|$dst, $src}",
1058 [(set VR128:$dst, (v4f32 (X86vzmovl (v4f32 (scalar_to_vector
1059 (loadf32 addr:$src))))))]>;
1061 def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
1062 (MOVZSS2PSrm addr:$src)>;
1064 //===----------------------------------------------------------------------===//
1065 // SSE2 Instructions
1066 //===----------------------------------------------------------------------===//
1068 // Move Instructions
1069 let neverHasSideEffects = 1 in
1070 def MOVSDrr : SDI<0x10, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
1071 "movsd\t{$src, $dst|$dst, $src}", []>;
1072 let canFoldAsLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in
1073 def MOVSDrm : SDI<0x10, MRMSrcMem, (outs FR64:$dst), (ins f64mem:$src),
1074 "movsd\t{$src, $dst|$dst, $src}",
1075 [(set FR64:$dst, (loadf64 addr:$src))]>;
1076 def MOVSDmr : SDI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
1077 "movsd\t{$src, $dst|$dst, $src}",
1078 [(store FR64:$src, addr:$dst)]>;
1080 // Conversion instructions
1081 def CVTTSD2SIrr : SDI<0x2C, MRMSrcReg, (outs GR32:$dst), (ins FR64:$src),
1082 "cvttsd2si\t{$src, $dst|$dst, $src}",
1083 [(set GR32:$dst, (fp_to_sint FR64:$src))]>;
1084 def CVTTSD2SIrm : SDI<0x2C, MRMSrcMem, (outs GR32:$dst), (ins f64mem:$src),
1085 "cvttsd2si\t{$src, $dst|$dst, $src}",
1086 [(set GR32:$dst, (fp_to_sint (loadf64 addr:$src)))]>;
1087 def CVTSD2SSrr : SDI<0x5A, MRMSrcReg, (outs FR32:$dst), (ins FR64:$src),
1088 "cvtsd2ss\t{$src, $dst|$dst, $src}",
1089 [(set FR32:$dst, (fround FR64:$src))]>;
1090 def CVTSD2SSrm : SDI<0x5A, MRMSrcMem, (outs FR32:$dst), (ins f64mem:$src),
1091 "cvtsd2ss\t{$src, $dst|$dst, $src}",
1092 [(set FR32:$dst, (fround (loadf64 addr:$src)))]>;
1093 def CVTSI2SDrr : SDI<0x2A, MRMSrcReg, (outs FR64:$dst), (ins GR32:$src),
1094 "cvtsi2sd\t{$src, $dst|$dst, $src}",
1095 [(set FR64:$dst, (sint_to_fp GR32:$src))]>;
1096 def CVTSI2SDrm : SDI<0x2A, MRMSrcMem, (outs FR64:$dst), (ins i32mem:$src),
1097 "cvtsi2sd\t{$src, $dst|$dst, $src}",
1098 [(set FR64:$dst, (sint_to_fp (loadi32 addr:$src)))]>;
1100 // SSE2 instructions with XS prefix
1101 def CVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst), (ins FR32:$src),
1102 "cvtss2sd\t{$src, $dst|$dst, $src}",
1103 [(set FR64:$dst, (fextend FR32:$src))]>, XS,
1104 Requires<[HasSSE2]>;
1105 def CVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst), (ins f32mem:$src),
1106 "cvtss2sd\t{$src, $dst|$dst, $src}",
1107 [(set FR64:$dst, (extloadf32 addr:$src))]>, XS,
1108 Requires<[HasSSE2]>;
1110 // Match intrinsics which expect XMM operand(s).
1111 def Int_CVTSD2SIrr : SDI<0x2D, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
1112 "cvtsd2si\t{$src, $dst|$dst, $src}",
1113 [(set GR32:$dst, (int_x86_sse2_cvtsd2si VR128:$src))]>;
1114 def Int_CVTSD2SIrm : SDI<0x2D, MRMSrcMem, (outs GR32:$dst), (ins f128mem:$src),
1115 "cvtsd2si\t{$src, $dst|$dst, $src}",
1116 [(set GR32:$dst, (int_x86_sse2_cvtsd2si
1117 (load addr:$src)))]>;
// Match intrinsics which expect MM and XMM operand(s).
1120 def Int_CVTPD2PIrr : PDI<0x2D, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
1121 "cvtpd2pi\t{$src, $dst|$dst, $src}",
1122 [(set VR64:$dst, (int_x86_sse_cvtpd2pi VR128:$src))]>;
1123 def Int_CVTPD2PIrm : PDI<0x2D, MRMSrcMem, (outs VR64:$dst), (ins f128mem:$src),
1124 "cvtpd2pi\t{$src, $dst|$dst, $src}",
1125 [(set VR64:$dst, (int_x86_sse_cvtpd2pi
1126 (memop addr:$src)))]>;
1127 def Int_CVTTPD2PIrr: PDI<0x2C, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
1128 "cvttpd2pi\t{$src, $dst|$dst, $src}",
1129 [(set VR64:$dst, (int_x86_sse_cvttpd2pi VR128:$src))]>;
1130 def Int_CVTTPD2PIrm: PDI<0x2C, MRMSrcMem, (outs VR64:$dst), (ins f128mem:$src),
1131 "cvttpd2pi\t{$src, $dst|$dst, $src}",
1132 [(set VR64:$dst, (int_x86_sse_cvttpd2pi
1133 (memop addr:$src)))]>;
1134 def Int_CVTPI2PDrr : PDI<0x2A, MRMSrcReg, (outs VR128:$dst), (ins VR64:$src),
1135 "cvtpi2pd\t{$src, $dst|$dst, $src}",
1136 [(set VR128:$dst, (int_x86_sse_cvtpi2pd VR64:$src))]>;
1137 def Int_CVTPI2PDrm : PDI<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
1138 "cvtpi2pd\t{$src, $dst|$dst, $src}",
1139 [(set VR128:$dst, (int_x86_sse_cvtpi2pd
1140 (load addr:$src)))]>;
// Aliases for intrinsics
def Int_CVTTSD2SIrr : SDI<0x2C, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
                          "cvttsd2si\t{$src, $dst|$dst, $src}",
                          [(set GR32:$dst,
                            (int_x86_sse2_cvttsd2si VR128:$src))]>;
1147 def Int_CVTTSD2SIrm : SDI<0x2C, MRMSrcMem, (outs GR32:$dst), (ins f128mem:$src),
1148 "cvttsd2si\t{$src, $dst|$dst, $src}",
1149 [(set GR32:$dst, (int_x86_sse2_cvttsd2si
1150 (load addr:$src)))]>;
1152 // Comparison instructions
1153 let Constraints = "$src1 = $dst", neverHasSideEffects = 1 in {
1154 def CMPSDrr : SDIi8<0xC2, MRMSrcReg,
1155 (outs FR64:$dst), (ins FR64:$src1, FR64:$src, SSECC:$cc),
1156 "cmp${cc}sd\t{$src, $dst|$dst, $src}", []>;
1158 def CMPSDrm : SDIi8<0xC2, MRMSrcMem,
1159 (outs FR64:$dst), (ins FR64:$src1, f64mem:$src, SSECC:$cc),
1160 "cmp${cc}sd\t{$src, $dst|$dst, $src}", []>;
1163 let Defs = [EFLAGS] in {
1164 def UCOMISDrr: PDI<0x2E, MRMSrcReg, (outs), (ins FR64:$src1, FR64:$src2),
1165 "ucomisd\t{$src2, $src1|$src1, $src2}",
1166 [(X86cmp FR64:$src1, FR64:$src2), (implicit EFLAGS)]>;
1167 def UCOMISDrm: PDI<0x2E, MRMSrcMem, (outs), (ins FR64:$src1, f64mem:$src2),
1168 "ucomisd\t{$src2, $src1|$src1, $src2}",
1169 [(X86cmp FR64:$src1, (loadf64 addr:$src2)),
1170 (implicit EFLAGS)]>;
1171 } // Defs = [EFLAGS]
1173 // Aliases to match intrinsics which expect XMM operand(s).
1174 let Constraints = "$src1 = $dst" in {
1175 def Int_CMPSDrr : SDIi8<0xC2, MRMSrcReg,
1176 (outs VR128:$dst), (ins VR128:$src1, VR128:$src, SSECC:$cc),
1177 "cmp${cc}sd\t{$src, $dst|$dst, $src}",
1178 [(set VR128:$dst, (int_x86_sse2_cmp_sd VR128:$src1,
1179 VR128:$src, imm:$cc))]>;
1180 def Int_CMPSDrm : SDIi8<0xC2, MRMSrcMem,
1181 (outs VR128:$dst), (ins VR128:$src1, f64mem:$src, SSECC:$cc),
1182 "cmp${cc}sd\t{$src, $dst|$dst, $src}",
1183 [(set VR128:$dst, (int_x86_sse2_cmp_sd VR128:$src1,
                                           (load addr:$src), imm:$cc))]>;
}
1187 let Defs = [EFLAGS] in {
1188 def Int_UCOMISDrr: PDI<0x2E, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
1189 "ucomisd\t{$src2, $src1|$src1, $src2}",
1190 [(X86ucomi (v2f64 VR128:$src1), (v2f64 VR128:$src2)),
1191 (implicit EFLAGS)]>;
1192 def Int_UCOMISDrm: PDI<0x2E, MRMSrcMem, (outs),(ins VR128:$src1, f128mem:$src2),
1193 "ucomisd\t{$src2, $src1|$src1, $src2}",
1194 [(X86ucomi (v2f64 VR128:$src1), (load addr:$src2)),
1195 (implicit EFLAGS)]>;
1197 def Int_COMISDrr: PDI<0x2F, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
1198 "comisd\t{$src2, $src1|$src1, $src2}",
1199 [(X86comi (v2f64 VR128:$src1), (v2f64 VR128:$src2)),
1200 (implicit EFLAGS)]>;
1201 def Int_COMISDrm: PDI<0x2F, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
1202 "comisd\t{$src2, $src1|$src1, $src2}",
1203 [(X86comi (v2f64 VR128:$src1), (load addr:$src2)),
1204 (implicit EFLAGS)]>;
1205 } // Defs = [EFLAGS]
// Aliases of packed SSE2 instructions for scalar use. These all have names
// that start with 'Fs'.
1210 // Alias instructions that map fld0 to pxor for sse.
1211 let isReMaterializable = 1, isAsCheapAsAMove = 1 in
1212 def FsFLD0SD : I<0xEF, MRMInitReg, (outs FR64:$dst), (ins),
1213 "pxor\t$dst, $dst", [(set FR64:$dst, fpimm0)]>,
1214 Requires<[HasSSE2]>, TB, OpSize;
// Alias instruction to do FR64 reg-to-reg copy using movapd. Upper bits are
// disregarded.
1218 let neverHasSideEffects = 1 in
1219 def FsMOVAPDrr : PDI<0x28, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
1220 "movapd\t{$src, $dst|$dst, $src}", []>;
// Alias instruction to load FR64 from f128mem using movapd. Upper bits are
// disregarded.
1224 let canFoldAsLoad = 1 in
1225 def FsMOVAPDrm : PDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
1226 "movapd\t{$src, $dst|$dst, $src}",
1227 [(set FR64:$dst, (alignedloadfsf64 addr:$src))]>;
1229 // Alias bitwise logical operations using SSE logical ops on packed FP values.
1230 let Constraints = "$src1 = $dst" in {
1231 let isCommutable = 1 in {
1232 def FsANDPDrr : PDI<0x54, MRMSrcReg, (outs FR64:$dst),
1233 (ins FR64:$src1, FR64:$src2),
1234 "andpd\t{$src2, $dst|$dst, $src2}",
1235 [(set FR64:$dst, (X86fand FR64:$src1, FR64:$src2))]>;
1236 def FsORPDrr : PDI<0x56, MRMSrcReg, (outs FR64:$dst),
1237 (ins FR64:$src1, FR64:$src2),
1238 "orpd\t{$src2, $dst|$dst, $src2}",
1239 [(set FR64:$dst, (X86for FR64:$src1, FR64:$src2))]>;
1240 def FsXORPDrr : PDI<0x57, MRMSrcReg, (outs FR64:$dst),
1241 (ins FR64:$src1, FR64:$src2),
1242 "xorpd\t{$src2, $dst|$dst, $src2}",
                     [(set FR64:$dst, (X86fxor FR64:$src1, FR64:$src2))]>;
}
1246 def FsANDPDrm : PDI<0x54, MRMSrcMem, (outs FR64:$dst),
1247 (ins FR64:$src1, f128mem:$src2),
1248 "andpd\t{$src2, $dst|$dst, $src2}",
1249 [(set FR64:$dst, (X86fand FR64:$src1,
1250 (memopfsf64 addr:$src2)))]>;
1251 def FsORPDrm : PDI<0x56, MRMSrcMem, (outs FR64:$dst),
1252 (ins FR64:$src1, f128mem:$src2),
1253 "orpd\t{$src2, $dst|$dst, $src2}",
1254 [(set FR64:$dst, (X86for FR64:$src1,
1255 (memopfsf64 addr:$src2)))]>;
1256 def FsXORPDrm : PDI<0x57, MRMSrcMem, (outs FR64:$dst),
1257 (ins FR64:$src1, f128mem:$src2),
1258 "xorpd\t{$src2, $dst|$dst, $src2}",
1259 [(set FR64:$dst, (X86fxor FR64:$src1,
1260 (memopfsf64 addr:$src2)))]>;
1262 let neverHasSideEffects = 1 in {
1263 def FsANDNPDrr : PDI<0x55, MRMSrcReg,
1264 (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
1265 "andnpd\t{$src2, $dst|$dst, $src2}", []>;
1267 def FsANDNPDrm : PDI<0x55, MRMSrcMem,
1268 (outs FR64:$dst), (ins FR64:$src1, f128mem:$src2),
1269 "andnpd\t{$src2, $dst|$dst, $src2}", []>;
1273 /// basic_sse2_fp_binop_rm - SSE2 binops come in both scalar and vector forms.
1275 /// In addition, we also have a special variant of the scalar form here to
1276 /// represent the associated intrinsic operation. This form is unlike the
1277 /// plain scalar form, in that it takes an entire vector (instead of a scalar)
1278 /// and leaves the top elements undefined.
1280 /// These three forms can each be reg+reg or reg+mem, so there are a total of
1281 /// six "instructions".
1283 let Constraints = "$src1 = $dst" in {
1284 multiclass basic_sse2_fp_binop_rm<bits<8> opc, string OpcodeStr,
1285 SDNode OpNode, Intrinsic F64Int,
1286 bit Commutable = 0> {
1287 // Scalar operation, reg+reg.
1288 def SDrr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
1289 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
1290 [(set FR64:$dst, (OpNode FR64:$src1, FR64:$src2))]> {
1291 let isCommutable = Commutable;
1294 // Scalar operation, reg+mem.
1295 def SDrm : SDI<opc, MRMSrcMem, (outs FR64:$dst),
1296 (ins FR64:$src1, f64mem:$src2),
1297 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
1298 [(set FR64:$dst, (OpNode FR64:$src1, (load addr:$src2)))]>;
1300 // Vector operation, reg+reg.
1301 def PDrr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
1302 (ins VR128:$src1, VR128:$src2),
1303 !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
1304 [(set VR128:$dst, (v2f64 (OpNode VR128:$src1, VR128:$src2)))]> {
1305 let isCommutable = Commutable;
1308 // Vector operation, reg+mem.
1309 def PDrm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
1310 (ins VR128:$src1, f128mem:$src2),
1311 !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
1312 [(set VR128:$dst, (OpNode VR128:$src1, (memopv2f64 addr:$src2)))]>;
1314 // Intrinsic operation, reg+reg.
1315 def SDrr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst),
1316 (ins VR128:$src1, VR128:$src2),
1317 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
1318 [(set VR128:$dst, (F64Int VR128:$src1, VR128:$src2))]> {
1319 let isCommutable = Commutable;
1322 // Intrinsic operation, reg+mem.
1323 def SDrm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst),
1324 (ins VR128:$src1, sdmem:$src2),
1325 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
1326 [(set VR128:$dst, (F64Int VR128:$src1,
1327 sse_load_f64:$src2))]>;
1331 // Arithmetic instructions
1332 defm ADD : basic_sse2_fp_binop_rm<0x58, "add", fadd, int_x86_sse2_add_sd, 1>;
1333 defm MUL : basic_sse2_fp_binop_rm<0x59, "mul", fmul, int_x86_sse2_mul_sd, 1>;
1334 defm SUB : basic_sse2_fp_binop_rm<0x5C, "sub", fsub, int_x86_sse2_sub_sd>;
1335 defm DIV : basic_sse2_fp_binop_rm<0x5E, "div", fdiv, int_x86_sse2_div_sd>;
1337 /// sse2_fp_binop_rm - Other SSE2 binops
1339 /// This multiclass is like basic_sse2_fp_binop_rm, with the addition of
1340 /// instructions for a full-vector intrinsic form. Operations that map
1341 /// onto C operators don't use this form since they just use the plain
1342 /// vector form instead of having a separate vector intrinsic form.
1344 /// This provides a total of eight "instructions".
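/// Illustration: relative to basic_sse2_fp_binop_rm, this multiclass also
/// creates the packed-intrinsic pair, e.g. MAXPDrr_Int / MAXPDrm_Int matching
/// int_x86_sse2_max_pd, which is why MAX and MIN below use it while the C
/// operator binops (ADD/MUL/SUB/DIV) do not.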
1346 let Constraints = "$src1 = $dst" in {
multiclass sse2_fp_binop_rm<bits<8> opc, string OpcodeStr,
                            SDNode OpNode, Intrinsic F64Int,
                            Intrinsic V2F64Int,
                            bit Commutable = 0> {
1353 // Scalar operation, reg+reg.
1354 def SDrr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
1355 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
1356 [(set FR64:$dst, (OpNode FR64:$src1, FR64:$src2))]> {
1357 let isCommutable = Commutable;
1360 // Scalar operation, reg+mem.
1361 def SDrm : SDI<opc, MRMSrcMem, (outs FR64:$dst),
1362 (ins FR64:$src1, f64mem:$src2),
1363 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
1364 [(set FR64:$dst, (OpNode FR64:$src1, (load addr:$src2)))]>;
1366 // Vector operation, reg+reg.
1367 def PDrr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
1368 (ins VR128:$src1, VR128:$src2),
1369 !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
1370 [(set VR128:$dst, (v2f64 (OpNode VR128:$src1, VR128:$src2)))]> {
1371 let isCommutable = Commutable;
1374 // Vector operation, reg+mem.
1375 def PDrm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
1376 (ins VR128:$src1, f128mem:$src2),
1377 !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
1378 [(set VR128:$dst, (OpNode VR128:$src1, (memopv2f64 addr:$src2)))]>;
1380 // Intrinsic operation, reg+reg.
1381 def SDrr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst),
1382 (ins VR128:$src1, VR128:$src2),
1383 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
1384 [(set VR128:$dst, (F64Int VR128:$src1, VR128:$src2))]> {
1385 let isCommutable = Commutable;
1388 // Intrinsic operation, reg+mem.
1389 def SDrm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst),
1390 (ins VR128:$src1, sdmem:$src2),
1391 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
1392 [(set VR128:$dst, (F64Int VR128:$src1,
1393 sse_load_f64:$src2))]>;
1395 // Vector intrinsic operation, reg+reg.
1396 def PDrr_Int : PDI<opc, MRMSrcReg, (outs VR128:$dst),
1397 (ins VR128:$src1, VR128:$src2),
1398 !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
1399 [(set VR128:$dst, (V2F64Int VR128:$src1, VR128:$src2))]> {
1400 let isCommutable = Commutable;
1403 // Vector intrinsic operation, reg+mem.
1404 def PDrm_Int : PDI<opc, MRMSrcMem, (outs VR128:$dst),
1405 (ins VR128:$src1, f128mem:$src2),
1406 !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
1407 [(set VR128:$dst, (V2F64Int VR128:$src1,
1408 (memopv2f64 addr:$src2)))]>;
1412 defm MAX : sse2_fp_binop_rm<0x5F, "max", X86fmax,
1413 int_x86_sse2_max_sd, int_x86_sse2_max_pd>;
1414 defm MIN : sse2_fp_binop_rm<0x5D, "min", X86fmin,
1415 int_x86_sse2_min_sd, int_x86_sse2_min_pd>;
1417 //===----------------------------------------------------------------------===//
// SSE packed FP Instructions
//===----------------------------------------------------------------------===//
1420 // Move Instructions
1421 let neverHasSideEffects = 1 in
1422 def MOVAPDrr : PDI<0x28, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1423 "movapd\t{$src, $dst|$dst, $src}", []>;
1424 let canFoldAsLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in
1425 def MOVAPDrm : PDI<0x28, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1426 "movapd\t{$src, $dst|$dst, $src}",
1427 [(set VR128:$dst, (alignedloadv2f64 addr:$src))]>;
1429 def MOVAPDmr : PDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
1430 "movapd\t{$src, $dst|$dst, $src}",
1431 [(alignedstore (v2f64 VR128:$src), addr:$dst)]>;
1433 let neverHasSideEffects = 1 in
1434 def MOVUPDrr : PDI<0x10, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1435 "movupd\t{$src, $dst|$dst, $src}", []>;
1436 let canFoldAsLoad = 1 in
1437 def MOVUPDrm : PDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1438 "movupd\t{$src, $dst|$dst, $src}",
1439 [(set VR128:$dst, (loadv2f64 addr:$src))]>;
1440 def MOVUPDmr : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
1441 "movupd\t{$src, $dst|$dst, $src}",
1442 [(store (v2f64 VR128:$src), addr:$dst)]>;
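// movapd requires a 16-byte-aligned memory operand and faults otherwise, so
// only the MOVAPD forms are matched from alignedload/alignedstore; movupd
// accepts unaligned addresses.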
1444 // Intrinsic forms of MOVUPD load and store
1445 def MOVUPDrm_Int : PDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1446 "movupd\t{$src, $dst|$dst, $src}",
1447 [(set VR128:$dst, (int_x86_sse2_loadu_pd addr:$src))]>;
1448 def MOVUPDmr_Int : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
1449 "movupd\t{$src, $dst|$dst, $src}",
1450 [(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>;
1452 let Constraints = "$src1 = $dst" in {
1453 let AddedComplexity = 20 in {
1454 def MOVLPDrm : PDI<0x12, MRMSrcMem,
1455 (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
1456 "movlpd\t{$src2, $dst|$dst, $src2}",
1458 (v2f64 (vector_shuffle VR128:$src1,
1459 (scalar_to_vector (loadf64 addr:$src2)),
1460 MOVLP_shuffle_mask)))]>;
1461 def MOVHPDrm : PDI<0x16, MRMSrcMem,
1462 (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
1463 "movhpd\t{$src2, $dst|$dst, $src2}",
1465 (v2f64 (vector_shuffle VR128:$src1,
1466 (scalar_to_vector (loadf64 addr:$src2)),
1467 MOVHP_shuffle_mask)))]>;
1468 } // AddedComplexity
1469 } // Constraints = "$src1 = $dst"
1471 def MOVLPDmr : PDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
1472 "movlpd\t{$src, $dst|$dst, $src}",
1473 [(store (f64 (vector_extract (v2f64 VR128:$src),
1474 (iPTR 0))), addr:$dst)]>;
1476 // v2f64 extract element 1 is always custom lowered to unpack high to low
1477 // and extract element 0 so the non-store version isn't too horrible.
1478 def MOVHPDmr : PDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
1479 "movhpd\t{$src, $dst|$dst, $src}",
1480 [(store (f64 (vector_extract
1481 (v2f64 (vector_shuffle VR128:$src, (undef),
1482 UNPCKH_shuffle_mask)), (iPTR 0))),
1483 addr:$dst)]>;
1485 // SSE2 instructions without OpSize prefix
1486 def Int_CVTDQ2PSrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1487 "cvtdq2ps\t{$src, $dst|$dst, $src}",
1488 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))]>,
1489 TB, Requires<[HasSSE2]>;
1490 def Int_CVTDQ2PSrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
1491 "cvtdq2ps\t{$src, $dst|$dst, $src}",
1492 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps
1493 (bitconvert (memopv2i64 addr:$src))))]>,
1494 TB, Requires<[HasSSE2]>;
1496 // SSE2 instructions with XS prefix
1497 def Int_CVTDQ2PDrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1498 "cvtdq2pd\t{$src, $dst|$dst, $src}",
1499 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))]>,
1500 XS, Requires<[HasSSE2]>;
1501 def Int_CVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
1502 "cvtdq2pd\t{$src, $dst|$dst, $src}",
1503 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd
1504 (bitconvert (memopv2i64 addr:$src))))]>,
1505 XS, Requires<[HasSSE2]>;
1507 def Int_CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1508 "cvtps2dq\t{$src, $dst|$dst, $src}",
1509 [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))]>;
1510 def Int_CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1511 "cvtps2dq\t{$src, $dst|$dst, $src}",
1512 [(set VR128:$dst, (int_x86_sse2_cvtps2dq
1513 (memop addr:$src)))]>;
1514 // SSE2 packed instructions with XS prefix
1515 def Int_CVTTPS2DQrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1516 "cvttps2dq\t{$src, $dst|$dst, $src}",
1517 [(set VR128:$dst, (int_x86_sse2_cvttps2dq VR128:$src))]>,
1518 XS, Requires<[HasSSE2]>;
1519 def Int_CVTTPS2DQrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1520 "cvttps2dq\t{$src, $dst|$dst, $src}",
1521 [(set VR128:$dst, (int_x86_sse2_cvttps2dq
1522 (memop addr:$src)))]>,
1523 XS, Requires<[HasSSE2]>;
1525 // SSE2 packed instructions with XD prefix
1526 def Int_CVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1527 "cvtpd2dq\t{$src, $dst|$dst, $src}",
1528 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
1529 XD, Requires<[HasSSE2]>;
1530 def Int_CVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1531 "cvtpd2dq\t{$src, $dst|$dst, $src}",
1532 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq
1533 (memop addr:$src)))]>,
1534 XD, Requires<[HasSSE2]>;
1536 def Int_CVTTPD2DQrr : PDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1537 "cvttpd2dq\t{$src, $dst|$dst, $src}",
1538 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))]>;
1539 def Int_CVTTPD2DQrm : PDI<0xE6, MRMSrcMem, (outs VR128:$dst),(ins f128mem:$src),
1540 "cvttpd2dq\t{$src, $dst|$dst, $src}",
1541 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
1542 (memop addr:$src)))]>;
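// The cvtt* forms convert with truncation toward zero; the plain cvt* forms
// round according to the current rounding mode in MXCSR.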
1544 // SSE2 instructions without OpSize prefix
1545 def Int_CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1546 "cvtps2pd\t{$src, $dst|$dst, $src}",
1547 [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>,
1548 TB, Requires<[HasSSE2]>;
1549 def Int_CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
1550 "cvtps2pd\t{$src, $dst|$dst, $src}",
1551 [(set VR128:$dst, (int_x86_sse2_cvtps2pd
1552 (load addr:$src)))]>,
1553 TB, Requires<[HasSSE2]>;
1555 def Int_CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1556 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1557 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>;
1558 def Int_CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1559 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1560 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps
1561 (memop addr:$src)))]>;
1563 // Match intrinsics which expect XMM operand(s).
1564 // Aliases for intrinsics
1565 let Constraints = "$src1 = $dst" in {
1566 def Int_CVTSI2SDrr: SDI<0x2A, MRMSrcReg,
1567 (outs VR128:$dst), (ins VR128:$src1, GR32:$src2),
1568 "cvtsi2sd\t{$src2, $dst|$dst, $src2}",
1569 [(set VR128:$dst, (int_x86_sse2_cvtsi2sd VR128:$src1,
1570 GR32:$src2))]>;
1571 def Int_CVTSI2SDrm: SDI<0x2A, MRMSrcMem,
1572 (outs VR128:$dst), (ins VR128:$src1, i32mem:$src2),
1573 "cvtsi2sd\t{$src2, $dst|$dst, $src2}",
1574 [(set VR128:$dst, (int_x86_sse2_cvtsi2sd VR128:$src1,
1575 (loadi32 addr:$src2)))]>;
1576 def Int_CVTSD2SSrr: SDI<0x5A, MRMSrcReg,
1577 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1578 "cvtsd2ss\t{$src2, $dst|$dst, $src2}",
1579 [(set VR128:$dst, (int_x86_sse2_cvtsd2ss VR128:$src1,
1580 VR128:$src2))]>;
1581 def Int_CVTSD2SSrm: SDI<0x5A, MRMSrcMem,
1582 (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
1583 "cvtsd2ss\t{$src2, $dst|$dst, $src2}",
1584 [(set VR128:$dst, (int_x86_sse2_cvtsd2ss VR128:$src1,
1585 (load addr:$src2)))]>;
1586 def Int_CVTSS2SDrr: I<0x5A, MRMSrcReg,
1587 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1588 "cvtss2sd\t{$src2, $dst|$dst, $src2}",
1589 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
1590 VR128:$src2))]>, XS,
1591 Requires<[HasSSE2]>;
1592 def Int_CVTSS2SDrm: I<0x5A, MRMSrcMem,
1593 (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2),
1594 "cvtss2sd\t{$src2, $dst|$dst, $src2}",
1595 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
1596 (load addr:$src2)))]>, XS,
1597 Requires<[HasSSE2]>;
1598 }
1602 /// sse2_fp_unop_rm - SSE2 unops come in both scalar and vector forms.
1604 /// In addition, we also have a special variant of the scalar form here to
1605 /// represent the associated intrinsic operation. This form is unlike the
1606 /// plain scalar form, in that it takes an entire vector (instead of a
1607 /// scalar) and leaves the top elements undefined.
1609 /// And, we have a special variant form for a full-vector intrinsic form.
1611 /// These four forms can each have a reg or a mem operand, so there are a
1612 /// total of eight "instructions".
1614 multiclass sse2_fp_unop_rm<bits<8> opc, string OpcodeStr,
1615 SDNode OpNode,
1616 Intrinsic F64Int,
1617 Intrinsic V2F64Int,
1618 bit Commutable = 0> {
1619 // Scalar operation, reg.
1620 def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
1621 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1622 [(set FR64:$dst, (OpNode FR64:$src))]> {
1623 let isCommutable = Commutable;
1624 }
1626 // Scalar operation, mem.
1627 def SDm : SDI<opc, MRMSrcMem, (outs FR64:$dst), (ins f64mem:$src),
1628 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1629 [(set FR64:$dst, (OpNode (load addr:$src)))]>;
1631 // Vector operation, reg.
1632 def PDr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1633 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1634 [(set VR128:$dst, (v2f64 (OpNode VR128:$src)))]> {
1635 let isCommutable = Commutable;
1636 }
1638 // Vector operation, mem.
1639 def PDm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1640 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1641 [(set VR128:$dst, (OpNode (memopv2f64 addr:$src)))]>;
1643 // Intrinsic operation, reg.
1644 def SDr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1645 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1646 [(set VR128:$dst, (F64Int VR128:$src))]> {
1647 let isCommutable = Commutable;
1648 }
1650 // Intrinsic operation, mem.
1651 def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins sdmem:$src),
1652 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1653 [(set VR128:$dst, (F64Int sse_load_f64:$src))]>;
1655 // Vector intrinsic operation, reg
1656 def PDr_Int : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1657 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1658 [(set VR128:$dst, (V2F64Int VR128:$src))]> {
1659 let isCommutable = Commutable;
1660 }
1662 // Vector intrinsic operation, mem
1663 def PDm_Int : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1664 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1665 [(set VR128:$dst, (V2F64Int (memopv2f64 addr:$src)))]>;
1666 }
1669 defm SQRT : sse2_fp_unop_rm<0x51, "sqrt", fsqrt,
1670 int_x86_sse2_sqrt_sd, int_x86_sse2_sqrt_pd>;
1672 // There are no f64 versions of the reciprocal approximation instructions.
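// Logical ops. There are no f64 vector and/or/xor nodes, so the packed-double
// logical instructions below are matched by bitcasting both operands to v2i64.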
1675 let Constraints = "$src1 = $dst" in {
1676 let isCommutable = 1 in {
1677 def ANDPDrr : PDI<0x54, MRMSrcReg,
1678 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1679 "andpd\t{$src2, $dst|$dst, $src2}",
1681 (and (bc_v2i64 (v2f64 VR128:$src1)),
1682 (bc_v2i64 (v2f64 VR128:$src2))))]>;
1683 def ORPDrr : PDI<0x56, MRMSrcReg,
1684 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1685 "orpd\t{$src2, $dst|$dst, $src2}",
1687 (or (bc_v2i64 (v2f64 VR128:$src1)),
1688 (bc_v2i64 (v2f64 VR128:$src2))))]>;
1689 def XORPDrr : PDI<0x57, MRMSrcReg,
1690 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1691 "xorpd\t{$src2, $dst|$dst, $src2}",
1693 (xor (bc_v2i64 (v2f64 VR128:$src1)),
1694 (bc_v2i64 (v2f64 VR128:$src2))))]>;
1695 }
1697 def ANDPDrm : PDI<0x54, MRMSrcMem,
1698 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
1699 "andpd\t{$src2, $dst|$dst, $src2}",
1701 (and (bc_v2i64 (v2f64 VR128:$src1)),
1702 (memopv2i64 addr:$src2)))]>;
1703 def ORPDrm : PDI<0x56, MRMSrcMem,
1704 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
1705 "orpd\t{$src2, $dst|$dst, $src2}",
1707 (or (bc_v2i64 (v2f64 VR128:$src1)),
1708 (memopv2i64 addr:$src2)))]>;
1709 def XORPDrm : PDI<0x57, MRMSrcMem,
1710 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
1711 "xorpd\t{$src2, $dst|$dst, $src2}",
1713 (xor (bc_v2i64 (v2f64 VR128:$src1)),
1714 (memopv2i64 addr:$src2)))]>;
1715 def ANDNPDrr : PDI<0x55, MRMSrcReg,
1716 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1717 "andnpd\t{$src2, $dst|$dst, $src2}",
1719 (and (vnot (bc_v2i64 (v2f64 VR128:$src1))),
1720 (bc_v2i64 (v2f64 VR128:$src2))))]>;
1721 def ANDNPDrm : PDI<0x55, MRMSrcMem,
1722 (outs VR128:$dst), (ins VR128:$src1,f128mem:$src2),
1723 "andnpd\t{$src2, $dst|$dst, $src2}",
1725 (and (vnot (bc_v2i64 (v2f64 VR128:$src1))),
1726 (memopv2i64 addr:$src2)))]>;
1727 }
1729 let Constraints = "$src1 = $dst" in {
1730 def CMPPDrri : PDIi8<0xC2, MRMSrcReg,
1731 (outs VR128:$dst), (ins VR128:$src1, VR128:$src, SSECC:$cc),
1732 "cmp${cc}pd\t{$src, $dst|$dst, $src}",
1733 [(set VR128:$dst, (int_x86_sse2_cmp_pd VR128:$src1,
1734 VR128:$src, imm:$cc))]>;
1735 def CMPPDrmi : PDIi8<0xC2, MRMSrcMem,
1736 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src, SSECC:$cc),
1737 "cmp${cc}pd\t{$src, $dst|$dst, $src}",
1738 [(set VR128:$dst, (int_x86_sse2_cmp_pd VR128:$src1,
1739 (memop addr:$src), imm:$cc))]>;
1740 }
1741 def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
1742 (CMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
1743 def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), (memop addr:$src2), imm:$cc)),
1744 (CMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;
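// cmppd writes an all-ones or all-zeros mask per element, which is why the
// integer-typed X86cmppd node above maps directly onto CMPPDrri/CMPPDrmi.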
1746 // Shuffle and unpack instructions
1747 let Constraints = "$src1 = $dst" in {
1748 def SHUFPDrri : PDIi8<0xC6, MRMSrcReg,
1749 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i8imm:$src3),
1750 "shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}",
1751 [(set VR128:$dst, (v2f64 (vector_shuffle
1752 VR128:$src1, VR128:$src2,
1753 SHUFP_shuffle_mask:$src3)))]>;
1754 def SHUFPDrmi : PDIi8<0xC6, MRMSrcMem,
1755 (outs VR128:$dst), (ins VR128:$src1,
1756 f128mem:$src2, i8imm:$src3),
1757 "shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}",
1759 (v2f64 (vector_shuffle
1760 VR128:$src1, (memopv2f64 addr:$src2),
1761 SHUFP_shuffle_mask:$src3)))]>;
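// The shufpd immediate picks result element 0 from $src1 (bit 0 selects its
// low or high double) and result element 1 from $src2 (bit 1).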
1763 let AddedComplexity = 10 in {
1764 def UNPCKHPDrr : PDI<0x15, MRMSrcReg,
1765 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1766 "unpckhpd\t{$src2, $dst|$dst, $src2}",
1768 (v2f64 (vector_shuffle
1769 VR128:$src1, VR128:$src2,
1770 UNPCKH_shuffle_mask)))]>;
1771 def UNPCKHPDrm : PDI<0x15, MRMSrcMem,
1772 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
1773 "unpckhpd\t{$src2, $dst|$dst, $src2}",
1775 (v2f64 (vector_shuffle
1776 VR128:$src1, (memopv2f64 addr:$src2),
1777 UNPCKH_shuffle_mask)))]>;
1779 def UNPCKLPDrr : PDI<0x14, MRMSrcReg,
1780 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1781 "unpcklpd\t{$src2, $dst|$dst, $src2}",
1783 (v2f64 (vector_shuffle
1784 VR128:$src1, VR128:$src2,
1785 UNPCKL_shuffle_mask)))]>;
1786 def UNPCKLPDrm : PDI<0x14, MRMSrcMem,
1787 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
1788 "unpcklpd\t{$src2, $dst|$dst, $src2}",
1790 (v2f64 (vector_shuffle
1791 VR128:$src1, (memopv2f64 addr:$src2),
1792 UNPCKL_shuffle_mask)))]>;
1793 } // AddedComplexity
1794 } // Constraints = "$src1 = $dst"
1797 //===----------------------------------------------------------------------===//
1798 // SSE integer instructions
1800 // Move Instructions
1801 let neverHasSideEffects = 1 in
1802 def MOVDQArr : PDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1803 "movdqa\t{$src, $dst|$dst, $src}", []>;
1804 let canFoldAsLoad = 1, mayLoad = 1 in
1805 def MOVDQArm : PDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
1806 "movdqa\t{$src, $dst|$dst, $src}",
1807 [/*(set VR128:$dst, (alignedloadv2i64 addr:$src))*/]>;
1809 def MOVDQAmr : PDI<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
1810 "movdqa\t{$src, $dst|$dst, $src}",
1811 [/*(alignedstore (v2i64 VR128:$src), addr:$dst)*/]>;
1812 let canFoldAsLoad = 1, mayLoad = 1 in
1813 def MOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
1814 "movdqu\t{$src, $dst|$dst, $src}",
1815 [/*(set VR128:$dst, (loadv2i64 addr:$src))*/]>,
1816 XS, Requires<[HasSSE2]>;
1818 def MOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
1819 "movdqu\t{$src, $dst|$dst, $src}",
1820 [/*(store (v2i64 VR128:$src), addr:$dst)*/]>,
1821 XS, Requires<[HasSSE2]>;
1823 // Intrinsic forms of MOVDQU load and store
1824 let canFoldAsLoad = 1 in
1825 def MOVDQUrm_Int : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
1826 "movdqu\t{$src, $dst|$dst, $src}",
1827 [(set VR128:$dst, (int_x86_sse2_loadu_dq addr:$src))]>,
1828 XS, Requires<[HasSSE2]>;
1829 def MOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
1830 "movdqu\t{$src, $dst|$dst, $src}",
1831 [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
1832 XS, Requires<[HasSSE2]>;
1834 let Constraints = "$src1 = $dst" in {
1836 multiclass PDI_binop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId,
1837 bit Commutable = 0> {
1838 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1839 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
1840 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]> {
1841 let isCommutable = Commutable;
1842 }
1843 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
1844 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
1845 [(set VR128:$dst, (IntId VR128:$src1,
1846 (bitconvert (memopv2i64 addr:$src2))))]>;
1847 }
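/// PDI_binop_rmi_int - SSE2 shift-style binop: the rr/rm forms take the count
/// in an XMM register or memory, and the ri form takes an 8-bit immediate
/// encoded with a second opcode and the ModRM reg field given by ImmForm.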
1849 multiclass PDI_binop_rmi_int<bits<8> opc, bits<8> opc2, Format ImmForm,
1850 string OpcodeStr,
1851 Intrinsic IntId, Intrinsic IntId2> {
1852 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1853 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
1854 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
1855 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
1856 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
1857 [(set VR128:$dst, (IntId VR128:$src1,
1858 (bitconvert (memopv2i64 addr:$src2))))]>;
1859 def ri : PDIi8<opc2, ImmForm, (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
1860 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
1861 [(set VR128:$dst, (IntId2 VR128:$src1, (i32 imm:$src2)))]>;
1862 }
1864 /// PDI_binop_rm - Simple SSE2 binary operator.
1865 multiclass PDI_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
1866 ValueType OpVT, bit Commutable = 0> {
1867 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1868 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
1869 [(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]> {
1870 let isCommutable = Commutable;
1871 }
1872 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
1873 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
1874 [(set VR128:$dst, (OpVT (OpNode VR128:$src1,
1875 (bitconvert (memopv2i64 addr:$src2)))))]>;
1876 }
1878 /// PDI_binop_rm_v2i64 - Simple SSE2 binary operator whose type is v2i64.
1880 /// FIXME: we could eliminate this and use PDI_binop_rm instead if tblgen knew
1881 /// to collapse (bitconvert VT to VT) into its operand.
1883 multiclass PDI_binop_rm_v2i64<bits<8> opc, string OpcodeStr, SDNode OpNode,
1884 bit Commutable = 0> {
1885 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1886 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
1887 [(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))]> {
1888 let isCommutable = Commutable;
1889 }
1890 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
1891 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
1892 [(set VR128:$dst, (OpNode VR128:$src1, (memopv2i64 addr:$src2)))]>;
1893 }
1895 } // Constraints = "$src1 = $dst"
1897 // 128-bit Integer Arithmetic
1899 defm PADDB : PDI_binop_rm<0xFC, "paddb", add, v16i8, 1>;
1900 defm PADDW : PDI_binop_rm<0xFD, "paddw", add, v8i16, 1>;
1901 defm PADDD : PDI_binop_rm<0xFE, "paddd", add, v4i32, 1>;
1902 defm PADDQ : PDI_binop_rm_v2i64<0xD4, "paddq", add, 1>;
1904 defm PADDSB : PDI_binop_rm_int<0xEC, "paddsb" , int_x86_sse2_padds_b, 1>;
1905 defm PADDSW : PDI_binop_rm_int<0xED, "paddsw" , int_x86_sse2_padds_w, 1>;
1906 defm PADDUSB : PDI_binop_rm_int<0xDC, "paddusb", int_x86_sse2_paddus_b, 1>;
1907 defm PADDUSW : PDI_binop_rm_int<0xDD, "paddusw", int_x86_sse2_paddus_w, 1>;
1909 defm PSUBB : PDI_binop_rm<0xF8, "psubb", sub, v16i8>;
1910 defm PSUBW : PDI_binop_rm<0xF9, "psubw", sub, v8i16>;
1911 defm PSUBD : PDI_binop_rm<0xFA, "psubd", sub, v4i32>;
1912 defm PSUBQ : PDI_binop_rm_v2i64<0xFB, "psubq", sub>;
1914 defm PSUBSB : PDI_binop_rm_int<0xE8, "psubsb" , int_x86_sse2_psubs_b>;
1915 defm PSUBSW : PDI_binop_rm_int<0xE9, "psubsw" , int_x86_sse2_psubs_w>;
1916 defm PSUBUSB : PDI_binop_rm_int<0xD8, "psubusb", int_x86_sse2_psubus_b>;
1917 defm PSUBUSW : PDI_binop_rm_int<0xD9, "psubusw", int_x86_sse2_psubus_w>;
1919 defm PMULLW : PDI_binop_rm<0xD5, "pmullw", mul, v8i16, 1>;
1921 defm PMULHUW : PDI_binop_rm_int<0xE4, "pmulhuw", int_x86_sse2_pmulhu_w, 1>;
1922 defm PMULHW : PDI_binop_rm_int<0xE5, "pmulhw" , int_x86_sse2_pmulh_w , 1>;
1923 defm PMULUDQ : PDI_binop_rm_int<0xF4, "pmuludq", int_x86_sse2_pmulu_dq, 1>;
1925 defm PMADDWD : PDI_binop_rm_int<0xF5, "pmaddwd", int_x86_sse2_pmadd_wd, 1>;
1927 defm PAVGB : PDI_binop_rm_int<0xE0, "pavgb", int_x86_sse2_pavg_b, 1>;
1928 defm PAVGW : PDI_binop_rm_int<0xE3, "pavgw", int_x86_sse2_pavg_w, 1>;
1931 defm PMINUB : PDI_binop_rm_int<0xDA, "pminub", int_x86_sse2_pminu_b, 1>;
1932 defm PMINSW : PDI_binop_rm_int<0xEA, "pminsw", int_x86_sse2_pmins_w, 1>;
1933 defm PMAXUB : PDI_binop_rm_int<0xDE, "pmaxub", int_x86_sse2_pmaxu_b, 1>;
1934 defm PMAXSW : PDI_binop_rm_int<0xEE, "pmaxsw", int_x86_sse2_pmaxs_w, 1>;
1935 defm PSADBW : PDI_binop_rm_int<0xF6, "psadbw", int_x86_sse2_psad_bw, 1>;
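// psadbw sums absolute byte differences, producing one 16-bit sum per 64-bit
// half of the result.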
1938 defm PSLLW : PDI_binop_rmi_int<0xF1, 0x71, MRM6r, "psllw",
1939 int_x86_sse2_psll_w, int_x86_sse2_pslli_w>;
1940 defm PSLLD : PDI_binop_rmi_int<0xF2, 0x72, MRM6r, "pslld",
1941 int_x86_sse2_psll_d, int_x86_sse2_pslli_d>;
1942 defm PSLLQ : PDI_binop_rmi_int<0xF3, 0x73, MRM6r, "psllq",
1943 int_x86_sse2_psll_q, int_x86_sse2_pslli_q>;
1945 defm PSRLW : PDI_binop_rmi_int<0xD1, 0x71, MRM2r, "psrlw",
1946 int_x86_sse2_psrl_w, int_x86_sse2_psrli_w>;
1947 defm PSRLD : PDI_binop_rmi_int<0xD2, 0x72, MRM2r, "psrld",
1948 int_x86_sse2_psrl_d, int_x86_sse2_psrli_d>;
1949 defm PSRLQ : PDI_binop_rmi_int<0xD3, 0x73, MRM2r, "psrlq",
1950 int_x86_sse2_psrl_q, int_x86_sse2_psrli_q>;
1952 defm PSRAW : PDI_binop_rmi_int<0xE1, 0x71, MRM4r, "psraw",
1953 int_x86_sse2_psra_w, int_x86_sse2_psrai_w>;
1954 defm PSRAD : PDI_binop_rmi_int<0xE2, 0x72, MRM4r, "psrad",
1955 int_x86_sse2_psra_d, int_x86_sse2_psrai_d>;
1957 // 128-bit logical shifts.
1958 let Constraints = "$src1 = $dst", neverHasSideEffects = 1 in {
1959 def PSLLDQri : PDIi8<0x73, MRM7r,
1960 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
1961 "pslldq\t{$src2, $dst|$dst, $src2}", []>;
1962 def PSRLDQri : PDIi8<0x73, MRM3r,
1963 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
1964 "psrldq\t{$src2, $dst|$dst, $src2}", []>;
1965 // PSRADQri doesn't exist in SSE[1-3].
1966 }
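// The *_dq intrinsics take the shift amount in bits, while pslldq/psrldq shift
// the whole register by bytes; PSxLDQ_imm rescales the immediate accordingly,
// and the *_dq_bs ("byte shift") intrinsics pass the byte count through.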
1968 let Predicates = [HasSSE2] in {
1969 def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
1970 (v2i64 (PSLLDQri VR128:$src1, (PSxLDQ_imm imm:$src2)))>;
1971 def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
1972 (v2i64 (PSRLDQri VR128:$src1, (PSxLDQ_imm imm:$src2)))>;
1973 def : Pat<(int_x86_sse2_psll_dq_bs VR128:$src1, imm:$src2),
1974 (v2i64 (PSLLDQri VR128:$src1, imm:$src2))>;
1975 def : Pat<(int_x86_sse2_psrl_dq_bs VR128:$src1, imm:$src2),
1976 (v2i64 (PSRLDQri VR128:$src1, imm:$src2))>;
1977 def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
1978 (v2f64 (PSRLDQri VR128:$src1, (PSxLDQ_imm imm:$src2)))>;
1980 // Shift up / down and insert zeros.
1981 def : Pat<(v2i64 (X86vshl VR128:$src, (i8 imm:$amt))),
1982 (v2i64 (PSLLDQri VR128:$src, (PSxLDQ_imm imm:$amt)))>;
1983 def : Pat<(v2i64 (X86vshr VR128:$src, (i8 imm:$amt))),
1984 (v2i64 (PSRLDQri VR128:$src, (PSxLDQ_imm imm:$amt)))>;
1985 }
1988 defm PAND : PDI_binop_rm_v2i64<0xDB, "pand", and, 1>;
1989 defm POR : PDI_binop_rm_v2i64<0xEB, "por" , or , 1>;
1990 defm PXOR : PDI_binop_rm_v2i64<0xEF, "pxor", xor, 1>;
1992 let Constraints = "$src1 = $dst" in {
1993 def PANDNrr : PDI<0xDF, MRMSrcReg,
1994 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1995 "pandn\t{$src2, $dst|$dst, $src2}",
1996 [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
1997 VR128:$src2)))]>;
1999 def PANDNrm : PDI<0xDF, MRMSrcMem,
2000 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2001 "pandn\t{$src2, $dst|$dst, $src2}",
2002 [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
2003 (memopv2i64 addr:$src2))))]>;
2004 }
2006 // SSE2 Integer comparison
2007 defm PCMPEQB : PDI_binop_rm_int<0x74, "pcmpeqb", int_x86_sse2_pcmpeq_b>;
2008 defm PCMPEQW : PDI_binop_rm_int<0x75, "pcmpeqw", int_x86_sse2_pcmpeq_w>;
2009 defm PCMPEQD : PDI_binop_rm_int<0x76, "pcmpeqd", int_x86_sse2_pcmpeq_d>;
2010 defm PCMPGTB : PDI_binop_rm_int<0x64, "pcmpgtb", int_x86_sse2_pcmpgt_b>;
2011 defm PCMPGTW : PDI_binop_rm_int<0x65, "pcmpgtw", int_x86_sse2_pcmpgt_w>;
2012 defm PCMPGTD : PDI_binop_rm_int<0x66, "pcmpgtd", int_x86_sse2_pcmpgt_d>;
2014 def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, VR128:$src2)),
2015 (PCMPEQBrr VR128:$src1, VR128:$src2)>;
2016 def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, (memop addr:$src2))),
2017 (PCMPEQBrm VR128:$src1, addr:$src2)>;
2018 def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, VR128:$src2)),
2019 (PCMPEQWrr VR128:$src1, VR128:$src2)>;
2020 def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, (memop addr:$src2))),
2021 (PCMPEQWrm VR128:$src1, addr:$src2)>;
2022 def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, VR128:$src2)),
2023 (PCMPEQDrr VR128:$src1, VR128:$src2)>;
2024 def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, (memop addr:$src2))),
2025 (PCMPEQDrm VR128:$src1, addr:$src2)>;
2027 def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, VR128:$src2)),
2028 (PCMPGTBrr VR128:$src1, VR128:$src2)>;
2029 def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, (memop addr:$src2))),
2030 (PCMPGTBrm VR128:$src1, addr:$src2)>;
2031 def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, VR128:$src2)),
2032 (PCMPGTWrr VR128:$src1, VR128:$src2)>;
2033 def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, (memop addr:$src2))),
2034 (PCMPGTWrm VR128:$src1, addr:$src2)>;
2035 def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, VR128:$src2)),
2036 (PCMPGTDrr VR128:$src1, VR128:$src2)>;
2037 def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, (memop addr:$src2))),
2038 (PCMPGTDrm VR128:$src1, addr:$src2)>;
2041 // Pack instructions
2042 defm PACKSSWB : PDI_binop_rm_int<0x63, "packsswb", int_x86_sse2_packsswb_128>;
2043 defm PACKSSDW : PDI_binop_rm_int<0x6B, "packssdw", int_x86_sse2_packssdw_128>;
2044 defm PACKUSWB : PDI_binop_rm_int<0x67, "packuswb", int_x86_sse2_packuswb_128>;
2046 // Shuffle and unpack instructions
2047 def PSHUFDri : PDIi8<0x70, MRMSrcReg,
2048 (outs VR128:$dst), (ins VR128:$src1, i8imm:$src2),
2049 "pshufd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2050 [(set VR128:$dst, (v4i32 (vector_shuffle
2051 VR128:$src1, (undef),
2052 PSHUFD_shuffle_mask:$src2)))]>;
2053 def PSHUFDmi : PDIi8<0x70, MRMSrcMem,
2054 (outs VR128:$dst), (ins i128mem:$src1, i8imm:$src2),
2055 "pshufd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2056 [(set VR128:$dst, (v4i32 (vector_shuffle
2057 (bc_v4i32 (memopv2i64 addr:$src1)),
2058 (undef),
2059 PSHUFD_shuffle_mask:$src2)))]>;
2061 // SSE2 with ImmT == Imm8 and XS prefix.
2062 def PSHUFHWri : Ii8<0x70, MRMSrcReg,
2063 (outs VR128:$dst), (ins VR128:$src1, i8imm:$src2),
2064 "pshufhw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2065 [(set VR128:$dst, (v8i16 (vector_shuffle
2066 VR128:$src1, (undef),
2067 PSHUFHW_shuffle_mask:$src2)))]>,
2068 XS, Requires<[HasSSE2]>;
2069 def PSHUFHWmi : Ii8<0x70, MRMSrcMem,
2070 (outs VR128:$dst), (ins i128mem:$src1, i8imm:$src2),
2071 "pshufhw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2072 [(set VR128:$dst, (v8i16 (vector_shuffle
2073 (bc_v8i16 (memopv2i64 addr:$src1)),
2074 (undef),
2075 PSHUFHW_shuffle_mask:$src2)))]>,
2076 XS, Requires<[HasSSE2]>;
2078 // SSE2 with ImmT == Imm8 and XD prefix.
2079 def PSHUFLWri : Ii8<0x70, MRMSrcReg,
2080 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2081 "pshuflw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2082 [(set VR128:$dst, (v8i16 (vector_shuffle
2083 VR128:$src1, (undef),
2084 PSHUFLW_shuffle_mask:$src2)))]>,
2085 XD, Requires<[HasSSE2]>;
2086 def PSHUFLWmi : Ii8<0x70, MRMSrcMem,
2087 (outs VR128:$dst), (ins i128mem:$src1, i32i8imm:$src2),
2088 "pshuflw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2089 [(set VR128:$dst, (v8i16 (vector_shuffle
2090 (bc_v8i16 (memopv2i64 addr:$src1)),
2091 (undef),
2092 PSHUFLW_shuffle_mask:$src2)))]>,
2093 XD, Requires<[HasSSE2]>;
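// pshufd permutes all four dwords; pshufhw and pshuflw permute only the high
// or low four words and copy the other half of the register unchanged.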
2096 let Constraints = "$src1 = $dst" in {
2097 def PUNPCKLBWrr : PDI<0x60, MRMSrcReg,
2098 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2099 "punpcklbw\t{$src2, $dst|$dst, $src2}",
2101 (v16i8 (vector_shuffle VR128:$src1, VR128:$src2,
2102 UNPCKL_shuffle_mask)))]>;
2103 def PUNPCKLBWrm : PDI<0x60, MRMSrcMem,
2104 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2105 "punpcklbw\t{$src2, $dst|$dst, $src2}",
2107 (v16i8 (vector_shuffle VR128:$src1,
2108 (bc_v16i8 (memopv2i64 addr:$src2)),
2109 UNPCKL_shuffle_mask)))]>;
2110 def PUNPCKLWDrr : PDI<0x61, MRMSrcReg,
2111 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2112 "punpcklwd\t{$src2, $dst|$dst, $src2}",
2114 (v8i16 (vector_shuffle VR128:$src1, VR128:$src2,
2115 UNPCKL_shuffle_mask)))]>;
2116 def PUNPCKLWDrm : PDI<0x61, MRMSrcMem,
2117 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2118 "punpcklwd\t{$src2, $dst|$dst, $src2}",
2120 (v8i16 (vector_shuffle VR128:$src1,
2121 (bc_v8i16 (memopv2i64 addr:$src2)),
2122 UNPCKL_shuffle_mask)))]>;
2123 def PUNPCKLDQrr : PDI<0x62, MRMSrcReg,
2124 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2125 "punpckldq\t{$src2, $dst|$dst, $src2}",
2127 (v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
2128 UNPCKL_shuffle_mask)))]>;
2129 def PUNPCKLDQrm : PDI<0x62, MRMSrcMem,
2130 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2131 "punpckldq\t{$src2, $dst|$dst, $src2}",
2133 (v4i32 (vector_shuffle VR128:$src1,
2134 (bc_v4i32 (memopv2i64 addr:$src2)),
2135 UNPCKL_shuffle_mask)))]>;
2136 def PUNPCKLQDQrr : PDI<0x6C, MRMSrcReg,
2137 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2138 "punpcklqdq\t{$src2, $dst|$dst, $src2}",
2140 (v2i64 (vector_shuffle VR128:$src1, VR128:$src2,
2141 UNPCKL_shuffle_mask)))]>;
2142 def PUNPCKLQDQrm : PDI<0x6C, MRMSrcMem,
2143 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2144 "punpcklqdq\t{$src2, $dst|$dst, $src2}",
2146 (v2i64 (vector_shuffle VR128:$src1,
2147 (memopv2i64 addr:$src2),
2148 UNPCKL_shuffle_mask)))]>;
2150 def PUNPCKHBWrr : PDI<0x68, MRMSrcReg,
2151 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2152 "punpckhbw\t{$src2, $dst|$dst, $src2}",
2154 (v16i8 (vector_shuffle VR128:$src1, VR128:$src2,
2155 UNPCKH_shuffle_mask)))]>;
2156 def PUNPCKHBWrm : PDI<0x68, MRMSrcMem,
2157 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2158 "punpckhbw\t{$src2, $dst|$dst, $src2}",
2160 (v16i8 (vector_shuffle VR128:$src1,
2161 (bc_v16i8 (memopv2i64 addr:$src2)),
2162 UNPCKH_shuffle_mask)))]>;
2163 def PUNPCKHWDrr : PDI<0x69, MRMSrcReg,
2164 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2165 "punpckhwd\t{$src2, $dst|$dst, $src2}",
2167 (v8i16 (vector_shuffle VR128:$src1, VR128:$src2,
2168 UNPCKH_shuffle_mask)))]>;
2169 def PUNPCKHWDrm : PDI<0x69, MRMSrcMem,
2170 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2171 "punpckhwd\t{$src2, $dst|$dst, $src2}",
2173 (v8i16 (vector_shuffle VR128:$src1,
2174 (bc_v8i16 (memopv2i64 addr:$src2)),
2175 UNPCKH_shuffle_mask)))]>;
2176 def PUNPCKHDQrr : PDI<0x6A, MRMSrcReg,
2177 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2178 "punpckhdq\t{$src2, $dst|$dst, $src2}",
2180 (v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
2181 UNPCKH_shuffle_mask)))]>;
2182 def PUNPCKHDQrm : PDI<0x6A, MRMSrcMem,
2183 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2184 "punpckhdq\t{$src2, $dst|$dst, $src2}",
2186 (v4i32 (vector_shuffle VR128:$src1,
2187 (bc_v4i32 (memopv2i64 addr:$src2)),
2188 UNPCKH_shuffle_mask)))]>;
2189 def PUNPCKHQDQrr : PDI<0x6D, MRMSrcReg,
2190 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2191 "punpckhqdq\t{$src2, $dst|$dst, $src2}",
2193 (v2i64 (vector_shuffle VR128:$src1, VR128:$src2,
2194 UNPCKH_shuffle_mask)))]>;
2195 def PUNPCKHQDQrm : PDI<0x6D, MRMSrcMem,
2196 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2197 "punpckhqdq\t{$src2, $dst|$dst, $src2}",
2199 (v2i64 (vector_shuffle VR128:$src1,
2200 (memopv2i64 addr:$src2),
2201 UNPCKH_shuffle_mask)))]>;
2202 }
2205 def PEXTRWri : PDIi8<0xC5, MRMSrcReg,
2206 (outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
2207 "pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2208 [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
2209 imm:$src2))]>;
2210 let Constraints = "$src1 = $dst" in {
2211 def PINSRWrri : PDIi8<0xC4, MRMSrcReg,
2212 (outs VR128:$dst), (ins VR128:$src1,
2213 GR32:$src2, i32i8imm:$src3),
2214 "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
2216 (X86pinsrw VR128:$src1, GR32:$src2, imm:$src3))]>;
2217 def PINSRWrmi : PDIi8<0xC4, MRMSrcMem,
2218 (outs VR128:$dst), (ins VR128:$src1,
2219 i16mem:$src2, i32i8imm:$src3),
2220 "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
2222 (X86pinsrw VR128:$src1, (extloadi16 addr:$src2),
2223 imm:$src3))]>;
2224 }
2227 def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
2228 "pmovmskb\t{$src, $dst|$dst, $src}",
2229 [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>;
2231 // Conditional store
2232 let Uses = [EDI] in
2233 def MASKMOVDQU : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
2234 "maskmovdqu\t{$mask, $src|$src, $mask}",
2235 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>;
2237 let Uses = [RDI] in
2238 def MASKMOVDQU64 : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
2239 "maskmovdqu\t{$mask, $src|$src, $mask}",
2240 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)]>;
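// maskmovdqu performs a byte-masked store to the address in EDI (RDI in
// 64-bit mode), which is why the pointer register appears as a fixed operand
// in the patterns above.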
2242 // Non-temporal stores
2243 def MOVNTPDmr : PDI<0x2B, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2244 "movntpd\t{$src, $dst|$dst, $src}",
2245 [(int_x86_sse2_movnt_pd addr:$dst, VR128:$src)]>;
2246 def MOVNTDQmr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2247 "movntdq\t{$src, $dst|$dst, $src}",
2248 [(int_x86_sse2_movnt_dq addr:$dst, VR128:$src)]>;
2249 def MOVNTImr : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
2250 "movnti\t{$src, $dst|$dst, $src}",
2251 [(int_x86_sse2_movnt_i addr:$dst, GR32:$src)]>,
2252 TB, Requires<[HasSSE2]>;
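// The non-temporal stores above write around the caches and are weakly ordered
// with respect to ordinary stores; an sfence/mfence is needed to order them.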
2255 def CLFLUSH : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
2256 "clflush\t$src", [(int_x86_sse2_clflush addr:$src)]>,
2257 TB, Requires<[HasSSE2]>;
2259 // Load, store, and memory fence
2260 def LFENCE : I<0xAE, MRM5r, (outs), (ins),
2261 "lfence", [(int_x86_sse2_lfence)]>, TB, Requires<[HasSSE2]>;
2262 def MFENCE : I<0xAE, MRM6r, (outs), (ins),
2263 "mfence", [(int_x86_sse2_mfence)]>, TB, Requires<[HasSSE2]>;
2265 //TODO: custom lower this so as to never even generate the noop
2266 def : Pat<(membarrier (i8 imm:$ll), (i8 imm:$ls), (i8 imm:$sl), (i8 imm:$ss),
2267 (i8 0)), (NOOP)>;
2268 def : Pat<(membarrier (i8 0), (i8 0), (i8 0), (i8 1), (i8 1)), (SFENCE)>;
2269 def : Pat<(membarrier (i8 1), (i8 0), (i8 0), (i8 0), (i8 1)), (LFENCE)>;
2270 def : Pat<(membarrier (i8 imm:$ll), (i8 imm:$ls), (i8 imm:$sl), (i8 imm:$ss),
2271 (i8 1)), (MFENCE)>;
2273 // Alias instruction that maps an all-ones vector to pcmpeqd.
2274 // We set canFoldAsLoad because this can be converted to a constant-pool
2275 // load of an all-ones value if folding it would be beneficial.
2276 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1 in
2277 def V_SETALLONES : PDI<0x76, MRMInitReg, (outs VR128:$dst), (ins),
2278 "pcmpeqd\t$dst, $dst",
2279 [(set VR128:$dst, (v4i32 immAllOnesV))]>;
2281 // FR64 to 128-bit vector conversion.
2282 let isAsCheapAsAMove = 1 in
2283 def MOVSD2PDrr : SDI<0x10, MRMSrcReg, (outs VR128:$dst), (ins FR64:$src),
2284 "movsd\t{$src, $dst|$dst, $src}",
2286 (v2f64 (scalar_to_vector FR64:$src)))]>;
2287 def MOVSD2PDrm : SDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
2288 "movsd\t{$src, $dst|$dst, $src}",
2290 (v2f64 (scalar_to_vector (loadf64 addr:$src))))]>;
2292 def MOVDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
2293 "movd\t{$src, $dst|$dst, $src}",
2295 (v4i32 (scalar_to_vector GR32:$src)))]>;
2296 def MOVDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
2297 "movd\t{$src, $dst|$dst, $src}",
2299 (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>;
2301 def MOVDI2SSrr : PDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
2302 "movd\t{$src, $dst|$dst, $src}",
2303 [(set FR32:$dst, (bitconvert GR32:$src))]>;
2305 def MOVDI2SSrm : PDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
2306 "movd\t{$src, $dst|$dst, $src}",
2307 [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>;
2309 // SSE2 instructions with XS prefix
2310 def MOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
2311 "movq\t{$src, $dst|$dst, $src}",
2313 (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
2314 Requires<[HasSSE2]>;
2315 def MOVPQI2QImr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
2316 "movq\t{$src, $dst|$dst, $src}",
2317 [(store (i64 (vector_extract (v2i64 VR128:$src),
2318 (iPTR 0))), addr:$dst)]>;
2320 // FIXME: may not be able to eliminate this movss with coalescing since the
2321 // src and dest register classes are different. We really want to write this
2322 // pattern like this:
2323 // def : Pat<(f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
2324 // (f32 FR32:$src)>;
2325 let isAsCheapAsAMove = 1 in
2326 def MOVPD2SDrr : SDI<0x10, MRMSrcReg, (outs FR64:$dst), (ins VR128:$src),
2327 "movsd\t{$src, $dst|$dst, $src}",
2328 [(set FR64:$dst, (vector_extract (v2f64 VR128:$src),
2329 (iPTR 0)))]>;
2330 def MOVPD2SDmr : SDI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
2331 "movsd\t{$src, $dst|$dst, $src}",
2332 [(store (f64 (vector_extract (v2f64 VR128:$src),
2333 (iPTR 0))), addr:$dst)]>;
2334 def MOVPDI2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
2335 "movd\t{$src, $dst|$dst, $src}",
2336 [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
2337 (iPTR 0)))]>;
2338 def MOVPDI2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR128:$src),
2339 "movd\t{$src, $dst|$dst, $src}",
2340 [(store (i32 (vector_extract (v4i32 VR128:$src),
2341 (iPTR 0))), addr:$dst)]>;
2343 def MOVSS2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
2344 "movd\t{$src, $dst|$dst, $src}",
2345 [(set GR32:$dst, (bitconvert FR32:$src))]>;
2346 def MOVSS2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
2347 "movd\t{$src, $dst|$dst, $src}",
2348 [(store (i32 (bitconvert FR32:$src)), addr:$dst)]>;
2351 // Move to lower bits of a VR128, leaving upper bits alone.
2352 // Three operand (but two address) aliases.
2353 let Constraints = "$src1 = $dst" in {
2354 let neverHasSideEffects = 1 in
2355 def MOVLSD2PDrr : SDI<0x10, MRMSrcReg,
2356 (outs VR128:$dst), (ins VR128:$src1, FR64:$src2),
2357 "movsd\t{$src2, $dst|$dst, $src2}", []>;
2359 let AddedComplexity = 15 in
2360 def MOVLPDrr : SDI<0x10, MRMSrcReg,
2361 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2362 "movsd\t{$src2, $dst|$dst, $src2}",
2364 (v2f64 (vector_shuffle VR128:$src1, VR128:$src2,
2365 MOVL_shuffle_mask)))]>;
2366 }
2368 // Store / copy lower 64-bits of a XMM register.
2369 def MOVLQ128mr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
2370 "movq\t{$src, $dst|$dst, $src}",
2371 [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>;
2373 // Move to the lower bits of a VR128, zeroing the upper bits.
2374 // Loading from memory automatically zeroes the upper bits.
2375 let AddedComplexity = 20 in {
2376 def MOVZSD2PDrm : SDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
2377 "movsd\t{$src, $dst|$dst, $src}",
2379 (v2f64 (X86vzmovl (v2f64 (scalar_to_vector
2380 (loadf64 addr:$src))))))]>;
2382 def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
2383 (MOVZSD2PDrm addr:$src)>;
2384 def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
2385 (MOVZSD2PDrm addr:$src)>;
2386 def : Pat<(v2f64 (X86vzload addr:$src)), (MOVZSD2PDrm addr:$src)>;
2387 }
2389 // movd / movq to XMM register zero-extends
2390 let AddedComplexity = 15 in {
2391 def MOVZDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
2392 "movd\t{$src, $dst|$dst, $src}",
2393 [(set VR128:$dst, (v4i32 (X86vzmovl
2394 (v4i32 (scalar_to_vector GR32:$src)))))]>;
2395 // This is X86-64 only.
2396 def MOVZQI2PQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
2397 "mov{d|q}\t{$src, $dst|$dst, $src}",
2398 [(set VR128:$dst, (v2i64 (X86vzmovl
2399 (v2i64 (scalar_to_vector GR64:$src)))))]>;
2400 }
2402 let AddedComplexity = 20 in {
2403 def MOVZDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
2404 "movd\t{$src, $dst|$dst, $src}",
2406 (v4i32 (X86vzmovl (v4i32 (scalar_to_vector
2407 (loadi32 addr:$src))))))]>;
2409 def : Pat<(v4i32 (X86vzmovl (loadv4i32 addr:$src))),
2410 (MOVZDI2PDIrm addr:$src)>;
2411 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
2412 (MOVZDI2PDIrm addr:$src)>;
2413 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
2414 (MOVZDI2PDIrm addr:$src)>;
2416 def MOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
2417 "movq\t{$src, $dst|$dst, $src}",
2419 (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
2420 (loadi64 addr:$src))))))]>, XS,
2421 Requires<[HasSSE2]>;
2423 def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
2424 (MOVZQI2PQIrm addr:$src)>;
2425 def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4f32 addr:$src)))),
2426 (MOVZQI2PQIrm addr:$src)>;
2427 def : Pat<(v2i64 (X86vzload addr:$src)), (MOVZQI2PQIrm addr:$src)>;
2428 }
2430 // Moving from XMM to XMM and clearing the upper 64 bits. Note: there is a bug
2431 // in the IA-32 documentation; movq xmm1, xmm2 does clear the high bits.
2432 let AddedComplexity = 15 in
2433 def MOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2434 "movq\t{$src, $dst|$dst, $src}",
2435 [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
2436 XS, Requires<[HasSSE2]>;
2438 let AddedComplexity = 20 in {
2439 def MOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2440 "movq\t{$src, $dst|$dst, $src}",
2441 [(set VR128:$dst, (v2i64 (X86vzmovl
2442 (loadv2i64 addr:$src))))]>,
2443 XS, Requires<[HasSSE2]>;
2445 def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4i32 addr:$src)))),
2446 (MOVZPQILo2PQIrm addr:$src)>;
2447 }
2449 //===----------------------------------------------------------------------===//
2450 // SSE3 Instructions
2451 //===----------------------------------------------------------------------===//
2453 // Move Instructions
2454 def MOVSHDUPrr : S3SI<0x16, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2455 "movshdup\t{$src, $dst|$dst, $src}",
2456 [(set VR128:$dst, (v4f32 (vector_shuffle
2457 VR128:$src, (undef),
2458 MOVSHDUP_shuffle_mask)))]>;
2459 def MOVSHDUPrm : S3SI<0x16, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2460 "movshdup\t{$src, $dst|$dst, $src}",
2461 [(set VR128:$dst, (v4f32 (vector_shuffle
2462 (memopv4f32 addr:$src), (undef),
2463 MOVSHDUP_shuffle_mask)))]>;
2465 def MOVSLDUPrr : S3SI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2466 "movsldup\t{$src, $dst|$dst, $src}",
2467 [(set VR128:$dst, (v4f32 (vector_shuffle
2468 VR128:$src, (undef),
2469 MOVSLDUP_shuffle_mask)))]>;
2470 def MOVSLDUPrm : S3SI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2471 "movsldup\t{$src, $dst|$dst, $src}",
2472 [(set VR128:$dst, (v4f32 (vector_shuffle
2473 (memopv4f32 addr:$src), (undef),
2474 MOVSLDUP_shuffle_mask)))]>;
2476 def MOVDDUPrr : S3DI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2477 "movddup\t{$src, $dst|$dst, $src}",
2479 (v2f64 (vector_shuffle VR128:$src, (undef),
2480 MOVDDUP_shuffle_mask)))]>;
2481 def MOVDDUPrm : S3DI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
2482 "movddup\t{$src, $dst|$dst, $src}",
2484 (v2f64 (vector_shuffle
2485 (scalar_to_vector (loadf64 addr:$src)),
2486 (undef), MOVDDUP_shuffle_mask)))]>;
2488 def : Pat<(vector_shuffle
2489 (bc_v2f64 (v2i64 (scalar_to_vector (loadi64 addr:$src)))),
2490 (undef), MOVDDUP_shuffle_mask),
2491 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
2492 def : Pat<(vector_shuffle
2493 (memopv2f64 addr:$src), (undef), MOVDDUP_shuffle_mask),
2494 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
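// movddup duplicates the low double into both result elements, so both
// shuffle-of-load forms above can use the 64-bit memory form.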
2498 let Constraints = "$src1 = $dst" in {
2499 def ADDSUBPSrr : S3DI<0xD0, MRMSrcReg,
2500 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2501 "addsubps\t{$src2, $dst|$dst, $src2}",
2502 [(set VR128:$dst, (int_x86_sse3_addsub_ps VR128:$src1,
2503 VR128:$src2))]>;
2504 def ADDSUBPSrm : S3DI<0xD0, MRMSrcMem,
2505 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
2506 "addsubps\t{$src2, $dst|$dst, $src2}",
2507 [(set VR128:$dst, (int_x86_sse3_addsub_ps VR128:$src1,
2508 (memop addr:$src2)))]>;
2509 def ADDSUBPDrr : S3I<0xD0, MRMSrcReg,
2510 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2511 "addsubpd\t{$src2, $dst|$dst, $src2}",
2512 [(set VR128:$dst, (int_x86_sse3_addsub_pd VR128:$src1,
2513 VR128:$src2))]>;
2514 def ADDSUBPDrm : S3I<0xD0, MRMSrcMem,
2515 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
2516 "addsubpd\t{$src2, $dst|$dst, $src2}",
2517 [(set VR128:$dst, (int_x86_sse3_addsub_pd VR128:$src1,
2518 (memop addr:$src2)))]>;
2519 }
2521 def LDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2522 "lddqu\t{$src, $dst|$dst, $src}",
2523 [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>;
2526 class S3D_Intrr<bits<8> o, string OpcodeStr, Intrinsic IntId>
2527 : S3DI<o, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2528 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2529 [(set VR128:$dst, (v4f32 (IntId VR128:$src1, VR128:$src2)))]>;
2530 class S3D_Intrm<bits<8> o, string OpcodeStr, Intrinsic IntId>
2531 : S3DI<o, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
2532 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2533 [(set VR128:$dst, (v4f32 (IntId VR128:$src1, (memop addr:$src2))))]>;
2534 class S3_Intrr<bits<8> o, string OpcodeStr, Intrinsic IntId>
2535 : S3I<o, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2536 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2537 [(set VR128:$dst, (v2f64 (IntId VR128:$src1, VR128:$src2)))]>;
2538 class S3_Intrm<bits<8> o, string OpcodeStr, Intrinsic IntId>
2539 : S3I<o, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
2540 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2541 [(set VR128:$dst, (v2f64 (IntId VR128:$src1, (memopv2f64 addr:$src2))))]>;
2543 let Constraints = "$src1 = $dst" in {
2544 def HADDPSrr : S3D_Intrr<0x7C, "haddps", int_x86_sse3_hadd_ps>;
2545 def HADDPSrm : S3D_Intrm<0x7C, "haddps", int_x86_sse3_hadd_ps>;
2546 def HADDPDrr : S3_Intrr <0x7C, "haddpd", int_x86_sse3_hadd_pd>;
2547 def HADDPDrm : S3_Intrm <0x7C, "haddpd", int_x86_sse3_hadd_pd>;
2548 def HSUBPSrr : S3D_Intrr<0x7D, "hsubps", int_x86_sse3_hsub_ps>;
2549 def HSUBPSrm : S3D_Intrm<0x7D, "hsubps", int_x86_sse3_hsub_ps>;
2550 def HSUBPDrr : S3_Intrr <0x7D, "hsubpd", int_x86_sse3_hsub_pd>;
2551 def HSUBPDrm : S3_Intrm <0x7D, "hsubpd", int_x86_sse3_hsub_pd>;
2552 }
2554 // Thread synchronization
2555 def MONITOR : I<0xC8, RawFrm, (outs), (ins), "monitor",
2556 [(int_x86_sse3_monitor EAX, ECX, EDX)]>,TB, Requires<[HasSSE3]>;
2557 def MWAIT : I<0xC9, RawFrm, (outs), (ins), "mwait",
2558 [(int_x86_sse3_mwait ECX, EAX)]>, TB, Requires<[HasSSE3]>;
2560 // vector_shuffle v1, <undef> <1, 1, 3, 3>
2561 let AddedComplexity = 15 in
2562 def : Pat<(v4i32 (vector_shuffle VR128:$src, (undef),
2563 MOVSHDUP_shuffle_mask)),
2564 (MOVSHDUPrr VR128:$src)>, Requires<[HasSSE3]>;
2565 let AddedComplexity = 20 in
2566 def : Pat<(v4i32 (vector_shuffle (bc_v4i32 (memopv2i64 addr:$src)), (undef),
2567 MOVSHDUP_shuffle_mask)),
2568 (MOVSHDUPrm addr:$src)>, Requires<[HasSSE3]>;
2570 // vector_shuffle v1, <undef> <0, 0, 2, 2>
2571 let AddedComplexity = 15 in
2572 def : Pat<(v4i32 (vector_shuffle VR128:$src, (undef),
2573 MOVSLDUP_shuffle_mask)),
2574 (MOVSLDUPrr VR128:$src)>, Requires<[HasSSE3]>;
2575 let AddedComplexity = 20 in
2576 def : Pat<(v4i32 (vector_shuffle (bc_v4i32 (memopv2i64 addr:$src)), (undef),
2577 MOVSLDUP_shuffle_mask)),
2578 (MOVSLDUPrm addr:$src)>, Requires<[HasSSE3]>;
2580 //===----------------------------------------------------------------------===//
2581 // SSSE3 Instructions
2582 //===----------------------------------------------------------------------===//
2584 /// SS3I_unop_rm_int_8 - Simple SSSE3 unary operator whose type is v*i8.
2585 multiclass SS3I_unop_rm_int_8<bits<8> opc, string OpcodeStr,
2586 Intrinsic IntId64, Intrinsic IntId128> {
2587 def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst), (ins VR64:$src),
2588 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2589 [(set VR64:$dst, (IntId64 VR64:$src))]>;
2591 def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst), (ins i64mem:$src),
2592 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2593 [(set VR64:$dst,
2594 (IntId64 (bitconvert (memopv8i8 addr:$src))))]>;
2596 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
2597 (ins VR128:$src),
2598 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2599 [(set VR128:$dst, (IntId128 VR128:$src))]>,
2600 OpSize;
2602 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
2603 (ins i128mem:$src),
2604 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2605 [(set VR128:$dst,
2606 (IntId128
2607 (bitconvert (memopv16i8 addr:$src))))]>, OpSize;
2608 }
2610 /// SS3I_unop_rm_int_16 - Simple SSSE3 unary operator whose type is v*i16.
2611 multiclass SS3I_unop_rm_int_16<bits<8> opc, string OpcodeStr,
2612 Intrinsic IntId64, Intrinsic IntId128> {
2613 def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
2614 (ins VR64:$src),
2615 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2616 [(set VR64:$dst, (IntId64 VR64:$src))]>;
2618 def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
2619 (ins i64mem:$src),
2620 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2621 [(set VR64:$dst,
2622 (IntId64
2623 (bitconvert (memopv4i16 addr:$src))))]>;
2625 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
2626 (ins VR128:$src),
2627 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2628 [(set VR128:$dst, (IntId128 VR128:$src))]>,
2629 OpSize;
2631 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
2632 (ins i128mem:$src),
2633 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2634 [(set VR128:$dst,
2635 (IntId128
2636 (bitconvert (memopv8i16 addr:$src))))]>, OpSize;
2637 }
2639 /// SS3I_unop_rm_int_32 - Simple SSSE3 unary operator whose type is v*i32.
2640 multiclass SS3I_unop_rm_int_32<bits<8> opc, string OpcodeStr,
2641 Intrinsic IntId64, Intrinsic IntId128> {
2642 def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
2643 (ins VR64:$src),
2644 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2645 [(set VR64:$dst, (IntId64 VR64:$src))]>;
2647 def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
2648 (ins i64mem:$src),
2649 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2650 [(set VR64:$dst,
2651 (IntId64
2652 (bitconvert (memopv2i32 addr:$src))))]>;
2654 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
2655 (ins VR128:$src),
2656 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2657 [(set VR128:$dst, (IntId128 VR128:$src))]>,
2658 OpSize;
2660 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
2661 (ins i128mem:$src),
2662 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2663 [(set VR128:$dst,
2664 (IntId128
2665 (bitconvert (memopv4i32 addr:$src))))]>, OpSize;
2666 }
2668 defm PABSB : SS3I_unop_rm_int_8 <0x1C, "pabsb",
2669 int_x86_ssse3_pabs_b,
2670 int_x86_ssse3_pabs_b_128>;
2671 defm PABSW : SS3I_unop_rm_int_16<0x1D, "pabsw",
2672 int_x86_ssse3_pabs_w,
2673 int_x86_ssse3_pabs_w_128>;
2674 defm PABSD : SS3I_unop_rm_int_32<0x1E, "pabsd",
2675 int_x86_ssse3_pabs_d,
2676 int_x86_ssse3_pabs_d_128>;
2678 /// SS3I_binop_rm_int_8 - Simple SSSE3 binary operator whose type is v*i8.
2679 let Constraints = "$src1 = $dst" in {
2680 multiclass SS3I_binop_rm_int_8<bits<8> opc, string OpcodeStr,
2681 Intrinsic IntId64, Intrinsic IntId128,
2682 bit Commutable = 0> {
2683 def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
2684 (ins VR64:$src1, VR64:$src2),
2685 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2686 [(set VR64:$dst, (IntId64 VR64:$src1, VR64:$src2))]> {
2687 let isCommutable = Commutable;
2688 }
2689 def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
2690 (ins VR64:$src1, i64mem:$src2),
2691 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2692 [(set VR64:$dst,
2693 (IntId64 VR64:$src1,
2694 (bitconvert (memopv8i8 addr:$src2))))]>;
2696 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
2697 (ins VR128:$src1, VR128:$src2),
2698 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2699 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
2700 OpSize {
2701 let isCommutable = Commutable;
2702 }
2703 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
2704 (ins VR128:$src1, i128mem:$src2),
2705 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2706 [(set VR128:$dst,
2707 (IntId128 VR128:$src1,
2708 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
2709 }
2710 }
2712 /// SS3I_binop_rm_int_16 - Simple SSSE3 binary operator whose type is v*i16.
2713 let Constraints = "$src1 = $dst" in {
2714 multiclass SS3I_binop_rm_int_16<bits<8> opc, string OpcodeStr,
2715 Intrinsic IntId64, Intrinsic IntId128,
2716 bit Commutable = 0> {
2717 def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
2718 (ins VR64:$src1, VR64:$src2),
2719 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2720 [(set VR64:$dst, (IntId64 VR64:$src1, VR64:$src2))]> {
2721 let isCommutable = Commutable;
2722 }
2723 def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
2724 (ins VR64:$src1, i64mem:$src2),
2725 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2726 [(set VR64:$dst,
2727 (IntId64 VR64:$src1,
2728 (bitconvert (memopv4i16 addr:$src2))))]>;
2730 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
2731 (ins VR128:$src1, VR128:$src2),
2732 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2733 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
2734 OpSize {
2735 let isCommutable = Commutable;
2736 }
2737 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
2738 (ins VR128:$src1, i128mem:$src2),
2739 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2740 [(set VR128:$dst,
2741 (IntId128 VR128:$src1,
2742 (bitconvert (memopv8i16 addr:$src2))))]>, OpSize;
2743 }
2744 }
2746 /// SS3I_binop_rm_int_32 - Simple SSSE3 binary operator whose type is v*i32.
2747 let Constraints = "$src1 = $dst" in {
2748 multiclass SS3I_binop_rm_int_32<bits<8> opc, string OpcodeStr,
2749 Intrinsic IntId64, Intrinsic IntId128,
2750 bit Commutable = 0> {
2751 def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
2752 (ins VR64:$src1, VR64:$src2),
2753 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2754 [(set VR64:$dst, (IntId64 VR64:$src1, VR64:$src2))]> {
2755 let isCommutable = Commutable;
2756 }
2757 def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
2758 (ins VR64:$src1, i64mem:$src2),
2759 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2760 [(set VR64:$dst,
2761 (IntId64 VR64:$src1,
2762 (bitconvert (memopv2i32 addr:$src2))))]>;
2764 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
2765 (ins VR128:$src1, VR128:$src2),
2766 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2767 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
2768 OpSize {
2769 let isCommutable = Commutable;
2770 }
2771 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
2772 (ins VR128:$src1, i128mem:$src2),
2773 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2774 [(set VR128:$dst,
2775 (IntId128 VR128:$src1,
2776 (bitconvert (memopv4i32 addr:$src2))))]>, OpSize;
2777 }
2778 }
2780 defm PHADDW : SS3I_binop_rm_int_16<0x01, "phaddw",
2781 int_x86_ssse3_phadd_w,
2782 int_x86_ssse3_phadd_w_128>;
2783 defm PHADDD : SS3I_binop_rm_int_32<0x02, "phaddd",
2784 int_x86_ssse3_phadd_d,
2785 int_x86_ssse3_phadd_d_128>;
2786 defm PHADDSW : SS3I_binop_rm_int_16<0x03, "phaddsw",
2787 int_x86_ssse3_phadd_sw,
2788 int_x86_ssse3_phadd_sw_128>;
2789 defm PHSUBW : SS3I_binop_rm_int_16<0x05, "phsubw",
2790 int_x86_ssse3_phsub_w,
2791 int_x86_ssse3_phsub_w_128>;
2792 defm PHSUBD : SS3I_binop_rm_int_32<0x06, "phsubd",
2793 int_x86_ssse3_phsub_d,
2794 int_x86_ssse3_phsub_d_128>;
2795 defm PHSUBSW : SS3I_binop_rm_int_16<0x07, "phsubsw",
2796 int_x86_ssse3_phsub_sw,
2797 int_x86_ssse3_phsub_sw_128>;
2798 defm PMADDUBSW : SS3I_binop_rm_int_8 <0x04, "pmaddubsw",
2799 int_x86_ssse3_pmadd_ub_sw,
2800 int_x86_ssse3_pmadd_ub_sw_128>;
2801 defm PMULHRSW : SS3I_binop_rm_int_16<0x0B, "pmulhrsw",
2802 int_x86_ssse3_pmul_hr_sw,
2803 int_x86_ssse3_pmul_hr_sw_128, 1>;
2804 defm PSHUFB : SS3I_binop_rm_int_8 <0x00, "pshufb",
2805 int_x86_ssse3_pshuf_b,
2806 int_x86_ssse3_pshuf_b_128>;
2807 defm PSIGNB : SS3I_binop_rm_int_8 <0x08, "psignb",
2808 int_x86_ssse3_psign_b,
2809 int_x86_ssse3_psign_b_128>;
2810 defm PSIGNW : SS3I_binop_rm_int_16<0x09, "psignw",
2811 int_x86_ssse3_psign_w,
2812 int_x86_ssse3_psign_w_128>;
2813 defm PSIGND : SS3I_binop_rm_int_32<0x0A, "psignd",
2814 int_x86_ssse3_psign_d,
2815 int_x86_ssse3_psign_d_128>;
2817 let Constraints = "$src1 = $dst" in {
2818 def PALIGNR64rr : SS3AI<0x0F, MRMSrcReg, (outs VR64:$dst),
2819 (ins VR64:$src1, VR64:$src2, i16imm:$src3),
2820 "palignr\t{$src3, $src2, $dst|$dst, $src2, $src3}",
2822 (int_x86_ssse3_palign_r
2823 VR64:$src1, VR64:$src2,
2825 def PALIGNR64rm : SS3AI<0x0F, MRMSrcMem, (outs VR64:$dst),
2826 (ins VR64:$src1, i64mem:$src2, i16imm:$src3),
2827 "palignr\t{$src3, $src2, $dst|$dst, $src2, $src3}",
2829 (int_x86_ssse3_palign_r
2831 (bitconvert (memopv2i32 addr:$src2)),
2834 def PALIGNR128rr : SS3AI<0x0F, MRMSrcReg, (outs VR128:$dst),
2835 (ins VR128:$src1, VR128:$src2, i32imm:$src3),
2836 "palignr\t{$src3, $src2, $dst|$dst, $src2, $src3}",
2838 (int_x86_ssse3_palign_r_128
2839 VR128:$src1, VR128:$src2,
2840 imm:$src3))]>, OpSize;
2841 def PALIGNR128rm : SS3AI<0x0F, MRMSrcMem, (outs VR128:$dst),
2842 (ins VR128:$src1, i128mem:$src2, i32imm:$src3),
2843 "palignr\t{$src3, $src2, $dst|$dst, $src2, $src3}",
2845 (int_x86_ssse3_palign_r_128
2847 (bitconvert (memopv4i32 addr:$src2)),
2848 imm:$src3))]>, OpSize;
def : Pat<(X86pshufb VR128:$src, VR128:$mask),
          (PSHUFBrr128 VR128:$src, VR128:$mask)>, Requires<[HasSSSE3]>;
def : Pat<(X86pshufb VR128:$src, (bc_v16i8 (memopv2i64 addr:$mask))),
          (PSHUFBrm128 VR128:$src, addr:$mask)>, Requires<[HasSSSE3]>;
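
// Note: the two patterns above map the X86pshufb DAG node onto the
// intrinsic-based PSHUFB definitions, covering both a register mask and a
// mask folded from memory.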
//===----------------------------------------------------------------------===//
// Non-Instruction Patterns
//===----------------------------------------------------------------------===//

// extload f32 -> f64.  This matches load+fextend because we have a hack in
// the isel (PreprocessForFPConvert) that can introduce loads after dag combine.
// Since these loads aren't folded into the fextend, we have to match it
// explicitly here.
let Predicates = [HasSSE2] in
 def : Pat<(fextend (loadf32 addr:$src)),
           (CVTSS2SDrm addr:$src)>;
let Predicates = [HasSSE2] in {
  def : Pat<(v2i64 (bitconvert (v4i32 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v2i64 (bitconvert (v8i16 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v2i64 (bitconvert (v16i8 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v2i64 (bitconvert (v2f64 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v2i64 (bitconvert (v4f32 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v2i64 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v8i16 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v16i8 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v2f64 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v4f32 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v2i64 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v4i32 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v16i8 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v2f64 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v4f32 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v2i64 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v4i32 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v8i16 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v2f64 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v4f32 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v2i64 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v4i32 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v8i16 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v16i8 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v2f64 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v2i64 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v4i32 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v8i16 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v16i8 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v4f32 VR128:$src))), (v2f64 VR128:$src)>;
}
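
// Note: a bitconvert between two 128-bit vector types keeps the value in the
// same XMM register, so each pattern above selects no instruction at all; it
// only retypes the operand for the DAG.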
// Move scalar to XMM zero-extended
// movd to XMM register zero-extends
let AddedComplexity = 15 in {
// Zeroing a VR128 then do a MOVS{S|D} to the lower bits.
def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64:$src)))),
          (MOVLSD2PDrr (V_SET0), FR64:$src)>, Requires<[HasSSE2]>;
def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32:$src)))),
          (MOVLSS2PSrr (V_SET0), FR32:$src)>, Requires<[HasSSE1]>;
def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
          (MOVLPSrr (V_SET0), VR128:$src)>, Requires<[HasSSE1]>;
def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
          (MOVLPSrr (V_SET0), VR128:$src)>, Requires<[HasSSE1]>;
}
// Splat v2f64 / v2i64
let AddedComplexity = 10 in {
def : Pat<(vector_shuffle (v2f64 VR128:$src), (undef), SSE_splat_lo_mask:$sm),
          (UNPCKLPDrr VR128:$src, VR128:$src)>,   Requires<[HasSSE2]>;
def : Pat<(vector_shuffle (v2f64 VR128:$src), (undef), UNPCKH_shuffle_mask:$sm),
          (UNPCKHPDrr VR128:$src, VR128:$src)>,   Requires<[HasSSE2]>;
def : Pat<(vector_shuffle (v2i64 VR128:$src), (undef), SSE_splat_lo_mask:$sm),
          (PUNPCKLQDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(vector_shuffle (v2i64 VR128:$src), (undef), UNPCKH_shuffle_mask:$sm),
          (PUNPCKHQDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
}
// Special unary SHUFPSrri case.
def : Pat<(v4f32 (vector_shuffle VR128:$src1, (undef),
           SHUFP_unary_shuffle_mask:$sm)),
          (SHUFPSrri VR128:$src1, VR128:$src1, SHUFP_unary_shuffle_mask:$sm)>,
      Requires<[HasSSE1]>;
// Special unary SHUFPDrri case.
def : Pat<(v2f64 (vector_shuffle VR128:$src1, (undef),
           SHUFP_unary_shuffle_mask:$sm)),
          (SHUFPDrri VR128:$src1, VR128:$src1, SHUFP_unary_shuffle_mask:$sm)>,
      Requires<[HasSSE2]>;
// Unary v4f32 shuffle with PSHUF* in order to fold a load.
def : Pat<(vector_shuffle (bc_v4i32 (memopv4f32 addr:$src1)), (undef),
           SHUFP_unary_shuffle_mask:$sm),
          (PSHUFDmi addr:$src1, SHUFP_unary_shuffle_mask:$sm)>,
      Requires<[HasSSE2]>;

// Special binary v4i32 shuffle cases with SHUFPS.
def : Pat<(v4i32 (vector_shuffle VR128:$src1, (v4i32 VR128:$src2),
           PSHUFD_binary_shuffle_mask:$sm)),
          (SHUFPSrri VR128:$src1, VR128:$src2, PSHUFD_binary_shuffle_mask:$sm)>,
      Requires<[HasSSE2]>;
def : Pat<(v4i32 (vector_shuffle VR128:$src1,
           (bc_v4i32 (memopv2i64 addr:$src2)), PSHUFD_binary_shuffle_mask:$sm)),
          (SHUFPSrmi VR128:$src1, addr:$src2, PSHUFD_binary_shuffle_mask:$sm)>,
      Requires<[HasSSE2]>;
// Special binary v2i64 shuffle cases using SHUFPDrri.
def : Pat<(v2i64 (vector_shuffle VR128:$src1, VR128:$src2,
           SHUFP_shuffle_mask:$sm)),
          (SHUFPDrri VR128:$src1, VR128:$src2, SHUFP_shuffle_mask:$sm)>,
      Requires<[HasSSE2]>;
// Special unary SHUFPDrri case.
def : Pat<(v2i64 (vector_shuffle VR128:$src1, (undef),
           SHUFP_unary_shuffle_mask:$sm)),
          (SHUFPDrri VR128:$src1, VR128:$src1, SHUFP_unary_shuffle_mask:$sm)>,
      Requires<[HasSSE2]>;
// vector_shuffle v1, <undef>, <0, 0, 1, 1, ...>
let AddedComplexity = 15 in {
def : Pat<(v4i32 (vector_shuffle VR128:$src, (undef),
           UNPCKL_v_undef_shuffle_mask:$sm)),
          (PSHUFDri VR128:$src, PSHUFD_shuffle_mask:$sm)>,
          Requires<[OptForSpeed, HasSSE2]>;
def : Pat<(v4f32 (vector_shuffle VR128:$src, (undef),
           UNPCKL_v_undef_shuffle_mask:$sm)),
          (PSHUFDri VR128:$src, PSHUFD_shuffle_mask:$sm)>,
          Requires<[OptForSpeed, HasSSE2]>;
}
let AddedComplexity = 10 in {
def : Pat<(v4f32 (vector_shuffle VR128:$src, (undef),
           UNPCKL_v_undef_shuffle_mask)),
          (UNPCKLPSrr VR128:$src, VR128:$src)>,  Requires<[HasSSE1]>;
def : Pat<(v16i8 (vector_shuffle VR128:$src, (undef),
           UNPCKL_v_undef_shuffle_mask)),
          (PUNPCKLBWrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(v8i16 (vector_shuffle VR128:$src, (undef),
           UNPCKL_v_undef_shuffle_mask)),
          (PUNPCKLWDrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(v4i32 (vector_shuffle VR128:$src, (undef),
           UNPCKL_v_undef_shuffle_mask)),
          (PUNPCKLDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
}
// vector_shuffle v1, <undef>, <2, 2, 3, 3, ...>
let AddedComplexity = 15 in {
def : Pat<(v4i32 (vector_shuffle VR128:$src, (undef),
           UNPCKH_v_undef_shuffle_mask:$sm)),
          (PSHUFDri VR128:$src, PSHUFD_shuffle_mask:$sm)>,
          Requires<[OptForSpeed, HasSSE2]>;
def : Pat<(v4f32 (vector_shuffle VR128:$src, (undef),
           UNPCKH_v_undef_shuffle_mask:$sm)),
          (PSHUFDri VR128:$src, PSHUFD_shuffle_mask:$sm)>,
          Requires<[OptForSpeed, HasSSE2]>;
}
let AddedComplexity = 10 in {
def : Pat<(v4f32 (vector_shuffle VR128:$src, (undef),
           UNPCKH_v_undef_shuffle_mask)),
          (UNPCKHPSrr VR128:$src, VR128:$src)>,  Requires<[HasSSE1]>;
def : Pat<(v16i8 (vector_shuffle VR128:$src, (undef),
           UNPCKH_v_undef_shuffle_mask)),
          (PUNPCKHBWrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(v8i16 (vector_shuffle VR128:$src, (undef),
           UNPCKH_v_undef_shuffle_mask)),
          (PUNPCKHWDrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(v4i32 (vector_shuffle VR128:$src, (undef),
           UNPCKH_v_undef_shuffle_mask)),
          (PUNPCKHDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
}
let AddedComplexity = 20 in {
// vector_shuffle v1, v2 <0, 1, 4, 5> using MOVLHPS
def : Pat<(v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
           MOVHP_shuffle_mask)),
          (MOVLHPSrr VR128:$src1, VR128:$src2)>;

// vector_shuffle v1, v2 <6, 7, 2, 3> using MOVHLPS
def : Pat<(v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
           MOVHLPS_shuffle_mask)),
          (MOVHLPSrr VR128:$src1, VR128:$src2)>;

// vector_shuffle v1, undef <2, ?, ?, ?> using MOVHLPS
def : Pat<(v4f32 (vector_shuffle VR128:$src1, (undef),
           MOVHLPS_v_undef_shuffle_mask)),
          (MOVHLPSrr VR128:$src1, VR128:$src1)>;
def : Pat<(v4i32 (vector_shuffle VR128:$src1, (undef),
           MOVHLPS_v_undef_shuffle_mask)),
          (MOVHLPSrr VR128:$src1, VR128:$src1)>;
}
let AddedComplexity = 20 in {
// vector_shuffle v1, (load v2) <4, 5, 2, 3> using MOVLPS
// vector_shuffle v1, (load v2) <0, 1, 4, 5> using MOVHPS
def : Pat<(v4f32 (vector_shuffle VR128:$src1, (load addr:$src2),
           MOVLP_shuffle_mask)),
          (MOVLPSrm VR128:$src1, addr:$src2)>, Requires<[HasSSE1]>;
def : Pat<(v2f64 (vector_shuffle VR128:$src1, (load addr:$src2),
           MOVLP_shuffle_mask)),
          (MOVLPDrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
def : Pat<(v4f32 (vector_shuffle VR128:$src1, (load addr:$src2),
           MOVHP_shuffle_mask)),
          (MOVHPSrm VR128:$src1, addr:$src2)>, Requires<[HasSSE1]>;
def : Pat<(v2f64 (vector_shuffle VR128:$src1, (load addr:$src2),
           MOVHP_shuffle_mask)),
          (MOVHPDrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;

def : Pat<(v4i32 (vector_shuffle VR128:$src1, (load addr:$src2),
           MOVLP_shuffle_mask)),
          (MOVLPSrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
def : Pat<(v2i64 (vector_shuffle VR128:$src1, (load addr:$src2),
           MOVLP_shuffle_mask)),
          (MOVLPDrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
def : Pat<(v4i32 (vector_shuffle VR128:$src1, (load addr:$src2),
           MOVHP_shuffle_mask)),
          (MOVHPSrm VR128:$src1, addr:$src2)>, Requires<[HasSSE1]>;
def : Pat<(v2i64 (vector_shuffle VR128:$src1, (load addr:$src2),
           MOVHP_shuffle_mask)),
          (MOVHPDrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
}
// (store (vector_shuffle (load addr), v2, <4, 5, 2, 3>), addr) using MOVLPS
// (store (vector_shuffle (load addr), v2, <0, 1, 4, 5>), addr) using MOVHPS
def : Pat<(store (v4f32 (vector_shuffle (load addr:$src1), VR128:$src2,
                  MOVLP_shuffle_mask)), addr:$src1),
          (MOVLPSmr addr:$src1, VR128:$src2)>, Requires<[HasSSE1]>;
def : Pat<(store (v2f64 (vector_shuffle (load addr:$src1), VR128:$src2,
                  MOVLP_shuffle_mask)), addr:$src1),
          (MOVLPDmr addr:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
def : Pat<(store (v4f32 (vector_shuffle (load addr:$src1), VR128:$src2,
                  MOVHP_shuffle_mask)), addr:$src1),
          (MOVHPSmr addr:$src1, VR128:$src2)>, Requires<[HasSSE1]>;
def : Pat<(store (v2f64 (vector_shuffle (load addr:$src1), VR128:$src2,
                  MOVHP_shuffle_mask)), addr:$src1),
          (MOVHPDmr addr:$src1, VR128:$src2)>, Requires<[HasSSE2]>;

def : Pat<(store (v4i32 (vector_shuffle
                  (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2,
                  MOVLP_shuffle_mask)), addr:$src1),
          (MOVLPSmr addr:$src1, VR128:$src2)>, Requires<[HasSSE1]>;
def : Pat<(store (v2i64 (vector_shuffle (load addr:$src1), VR128:$src2,
                  MOVLP_shuffle_mask)), addr:$src1),
          (MOVLPDmr addr:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
def : Pat<(store (v4i32 (vector_shuffle
                  (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2,
                  MOVHP_shuffle_mask)), addr:$src1),
          (MOVHPSmr addr:$src1, VR128:$src2)>, Requires<[HasSSE1]>;
def : Pat<(store (v2i64 (vector_shuffle (load addr:$src1), VR128:$src2,
                  MOVHP_shuffle_mask)), addr:$src1),
          (MOVHPDmr addr:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
let AddedComplexity = 15 in {
// Setting the lowest element in the vector.
def : Pat<(v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
           MOVL_shuffle_mask)),
          (MOVLPSrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
def : Pat<(v2i64 (vector_shuffle VR128:$src1, VR128:$src2,
           MOVL_shuffle_mask)),
          (MOVLPDrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;

// vector_shuffle v1, v2 <4, 5, 2, 3> using MOVLPDrr (movsd)
def : Pat<(v4f32 (vector_shuffle VR128:$src1, VR128:$src2,
           MOVLP_shuffle_mask)),
          (MOVLPDrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
def : Pat<(v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
           MOVLP_shuffle_mask)),
          (MOVLPDrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
}
// Set lowest element and zero upper elements.
let AddedComplexity = 15 in
def : Pat<(v2f64 (vector_shuffle immAllZerosV_bc, VR128:$src,
           MOVL_shuffle_mask)),
          (MOVZPQILo2PQIrr VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
          (MOVZPQILo2PQIrr VR128:$src)>, Requires<[HasSSE2]>;
// Some special case pandn patterns.
def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v4i32 immAllOnesV))),
                  VR128:$src2)),
          (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v8i16 immAllOnesV))),
                  VR128:$src2)),
          (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v16i8 immAllOnesV))),
                  VR128:$src2)),
          (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;

def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v4i32 immAllOnesV))),
                  (memop addr:$src2))),
          (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v8i16 immAllOnesV))),
                  (memop addr:$src2))),
          (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v16i8 immAllOnesV))),
                  (memop addr:$src2))),
          (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
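
// Note: PANDN computes (NOT operand1) AND operand2, so a DAG of the form
// (and (xor X, all-ones), Y) maps directly onto it; the all-ones vector is
// written as a bitconvert of each integer element type so the match succeeds
// however the constant was legalized.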
// vector -> vector casts
def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
          (Int_CVTDQ2PSrr VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
          (Int_CVTTPS2DQrr VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(v2f64 (sint_to_fp (v2i32 VR64:$src))),
          (Int_CVTPI2PDrr VR64:$src)>, Requires<[HasSSE2]>;
def : Pat<(v2i32 (fp_to_sint (v2f64 VR128:$src))),
          (Int_CVTTPD2PIrr VR128:$src)>, Requires<[HasSSE2]>;
// Use movaps / movups for SSE integer load / store (one byte shorter).
def : Pat<(alignedloadv4i32 addr:$src),
          (MOVAPSrm addr:$src)>, Requires<[HasSSE1]>;
def : Pat<(loadv4i32 addr:$src),
          (MOVUPSrm addr:$src)>, Requires<[HasSSE1]>;
def : Pat<(alignedloadv2i64 addr:$src),
          (MOVAPSrm addr:$src)>, Requires<[HasSSE2]>;
def : Pat<(loadv2i64 addr:$src),
          (MOVUPSrm addr:$src)>, Requires<[HasSSE2]>;

def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
          (MOVAPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
          (MOVAPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
          (MOVAPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
          (MOVAPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(store (v2i64 VR128:$src), addr:$dst),
          (MOVUPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(store (v4i32 VR128:$src), addr:$dst),
          (MOVUPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(store (v8i16 VR128:$src), addr:$dst),
          (MOVUPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(store (v16i8 VR128:$src), addr:$dst),
          (MOVUPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
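
// Note: MOVAPS/MOVUPS lack the 0x66 operand-size prefix that the MOVDQA/MOVDQU
// encodings carry, which is where the "one byte shorter" saving above comes
// from; the register contents are identical either way.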
//===----------------------------------------------------------------------===//
// SSE4.1 Instructions
//===----------------------------------------------------------------------===//
multiclass sse41_fp_unop_rm<bits<8> opcps, bits<8> opcpd,
                            string OpcodeStr,
                            Intrinsic V4F32Int,
                            Intrinsic V2F64Int> {
  // Intrinsic operation, reg.
  // Vector intrinsic operation, reg
  def PSr_Int : SS4AIi8<opcps, MRMSrcReg,
                    (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
                    !strconcat(OpcodeStr,
                    "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    [(set VR128:$dst, (V4F32Int VR128:$src1, imm:$src2))]>,
                    OpSize;

  // Vector intrinsic operation, mem
  def PSm_Int : SS4AIi8<opcps, MRMSrcMem,
                    (outs VR128:$dst), (ins f128mem:$src1, i32i8imm:$src2),
                    !strconcat(OpcodeStr,
                    "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    [(set VR128:$dst,
                          (V4F32Int (memopv4f32 addr:$src1), imm:$src2))]>,
                    OpSize;

  // Vector intrinsic operation, reg
  def PDr_Int : SS4AIi8<opcpd, MRMSrcReg,
                    (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
                    !strconcat(OpcodeStr,
                    "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    [(set VR128:$dst, (V2F64Int VR128:$src1, imm:$src2))]>,
                    OpSize;

  // Vector intrinsic operation, mem
  def PDm_Int : SS4AIi8<opcpd, MRMSrcMem,
                    (outs VR128:$dst), (ins f128mem:$src1, i32i8imm:$src2),
                    !strconcat(OpcodeStr,
                    "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    [(set VR128:$dst,
                          (V2F64Int (memopv2f64 addr:$src1), imm:$src2))]>,
                    OpSize;
}
let Constraints = "$src1 = $dst" in {
multiclass sse41_fp_binop_rm<bits<8> opcss, bits<8> opcsd,
                             string OpcodeStr,
                             Intrinsic F32Int,
                             Intrinsic F64Int> {
  // Intrinsic operation, reg.
  def SSr_Int : SS4AIi8<opcss, MRMSrcReg,
                    (outs VR128:$dst),
                    (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
                    !strconcat(OpcodeStr,
                    "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                    [(set VR128:$dst,
                          (F32Int VR128:$src1, VR128:$src2, imm:$src3))]>,
                    OpSize;

  // Intrinsic operation, mem.
  def SSm_Int : SS4AIi8<opcss, MRMSrcMem,
                    (outs VR128:$dst),
                    (ins VR128:$src1, ssmem:$src2, i32i8imm:$src3),
                    !strconcat(OpcodeStr,
                    "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                    [(set VR128:$dst,
                          (F32Int VR128:$src1, sse_load_f32:$src2, imm:$src3))]>,
                    OpSize;

  // Intrinsic operation, reg.
  def SDr_Int : SS4AIi8<opcsd, MRMSrcReg,
                    (outs VR128:$dst),
                    (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
                    !strconcat(OpcodeStr,
                    "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                    [(set VR128:$dst,
                          (F64Int VR128:$src1, VR128:$src2, imm:$src3))]>,
                    OpSize;

  // Intrinsic operation, mem.
  def SDm_Int : SS4AIi8<opcsd, MRMSrcMem,
                    (outs VR128:$dst),
                    (ins VR128:$src1, sdmem:$src2, i32i8imm:$src3),
                    !strconcat(OpcodeStr,
                    "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                    [(set VR128:$dst,
                          (F64Int VR128:$src1, sse_load_f64:$src2, imm:$src3))]>,
                    OpSize;
}
}
// FP round - roundss, roundps, roundsd, roundpd
defm ROUND  : sse41_fp_unop_rm<0x08, 0x09, "round",
                               int_x86_sse41_round_ps, int_x86_sse41_round_pd>;
defm ROUND  : sse41_fp_binop_rm<0x0A, 0x0B, "round",
                                int_x86_sse41_round_ss, int_x86_sse41_round_sd>;
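
// Note (sketch of the expansion): the two defms share the "ROUND" prefix but
// should expand to disjoint record names (ROUNDPSr_Int/ROUNDPDr_Int/... from
// the unop class, ROUNDSSr_Int/ROUNDSDr_Int/... from the binop class),
// covering the packed opcodes 0x08/0x09 and the scalar opcodes 0x0A/0x0B.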
// SS41I_unop_rm_int_v16 - SSE 4.1 unary operator whose type is v8i16.
multiclass SS41I_unop_rm_int_v16<bits<8> opc, string OpcodeStr,
                                 Intrinsic IntId128> {
  def rr128 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
                    (ins VR128:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (IntId128 VR128:$src))]>, OpSize;
  def rm128 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
                    (ins i128mem:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst,
                      (IntId128
                       (bitconvert (memopv8i16 addr:$src))))]>, OpSize;
}

defm PHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "phminposuw",
                                         int_x86_sse41_phminposuw>;
/// SS41I_binop_rm_int - Simple SSE 4.1 binary operator
let Constraints = "$src1 = $dst" in {
  multiclass SS41I_binop_rm_int<bits<8> opc, string OpcodeStr,
                                Intrinsic IntId128, bit Commutable = 0> {
    def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
                   (ins VR128:$src1, VR128:$src2),
                   !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                   [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
                   OpSize {
      let isCommutable = Commutable;
    }
    def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
                   (ins VR128:$src1, i128mem:$src2),
                   !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                   [(set VR128:$dst,
                     (IntId128 VR128:$src1,
                      (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
  }
}
defm PCMPEQQ      : SS41I_binop_rm_int<0x29, "pcmpeqq",
                                       int_x86_sse41_pcmpeqq, 1>;
defm PACKUSDW     : SS41I_binop_rm_int<0x2B, "packusdw",
                                       int_x86_sse41_packusdw, 0>;
defm PMINSB       : SS41I_binop_rm_int<0x38, "pminsb",
                                       int_x86_sse41_pminsb, 1>;
defm PMINSD       : SS41I_binop_rm_int<0x39, "pminsd",
                                       int_x86_sse41_pminsd, 1>;
defm PMINUD       : SS41I_binop_rm_int<0x3B, "pminud",
                                       int_x86_sse41_pminud, 1>;
defm PMINUW       : SS41I_binop_rm_int<0x3A, "pminuw",
                                       int_x86_sse41_pminuw, 1>;
defm PMAXSB       : SS41I_binop_rm_int<0x3C, "pmaxsb",
                                       int_x86_sse41_pmaxsb, 1>;
defm PMAXSD       : SS41I_binop_rm_int<0x3D, "pmaxsd",
                                       int_x86_sse41_pmaxsd, 1>;
defm PMAXUD       : SS41I_binop_rm_int<0x3F, "pmaxud",
                                       int_x86_sse41_pmaxud, 1>;
defm PMAXUW       : SS41I_binop_rm_int<0x3E, "pmaxuw",
                                       int_x86_sse41_pmaxuw, 1>;

defm PMULDQ       : SS41I_binop_rm_int<0x28, "pmuldq", int_x86_sse41_pmuldq, 1>;
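
// Note: the trailing "1" sets Commutable, which marks the register form
// isCommutable so the selector may swap the two sources; PACKUSDW keeps the
// default 0 because its operand order is significant.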
def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, VR128:$src2)),
          (PCMPEQQrr VR128:$src1, VR128:$src2)>;
def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, (memop addr:$src2))),
          (PCMPEQQrm VR128:$src1, addr:$src2)>;
/// SS41I_binop_patint - Simple SSE 4.1 binary operator with both a plain
/// SDNode pattern and an intrinsic form.
let Constraints = "$src1 = $dst" in {
  multiclass SS41I_binop_patint<bits<8> opc, string OpcodeStr, ValueType OpVT,
                                SDNode OpNode, Intrinsic IntId128,
                                bit Commutable = 0> {
    def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
                   (ins VR128:$src1, VR128:$src2),
                   !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                   [(set VR128:$dst, (OpNode (OpVT VR128:$src1),
                                             VR128:$src2))]>, OpSize {
      let isCommutable = Commutable;
    }
    def rr_int : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
                   (ins VR128:$src1, VR128:$src2),
                   !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                   [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
                   OpSize {
      let isCommutable = Commutable;
    }
    def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
                   (ins VR128:$src1, i128mem:$src2),
                   !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                   [(set VR128:$dst,
                     (OpNode VR128:$src1, (memop addr:$src2)))]>, OpSize;
    def rm_int : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
                   (ins VR128:$src1, i128mem:$src2),
                   !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                   [(set VR128:$dst,
                     (IntId128 VR128:$src1, (memop addr:$src2)))]>,
                   OpSize;
  }
}

defm PMULLD       : SS41I_binop_patint<0x40, "pmulld", v4i32, mul,
                                       int_x86_sse41_pmulld, 1>;
/// SS41I_binop_rmi_int - SSE 4.1 binary operator with 8-bit immediate
let Constraints = "$src1 = $dst" in {
  multiclass SS41I_binop_rmi_int<bits<8> opc, string OpcodeStr,
                                 Intrinsic IntId128, bit Commutable = 0> {
    def rri : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
                      (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
                      !strconcat(OpcodeStr,
                       "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                      [(set VR128:$dst,
                        (IntId128 VR128:$src1, VR128:$src2, imm:$src3))]>,
                      OpSize {
      let isCommutable = Commutable;
    }
    def rmi : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
                      (ins VR128:$src1, i128mem:$src2, i32i8imm:$src3),
                      !strconcat(OpcodeStr,
                       "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                      [(set VR128:$dst,
                        (IntId128 VR128:$src1,
                         (bitconvert (memopv16i8 addr:$src2)), imm:$src3))]>,
                      OpSize;
  }
}

defm BLENDPS      : SS41I_binop_rmi_int<0x0C, "blendps",
                                        int_x86_sse41_blendps, 0>;
defm BLENDPD      : SS41I_binop_rmi_int<0x0D, "blendpd",
                                        int_x86_sse41_blendpd, 0>;
defm PBLENDW      : SS41I_binop_rmi_int<0x0E, "pblendw",
                                        int_x86_sse41_pblendw, 0>;
defm DPPS         : SS41I_binop_rmi_int<0x40, "dpps",
                                        int_x86_sse41_dpps, 1>;
defm DPPD         : SS41I_binop_rmi_int<0x41, "dppd",
                                        int_x86_sse41_dppd, 1>;
defm MPSADBW      : SS41I_binop_rmi_int<0x42, "mpsadbw",
                                        int_x86_sse41_mpsadbw, 1>;
/// SS41I_ternary_int - SSE 4.1 ternary operator
let Uses = [XMM0], Constraints = "$src1 = $dst" in {
  multiclass SS41I_ternary_int<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
    def rr0 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
                    (ins VR128:$src1, VR128:$src2),
                    !strconcat(OpcodeStr,
                     "\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}"),
                    [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2, XMM0))]>,
                    OpSize;

    def rm0 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
                    (ins VR128:$src1, i128mem:$src2),
                    !strconcat(OpcodeStr,
                     "\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}"),
                    [(set VR128:$dst,
                      (IntId VR128:$src1,
                       (bitconvert (memopv16i8 addr:$src2)), XMM0))]>, OpSize;
  }
}

defm BLENDVPD     : SS41I_ternary_int<0x15, "blendvpd", int_x86_sse41_blendvpd>;
defm BLENDVPS     : SS41I_ternary_int<0x14, "blendvps", int_x86_sse41_blendvps>;
defm PBLENDVB     : SS41I_ternary_int<0x10, "pblendvb", int_x86_sse41_pblendvb>;
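
// Note: the variable-blend forms take their mask implicitly in XMM0 (hence
// "Uses = [XMM0]" above); only the destination and the second source appear
// as explicit operands, and the asm string prints %xmm0 directly.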
multiclass SS41I_binop_rm_int8<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
  def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;

  def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                 [(set VR128:$dst,
                   (IntId (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))]>,
                 OpSize;
}

defm PMOVSXBW   : SS41I_binop_rm_int8<0x20, "pmovsxbw", int_x86_sse41_pmovsxbw>;
defm PMOVSXWD   : SS41I_binop_rm_int8<0x23, "pmovsxwd", int_x86_sse41_pmovsxwd>;
defm PMOVSXDQ   : SS41I_binop_rm_int8<0x25, "pmovsxdq", int_x86_sse41_pmovsxdq>;
defm PMOVZXBW   : SS41I_binop_rm_int8<0x30, "pmovzxbw", int_x86_sse41_pmovzxbw>;
defm PMOVZXWD   : SS41I_binop_rm_int8<0x33, "pmovzxwd", int_x86_sse41_pmovzxwd>;
defm PMOVZXDQ   : SS41I_binop_rm_int8<0x35, "pmovzxdq", int_x86_sse41_pmovzxdq>;
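
// Note: the rm forms defined above fold a 64-bit load; only eight bytes are
// read and then widened, which is why the memory operand is i64mem rather
// than i128mem.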
// Common patterns involving scalar load.
def : Pat<(int_x86_sse41_pmovsxbw (vzmovl_v2i64 addr:$src)),
          (PMOVSXBWrm addr:$src)>, Requires<[HasSSE41]>;
def : Pat<(int_x86_sse41_pmovsxbw (vzload_v2i64 addr:$src)),
          (PMOVSXBWrm addr:$src)>, Requires<[HasSSE41]>;

def : Pat<(int_x86_sse41_pmovsxwd (vzmovl_v2i64 addr:$src)),
          (PMOVSXWDrm addr:$src)>, Requires<[HasSSE41]>;
def : Pat<(int_x86_sse41_pmovsxwd (vzload_v2i64 addr:$src)),
          (PMOVSXWDrm addr:$src)>, Requires<[HasSSE41]>;

def : Pat<(int_x86_sse41_pmovsxdq (vzmovl_v2i64 addr:$src)),
          (PMOVSXDQrm addr:$src)>, Requires<[HasSSE41]>;
def : Pat<(int_x86_sse41_pmovsxdq (vzload_v2i64 addr:$src)),
          (PMOVSXDQrm addr:$src)>, Requires<[HasSSE41]>;

def : Pat<(int_x86_sse41_pmovzxbw (vzmovl_v2i64 addr:$src)),
          (PMOVZXBWrm addr:$src)>, Requires<[HasSSE41]>;
def : Pat<(int_x86_sse41_pmovzxbw (vzload_v2i64 addr:$src)),
          (PMOVZXBWrm addr:$src)>, Requires<[HasSSE41]>;

def : Pat<(int_x86_sse41_pmovzxwd (vzmovl_v2i64 addr:$src)),
          (PMOVZXWDrm addr:$src)>, Requires<[HasSSE41]>;
def : Pat<(int_x86_sse41_pmovzxwd (vzload_v2i64 addr:$src)),
          (PMOVZXWDrm addr:$src)>, Requires<[HasSSE41]>;

def : Pat<(int_x86_sse41_pmovzxdq (vzmovl_v2i64 addr:$src)),
          (PMOVZXDQrm addr:$src)>, Requires<[HasSSE41]>;
def : Pat<(int_x86_sse41_pmovzxdq (vzload_v2i64 addr:$src)),
          (PMOVZXDQrm addr:$src)>, Requires<[HasSSE41]>;
multiclass SS41I_binop_rm_int4<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
  def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;

  def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                 [(set VR128:$dst,
                   (IntId (bitconvert (v4i32 (scalar_to_vector (loadi32 addr:$src))))))]>,
                 OpSize;
}

defm PMOVSXBD   : SS41I_binop_rm_int4<0x21, "pmovsxbd", int_x86_sse41_pmovsxbd>;
defm PMOVSXWQ   : SS41I_binop_rm_int4<0x24, "pmovsxwq", int_x86_sse41_pmovsxwq>;
defm PMOVZXBD   : SS41I_binop_rm_int4<0x31, "pmovzxbd", int_x86_sse41_pmovzxbd>;
defm PMOVZXWQ   : SS41I_binop_rm_int4<0x34, "pmovzxwq", int_x86_sse41_pmovzxwq>;
// Common patterns involving scalar load
def : Pat<(int_x86_sse41_pmovsxbd (vzmovl_v4i32 addr:$src)),
          (PMOVSXBDrm addr:$src)>, Requires<[HasSSE41]>;
def : Pat<(int_x86_sse41_pmovsxwq (vzmovl_v4i32 addr:$src)),
          (PMOVSXWQrm addr:$src)>, Requires<[HasSSE41]>;

def : Pat<(int_x86_sse41_pmovzxbd (vzmovl_v4i32 addr:$src)),
          (PMOVZXBDrm addr:$src)>, Requires<[HasSSE41]>;
def : Pat<(int_x86_sse41_pmovzxwq (vzmovl_v4i32 addr:$src)),
          (PMOVZXWQrm addr:$src)>, Requires<[HasSSE41]>;
multiclass SS41I_binop_rm_int2<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
  def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;

  // Expecting an i16 load, any-extended to an i32 value.
  def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i16mem:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                 [(set VR128:$dst, (IntId (bitconvert
                   (v4i32 (scalar_to_vector (loadi16_anyext addr:$src))))))]>,
                 OpSize;
}

defm PMOVSXBQ   : SS41I_binop_rm_int2<0x22, "pmovsxbq", int_x86_sse41_pmovsxbq>;
defm PMOVZXBQ   : SS41I_binop_rm_int2<0x32, "pmovzxbq", int_x86_sse41_pmovzxbq>;
// Common patterns involving scalar load
def : Pat<(int_x86_sse41_pmovsxbq
            (bitconvert (v4i32 (X86vzmovl
                             (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
          (PMOVSXBQrm addr:$src)>, Requires<[HasSSE41]>;

def : Pat<(int_x86_sse41_pmovzxbq
            (bitconvert (v4i32 (X86vzmovl
                             (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
          (PMOVZXBQrm addr:$src)>, Requires<[HasSSE41]>;
/// SS41I_extract8 - SSE 4.1 extract 8 bits to 32 bit reg or 8 bit mem
multiclass SS41I_extract8<bits<8> opc, string OpcodeStr> {
  def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
                 (ins VR128:$src1, i32i8imm:$src2),
                 !strconcat(OpcodeStr,
                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 [(set GR32:$dst, (X86pextrb (v16i8 VR128:$src1), imm:$src2))]>,
                 OpSize;
  def mr : SS4AIi8<opc, MRMDestMem, (outs),
                 (ins i8mem:$dst, VR128:$src1, i32i8imm:$src2),
                 !strconcat(OpcodeStr,
                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 []>, OpSize;
// There's an AssertZext in the way of writing the store pattern
// (store (i8 (trunc (X86pextrb (v16i8 VR128:$src1), imm:$src2))), addr:$dst)
}

defm PEXTRB      : SS41I_extract8<0x14, "pextrb">;
/// SS41I_extract16 - SSE 4.1 extract 16 bits to memory destination
multiclass SS41I_extract16<bits<8> opc, string OpcodeStr> {
  def mr : SS4AIi8<opc, MRMDestMem, (outs),
                 (ins i16mem:$dst, VR128:$src1, i32i8imm:$src2),
                 !strconcat(OpcodeStr,
                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 []>, OpSize;
// There's an AssertZext in the way of writing the store pattern
// (store (i16 (trunc (X86pextrw (v8i16 VR128:$src1), imm:$src2))), addr:$dst)
}

defm PEXTRW      : SS41I_extract16<0x15, "pextrw">;
/// SS41I_extract32 - SSE 4.1 extract 32 bits to int reg or memory destination
multiclass SS41I_extract32<bits<8> opc, string OpcodeStr> {
  def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
                 (ins VR128:$src1, i32i8imm:$src2),
                 !strconcat(OpcodeStr,
                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 [(set GR32:$dst,
                   (extractelt (v4i32 VR128:$src1), imm:$src2))]>, OpSize;
  def mr : SS4AIi8<opc, MRMDestMem, (outs),
                 (ins i32mem:$dst, VR128:$src1, i32i8imm:$src2),
                 !strconcat(OpcodeStr,
                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 [(store (extractelt (v4i32 VR128:$src1), imm:$src2),
                          addr:$dst)]>, OpSize;
}

defm PEXTRD      : SS41I_extract32<0x16, "pextrd">;
/// SS41I_extractf32 - SSE 4.1 extract 32 bits fp value to int reg or memory
/// destination
multiclass SS41I_extractf32<bits<8> opc, string OpcodeStr> {
  def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
                 (ins VR128:$src1, i32i8imm:$src2),
                 !strconcat(OpcodeStr,
                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 [(set GR32:$dst,
                    (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2))]>,
                 OpSize;
  def mr : SS4AIi8<opc, MRMDestMem, (outs),
                 (ins f32mem:$dst, VR128:$src1, i32i8imm:$src2),
                 !strconcat(OpcodeStr,
                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 [(store (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2),
                          addr:$dst)]>, OpSize;
}

defm EXTRACTPS   : SS41I_extractf32<0x17, "extractps">;

// Also match an EXTRACTPS store when the store is done as f32 instead of i32.
def : Pat<(store (f32 (bitconvert (extractelt (bc_v4i32 (v4f32 VR128:$src1)),
                                              imm:$src2))),
                 addr:$dst),
          (EXTRACTPSmr addr:$dst, VR128:$src1, imm:$src2)>,
          Requires<[HasSSE41]>;
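
// Note: the EXTRACTPSmr pattern above sees the extracted lane as an i32 store;
// when the DAG instead expresses it as an f32 store of a bitcast, this extra
// pattern still folds it into a single EXTRACTPS to memory.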
let Constraints = "$src1 = $dst" in {
  multiclass SS41I_insert8<bits<8> opc, string OpcodeStr> {
    def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
                   (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
                   !strconcat(OpcodeStr,
                    "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                   [(set VR128:$dst,
                     (X86pinsrb VR128:$src1, GR32:$src2, imm:$src3))]>, OpSize;
    def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
                   (ins VR128:$src1, i8mem:$src2, i32i8imm:$src3),
                   !strconcat(OpcodeStr,
                    "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                   [(set VR128:$dst,
                     (X86pinsrb VR128:$src1, (extloadi8 addr:$src2),
                                imm:$src3))]>, OpSize;
  }
}

defm PINSRB      : SS41I_insert8<0x20, "pinsrb">;
let Constraints = "$src1 = $dst" in {
  multiclass SS41I_insert32<bits<8> opc, string OpcodeStr> {
    def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
                   (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
                   !strconcat(OpcodeStr,
                    "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                   [(set VR128:$dst,
                     (v4i32 (insertelt VR128:$src1, GR32:$src2, imm:$src3)))]>,
                   OpSize;
    def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
                   (ins VR128:$src1, i32mem:$src2, i32i8imm:$src3),
                   !strconcat(OpcodeStr,
                    "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                   [(set VR128:$dst,
                     (v4i32 (insertelt VR128:$src1, (loadi32 addr:$src2),
                                       imm:$src3)))]>, OpSize;
  }
}

defm PINSRD      : SS41I_insert32<0x22, "pinsrd">;
let Constraints = "$src1 = $dst" in {
  multiclass SS41I_insertf32<bits<8> opc, string OpcodeStr> {
    def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
                   (ins VR128:$src1, FR32:$src2, i32i8imm:$src3),
                   !strconcat(OpcodeStr,
                    "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                   [(set VR128:$dst,
                     (X86insrtps VR128:$src1, FR32:$src2, imm:$src3))]>, OpSize;
    def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
                   (ins VR128:$src1, f32mem:$src2, i32i8imm:$src3),
                   !strconcat(OpcodeStr,
                    "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                   [(set VR128:$dst,
                     (X86insrtps VR128:$src1, (loadf32 addr:$src2),
                                 imm:$src3))]>, OpSize;
  }
}

defm INSERTPS    : SS41I_insertf32<0x21, "insertps">;
let Defs = [EFLAGS] in {
def PTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
                    "ptest \t{$src2, $src1|$src1, $src2}", []>, OpSize;
def PTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, i128mem:$src2),
                    "ptest \t{$src2, $src1|$src1, $src2}", []>, OpSize;
}

def MOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                       "movntdqa\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>;
/// SS42I_binop_rm_int - Simple SSE 4.2 binary operator
let Constraints = "$src1 = $dst" in {
  multiclass SS42I_binop_rm_int<bits<8> opc, string OpcodeStr,
                                Intrinsic IntId128, bit Commutable = 0> {
    def rr : SS428I<opc, MRMSrcReg, (outs VR128:$dst),
                    (ins VR128:$src1, VR128:$src2),
                    !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                    [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
                    OpSize {
      let isCommutable = Commutable;
    }
    def rm : SS428I<opc, MRMSrcMem, (outs VR128:$dst),
                    (ins VR128:$src1, i128mem:$src2),
                    !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                    [(set VR128:$dst,
                      (IntId128 VR128:$src1,
                       (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
  }
}

defm PCMPGTQ      : SS42I_binop_rm_int<0x37, "pcmpgtq", int_x86_sse42_pcmpgtq>;
def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, VR128:$src2)),
          (PCMPGTQrr VR128:$src1, VR128:$src2)>;
def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, (memop addr:$src2))),
          (PCMPGTQrm VR128:$src1, addr:$src2)>;