//====- X86InstrSSE.td - Describe the X86 Instruction Set --*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by Evan Cheng and is distributed under the University
// of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the X86 SSE instruction set, defining the instructions,
// and properties of the instructions which are needed for code generation,
// machine code emission, and analysis.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// SSE specific DAG Nodes.
//===----------------------------------------------------------------------===//

def SDTX86FPShiftOp : SDTypeProfile<1, 2, [ SDTCisSameAs<0, 1>,
                                            SDTCisFP<0>, SDTCisInt<2> ]>;

def X86fmin   : SDNode<"X86ISD::FMIN",   SDTFPBinOp>;
def X86fmax   : SDNode<"X86ISD::FMAX",   SDTFPBinOp>;
def X86fand   : SDNode<"X86ISD::FAND",   SDTFPBinOp,
                       [SDNPCommutative, SDNPAssociative]>;
def X86for    : SDNode<"X86ISD::FOR",    SDTFPBinOp,
                       [SDNPCommutative, SDNPAssociative]>;
def X86fxor   : SDNode<"X86ISD::FXOR",   SDTFPBinOp,
                       [SDNPCommutative, SDNPAssociative]>;
def X86frsqrt : SDNode<"X86ISD::FRSQRT", SDTFPUnaryOp>;
def X86frcp   : SDNode<"X86ISD::FRCP",   SDTFPUnaryOp>;
def X86fsrl   : SDNode<"X86ISD::FSRL",   SDTX86FPShiftOp>;
def X86comi   : SDNode<"X86ISD::COMI",   SDTX86CmpTest>;
def X86ucomi  : SDNode<"X86ISD::UCOMI",  SDTX86CmpTest>;
def X86s2vec  : SDNode<"X86ISD::S2VEC",  SDTypeProfile<1, 1, []>, []>;
def X86pextrw : SDNode<"X86ISD::PEXTRW", SDTypeProfile<1, 2, []>, []>;
def X86pinsrw : SDNode<"X86ISD::PINSRW", SDTypeProfile<1, 3, []>, []>;

//===----------------------------------------------------------------------===//
// SSE 'Special' Instructions
//===----------------------------------------------------------------------===//
def IMPLICIT_DEF_VR128 : I<0, Pseudo, (outs VR128:$dst), (ins),
                           "#IMPLICIT_DEF $dst",
                           [(set VR128:$dst, (v4f32 (undef)))]>,
                         Requires<[HasSSE1]>;
def IMPLICIT_DEF_FR32  : I<0, Pseudo, (outs FR32:$dst), (ins),
                           "#IMPLICIT_DEF $dst",
                           [(set FR32:$dst, (undef))]>, Requires<[HasSSE1]>;
def IMPLICIT_DEF_FR64  : I<0, Pseudo, (outs FR64:$dst), (ins),
                           "#IMPLICIT_DEF $dst",
                           [(set FR64:$dst, (undef))]>, Requires<[HasSSE2]>;
//===----------------------------------------------------------------------===//
// SSE Complex Patterns
//===----------------------------------------------------------------------===//

// These are 'extloads' from a scalar to the low element of a vector, zeroing
// the top elements. These are used for the SSE 'ss' and 'sd' instruction
// forms.
def sse_load_f32 : ComplexPattern<v4f32, 4, "SelectScalarSSELoad", [],
                                  [SDNPHasChain]>;
def sse_load_f64 : ComplexPattern<v2f64, 4, "SelectScalarSSELoad", [],
                                  [SDNPHasChain]>;
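// Illustrative use (a sketch): in the intrinsic instruction patterns below
// (see e.g. the SSrm_Int definitions), an operand pair such as
//   (ins VR128:$src1, ssmem:$src2) ... (F32Int VR128:$src1, sse_load_f32:$src2)
// lets the selector fold such a scalar extload directly into the 'ss'
// instruction's memory operand.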
def ssmem : Operand<v4f32> {
  let PrintMethod = "printf32mem";
  let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc, i32imm);
}
def sdmem : Operand<v2f64> {
  let PrintMethod = "printf64mem";
  let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc, i32imm);
}
//===----------------------------------------------------------------------===//
// SSE pattern fragments
//===----------------------------------------------------------------------===//

def loadv4f32 : PatFrag<(ops node:$ptr), (v4f32 (load node:$ptr))>;
def loadv2f64 : PatFrag<(ops node:$ptr), (v2f64 (load node:$ptr))>;
def loadv4i32 : PatFrag<(ops node:$ptr), (v4i32 (load node:$ptr))>;
def loadv2i64 : PatFrag<(ops node:$ptr), (v2i64 (load node:$ptr))>;
// Like 'store', but always requires vector alignment.
def alignedstore : PatFrag<(ops node:$val, node:$ptr),
                           (st node:$val, node:$ptr), [{
  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
    return !ST->isTruncatingStore() &&
           ST->getAddressingMode() == ISD::UNINDEXED &&
           ST->getAlignment() >= 16;
  return false;
}]>;

// Like 'load', but always requires vector alignment.
def alignedload : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
    return LD->getExtensionType() == ISD::NON_EXTLOAD &&
           LD->getAddressingMode() == ISD::UNINDEXED &&
           LD->getAlignment() >= 16;
  return false;
}]>;
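// For example, the aligned MOVAPS/MOVAPD moves below use alignedstore and
// alignedloadv4f32/alignedloadv2f64, while the unaligned MOVUPS/MOVUPD moves
// use the plain 'store' and loadv4f32/loadv2f64 fragments.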
def alignedloadfsf32 : PatFrag<(ops node:$ptr), (f32 (alignedload node:$ptr))>;
def alignedloadfsf64 : PatFrag<(ops node:$ptr), (f64 (alignedload node:$ptr))>;
def alignedloadv4f32 : PatFrag<(ops node:$ptr), (v4f32 (alignedload node:$ptr))>;
def alignedloadv2f64 : PatFrag<(ops node:$ptr), (v2f64 (alignedload node:$ptr))>;
def alignedloadv4i32 : PatFrag<(ops node:$ptr), (v4i32 (alignedload node:$ptr))>;
def alignedloadv2i64 : PatFrag<(ops node:$ptr), (v2i64 (alignedload node:$ptr))>;
// Like 'load', but uses special alignment checks suitable for use in
// memory operands in most SSE instructions, which are required to
// be naturally aligned on some targets but not on others.
// FIXME: Actually implement support for targets that don't require the
// alignment. This probably wants a subtarget predicate.
def memop : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
    return LD->getExtensionType() == ISD::NON_EXTLOAD &&
           LD->getAddressingMode() == ISD::UNINDEXED &&
           LD->getAlignment() >= 16;
  return false;
}]>;
def memopfsf32 : PatFrag<(ops node:$ptr), (f32   (memop node:$ptr))>;
def memopfsf64 : PatFrag<(ops node:$ptr), (f64   (memop node:$ptr))>;
def memopv4f32 : PatFrag<(ops node:$ptr), (v4f32 (memop node:$ptr))>;
def memopv2f64 : PatFrag<(ops node:$ptr), (v2f64 (memop node:$ptr))>;
def memopv4i32 : PatFrag<(ops node:$ptr), (v4i32 (memop node:$ptr))>;
def memopv2i64 : PatFrag<(ops node:$ptr), (v2i64 (memop node:$ptr))>;
// SSSE3 uses MMX registers for some instructions. They aren't aligned on a
// 16-byte boundary.
def memop64 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
    return LD->getExtensionType() == ISD::NON_EXTLOAD &&
           LD->getAddressingMode() == ISD::UNINDEXED &&
           LD->getAlignment() >= 8;
  return false;
}]>;
def memopv8i8  : PatFrag<(ops node:$ptr), (v8i8  (memop64 node:$ptr))>;
def memopv16i8 : PatFrag<(ops node:$ptr), (v16i8 (memop64 node:$ptr))>;
def memopv4i16 : PatFrag<(ops node:$ptr), (v4i16 (memop64 node:$ptr))>;
def memopv8i16 : PatFrag<(ops node:$ptr), (v8i16 (memop64 node:$ptr))>;
def memopv2i32 : PatFrag<(ops node:$ptr), (v2i32 (memop64 node:$ptr))>;

def bc_v4f32 : PatFrag<(ops node:$in), (v4f32 (bitconvert node:$in))>;
def bc_v2f64 : PatFrag<(ops node:$in), (v2f64 (bitconvert node:$in))>;
def bc_v16i8 : PatFrag<(ops node:$in), (v16i8 (bitconvert node:$in))>;
def bc_v8i16 : PatFrag<(ops node:$in), (v8i16 (bitconvert node:$in))>;
def bc_v4i32 : PatFrag<(ops node:$in), (v4i32 (bitconvert node:$in))>;
def bc_v2i64 : PatFrag<(ops node:$in), (v2i64 (bitconvert node:$in))>;
def fp32imm0 : PatLeaf<(f32 fpimm), [{
  return N->isExactlyValue(+0.0);
}]>;

def PSxLDQ_imm : SDNodeXForm<imm, [{
  // Transformation function: imm >> 3
  return getI32Imm(N->getValue() >> 3);
}]>;
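// Worked example (assuming the DAG-level immediate counts bits): a 32-bit
// shift amount becomes 32 >> 3 == 4, the byte count that the PS{L,R}LDQ
// encodings expect.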
// SHUFFLE_get_shuf_imm xform function: convert vector_shuffle mask to PSHUF*,
// SHUFP* etc. imm.
def SHUFFLE_get_shuf_imm : SDNodeXForm<build_vector, [{
  return getI8Imm(X86::getShuffleSHUFImmediate(N));
}]>;

// SHUFFLE_get_pshufhw_imm xform function: convert vector_shuffle mask to
// PSHUFHW imm.
def SHUFFLE_get_pshufhw_imm : SDNodeXForm<build_vector, [{
  return getI8Imm(X86::getShufflePSHUFHWImmediate(N));
}]>;

// SHUFFLE_get_pshuflw_imm xform function: convert vector_shuffle mask to
// PSHUFLW imm.
def SHUFFLE_get_pshuflw_imm : SDNodeXForm<build_vector, [{
  return getI8Imm(X86::getShufflePSHUFLWImmediate(N));
}]>;
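// A worked example of the immediate encoding these xforms produce (assuming
// the usual two bits per destination element, low element in the low bits):
// the shuffle mask <3,2,1,0> packs as 0b00011011 == 0x1B, so "pshufd $0x1b"
// reverses the four dwords.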
def SSE_splat_mask : PatLeaf<(build_vector), [{
  return X86::isSplatMask(N);
}], SHUFFLE_get_shuf_imm>;

def SSE_splat_lo_mask : PatLeaf<(build_vector), [{
  return X86::isSplatLoMask(N);
}], SHUFFLE_get_pshuflw_imm>;

def MOVHLPS_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isMOVHLPSMask(N);
}]>;

def MOVHLPS_v_undef_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isMOVHLPS_v_undef_Mask(N);
}]>;

def MOVHP_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isMOVHPMask(N);
}]>;

def MOVLP_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isMOVLPMask(N);
}]>;

def MOVL_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isMOVLMask(N);
}]>;

def MOVSHDUP_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isMOVSHDUPMask(N);
}]>;

def MOVSLDUP_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isMOVSLDUPMask(N);
}]>;

def UNPCKL_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isUNPCKLMask(N);
}]>;

def UNPCKH_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isUNPCKHMask(N);
}]>;

def UNPCKL_v_undef_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isUNPCKL_v_undef_Mask(N);
}]>;

def UNPCKH_v_undef_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isUNPCKH_v_undef_Mask(N);
}]>;

def PSHUFD_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isPSHUFDMask(N);
}], SHUFFLE_get_shuf_imm>;

def PSHUFHW_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isPSHUFHWMask(N);
}], SHUFFLE_get_pshufhw_imm>;

def PSHUFLW_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isPSHUFLWMask(N);
}], SHUFFLE_get_pshuflw_imm>;

def SHUFP_unary_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isPSHUFDMask(N);
}], SHUFFLE_get_shuf_imm>;

def SHUFP_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isSHUFPMask(N);
}], SHUFFLE_get_shuf_imm>;

def PSHUFD_binary_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isSHUFPMask(N);
}], SHUFFLE_get_shuf_imm>;
//===----------------------------------------------------------------------===//
// SSE scalar FP Instructions
//===----------------------------------------------------------------------===//

// CMOV* - Used to implement the SSE SELECT DAG operation. These are expanded
// by the scheduler into a branch sequence.
let Uses = [EFLAGS], usesCustomDAGSchedInserter = 1 in {
  def CMOV_FR32 : I<0, Pseudo,
                    (outs FR32:$dst), (ins FR32:$t, FR32:$f, i8imm:$cond),
                    "#CMOV_FR32 PSEUDO!",
                    [(set FR32:$dst, (X86cmov FR32:$t, FR32:$f, imm:$cond,
                                              EFLAGS))]>;
  def CMOV_FR64 : I<0, Pseudo,
                    (outs FR64:$dst), (ins FR64:$t, FR64:$f, i8imm:$cond),
                    "#CMOV_FR64 PSEUDO!",
                    [(set FR64:$dst, (X86cmov FR64:$t, FR64:$f, imm:$cond,
                                              EFLAGS))]>;
  def CMOV_V4F32 : I<0, Pseudo,
                    (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
                    "#CMOV_V4F32 PSEUDO!",
                    [(set VR128:$dst,
                      (v4f32 (X86cmov VR128:$t, VR128:$f, imm:$cond,
                                      EFLAGS)))]>;
  def CMOV_V2F64 : I<0, Pseudo,
                    (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
                    "#CMOV_V2F64 PSEUDO!",
                    [(set VR128:$dst,
                      (v2f64 (X86cmov VR128:$t, VR128:$f, imm:$cond,
                                      EFLAGS)))]>;
  def CMOV_V2I64 : I<0, Pseudo,
                    (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
                    "#CMOV_V2I64 PSEUDO!",
                    [(set VR128:$dst,
                      (v2i64 (X86cmov VR128:$t, VR128:$f, imm:$cond,
                                      EFLAGS)))]>;
}
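// A rough sketch of the custom expansion (illustrative, not literal output):
// a pseudo such as
//   %dst = CMOV_FR32 %t, %f, cond
// is rewritten into a branch diamond, with a conditional jump keyed on
// EFLAGS and %dst becoming a PHI of %t and %f at the join block.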
//===----------------------------------------------------------------------===//
// SSE1 Instructions
//===----------------------------------------------------------------------===//

// Move Instructions
def MOVSSrr : SSI<0x10, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
                  "movss\t{$src, $dst|$dst, $src}", []>;
let isLoad = 1, isReMaterializable = 1 in
def MOVSSrm : SSI<0x10, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
                  "movss\t{$src, $dst|$dst, $src}",
                  [(set FR32:$dst, (loadf32 addr:$src))]>;
def MOVSSmr : SSI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
                  "movss\t{$src, $dst|$dst, $src}",
                  [(store FR32:$src, addr:$dst)]>;
// Conversion instructions
def CVTTSS2SIrr : SSI<0x2C, MRMSrcReg, (outs GR32:$dst), (ins FR32:$src),
                      "cvttss2si\t{$src, $dst|$dst, $src}",
                      [(set GR32:$dst, (fp_to_sint FR32:$src))]>;
def CVTTSS2SIrm : SSI<0x2C, MRMSrcMem, (outs GR32:$dst), (ins f32mem:$src),
                      "cvttss2si\t{$src, $dst|$dst, $src}",
                      [(set GR32:$dst, (fp_to_sint (loadf32 addr:$src)))]>;
def CVTSI2SSrr  : SSI<0x2A, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
                      "cvtsi2ss\t{$src, $dst|$dst, $src}",
                      [(set FR32:$dst, (sint_to_fp GR32:$src))]>;
def CVTSI2SSrm  : SSI<0x2A, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
                      "cvtsi2ss\t{$src, $dst|$dst, $src}",
                      [(set FR32:$dst, (sint_to_fp (loadi32 addr:$src)))]>;

// Match intrinsics which expect XMM operand(s).
def Int_CVTSS2SIrr : SSI<0x2D, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
                         "cvtss2si\t{$src, $dst|$dst, $src}",
                         [(set GR32:$dst, (int_x86_sse_cvtss2si VR128:$src))]>;
def Int_CVTSS2SIrm : SSI<0x2D, MRMSrcMem, (outs GR32:$dst), (ins f32mem:$src),
                         "cvtss2si\t{$src, $dst|$dst, $src}",
                         [(set GR32:$dst, (int_x86_sse_cvtss2si
                                           (load addr:$src)))]>;
// Match intrinsics which expect MM and XMM operand(s).
def Int_CVTPS2PIrr : PSI<0x2D, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
                         "cvtps2pi\t{$src, $dst|$dst, $src}",
                         [(set VR64:$dst, (int_x86_sse_cvtps2pi VR128:$src))]>;
def Int_CVTPS2PIrm : PSI<0x2D, MRMSrcMem, (outs VR64:$dst), (ins f64mem:$src),
                         "cvtps2pi\t{$src, $dst|$dst, $src}",
                         [(set VR64:$dst, (int_x86_sse_cvtps2pi
                                           (load addr:$src)))]>;
def Int_CVTTPS2PIrr: PSI<0x2C, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
                         "cvttps2pi\t{$src, $dst|$dst, $src}",
                         [(set VR64:$dst, (int_x86_sse_cvttps2pi VR128:$src))]>;
def Int_CVTTPS2PIrm: PSI<0x2C, MRMSrcMem, (outs VR64:$dst), (ins f64mem:$src),
                         "cvttps2pi\t{$src, $dst|$dst, $src}",
                         [(set VR64:$dst, (int_x86_sse_cvttps2pi
                                           (load addr:$src)))]>;
let isTwoAddress = 1 in {
  def Int_CVTPI2PSrr : PSI<0x2A, MRMSrcReg,
                           (outs VR128:$dst), (ins VR128:$src1, VR64:$src2),
                           "cvtpi2ps\t{$src2, $dst|$dst, $src2}",
                           [(set VR128:$dst, (int_x86_sse_cvtpi2ps VR128:$src1,
                                              VR64:$src2))]>;
  def Int_CVTPI2PSrm : PSI<0x2A, MRMSrcMem,
                           (outs VR128:$dst), (ins VR128:$src1, i64mem:$src2),
                           "cvtpi2ps\t{$src2, $dst|$dst, $src2}",
                           [(set VR128:$dst, (int_x86_sse_cvtpi2ps VR128:$src1,
                                              (load addr:$src2)))]>;
}
// Aliases for intrinsics
def Int_CVTTSS2SIrr : SSI<0x2C, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
                          "cvttss2si\t{$src, $dst|$dst, $src}",
                          [(set GR32:$dst,
                            (int_x86_sse_cvttss2si VR128:$src))]>;
def Int_CVTTSS2SIrm : SSI<0x2C, MRMSrcMem, (outs GR32:$dst), (ins f32mem:$src),
                          "cvttss2si\t{$src, $dst|$dst, $src}",
                          [(set GR32:$dst,
                            (int_x86_sse_cvttss2si (load addr:$src)))]>;

let isTwoAddress = 1 in {
  def Int_CVTSI2SSrr : SSI<0x2A, MRMSrcReg,
                           (outs VR128:$dst), (ins VR128:$src1, GR32:$src2),
                           "cvtsi2ss\t{$src2, $dst|$dst, $src2}",
                           [(set VR128:$dst, (int_x86_sse_cvtsi2ss VR128:$src1,
                                              GR32:$src2))]>;
  def Int_CVTSI2SSrm : SSI<0x2A, MRMSrcMem,
                           (outs VR128:$dst), (ins VR128:$src1, i32mem:$src2),
                           "cvtsi2ss\t{$src2, $dst|$dst, $src2}",
                           [(set VR128:$dst, (int_x86_sse_cvtsi2ss VR128:$src1,
                                              (loadi32 addr:$src2)))]>;
}
// Comparison instructions
let isTwoAddress = 1 in {
  def CMPSSrr : SSI<0xC2, MRMSrcReg,
                    (outs FR32:$dst), (ins FR32:$src1, FR32:$src, SSECC:$cc),
                    "cmp${cc}ss\t{$src, $dst|$dst, $src}", []>;
  def CMPSSrm : SSI<0xC2, MRMSrcMem,
                    (outs FR32:$dst), (ins FR32:$src1, f32mem:$src, SSECC:$cc),
                    "cmp${cc}ss\t{$src, $dst|$dst, $src}", []>;
}

let Defs = [EFLAGS] in {
def UCOMISSrr: PSI<0x2E, MRMSrcReg, (outs), (ins FR32:$src1, FR32:$src2),
                   "ucomiss\t{$src2, $src1|$src1, $src2}",
                   [(X86cmp FR32:$src1, FR32:$src2), (implicit EFLAGS)]>;
def UCOMISSrm: PSI<0x2E, MRMSrcMem, (outs), (ins FR32:$src1, f32mem:$src2),
                   "ucomiss\t{$src2, $src1|$src1, $src2}",
                   [(X86cmp FR32:$src1, (loadf32 addr:$src2)),
                    (implicit EFLAGS)]>;
} // Defs = [EFLAGS]
// Aliases to match intrinsics which expect XMM operand(s).
let isTwoAddress = 1 in {
  def Int_CMPSSrr : SSI<0xC2, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1, VR128:$src, SSECC:$cc),
                        "cmp${cc}ss\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse_cmp_ss VR128:$src1,
                                           VR128:$src, imm:$cc))]>;
  def Int_CMPSSrm : SSI<0xC2, MRMSrcMem,
                        (outs VR128:$dst), (ins VR128:$src1, f32mem:$src, SSECC:$cc),
                        "cmp${cc}ss\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse_cmp_ss VR128:$src1,
                                           (load addr:$src), imm:$cc))]>;
}

let Defs = [EFLAGS] in {
def Int_UCOMISSrr: PSI<0x2E, MRMSrcReg, (outs),
                       (ins VR128:$src1, VR128:$src2),
                       "ucomiss\t{$src2, $src1|$src1, $src2}",
                       [(X86ucomi (v4f32 VR128:$src1), VR128:$src2),
                        (implicit EFLAGS)]>;
def Int_UCOMISSrm: PSI<0x2E, MRMSrcMem, (outs),
                       (ins VR128:$src1, f128mem:$src2),
                       "ucomiss\t{$src2, $src1|$src1, $src2}",
                       [(X86ucomi (v4f32 VR128:$src1), (load addr:$src2)),
                        (implicit EFLAGS)]>;

def Int_COMISSrr: PSI<0x2F, MRMSrcReg, (outs),
                      (ins VR128:$src1, VR128:$src2),
                      "comiss\t{$src2, $src1|$src1, $src2}",
                      [(X86comi (v4f32 VR128:$src1), VR128:$src2),
                       (implicit EFLAGS)]>;
def Int_COMISSrm: PSI<0x2F, MRMSrcMem, (outs),
                      (ins VR128:$src1, f128mem:$src2),
                      "comiss\t{$src2, $src1|$src1, $src2}",
                      [(X86comi (v4f32 VR128:$src1), (load addr:$src2)),
                       (implicit EFLAGS)]>;
} // Defs = [EFLAGS]
// Aliases of packed SSE1 instructions for scalar use. These all have names that
// start with 'Fs'.

// Alias instructions that map fld0 to pxor for sse.
let isReMaterializable = 1 in
def FsFLD0SS : I<0xEF, MRMInitReg, (outs FR32:$dst), (ins),
                 "pxor\t$dst, $dst", [(set FR32:$dst, fp32imm0)]>,
               Requires<[HasSSE1]>, TB, OpSize;

// Alias instruction to do FR32 reg-to-reg copy using movaps. Upper bits are
// disregarded.
def FsMOVAPSrr : PSI<0x28, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
                     "movaps\t{$src, $dst|$dst, $src}", []>;

// Alias instruction to load FR32 from f128mem using movaps. Upper bits are
// disregarded.
def FsMOVAPSrm : PSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
                     "movaps\t{$src, $dst|$dst, $src}",
                     [(set FR32:$dst, (alignedloadfsf32 addr:$src))]>;
// Alias bitwise logical operations using SSE logical ops on packed FP values.
let isTwoAddress = 1 in {
let isCommutable = 1 in {
  def FsANDPSrr : PSI<0x54, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
                      "andps\t{$src2, $dst|$dst, $src2}",
                      [(set FR32:$dst, (X86fand FR32:$src1, FR32:$src2))]>;
  def FsORPSrr  : PSI<0x56, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
                      "orps\t{$src2, $dst|$dst, $src2}",
                      [(set FR32:$dst, (X86for FR32:$src1, FR32:$src2))]>;
  def FsXORPSrr : PSI<0x57, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
                      "xorps\t{$src2, $dst|$dst, $src2}",
                      [(set FR32:$dst, (X86fxor FR32:$src1, FR32:$src2))]>;
}

  def FsANDPSrm : PSI<0x54, MRMSrcMem, (outs FR32:$dst), (ins FR32:$src1, f128mem:$src2),
                      "andps\t{$src2, $dst|$dst, $src2}",
                      [(set FR32:$dst, (X86fand FR32:$src1,
                                        (memopfsf32 addr:$src2)))]>;
  def FsORPSrm  : PSI<0x56, MRMSrcMem, (outs FR32:$dst), (ins FR32:$src1, f128mem:$src2),
                      "orps\t{$src2, $dst|$dst, $src2}",
                      [(set FR32:$dst, (X86for FR32:$src1,
                                        (memopfsf32 addr:$src2)))]>;
  def FsXORPSrm : PSI<0x57, MRMSrcMem, (outs FR32:$dst), (ins FR32:$src1, f128mem:$src2),
                      "xorps\t{$src2, $dst|$dst, $src2}",
                      [(set FR32:$dst, (X86fxor FR32:$src1,
                                        (memopfsf32 addr:$src2)))]>;

  def FsANDNPSrr : PSI<0x55, MRMSrcReg,
                       (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
                       "andnps\t{$src2, $dst|$dst, $src2}", []>;
  def FsANDNPSrm : PSI<0x55, MRMSrcMem,
                       (outs FR32:$dst), (ins FR32:$src1, f128mem:$src2),
                       "andnps\t{$src2, $dst|$dst, $src2}", []>;
}
/// basic_sse1_fp_binop_rm - SSE1 binops come in both scalar and vector forms.
///
/// In addition, we also have a special variant of the scalar form here to
/// represent the associated intrinsic operation. This form is unlike the
/// plain scalar form, in that it takes an entire vector (instead of a scalar)
/// and leaves the top elements undefined.
///
/// These three forms can each be reg+reg or reg+mem, so there are a total of
/// six "instructions".
///
let isTwoAddress = 1 in {
multiclass basic_sse1_fp_binop_rm<bits<8> opc, string OpcodeStr,
                                  SDNode OpNode, Intrinsic F32Int,
                                  bit Commutable = 0> {
  // Scalar operation, reg+reg.
  def SSrr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
                 !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
                 [(set FR32:$dst, (OpNode FR32:$src1, FR32:$src2))]> {
    let isCommutable = Commutable;
  }

  // Scalar operation, reg+mem.
  def SSrm : SSI<opc, MRMSrcMem, (outs FR32:$dst), (ins FR32:$src1, f32mem:$src2),
                 !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
                 [(set FR32:$dst, (OpNode FR32:$src1, (load addr:$src2)))]>;

  // Vector operation, reg+reg.
  def PSrr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                 !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
                 [(set VR128:$dst, (v4f32 (OpNode VR128:$src1, VR128:$src2)))]> {
    let isCommutable = Commutable;
  }

  // Vector operation, reg+mem.
  def PSrm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                 !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
                 [(set VR128:$dst, (OpNode VR128:$src1, (memopv4f32 addr:$src2)))]>;

  // Intrinsic operation, reg+reg.
  def SSrr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                     !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (F32Int VR128:$src1, VR128:$src2))]> {
    let isCommutable = Commutable;
  }

  // Intrinsic operation, reg+mem.
  def SSrm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2),
                     !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (F32Int VR128:$src1,
                                               sse_load_f32:$src2))]>;
}
}
// Arithmetic instructions
defm ADD : basic_sse1_fp_binop_rm<0x58, "add", fadd, int_x86_sse_add_ss, 1>;
defm MUL : basic_sse1_fp_binop_rm<0x59, "mul", fmul, int_x86_sse_mul_ss, 1>;
defm SUB : basic_sse1_fp_binop_rm<0x5C, "sub", fsub, int_x86_sse_sub_ss>;
defm DIV : basic_sse1_fp_binop_rm<0x5E, "div", fdiv, int_x86_sse_div_ss>;
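// As an illustration of the expansion: 'defm ADD' above concatenates the
// member suffixes onto "ADD", yielding the six instructions ADDSSrr, ADDSSrm,
// ADDPSrr, ADDPSrm, ADDSSrr_Int and ADDSSrm_Int.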
/// sse1_fp_binop_rm - Other SSE1 binops
///
/// This multiclass is like basic_sse1_fp_binop_rm, with the addition of
/// instructions for a full-vector intrinsic form. Operations that map
/// onto C operators don't use this form since they just use the plain
/// vector form instead of having a separate vector intrinsic form.
///
/// This provides a total of eight "instructions".
///
let isTwoAddress = 1 in {
multiclass sse1_fp_binop_rm<bits<8> opc, string OpcodeStr,
                            SDNode OpNode,
                            Intrinsic F32Int,
                            Intrinsic V4F32Int,
                            bit Commutable = 0> {
  // Scalar operation, reg+reg.
  def SSrr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
                 !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
                 [(set FR32:$dst, (OpNode FR32:$src1, FR32:$src2))]> {
    let isCommutable = Commutable;
  }

  // Scalar operation, reg+mem.
  def SSrm : SSI<opc, MRMSrcMem, (outs FR32:$dst), (ins FR32:$src1, f32mem:$src2),
                 !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
                 [(set FR32:$dst, (OpNode FR32:$src1, (load addr:$src2)))]>;

  // Vector operation, reg+reg.
  def PSrr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                 !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
                 [(set VR128:$dst, (v4f32 (OpNode VR128:$src1, VR128:$src2)))]> {
    let isCommutable = Commutable;
  }

  // Vector operation, reg+mem.
  def PSrm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                 !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
                 [(set VR128:$dst, (OpNode VR128:$src1, (memopv4f32 addr:$src2)))]>;

  // Intrinsic operation, reg+reg.
  def SSrr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                     !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (F32Int VR128:$src1, VR128:$src2))]> {
    let isCommutable = Commutable;
  }

  // Intrinsic operation, reg+mem.
  def SSrm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2),
                     !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (F32Int VR128:$src1,
                                               sse_load_f32:$src2))]>;

  // Vector intrinsic operation, reg+reg.
  def PSrr_Int : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                     !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (V4F32Int VR128:$src1, VR128:$src2))]> {
    let isCommutable = Commutable;
  }

  // Vector intrinsic operation, reg+mem.
  def PSrm_Int : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                     !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (V4F32Int VR128:$src1, (load addr:$src2)))]>;
}
}
defm MAX : sse1_fp_binop_rm<0x5F, "max", X86fmax,
                            int_x86_sse_max_ss, int_x86_sse_max_ps>;
defm MIN : sse1_fp_binop_rm<0x5D, "min", X86fmin,
                            int_x86_sse_min_ss, int_x86_sse_min_ps>;
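// These expand to eight instructions each, e.g. MAXSSrr, MAXSSrm, MAXPSrr and
// MAXPSrm plus the MAXSSrr_Int, MAXSSrm_Int, MAXPSrr_Int and MAXPSrm_Int
// intrinsic forms.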
//===----------------------------------------------------------------------===//
// SSE packed FP Instructions
//===----------------------------------------------------------------------===//

// Move Instructions
def MOVAPSrr : PSI<0x28, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                   "movaps\t{$src, $dst|$dst, $src}", []>;
let isLoad = 1, isReMaterializable = 1 in
def MOVAPSrm : PSI<0x28, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                   "movaps\t{$src, $dst|$dst, $src}",
                   [(set VR128:$dst, (alignedloadv4f32 addr:$src))]>;

def MOVAPSmr : PSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movaps\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v4f32 VR128:$src), addr:$dst)]>;
def MOVUPSrr : PSI<0x10, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                   "movups\t{$src, $dst|$dst, $src}", []>;
def MOVUPSrm : PSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                   "movups\t{$src, $dst|$dst, $src}",
                   [(set VR128:$dst, (loadv4f32 addr:$src))]>;
def MOVUPSmr : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movups\t{$src, $dst|$dst, $src}",
                   [(store (v4f32 VR128:$src), addr:$dst)]>;

// Intrinsic forms of MOVUPS load and store
def MOVUPSrm_Int : PSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                       "movups\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse_loadu_ps addr:$src))]>;
def MOVUPSmr_Int : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                       "movups\t{$src, $dst|$dst, $src}",
                       [(int_x86_sse_storeu_ps addr:$dst, VR128:$src)]>;
let isTwoAddress = 1 in {
let AddedComplexity = 20 in {
def MOVLPSrm : PSI<0x12, MRMSrcMem,
                   (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
                   "movlps\t{$src2, $dst|$dst, $src2}",
                   [(set VR128:$dst,
                     (v4f32 (vector_shuffle VR128:$src1,
                     (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2)))),
                             MOVLP_shuffle_mask)))]>;
def MOVHPSrm : PSI<0x16, MRMSrcMem,
                   (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
                   "movhps\t{$src2, $dst|$dst, $src2}",
                   [(set VR128:$dst,
                     (v4f32 (vector_shuffle VR128:$src1,
                     (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2)))),
                             MOVHP_shuffle_mask)))]>;
} // AddedComplexity
} // isTwoAddress

def MOVLPSmr : PSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
                                 (iPTR 0))), addr:$dst)]>;

// v2f64 extract element 1 is always custom lowered to unpack high to low
// and extract element 0 so the non-store version isn't too horrible.
def MOVHPSmr : PSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract
                                 (v2f64 (vector_shuffle
                                         (bc_v2f64 (v4f32 VR128:$src)), (undef),
                                         UNPCKH_shuffle_mask)), (iPTR 0))),
                           addr:$dst)]>;
let isTwoAddress = 1 in {
let AddedComplexity = 15 in {
def MOVLHPSrr : PSI<0x16, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                    "movlhps\t{$src2, $dst|$dst, $src2}",
                    [(set VR128:$dst,
                      (v4f32 (vector_shuffle VR128:$src1, VR128:$src2,
                              MOVHP_shuffle_mask)))]>;

def MOVHLPSrr : PSI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                    "movhlps\t{$src2, $dst|$dst, $src2}",
                    [(set VR128:$dst,
                      (v4f32 (vector_shuffle VR128:$src1, VR128:$src2,
                              MOVHLPS_shuffle_mask)))]>;
} // AddedComplexity
} // isTwoAddress
/// sse1_fp_unop_rm - SSE1 unops come in both scalar and vector forms.
///
/// In addition, we also have a special variant of the scalar form here to
/// represent the associated intrinsic operation. This form is unlike the
/// plain scalar form, in that it takes an entire vector (instead of a
/// scalar) and leaves the top elements undefined.
///
/// And, we have a special variant form for a full-vector intrinsic form.
///
/// These four forms can each have a reg or a mem operand, so there are a
/// total of eight "instructions".
///
multiclass sse1_fp_unop_rm<bits<8> opc, string OpcodeStr,
                           SDNode OpNode,
                           Intrinsic F32Int,
                           Intrinsic V4F32Int,
                           bit Commutable = 0> {
  // Scalar operation, reg.
  def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
                !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
                [(set FR32:$dst, (OpNode FR32:$src))]> {
    let isCommutable = Commutable;
  }

  // Scalar operation, mem.
  def SSm : SSI<opc, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
                !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
                [(set FR32:$dst, (OpNode (load addr:$src)))]>;

  // Vector operation, reg.
  def PSr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
                [(set VR128:$dst, (v4f32 (OpNode VR128:$src)))]> {
    let isCommutable = Commutable;
  }

  // Vector operation, mem.
  def PSm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
                [(set VR128:$dst, (OpNode (memopv4f32 addr:$src)))]>;

  // Intrinsic operation, reg.
  def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                    !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (F32Int VR128:$src))]> {
    let isCommutable = Commutable;
  }

  // Intrinsic operation, mem.
  def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins ssmem:$src),
                    !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (F32Int sse_load_f32:$src))]>;

  // Vector intrinsic operation, reg.
  def PSr_Int : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                    !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (V4F32Int VR128:$src))]> {
    let isCommutable = Commutable;
  }

  // Vector intrinsic operation, mem.
  def PSm_Int : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                    !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (V4F32Int (load addr:$src)))]>;
}
// Square root.
defm SQRT  : sse1_fp_unop_rm<0x51, "sqrt", fsqrt,
                             int_x86_sse_sqrt_ss, int_x86_sse_sqrt_ps>;

// Reciprocal approximations. Note that these typically require refinement
// in order to obtain suitable precision.
defm RSQRT : sse1_fp_unop_rm<0x52, "rsqrt", X86frsqrt,
                             int_x86_sse_rsqrt_ss, int_x86_sse_rsqrt_ps>;
defm RCP   : sse1_fp_unop_rm<0x53, "rcp", X86frcp,
                             int_x86_sse_rcp_ss, int_x86_sse_rcp_ps>;
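// A common refinement (a sketch; nothing in this file emits it) is one
// Newton-Raphson step on the hardware estimate x0, e.g. for rsqrt(a):
//   x1 = x0 * (1.5 - 0.5 * a * x0 * x0)
// which roughly doubles the ~12 bits of precision the estimate provides.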
// Logical
let isTwoAddress = 1 in {
  let isCommutable = 1 in {
    def ANDPSrr : PSI<0x54, MRMSrcReg,
                      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                      "andps\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst, (v2i64
                                         (and VR128:$src1, VR128:$src2)))]>;
    def ORPSrr  : PSI<0x56, MRMSrcReg,
                      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                      "orps\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst, (v2i64
                                         (or VR128:$src1, VR128:$src2)))]>;
    def XORPSrr : PSI<0x57, MRMSrcReg,
                      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                      "xorps\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst, (v2i64
                                         (xor VR128:$src1, VR128:$src2)))]>;
  }

  def ANDPSrm : PSI<0x54, MRMSrcMem,
                    (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                    "andps\t{$src2, $dst|$dst, $src2}",
                    [(set VR128:$dst, (and (bc_v2i64 (v4f32 VR128:$src1)),
                                           (memopv2i64 addr:$src2)))]>;
  def ORPSrm  : PSI<0x56, MRMSrcMem,
                    (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                    "orps\t{$src2, $dst|$dst, $src2}",
                    [(set VR128:$dst, (or (bc_v2i64 (v4f32 VR128:$src1)),
                                          (memopv2i64 addr:$src2)))]>;
  def XORPSrm : PSI<0x57, MRMSrcMem,
                    (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                    "xorps\t{$src2, $dst|$dst, $src2}",
                    [(set VR128:$dst, (xor (bc_v2i64 (v4f32 VR128:$src1)),
                                           (memopv2i64 addr:$src2)))]>;
  def ANDNPSrr : PSI<0x55, MRMSrcReg,
                     (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                     "andnps\t{$src2, $dst|$dst, $src2}",
                     [(set VR128:$dst,
                       (v2i64 (and (xor VR128:$src1,
                                        (bc_v2i64 (v4i32 immAllOnesV))),
                                   VR128:$src2)))]>;
  def ANDNPSrm : PSI<0x55, MRMSrcMem,
                     (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                     "andnps\t{$src2, $dst|$dst, $src2}",
                     [(set VR128:$dst,
                       (v2i64 (and (xor (bc_v2i64 (v4f32 VR128:$src1)),
                                        (bc_v2i64 (v4i32 immAllOnesV))),
                                   (memopv2i64 addr:$src2))))]>;
}
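// Note on the ANDNPS patterns above: the hardware computes (~src1) & src2,
// and with only generic 'and'/'xor' nodes available this is spelled
// (and (xor src1, all-ones), src2), hence the immAllOnesV operands.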
let isTwoAddress = 1 in {
  def CMPPSrri : PSIi8<0xC2, MRMSrcReg,
                       (outs VR128:$dst), (ins VR128:$src1, VR128:$src, SSECC:$cc),
                       "cmp${cc}ps\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse_cmp_ps VR128:$src1,
                                          VR128:$src, imm:$cc))]>;
  def CMPPSrmi : PSIi8<0xC2, MRMSrcMem,
                       (outs VR128:$dst), (ins VR128:$src1, f128mem:$src, SSECC:$cc),
                       "cmp${cc}ps\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse_cmp_ps VR128:$src1,
                                          (load addr:$src), imm:$cc))]>;
}
// Shuffle and unpack instructions
let isTwoAddress = 1 in {
  let isConvertibleToThreeAddress = 1 in // Convert to pshufd
    def SHUFPSrri : PSIi8<0xC6, MRMSrcReg,
                          (outs VR128:$dst), (ins VR128:$src1,
                           VR128:$src2, i32i8imm:$src3),
                          "shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                          [(set VR128:$dst,
                            (v4f32 (vector_shuffle
                                    VR128:$src1, VR128:$src2,
                                    SHUFP_shuffle_mask:$src3)))]>;
  def SHUFPSrmi : PSIi8<0xC6, MRMSrcMem,
                        (outs VR128:$dst), (ins VR128:$src1,
                         f128mem:$src2, i32i8imm:$src3),
                        "shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                        [(set VR128:$dst,
                          (v4f32 (vector_shuffle
                                  VR128:$src1, (memopv4f32 addr:$src2),
                                  SHUFP_shuffle_mask:$src3)))]>;

  let AddedComplexity = 10 in {
    def UNPCKHPSrr : PSI<0x15, MRMSrcReg,
                         (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                         "unpckhps\t{$src2, $dst|$dst, $src2}",
                         [(set VR128:$dst,
                           (v4f32 (vector_shuffle
                                   VR128:$src1, VR128:$src2,
                                   UNPCKH_shuffle_mask)))]>;
    def UNPCKHPSrm : PSI<0x15, MRMSrcMem,
                         (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                         "unpckhps\t{$src2, $dst|$dst, $src2}",
                         [(set VR128:$dst,
                           (v4f32 (vector_shuffle
                                   VR128:$src1, (memopv4f32 addr:$src2),
                                   UNPCKH_shuffle_mask)))]>;

    def UNPCKLPSrr : PSI<0x14, MRMSrcReg,
                         (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                         "unpcklps\t{$src2, $dst|$dst, $src2}",
                         [(set VR128:$dst,
                           (v4f32 (vector_shuffle
                                   VR128:$src1, VR128:$src2,
                                   UNPCKL_shuffle_mask)))]>;
    def UNPCKLPSrm : PSI<0x14, MRMSrcMem,
                         (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                         "unpcklps\t{$src2, $dst|$dst, $src2}",
                         [(set VR128:$dst,
                           (v4f32 (vector_shuffle
                                   VR128:$src1, (memopv4f32 addr:$src2),
                                   UNPCKL_shuffle_mask)))]>;
  } // AddedComplexity
} // isTwoAddress
// Mask creation
def MOVMSKPSrr : PSI<0x50, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
                     "movmskps\t{$src, $dst|$dst, $src}",
                     [(set GR32:$dst, (int_x86_sse_movmsk_ps VR128:$src))]>;
def MOVMSKPDrr : PSI<0x50, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
                     "movmskpd\t{$src, $dst|$dst, $src}",
                     [(set GR32:$dst, (int_x86_sse2_movmsk_pd VR128:$src))]>;

// Prefetching loads.
// TODO: no intrinsics for these?
def PREFETCHT0  : PSI<0x18, MRM1m, (outs), (ins i8mem:$src), "prefetcht0\t$src", []>;
def PREFETCHT1  : PSI<0x18, MRM2m, (outs), (ins i8mem:$src), "prefetcht1\t$src", []>;
def PREFETCHT2  : PSI<0x18, MRM3m, (outs), (ins i8mem:$src), "prefetcht2\t$src", []>;
def PREFETCHNTA : PSI<0x18, MRM0m, (outs), (ins i8mem:$src), "prefetchnta\t$src", []>;
// Non-temporal stores
def MOVNTPSmr : PSI<0x2B, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
                    "movntps\t{$src, $dst|$dst, $src}",
                    [(int_x86_sse_movnt_ps addr:$dst, VR128:$src)]>;

// Load, store, and memory fence
def SFENCE : PSI<0xAE, MRM7m, (outs), (ins), "sfence", [(int_x86_sse_sfence)]>;

// MXCSR register
def LDMXCSR : PSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
                  "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>;
def STMXCSR : PSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
                  "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>;

// Alias instructions that map zero vector to pxor / xorp* for sse.
let isReMaterializable = 1 in
def V_SET0 : PSI<0x57, MRMInitReg, (outs VR128:$dst), (ins),
                 "xorps\t$dst, $dst",
                 [(set VR128:$dst, (v4i32 immAllZerosV))]>;
// FR32 to 128-bit vector conversion.
def MOVSS2PSrr : SSI<0x10, MRMSrcReg, (outs VR128:$dst), (ins FR32:$src),
                     "movss\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst,
                       (v4f32 (scalar_to_vector FR32:$src)))]>;
def MOVSS2PSrm : SSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f32mem:$src),
                     "movss\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst,
                       (v4f32 (scalar_to_vector (loadf32 addr:$src))))]>;

// FIXME: may not be able to eliminate this movss with coalescing if the src
// and dest register classes are different. We really want to write this
// pattern like this:
// def : Pat<(f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
//           (f32 FR32:$src)>;
def MOVPS2SSrr : SSI<0x10, MRMSrcReg, (outs FR32:$dst), (ins VR128:$src),
                     "movss\t{$src, $dst|$dst, $src}",
                     [(set FR32:$dst, (vector_extract (v4f32 VR128:$src),
                                       (iPTR 0)))]>;
def MOVPS2SSmr : SSI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, VR128:$src),
                     "movss\t{$src, $dst|$dst, $src}",
                     [(store (f32 (vector_extract (v4f32 VR128:$src),
                                   (iPTR 0))), addr:$dst)]>;
// Move to lower bits of a VR128, leaving upper bits alone.
// Three operand (but two address) aliases.
let isTwoAddress = 1 in {
  def MOVLSS2PSrr : SSI<0x10, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1, FR32:$src2),
                        "movss\t{$src2, $dst|$dst, $src2}", []>;

  let AddedComplexity = 15 in
    def MOVLPSrr : SSI<0x10, MRMSrcReg,
                       (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                       "movss\t{$src2, $dst|$dst, $src2}",
                       [(set VR128:$dst,
                         (v4f32 (vector_shuffle VR128:$src1, VR128:$src2,
                                 MOVL_shuffle_mask)))]>;
}

// Move to lower bits of a VR128 and zeroing upper bits.
// Loading from memory automatically zeroes the upper bits.
let AddedComplexity = 20 in
def MOVZSS2PSrm : SSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f32mem:$src),
                      "movss\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (v4f32 (vector_shuffle immAllZerosV_bc,
                                         (v4f32 (scalar_to_vector (loadf32 addr:$src))),
                                                MOVL_shuffle_mask)))]>;
//===----------------------------------------------------------------------===//
// SSE2 Instructions
//===----------------------------------------------------------------------===//

// Move Instructions
def MOVSDrr : SDI<0x10, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
                  "movsd\t{$src, $dst|$dst, $src}", []>;
let isLoad = 1, isReMaterializable = 1 in
def MOVSDrm : SDI<0x10, MRMSrcMem, (outs FR64:$dst), (ins f64mem:$src),
                  "movsd\t{$src, $dst|$dst, $src}",
                  [(set FR64:$dst, (loadf64 addr:$src))]>;
def MOVSDmr : SDI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
                  "movsd\t{$src, $dst|$dst, $src}",
                  [(store FR64:$src, addr:$dst)]>;
// Conversion instructions
def CVTTSD2SIrr : SDI<0x2C, MRMSrcReg, (outs GR32:$dst), (ins FR64:$src),
                      "cvttsd2si\t{$src, $dst|$dst, $src}",
                      [(set GR32:$dst, (fp_to_sint FR64:$src))]>;
def CVTTSD2SIrm : SDI<0x2C, MRMSrcMem, (outs GR32:$dst), (ins f64mem:$src),
                      "cvttsd2si\t{$src, $dst|$dst, $src}",
                      [(set GR32:$dst, (fp_to_sint (loadf64 addr:$src)))]>;
def CVTSD2SSrr  : SDI<0x5A, MRMSrcReg, (outs FR32:$dst), (ins FR64:$src),
                      "cvtsd2ss\t{$src, $dst|$dst, $src}",
                      [(set FR32:$dst, (fround FR64:$src))]>;
def CVTSD2SSrm  : SDI<0x5A, MRMSrcMem, (outs FR32:$dst), (ins f64mem:$src),
                      "cvtsd2ss\t{$src, $dst|$dst, $src}",
                      [(set FR32:$dst, (fround (loadf64 addr:$src)))]>;
def CVTSI2SDrr  : SDI<0x2A, MRMSrcReg, (outs FR64:$dst), (ins GR32:$src),
                      "cvtsi2sd\t{$src, $dst|$dst, $src}",
                      [(set FR64:$dst, (sint_to_fp GR32:$src))]>;
def CVTSI2SDrm  : SDI<0x2A, MRMSrcMem, (outs FR64:$dst), (ins i32mem:$src),
                      "cvtsi2sd\t{$src, $dst|$dst, $src}",
                      [(set FR64:$dst, (sint_to_fp (loadi32 addr:$src)))]>;

// SSE2 instructions with XS prefix
def CVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst), (ins FR32:$src),
                   "cvtss2sd\t{$src, $dst|$dst, $src}",
                   [(set FR64:$dst, (fextend FR32:$src))]>, XS,
                 Requires<[HasSSE2]>;
def CVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst), (ins f32mem:$src),
                   "cvtss2sd\t{$src, $dst|$dst, $src}",
                   [(set FR64:$dst, (extloadf32 addr:$src))]>, XS,
                 Requires<[HasSSE2]>;
// Match intrinsics which expect XMM operand(s).
def Int_CVTSD2SIrr : SDI<0x2D, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
                         "cvtsd2si\t{$src, $dst|$dst, $src}",
                         [(set GR32:$dst, (int_x86_sse2_cvtsd2si VR128:$src))]>;
def Int_CVTSD2SIrm : SDI<0x2D, MRMSrcMem, (outs GR32:$dst), (ins f128mem:$src),
                         "cvtsd2si\t{$src, $dst|$dst, $src}",
                         [(set GR32:$dst, (int_x86_sse2_cvtsd2si
                                           (load addr:$src)))]>;

// Match intrinsics which expect MM and XMM operand(s).
def Int_CVTPD2PIrr : PDI<0x2D, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
                         "cvtpd2pi\t{$src, $dst|$dst, $src}",
                         [(set VR64:$dst, (int_x86_sse_cvtpd2pi VR128:$src))]>;
def Int_CVTPD2PIrm : PDI<0x2D, MRMSrcMem, (outs VR64:$dst), (ins f128mem:$src),
                         "cvtpd2pi\t{$src, $dst|$dst, $src}",
                         [(set VR64:$dst, (int_x86_sse_cvtpd2pi
                                           (load addr:$src)))]>;
def Int_CVTTPD2PIrr: PDI<0x2C, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
                         "cvttpd2pi\t{$src, $dst|$dst, $src}",
                         [(set VR64:$dst, (int_x86_sse_cvttpd2pi VR128:$src))]>;
def Int_CVTTPD2PIrm: PDI<0x2C, MRMSrcMem, (outs VR64:$dst), (ins f128mem:$src),
                         "cvttpd2pi\t{$src, $dst|$dst, $src}",
                         [(set VR64:$dst, (int_x86_sse_cvttpd2pi
                                           (load addr:$src)))]>;
def Int_CVTPI2PDrr : PDI<0x2A, MRMSrcReg, (outs VR128:$dst), (ins VR64:$src),
                         "cvtpi2pd\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst, (int_x86_sse_cvtpi2pd VR64:$src))]>;
def Int_CVTPI2PDrm : PDI<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                         "cvtpi2pd\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst, (int_x86_sse_cvtpi2pd
                                            (load addr:$src)))]>;
// Aliases for intrinsics
def Int_CVTTSD2SIrr : SDI<0x2C, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
                          "cvttsd2si\t{$src, $dst|$dst, $src}",
                          [(set GR32:$dst,
                            (int_x86_sse2_cvttsd2si VR128:$src))]>;
def Int_CVTTSD2SIrm : SDI<0x2C, MRMSrcMem, (outs GR32:$dst), (ins f128mem:$src),
                          "cvttsd2si\t{$src, $dst|$dst, $src}",
                          [(set GR32:$dst, (int_x86_sse2_cvttsd2si
                                            (load addr:$src)))]>;
// Comparison instructions
let isTwoAddress = 1 in {
  def CMPSDrr : SDI<0xC2, MRMSrcReg,
                    (outs FR64:$dst), (ins FR64:$src1, FR64:$src, SSECC:$cc),
                    "cmp${cc}sd\t{$src, $dst|$dst, $src}", []>;
  def CMPSDrm : SDI<0xC2, MRMSrcMem,
                    (outs FR64:$dst), (ins FR64:$src1, f64mem:$src, SSECC:$cc),
                    "cmp${cc}sd\t{$src, $dst|$dst, $src}", []>;
}

let Defs = [EFLAGS] in {
def UCOMISDrr: PDI<0x2E, MRMSrcReg, (outs), (ins FR64:$src1, FR64:$src2),
                   "ucomisd\t{$src2, $src1|$src1, $src2}",
                   [(X86cmp FR64:$src1, FR64:$src2), (implicit EFLAGS)]>;
def UCOMISDrm: PDI<0x2E, MRMSrcMem, (outs), (ins FR64:$src1, f64mem:$src2),
                   "ucomisd\t{$src2, $src1|$src1, $src2}",
                   [(X86cmp FR64:$src1, (loadf64 addr:$src2)),
                    (implicit EFLAGS)]>;
} // Defs = [EFLAGS]
// Aliases to match intrinsics which expect XMM operand(s).
let isTwoAddress = 1 in {
  def Int_CMPSDrr : SDI<0xC2, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1, VR128:$src, SSECC:$cc),
                        "cmp${cc}sd\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cmp_sd VR128:$src1,
                                           VR128:$src, imm:$cc))]>;
  def Int_CMPSDrm : SDI<0xC2, MRMSrcMem,
                        (outs VR128:$dst), (ins VR128:$src1, f64mem:$src, SSECC:$cc),
                        "cmp${cc}sd\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cmp_sd VR128:$src1,
                                           (load addr:$src), imm:$cc))]>;
}

let Defs = [EFLAGS] in {
def Int_UCOMISDrr: PDI<0x2E, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
                       "ucomisd\t{$src2, $src1|$src1, $src2}",
                       [(X86ucomi (v2f64 VR128:$src1), (v2f64 VR128:$src2)),
                        (implicit EFLAGS)]>;
def Int_UCOMISDrm: PDI<0x2E, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
                       "ucomisd\t{$src2, $src1|$src1, $src2}",
                       [(X86ucomi (v2f64 VR128:$src1), (load addr:$src2)),
                        (implicit EFLAGS)]>;

def Int_COMISDrr: PDI<0x2F, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
                      "comisd\t{$src2, $src1|$src1, $src2}",
                      [(X86comi (v2f64 VR128:$src1), (v2f64 VR128:$src2)),
                       (implicit EFLAGS)]>;
def Int_COMISDrm: PDI<0x2F, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
                      "comisd\t{$src2, $src1|$src1, $src2}",
                      [(X86comi (v2f64 VR128:$src1), (load addr:$src2)),
                       (implicit EFLAGS)]>;
} // Defs = [EFLAGS]
// Aliases of packed SSE2 instructions for scalar use. These all have names that
// start with 'Fs'.

// Alias instructions that map fld0 to pxor for sse.
let isReMaterializable = 1 in
def FsFLD0SD : I<0xEF, MRMInitReg, (outs FR64:$dst), (ins),
                 "pxor\t$dst, $dst", [(set FR64:$dst, fpimm0)]>,
               Requires<[HasSSE2]>, TB, OpSize;

// Alias instruction to do FR64 reg-to-reg copy using movapd. Upper bits are
// disregarded.
def FsMOVAPDrr : PDI<0x28, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
                     "movapd\t{$src, $dst|$dst, $src}", []>;

// Alias instruction to load FR64 from f128mem using movapd. Upper bits are
// disregarded.
def FsMOVAPDrm : PDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
                     "movapd\t{$src, $dst|$dst, $src}",
                     [(set FR64:$dst, (alignedloadfsf64 addr:$src))]>;
// Alias bitwise logical operations using SSE logical ops on packed FP values.
let isTwoAddress = 1 in {
let isCommutable = 1 in {
  def FsANDPDrr : PDI<0x54, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
                      "andpd\t{$src2, $dst|$dst, $src2}",
                      [(set FR64:$dst, (X86fand FR64:$src1, FR64:$src2))]>;
  def FsORPDrr  : PDI<0x56, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
                      "orpd\t{$src2, $dst|$dst, $src2}",
                      [(set FR64:$dst, (X86for FR64:$src1, FR64:$src2))]>;
  def FsXORPDrr : PDI<0x57, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
                      "xorpd\t{$src2, $dst|$dst, $src2}",
                      [(set FR64:$dst, (X86fxor FR64:$src1, FR64:$src2))]>;
}

  def FsANDPDrm : PDI<0x54, MRMSrcMem, (outs FR64:$dst), (ins FR64:$src1, f128mem:$src2),
                      "andpd\t{$src2, $dst|$dst, $src2}",
                      [(set FR64:$dst, (X86fand FR64:$src1,
                                        (memopfsf64 addr:$src2)))]>;
  def FsORPDrm  : PDI<0x56, MRMSrcMem, (outs FR64:$dst), (ins FR64:$src1, f128mem:$src2),
                      "orpd\t{$src2, $dst|$dst, $src2}",
                      [(set FR64:$dst, (X86for FR64:$src1,
                                        (memopfsf64 addr:$src2)))]>;
  def FsXORPDrm : PDI<0x57, MRMSrcMem, (outs FR64:$dst), (ins FR64:$src1, f128mem:$src2),
                      "xorpd\t{$src2, $dst|$dst, $src2}",
                      [(set FR64:$dst, (X86fxor FR64:$src1,
                                        (memopfsf64 addr:$src2)))]>;

  def FsANDNPDrr : PDI<0x55, MRMSrcReg,
                       (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
                       "andnpd\t{$src2, $dst|$dst, $src2}", []>;
  def FsANDNPDrm : PDI<0x55, MRMSrcMem,
                       (outs FR64:$dst), (ins FR64:$src1, f128mem:$src2),
                       "andnpd\t{$src2, $dst|$dst, $src2}", []>;
}
/// basic_sse2_fp_binop_rm - SSE2 binops come in both scalar and vector forms.
///
/// In addition, we also have a special variant of the scalar form here to
/// represent the associated intrinsic operation. This form is unlike the
/// plain scalar form, in that it takes an entire vector (instead of a scalar)
/// and leaves the top elements undefined.
///
/// These three forms can each be reg+reg or reg+mem, so there are a total of
/// six "instructions".
///
let isTwoAddress = 1 in {
multiclass basic_sse2_fp_binop_rm<bits<8> opc, string OpcodeStr,
                                  SDNode OpNode, Intrinsic F64Int,
                                  bit Commutable = 0> {
  // Scalar operation, reg+reg.
  def SDrr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
                 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
                 [(set FR64:$dst, (OpNode FR64:$src1, FR64:$src2))]> {
    let isCommutable = Commutable;
  }

  // Scalar operation, reg+mem.
  def SDrm : SDI<opc, MRMSrcMem, (outs FR64:$dst), (ins FR64:$src1, f64mem:$src2),
                 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
                 [(set FR64:$dst, (OpNode FR64:$src1, (load addr:$src2)))]>;

  // Vector operation, reg+reg.
  def PDrr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                 !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
                 [(set VR128:$dst, (v2f64 (OpNode VR128:$src1, VR128:$src2)))]> {
    let isCommutable = Commutable;
  }

  // Vector operation, reg+mem.
  def PDrm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                 !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
                 [(set VR128:$dst, (OpNode VR128:$src1, (memopv2f64 addr:$src2)))]>;

  // Intrinsic operation, reg+reg.
  def SDrr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                     !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (F64Int VR128:$src1, VR128:$src2))]> {
    let isCommutable = Commutable;
  }

  // Intrinsic operation, reg+mem.
  def SDrm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2),
                     !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (F64Int VR128:$src1,
                                               sse_load_f64:$src2))]>;
}
}
// Arithmetic instructions
defm ADD : basic_sse2_fp_binop_rm<0x58, "add", fadd, int_x86_sse2_add_sd, 1>;
defm MUL : basic_sse2_fp_binop_rm<0x59, "mul", fmul, int_x86_sse2_mul_sd, 1>;
defm SUB : basic_sse2_fp_binop_rm<0x5C, "sub", fsub, int_x86_sse2_sub_sd>;
defm DIV : basic_sse2_fp_binop_rm<0x5E, "div", fdiv, int_x86_sse2_div_sd>;
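// As with the SSE1 forms, each defm above yields six instructions, e.g.
// ADDSDrr, ADDSDrm, ADDPDrr, ADDPDrm, ADDSDrr_Int and ADDSDrm_Int.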
/// sse2_fp_binop_rm - Other SSE2 binops
///
/// This multiclass is like basic_sse2_fp_binop_rm, with the addition of
/// instructions for a full-vector intrinsic form. Operations that map
/// onto C operators don't use this form since they just use the plain
/// vector form instead of having a separate vector intrinsic form.
///
/// This provides a total of eight "instructions".
///
let isTwoAddress = 1 in {
multiclass sse2_fp_binop_rm<bits<8> opc, string OpcodeStr,
                            SDNode OpNode,
                            Intrinsic F64Int,
                            Intrinsic V2F64Int,
                            bit Commutable = 0> {
  // Scalar operation, reg+reg.
  def SDrr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
                 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
                 [(set FR64:$dst, (OpNode FR64:$src1, FR64:$src2))]> {
    let isCommutable = Commutable;
  }

  // Scalar operation, reg+mem.
  def SDrm : SDI<opc, MRMSrcMem, (outs FR64:$dst), (ins FR64:$src1, f64mem:$src2),
                 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
                 [(set FR64:$dst, (OpNode FR64:$src1, (load addr:$src2)))]>;

  // Vector operation, reg+reg.
  def PDrr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                 !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
                 [(set VR128:$dst, (v2f64 (OpNode VR128:$src1, VR128:$src2)))]> {
    let isCommutable = Commutable;
  }

  // Vector operation, reg+mem.
  def PDrm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                 !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
                 [(set VR128:$dst, (OpNode VR128:$src1, (memopv2f64 addr:$src2)))]>;

  // Intrinsic operation, reg+reg.
  def SDrr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                     !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (F64Int VR128:$src1, VR128:$src2))]> {
    let isCommutable = Commutable;
  }

  // Intrinsic operation, reg+mem.
  def SDrm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2),
                     !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (F64Int VR128:$src1,
                                               sse_load_f64:$src2))]>;

  // Vector intrinsic operation, reg+reg.
  def PDrr_Int : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                     !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (V2F64Int VR128:$src1, VR128:$src2))]> {
    let isCommutable = Commutable;
  }

  // Vector intrinsic operation, reg+mem.
  def PDrm_Int : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                     !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (V2F64Int VR128:$src1, (load addr:$src2)))]>;
}
}
defm MAX : sse2_fp_binop_rm<0x5F, "max", X86fmax,
                            int_x86_sse2_max_sd, int_x86_sse2_max_pd>;
defm MIN : sse2_fp_binop_rm<0x5D, "min", X86fmin,
                            int_x86_sse2_min_sd, int_x86_sse2_min_pd>;
//===----------------------------------------------------------------------===//
// SSE packed FP Instructions
//===----------------------------------------------------------------------===//

// Move Instructions
def MOVAPDrr : PDI<0x28, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                   "movapd\t{$src, $dst|$dst, $src}", []>;
let isLoad = 1, isReMaterializable = 1 in
def MOVAPDrm : PDI<0x28, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                   "movapd\t{$src, $dst|$dst, $src}",
                   [(set VR128:$dst, (alignedloadv2f64 addr:$src))]>;

def MOVAPDmr : PDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movapd\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v2f64 VR128:$src), addr:$dst)]>;
def MOVUPDrr : PDI<0x10, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                   "movupd\t{$src, $dst|$dst, $src}", []>;
def MOVUPDrm : PDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                   "movupd\t{$src, $dst|$dst, $src}",
                   [(set VR128:$dst, (loadv2f64 addr:$src))]>;
def MOVUPDmr : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movupd\t{$src, $dst|$dst, $src}",
                   [(store (v2f64 VR128:$src), addr:$dst)]>;

// Intrinsic forms of MOVUPD load and store
def MOVUPDrm_Int : PDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                       "movupd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_loadu_pd addr:$src))]>;
def MOVUPDmr_Int : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                       "movupd\t{$src, $dst|$dst, $src}",
                       [(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>;
let isTwoAddress = 1 in {
  let AddedComplexity = 20 in {
    def MOVLPDrm : PDI<0x12, MRMSrcMem,
                       (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
                       "movlpd\t{$src2, $dst|$dst, $src2}",
                       [(set VR128:$dst,
                         (v2f64 (vector_shuffle VR128:$src1,
                                 (scalar_to_vector (loadf64 addr:$src2)),
                                 MOVLP_shuffle_mask)))]>;
    def MOVHPDrm : PDI<0x16, MRMSrcMem,
                       (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
                       "movhpd\t{$src2, $dst|$dst, $src2}",
                       [(set VR128:$dst,
                         (v2f64 (vector_shuffle VR128:$src1,
                                 (scalar_to_vector (loadf64 addr:$src2)),
                                 MOVHP_shuffle_mask)))]>;
  } // AddedComplexity
} // isTwoAddress

def MOVLPDmr : PDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlpd\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract (v2f64 VR128:$src),
                                 (iPTR 0))), addr:$dst)]>;

// v2f64 extract element 1 is always custom lowered to unpack high to low
// and extract element 0 so the non-store version isn't too horrible.
def MOVHPDmr : PDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhpd\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract
                                 (v2f64 (vector_shuffle VR128:$src, (undef),
                                         UNPCKH_shuffle_mask)), (iPTR 0))),
                           addr:$dst)]>;
// SSE2 instructions without OpSize prefix
def Int_CVTDQ2PSrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtdq2ps\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))]>,
                     TB, Requires<[HasSSE2]>;
def Int_CVTDQ2PSrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                       "cvtdq2ps\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2ps
                                          (bitconvert (memopv2i64 addr:$src))))]>,
                     TB, Requires<[HasSSE2]>;

// SSE2 instructions with XS prefix
def Int_CVTDQ2PDrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtdq2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))]>,
                     XS, Requires<[HasSSE2]>;
def Int_CVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                       "cvtdq2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2pd
                                          (bitconvert (memopv2i64 addr:$src))))]>,
                     XS, Requires<[HasSSE2]>;

def Int_CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                         "cvtps2dq\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))]>;
def Int_CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                         "cvtps2dq\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst, (int_x86_sse2_cvtps2dq
                                            (load addr:$src)))]>;

// SSE2 packed instructions with XS prefix
def Int_CVTTPS2DQrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "cvttps2dq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cvttps2dq VR128:$src))]>,
                      XS, Requires<[HasSSE2]>;
def Int_CVTTPS2DQrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                        "cvttps2dq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cvttps2dq
                                           (load addr:$src)))]>,
                      XS, Requires<[HasSSE2]>;
1432 // SSE2 packed instructions with XD prefix
1433 def Int_CVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1434 "cvtpd2dq\t{$src, $dst|$dst, $src}",
1435 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
1436 XD, Requires<[HasSSE2]>;
1437 def Int_CVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1438 "cvtpd2dq\t{$src, $dst|$dst, $src}",
1439 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq
1440 (load addr:$src)))]>,
1441 XD, Requires<[HasSSE2]>;
1443 def Int_CVTTPD2DQrr : PDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1444 "cvttpd2dq\t{$src, $dst|$dst, $src}",
1445 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))]>;
1446 def Int_CVTTPD2DQrm : PDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1447 "cvttpd2dq\t{$src, $dst|$dst, $src}",
1448 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
1449 (load addr:$src)))]>;
1451 // SSE2 instructions without OpSize prefix
1452 def Int_CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1453 "cvtps2pd\t{$src, $dst|$dst, $src}",
1454 [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>,
1455 TB, Requires<[HasSSE2]>;
1456 def Int_CVTPS2PDrm : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins f64mem:$src),
1457 "cvtps2pd\t{$src, $dst|$dst, $src}",
1458 [(set VR128:$dst, (int_x86_sse2_cvtps2pd
1459 (load addr:$src)))]>,
1460 TB, Requires<[HasSSE2]>;
1462 def Int_CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1463 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1464 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>;
1465 def Int_CVTPD2PSrm : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins f128mem:$src),
1466 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1467 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps
1468 (load addr:$src)))]>;

// Match intrinsics which expect XMM operand(s).
// Aliases for intrinsics
let isTwoAddress = 1 in {
  def Int_CVTSI2SDrr: SDI<0x2A, MRMSrcReg,
                          (outs VR128:$dst), (ins VR128:$src1, GR32:$src2),
                          "cvtsi2sd\t{$src2, $dst|$dst, $src2}",
                          [(set VR128:$dst, (int_x86_sse2_cvtsi2sd VR128:$src1,
                                             GR32:$src2))]>;
  def Int_CVTSI2SDrm: SDI<0x2A, MRMSrcMem,
                          (outs VR128:$dst), (ins VR128:$src1, i32mem:$src2),
                          "cvtsi2sd\t{$src2, $dst|$dst, $src2}",
                          [(set VR128:$dst, (int_x86_sse2_cvtsi2sd VR128:$src1,
                                             (loadi32 addr:$src2)))]>;
  def Int_CVTSD2SSrr: SDI<0x5A, MRMSrcReg,
                          (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                          "cvtsd2ss\t{$src2, $dst|$dst, $src2}",
                          [(set VR128:$dst, (int_x86_sse2_cvtsd2ss VR128:$src1,
                                             VR128:$src2))]>;
  def Int_CVTSD2SSrm: SDI<0x5A, MRMSrcMem,
                          (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
                          "cvtsd2ss\t{$src2, $dst|$dst, $src2}",
                          [(set VR128:$dst, (int_x86_sse2_cvtsd2ss VR128:$src1,
                                             (load addr:$src2)))]>;
  def Int_CVTSS2SDrr: I<0x5A, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                        "cvtss2sd\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
                                           VR128:$src2))]>, XS,
                      Requires<[HasSSE2]>;
  def Int_CVTSS2SDrm: I<0x5A, MRMSrcMem,
                        (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2),
                        "cvtss2sd\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
                                           (load addr:$src2)))]>, XS,
                      Requires<[HasSSE2]>;
}

/// sse2_fp_unop_rm - SSE2 unops come in both scalar and vector forms.
///
/// In addition, we also have a special variant of the scalar form here to
/// represent the associated intrinsic operation. This form is unlike the
/// plain scalar form, in that it takes an entire vector (instead of a
/// scalar) and leaves the top elements undefined.
///
/// And, we have a special variant form for a full-vector intrinsic form.
///
/// These four forms can each have a reg or a mem operand, so there are a
/// total of eight "instructions".
multiclass sse2_fp_unop_rm<bits<8> opc, string OpcodeStr,
                           SDNode OpNode, Intrinsic F64Int,
                           Intrinsic V2F64Int,
                           bit Commutable = 0> {
  // Scalar operation, reg.
  def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
                !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
                [(set FR64:$dst, (OpNode FR64:$src))]> {
    let isCommutable = Commutable;
  }

  // Scalar operation, mem.
  def SDm : SDI<opc, MRMSrcMem, (outs FR64:$dst), (ins f64mem:$src),
                !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
                [(set FR64:$dst, (OpNode (load addr:$src)))]>;

  // Vector operation, reg.
  def PDr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
                [(set VR128:$dst, (v2f64 (OpNode VR128:$src)))]> {
    let isCommutable = Commutable;
  }

  // Vector operation, mem.
  def PDm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
                [(set VR128:$dst, (OpNode (memopv2f64 addr:$src)))]>;

  // Intrinsic operation, reg.
  def SDr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                    !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (F64Int VR128:$src))]> {
    let isCommutable = Commutable;
  }

  // Intrinsic operation, mem.
  def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins sdmem:$src),
                    !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (F64Int sse_load_f64:$src))]>;

  // Vector intrinsic operation, reg
  def PDr_Int : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                    !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (V2F64Int VR128:$src))]> {
    let isCommutable = Commutable;
  }

  // Vector intrinsic operation, mem
  def PDm_Int : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                    !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (V2F64Int (load addr:$src)))]>;
}

defm SQRT : sse2_fp_unop_rm<0x51, "sqrt", fsqrt,
                            int_x86_sse2_sqrt_sd, int_x86_sse2_sqrt_pd>;
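
// Illustration only (not part of the build): tblgen forms each record name by
// prepending the defm name to the def names inside the multiclass, so the
// SQRT defm above yields eight records: SQRTSDr/SQRTSDm (scalar),
// SQRTPDr/SQRTPDm (packed), and the intrinsic forms SQRTSDr_Int/SQRTSDm_Int
// and SQRTPDr_Int/SQRTPDm_Int.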

// There is no f64 version of the reciprocal approximation instructions.

let isTwoAddress = 1 in {
  let isCommutable = 1 in {
    def ANDPDrr : PDI<0x54, MRMSrcReg,
                      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                      "andpd\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                        (and (bc_v2i64 (v2f64 VR128:$src1)),
                             (bc_v2i64 (v2f64 VR128:$src2))))]>;
    def ORPDrr : PDI<0x56, MRMSrcReg,
                     (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                     "orpd\t{$src2, $dst|$dst, $src2}",
                     [(set VR128:$dst,
                       (or (bc_v2i64 (v2f64 VR128:$src1)),
                           (bc_v2i64 (v2f64 VR128:$src2))))]>;
    def XORPDrr : PDI<0x57, MRMSrcReg,
                      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                      "xorpd\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                        (xor (bc_v2i64 (v2f64 VR128:$src1)),
                             (bc_v2i64 (v2f64 VR128:$src2))))]>;
  }

  def ANDPDrm : PDI<0x54, MRMSrcMem,
                    (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                    "andpd\t{$src2, $dst|$dst, $src2}",
                    [(set VR128:$dst,
                      (and (bc_v2i64 (v2f64 VR128:$src1)),
                           (memopv2i64 addr:$src2)))]>;
  def ORPDrm : PDI<0x56, MRMSrcMem,
                   (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                   "orpd\t{$src2, $dst|$dst, $src2}",
                   [(set VR128:$dst,
                     (or (bc_v2i64 (v2f64 VR128:$src1)),
                         (memopv2i64 addr:$src2)))]>;
  def XORPDrm : PDI<0x57, MRMSrcMem,
                    (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                    "xorpd\t{$src2, $dst|$dst, $src2}",
                    [(set VR128:$dst,
                      (xor (bc_v2i64 (v2f64 VR128:$src1)),
                           (memopv2i64 addr:$src2)))]>;
  def ANDNPDrr : PDI<0x55, MRMSrcReg,
                     (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                     "andnpd\t{$src2, $dst|$dst, $src2}",
                     [(set VR128:$dst,
                       (and (vnot (bc_v2i64 (v2f64 VR128:$src1))),
                            (bc_v2i64 (v2f64 VR128:$src2))))]>;
  def ANDNPDrm : PDI<0x55, MRMSrcMem,
                     (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                     "andnpd\t{$src2, $dst|$dst, $src2}",
                     [(set VR128:$dst,
                       (and (vnot (bc_v2i64 (v2f64 VR128:$src1))),
                            (memopv2i64 addr:$src2)))]>;
}

let isTwoAddress = 1 in {
  def CMPPDrri : PDIi8<0xC2, MRMSrcReg,
                       (outs VR128:$dst), (ins VR128:$src1, VR128:$src, SSECC:$cc),
                       "cmp${cc}pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cmp_pd VR128:$src1,
                                          VR128:$src, imm:$cc))]>;
  def CMPPDrmi : PDIi8<0xC2, MRMSrcMem,
                       (outs VR128:$dst), (ins VR128:$src1, f128mem:$src, SSECC:$cc),
                       "cmp${cc}pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cmp_pd VR128:$src1,
                                          (load addr:$src), imm:$cc))]>;
}

// Shuffle and unpack instructions
let isTwoAddress = 1 in {
  def SHUFPDrri : PDIi8<0xC6, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i8imm:$src3),
                        "shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                        [(set VR128:$dst, (v2f64 (vector_shuffle
                                                  VR128:$src1, VR128:$src2,
                                                  SHUFP_shuffle_mask:$src3)))]>;
  def SHUFPDrmi : PDIi8<0xC6, MRMSrcMem,
                        (outs VR128:$dst), (ins VR128:$src1,
                                            f128mem:$src2, i8imm:$src3),
                        "shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                        [(set VR128:$dst,
                          (v2f64 (vector_shuffle
                                  VR128:$src1, (memopv2f64 addr:$src2),
                                  SHUFP_shuffle_mask:$src3)))]>;

  let AddedComplexity = 10 in {
    def UNPCKHPDrr : PDI<0x15, MRMSrcReg,
                         (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                         "unpckhpd\t{$src2, $dst|$dst, $src2}",
                         [(set VR128:$dst,
                           (v2f64 (vector_shuffle
                                   VR128:$src1, VR128:$src2,
                                   UNPCKH_shuffle_mask)))]>;
    def UNPCKHPDrm : PDI<0x15, MRMSrcMem,
                         (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                         "unpckhpd\t{$src2, $dst|$dst, $src2}",
                         [(set VR128:$dst,
                           (v2f64 (vector_shuffle
                                   VR128:$src1, (memopv2f64 addr:$src2),
                                   UNPCKH_shuffle_mask)))]>;
    def UNPCKLPDrr : PDI<0x14, MRMSrcReg,
                         (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                         "unpcklpd\t{$src2, $dst|$dst, $src2}",
                         [(set VR128:$dst,
                           (v2f64 (vector_shuffle
                                   VR128:$src1, VR128:$src2,
                                   UNPCKL_shuffle_mask)))]>;
    def UNPCKLPDrm : PDI<0x14, MRMSrcMem,
                         (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                         "unpcklpd\t{$src2, $dst|$dst, $src2}",
                         [(set VR128:$dst,
                           (v2f64 (vector_shuffle
                                   VR128:$src1, (memopv2f64 addr:$src2),
                                   UNPCKL_shuffle_mask)))]>;
  } // AddedComplexity
} // isTwoAddress

//===----------------------------------------------------------------------===//
// SSE integer instructions
//===----------------------------------------------------------------------===//

// Move Instructions
def MOVDQArr : PDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                   "movdqa\t{$src, $dst|$dst, $src}", []>;
def MOVDQArm : PDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                   "movdqa\t{$src, $dst|$dst, $src}",
                   [/*(set VR128:$dst, (alignedloadv2i64 addr:$src))*/]>;
def MOVDQAmr : PDI<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
                   "movdqa\t{$src, $dst|$dst, $src}",
                   [/*(alignedstore (v2i64 VR128:$src), addr:$dst)*/]>;
def MOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                 "movdqu\t{$src, $dst|$dst, $src}",
                 [/*(set VR128:$dst, (loadv2i64 addr:$src))*/]>,
               XS, Requires<[HasSSE2]>;
def MOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
                 "movdqu\t{$src, $dst|$dst, $src}",
                 [/*(store (v2i64 VR128:$src), addr:$dst)*/]>,
               XS, Requires<[HasSSE2]>;

// Intrinsic forms of MOVDQU load and store
def MOVDQUrm_Int : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                     "movdqu\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst, (int_x86_sse2_loadu_dq addr:$src))]>,
                   XS, Requires<[HasSSE2]>;
def MOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
                     "movdqu\t{$src, $dst|$dst, $src}",
                     [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
                   XS, Requires<[HasSSE2]>;

let isTwoAddress = 1 in {

multiclass PDI_binop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId,
                            bit Commutable = 0> {
  def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
               !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
               [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]> {
    let isCommutable = Commutable;
  }
  def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
               !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
               [(set VR128:$dst, (IntId VR128:$src1,
                                  (bitconvert (memopv2i64 addr:$src2))))]>;
}

multiclass PDI_binop_rmi_int<bits<8> opc, bits<8> opc2, Format ImmForm,
                             string OpcodeStr, Intrinsic IntId> {
  def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
               !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
               [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
  def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
               !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
               [(set VR128:$dst, (IntId VR128:$src1,
                                  (bitconvert (memopv2i64 addr:$src2))))]>;
  def ri : PDIi8<opc2, ImmForm, (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
                 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                 [(set VR128:$dst, (IntId VR128:$src1,
                                    (scalar_to_vector (i32 imm:$src2))))]>;
}

/// PDI_binop_rm - Simple SSE2 binary operator.
multiclass PDI_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
                        ValueType OpVT, bit Commutable = 0> {
  def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
               !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
               [(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]> {
    let isCommutable = Commutable;
  }
  def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
               !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
               [(set VR128:$dst, (OpVT (OpNode VR128:$src1,
                                        (bitconvert (memopv2i64 addr:$src2)))))]>;
}

/// PDI_binop_rm_v2i64 - Simple SSE2 binary operator whose type is v2i64.
///
/// FIXME: we could eliminate this and use PDI_binop_rm instead if tblgen knew
/// to collapse (bitconvert VT to VT) into its operand.
multiclass PDI_binop_rm_v2i64<bits<8> opc, string OpcodeStr, SDNode OpNode,
                              bit Commutable = 0> {
  def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
               !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
               [(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))]> {
    let isCommutable = Commutable;
  }
  def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
               !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
               [(set VR128:$dst, (OpNode VR128:$src1, (memopv2i64 addr:$src2)))]>;
}

} // isTwoAddress

// 128-bit Integer Arithmetic

defm PADDB : PDI_binop_rm<0xFC, "paddb", add, v16i8, 1>;
defm PADDW : PDI_binop_rm<0xFD, "paddw", add, v8i16, 1>;
defm PADDD : PDI_binop_rm<0xFE, "paddd", add, v4i32, 1>;
defm PADDQ : PDI_binop_rm_v2i64<0xD4, "paddq", add, 1>;
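
// Illustration only (not part of the build): each defm above expands to an
// "rr" and an "rm" record; e.g. PADDB becomes PADDBrr, matching
// (v16i8 (add VR128:$src1, VR128:$src2)), and PADDBrm, which folds a 128-bit
// load into the second operand.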

defm PADDSB  : PDI_binop_rm_int<0xEC, "paddsb" , int_x86_sse2_padds_b, 1>;
defm PADDSW  : PDI_binop_rm_int<0xED, "paddsw" , int_x86_sse2_padds_w, 1>;
defm PADDUSB : PDI_binop_rm_int<0xDC, "paddusb", int_x86_sse2_paddus_b, 1>;
defm PADDUSW : PDI_binop_rm_int<0xDD, "paddusw", int_x86_sse2_paddus_w, 1>;

defm PSUBB : PDI_binop_rm<0xF8, "psubb", sub, v16i8>;
defm PSUBW : PDI_binop_rm<0xF9, "psubw", sub, v8i16>;
defm PSUBD : PDI_binop_rm<0xFA, "psubd", sub, v4i32>;
defm PSUBQ : PDI_binop_rm_v2i64<0xFB, "psubq", sub>;

defm PSUBSB  : PDI_binop_rm_int<0xE8, "psubsb" , int_x86_sse2_psubs_b>;
defm PSUBSW  : PDI_binop_rm_int<0xE9, "psubsw" , int_x86_sse2_psubs_w>;
defm PSUBUSB : PDI_binop_rm_int<0xD8, "psubusb", int_x86_sse2_psubus_b>;
defm PSUBUSW : PDI_binop_rm_int<0xD9, "psubusw", int_x86_sse2_psubus_w>;

defm PMULLW : PDI_binop_rm<0xD5, "pmullw", mul, v8i16, 1>;

defm PMULHUW : PDI_binop_rm_int<0xE4, "pmulhuw", int_x86_sse2_pmulhu_w, 1>;
defm PMULHW  : PDI_binop_rm_int<0xE5, "pmulhw" , int_x86_sse2_pmulh_w , 1>;
defm PMULUDQ : PDI_binop_rm_int<0xF4, "pmuludq", int_x86_sse2_pmulu_dq, 1>;

defm PMADDWD : PDI_binop_rm_int<0xF5, "pmaddwd", int_x86_sse2_pmadd_wd, 1>;

defm PAVGB : PDI_binop_rm_int<0xE0, "pavgb", int_x86_sse2_pavg_b, 1>;
defm PAVGW : PDI_binop_rm_int<0xE3, "pavgw", int_x86_sse2_pavg_w, 1>;

defm PMINUB : PDI_binop_rm_int<0xDA, "pminub", int_x86_sse2_pminu_b, 1>;
defm PMINSW : PDI_binop_rm_int<0xEA, "pminsw", int_x86_sse2_pmins_w, 1>;
defm PMAXUB : PDI_binop_rm_int<0xDE, "pmaxub", int_x86_sse2_pmaxu_b, 1>;
defm PMAXSW : PDI_binop_rm_int<0xEE, "pmaxsw", int_x86_sse2_pmaxs_w, 1>;
defm PSADBW : PDI_binop_rm_int<0xF6, "psadbw", int_x86_sse2_psad_bw, 1>;

defm PSLLW : PDI_binop_rmi_int<0xF1, 0x71, MRM6r, "psllw", int_x86_sse2_psll_w>;
defm PSLLD : PDI_binop_rmi_int<0xF2, 0x72, MRM6r, "pslld", int_x86_sse2_psll_d>;
defm PSLLQ : PDI_binop_rmi_int<0xF3, 0x73, MRM6r, "psllq", int_x86_sse2_psll_q>;

defm PSRLW : PDI_binop_rmi_int<0xD1, 0x71, MRM2r, "psrlw", int_x86_sse2_psrl_w>;
defm PSRLD : PDI_binop_rmi_int<0xD2, 0x72, MRM2r, "psrld", int_x86_sse2_psrl_d>;
defm PSRLQ : PDI_binop_rmi_int<0xD3, 0x73, MRM2r, "psrlq", int_x86_sse2_psrl_q>;

defm PSRAW : PDI_binop_rmi_int<0xE1, 0x71, MRM4r, "psraw", int_x86_sse2_psra_w>;
defm PSRAD : PDI_binop_rmi_int<0xE2, 0x72, MRM4r, "psrad", int_x86_sse2_psra_d>;
// PSRAQ doesn't exist in SSE[1-3].

// 128-bit logical shifts.
let isTwoAddress = 1 in {
  def PSLLDQri : PDIi8<0x73, MRM7r,
                       (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
                       "pslldq\t{$src2, $dst|$dst, $src2}", []>;
  def PSRLDQri : PDIi8<0x73, MRM3r,
                       (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
                       "psrldq\t{$src2, $dst|$dst, $src2}", []>;
  // PSRADQri doesn't exist in SSE[1-3].
}

let Predicates = [HasSSE2] in {
  def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
            (v2i64 (PSLLDQri VR128:$src1, (PSxLDQ_imm imm:$src2)))>;
  def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
            (v2i64 (PSRLDQri VR128:$src1, (PSxLDQ_imm imm:$src2)))>;
  def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
            (v2f64 (PSRLDQri VR128:$src1, (PSxLDQ_imm imm:$src2)))>;
}
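
// Note (illustrative, assuming PSxLDQ_imm scales the immediate down by 8, as
// its use here implies): the intrinsics above count the shift in bits, while
// pslldq/psrldq shift by whole bytes, so e.g. a 64-bit shift request ends up
// emitted as "pslldq $8".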

defm PAND : PDI_binop_rm_v2i64<0xDB, "pand", and, 1>;
defm POR  : PDI_binop_rm_v2i64<0xEB, "por" , or , 1>;
defm PXOR : PDI_binop_rm_v2i64<0xEF, "pxor", xor, 1>;

let isTwoAddress = 1 in {
  def PANDNrr : PDI<0xDF, MRMSrcReg,
                    (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                    "pandn\t{$src2, $dst|$dst, $src2}",
                    [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
                                                  VR128:$src2)))]>;
  def PANDNrm : PDI<0xDF, MRMSrcMem,
                    (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                    "pandn\t{$src2, $dst|$dst, $src2}",
                    [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
                                                  (memopv2i64 addr:$src2))))]>;
}
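
// pandn computes (~src1 & src2): the complement applies only to the first
// operand, so unlike pand/por/pxor these defs are not marked commutable.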

// SSE2 Integer comparison
defm PCMPEQB : PDI_binop_rm_int<0x74, "pcmpeqb", int_x86_sse2_pcmpeq_b>;
defm PCMPEQW : PDI_binop_rm_int<0x75, "pcmpeqw", int_x86_sse2_pcmpeq_w>;
defm PCMPEQD : PDI_binop_rm_int<0x76, "pcmpeqd", int_x86_sse2_pcmpeq_d>;
defm PCMPGTB : PDI_binop_rm_int<0x64, "pcmpgtb", int_x86_sse2_pcmpgt_b>;
defm PCMPGTW : PDI_binop_rm_int<0x65, "pcmpgtw", int_x86_sse2_pcmpgt_w>;
defm PCMPGTD : PDI_binop_rm_int<0x66, "pcmpgtd", int_x86_sse2_pcmpgt_d>;

// Pack instructions
defm PACKSSWB : PDI_binop_rm_int<0x63, "packsswb", int_x86_sse2_packsswb_128>;
defm PACKSSDW : PDI_binop_rm_int<0x6B, "packssdw", int_x86_sse2_packssdw_128>;
defm PACKUSWB : PDI_binop_rm_int<0x67, "packuswb", int_x86_sse2_packuswb_128>;

// Shuffle and unpack instructions
def PSHUFDri : PDIi8<0x70, MRMSrcReg,
                     (outs VR128:$dst), (ins VR128:$src1, i8imm:$src2),
                     "pshufd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                     [(set VR128:$dst, (v4i32 (vector_shuffle
                                               VR128:$src1, (undef),
                                               PSHUFD_shuffle_mask:$src2)))]>;
def PSHUFDmi : PDIi8<0x70, MRMSrcMem,
                     (outs VR128:$dst), (ins i128mem:$src1, i8imm:$src2),
                     "pshufd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                     [(set VR128:$dst, (v4i32 (vector_shuffle
                                               (bc_v4i32 (memopv2i64 addr:$src1)),
                                               (undef),
                                               PSHUFD_shuffle_mask:$src2)))]>;
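
// Immediate example: each 2-bit field of the pshufd immediate selects the
// source dword for the corresponding result dword, so "pshufd $0x1B"
// (0b00011011) picks elements 3,2,1,0 and reverses the vector.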

// SSE2 with ImmT == Imm8 and XS prefix.
def PSHUFHWri : Ii8<0x70, MRMSrcReg,
                    (outs VR128:$dst), (ins VR128:$src1, i8imm:$src2),
                    "pshufhw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    [(set VR128:$dst, (v8i16 (vector_shuffle
                                              VR128:$src1, (undef),
                                              PSHUFHW_shuffle_mask:$src2)))]>,
                XS, Requires<[HasSSE2]>;
def PSHUFHWmi : Ii8<0x70, MRMSrcMem,
                    (outs VR128:$dst), (ins i128mem:$src1, i8imm:$src2),
                    "pshufhw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    [(set VR128:$dst, (v8i16 (vector_shuffle
                                              (bc_v8i16 (memopv2i64 addr:$src1)),
                                              (undef),
                                              PSHUFHW_shuffle_mask:$src2)))]>,
                XS, Requires<[HasSSE2]>;

// SSE2 with ImmT == Imm8 and XD prefix.
def PSHUFLWri : Ii8<0x70, MRMSrcReg,
                    (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
                    "pshuflw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    [(set VR128:$dst, (v8i16 (vector_shuffle
                                              VR128:$src1, (undef),
                                              PSHUFLW_shuffle_mask:$src2)))]>,
                XD, Requires<[HasSSE2]>;
def PSHUFLWmi : Ii8<0x70, MRMSrcMem,
                    (outs VR128:$dst), (ins i128mem:$src1, i32i8imm:$src2),
                    "pshuflw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    [(set VR128:$dst, (v8i16 (vector_shuffle
                                              (bc_v8i16 (memopv2i64 addr:$src1)),
                                              (undef),
                                              PSHUFLW_shuffle_mask:$src2)))]>,
                XD, Requires<[HasSSE2]>;

let isTwoAddress = 1 in {
  def PUNPCKLBWrr : PDI<0x60, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                        "punpcklbw\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst,
                          (v16i8 (vector_shuffle VR128:$src1, VR128:$src2,
                                  UNPCKL_shuffle_mask)))]>;
  def PUNPCKLBWrm : PDI<0x60, MRMSrcMem,
                        (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                        "punpcklbw\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst,
                          (v16i8 (vector_shuffle VR128:$src1,
                                  (bc_v16i8 (memopv2i64 addr:$src2)),
                                  UNPCKL_shuffle_mask)))]>;
  def PUNPCKLWDrr : PDI<0x61, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                        "punpcklwd\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst,
                          (v8i16 (vector_shuffle VR128:$src1, VR128:$src2,
                                  UNPCKL_shuffle_mask)))]>;
  def PUNPCKLWDrm : PDI<0x61, MRMSrcMem,
                        (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                        "punpcklwd\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst,
                          (v8i16 (vector_shuffle VR128:$src1,
                                  (bc_v8i16 (memopv2i64 addr:$src2)),
                                  UNPCKL_shuffle_mask)))]>;
  def PUNPCKLDQrr : PDI<0x62, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                        "punpckldq\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst,
                          (v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
                                  UNPCKL_shuffle_mask)))]>;
  def PUNPCKLDQrm : PDI<0x62, MRMSrcMem,
                        (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                        "punpckldq\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst,
                          (v4i32 (vector_shuffle VR128:$src1,
                                  (bc_v4i32 (memopv2i64 addr:$src2)),
                                  UNPCKL_shuffle_mask)))]>;
  def PUNPCKLQDQrr : PDI<0x6C, MRMSrcReg,
                         (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                         "punpcklqdq\t{$src2, $dst|$dst, $src2}",
                         [(set VR128:$dst,
                           (v2i64 (vector_shuffle VR128:$src1, VR128:$src2,
                                   UNPCKL_shuffle_mask)))]>;
  def PUNPCKLQDQrm : PDI<0x6C, MRMSrcMem,
                         (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                         "punpcklqdq\t{$src2, $dst|$dst, $src2}",
                         [(set VR128:$dst,
                           (v2i64 (vector_shuffle VR128:$src1,
                                   (memopv2i64 addr:$src2),
                                   UNPCKL_shuffle_mask)))]>;

  def PUNPCKHBWrr : PDI<0x68, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                        "punpckhbw\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst,
                          (v16i8 (vector_shuffle VR128:$src1, VR128:$src2,
                                  UNPCKH_shuffle_mask)))]>;
  def PUNPCKHBWrm : PDI<0x68, MRMSrcMem,
                        (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                        "punpckhbw\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst,
                          (v16i8 (vector_shuffle VR128:$src1,
                                  (bc_v16i8 (memopv2i64 addr:$src2)),
                                  UNPCKH_shuffle_mask)))]>;
  def PUNPCKHWDrr : PDI<0x69, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                        "punpckhwd\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst,
                          (v8i16 (vector_shuffle VR128:$src1, VR128:$src2,
                                  UNPCKH_shuffle_mask)))]>;
  def PUNPCKHWDrm : PDI<0x69, MRMSrcMem,
                        (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                        "punpckhwd\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst,
                          (v8i16 (vector_shuffle VR128:$src1,
                                  (bc_v8i16 (memopv2i64 addr:$src2)),
                                  UNPCKH_shuffle_mask)))]>;
  def PUNPCKHDQrr : PDI<0x6A, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                        "punpckhdq\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst,
                          (v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
                                  UNPCKH_shuffle_mask)))]>;
  def PUNPCKHDQrm : PDI<0x6A, MRMSrcMem,
                        (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                        "punpckhdq\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst,
                          (v4i32 (vector_shuffle VR128:$src1,
                                  (bc_v4i32 (memopv2i64 addr:$src2)),
                                  UNPCKH_shuffle_mask)))]>;
  def PUNPCKHQDQrr : PDI<0x6D, MRMSrcReg,
                         (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                         "punpckhqdq\t{$src2, $dst|$dst, $src2}",
                         [(set VR128:$dst,
                           (v2i64 (vector_shuffle VR128:$src1, VR128:$src2,
                                   UNPCKH_shuffle_mask)))]>;
  def PUNPCKHQDQrm : PDI<0x6D, MRMSrcMem,
                         (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                         "punpckhqdq\t{$src2, $dst|$dst, $src2}",
                         [(set VR128:$dst,
                           (v2i64 (vector_shuffle VR128:$src1,
                                   (memopv2i64 addr:$src2),
                                   UNPCKH_shuffle_mask)))]>;
}

def PEXTRWri : PDIi8<0xC5, MRMSrcReg,
                     (outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
                     "pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                     [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
                                       (iPTR imm:$src2)))]>;
let isTwoAddress = 1 in {
  def PINSRWrri : PDIi8<0xC4, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1,
                                            GR32:$src2, i32i8imm:$src3),
                        "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                        [(set VR128:$dst,
                          (v8i16 (X86pinsrw (v8i16 VR128:$src1),
                                  GR32:$src2, (iPTR imm:$src3))))]>;
  def PINSRWrmi : PDIi8<0xC4, MRMSrcMem,
                        (outs VR128:$dst), (ins VR128:$src1,
                                            i16mem:$src2, i32i8imm:$src3),
                        "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                        [(set VR128:$dst,
                          (v8i16 (X86pinsrw (v8i16 VR128:$src1),
                                  (i32 (anyext (loadi16 addr:$src2))),
                                  (iPTR imm:$src3))))]>;
}

def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
                     "pmovmskb\t{$src, $dst|$dst, $src}",
                     [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>;

// Conditional store
def MASKMOVDQU : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
                     "maskmovdqu\t{$mask, $src|$src, $mask}",
                     [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>;

// Non-temporal stores
def MOVNTPDmr : PDI<0x2B, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
                    "movntpd\t{$src, $dst|$dst, $src}",
                    [(int_x86_sse2_movnt_pd addr:$dst, VR128:$src)]>;
def MOVNTDQmr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                    "movntdq\t{$src, $dst|$dst, $src}",
                    [(int_x86_sse2_movnt_dq addr:$dst, VR128:$src)]>;
def MOVNTImr  :   I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
                    "movnti\t{$src, $dst|$dst, $src}",
                    [(int_x86_sse2_movnt_i addr:$dst, GR32:$src)]>,
                  TB, Requires<[HasSSE2]>;

def CLFLUSH : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
                "clflush\t$src", [(int_x86_sse2_clflush addr:$src)]>,
              TB, Requires<[HasSSE2]>;

// Load, store, and memory fence
def LFENCE : I<0xAE, MRM5m, (outs), (ins),
               "lfence", [(int_x86_sse2_lfence)]>, TB, Requires<[HasSSE2]>;
def MFENCE : I<0xAE, MRM6m, (outs), (ins),
               "mfence", [(int_x86_sse2_mfence)]>, TB, Requires<[HasSSE2]>;

// Alias instruction that materializes the all-ones vector with pcmpeqd,
// avoiding a constant-pool load.
let isReMaterializable = 1 in
  def V_SETALLONES : PDI<0x76, MRMInitReg, (outs VR128:$dst), (ins),
                         "pcmpeqd\t$dst, $dst",
                         [(set VR128:$dst, (v4i32 immAllOnesV))]>;
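
// pcmpeqd with both operands the same register compares equal in every lane,
// so it always produces all ones; marking the def re-materializable lets the
// register allocator recreate the constant instead of spilling it.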

// FR64 to 128-bit vector conversion.
def MOVSD2PDrr : SDI<0x10, MRMSrcReg, (outs VR128:$dst), (ins FR64:$src),
                     "movsd\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst,
                       (v2f64 (scalar_to_vector FR64:$src)))]>;
def MOVSD2PDrm : SDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
                     "movsd\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst,
                       (v2f64 (scalar_to_vector (loadf64 addr:$src))))]>;

def MOVDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                        (v4i32 (scalar_to_vector GR32:$src)))]>;
def MOVDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                        (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>;

def MOVDI2SSrr : PDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
                     "movd\t{$src, $dst|$dst, $src}",
                     [(set FR32:$dst, (bitconvert GR32:$src))]>;

def MOVDI2SSrm : PDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
                     "movd\t{$src, $dst|$dst, $src}",
                     [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>;

// SSE2 instructions with XS prefix
def MOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                    "movq\t{$src, $dst|$dst, $src}",
                    [(set VR128:$dst,
                      (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
                  Requires<[HasSSE2]>;
def MOVPQI2QImr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
                      "movq\t{$src, $dst|$dst, $src}",
                      [(store (i64 (vector_extract (v2i64 VR128:$src),
                                    (iPTR 0))), addr:$dst)]>;

// FIXME: may not be able to eliminate this movss with coalescing, since the
// src and dest register classes are different. We really want to write this
// pattern like this:
// def : Pat<(f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
//           (f32 FR32:$src)>;
def MOVPD2SDrr : SDI<0x10, MRMSrcReg, (outs FR64:$dst), (ins VR128:$src),
                     "movsd\t{$src, $dst|$dst, $src}",
                     [(set FR64:$dst, (vector_extract (v2f64 VR128:$src),
                                       (iPTR 0)))]>;
def MOVPD2SDmr : SDI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                     "movsd\t{$src, $dst|$dst, $src}",
                     [(store (f64 (vector_extract (v2f64 VR128:$src),
                                   (iPTR 0))), addr:$dst)]>;
def MOVPDI2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
                                        (iPTR 0)))]>;
def MOVPDI2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR128:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(store (i32 (vector_extract (v4i32 VR128:$src),
                                    (iPTR 0))), addr:$dst)]>;

def MOVSS2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
                     "movd\t{$src, $dst|$dst, $src}",
                     [(set GR32:$dst, (bitconvert FR32:$src))]>;
def MOVSS2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
                     "movd\t{$src, $dst|$dst, $src}",
                     [(store (i32 (bitconvert FR32:$src)), addr:$dst)]>;

// Move to lower bits of a VR128, leaving upper bits alone.
// Three operand (but two address) aliases.
let isTwoAddress = 1 in {
  def MOVLSD2PDrr : SDI<0x10, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1, FR64:$src2),
                        "movsd\t{$src2, $dst|$dst, $src2}", []>;

  let AddedComplexity = 15 in
    def MOVLPDrr : SDI<0x10, MRMSrcReg,
                       (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                       "movsd\t{$src2, $dst|$dst, $src2}",
                       [(set VR128:$dst,
                         (v2f64 (vector_shuffle VR128:$src1, VR128:$src2,
                                 MOVL_shuffle_mask)))]>;
}

// Store / copy lower 64-bits of a XMM register.
def MOVLQ128mr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
                     "movq\t{$src, $dst|$dst, $src}",
                     [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>;

// Move to lower bits of a VR128, zeroing the upper bits.
// Loading from memory automatically zeroes the upper bits.
let AddedComplexity = 20 in
  def MOVZSD2PDrm : SDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
                        "movsd\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                          (v2f64 (vector_shuffle immAllZerosV_bc,
                                  (v2f64 (scalar_to_vector
                                          (loadf64 addr:$src))),
                                  MOVL_shuffle_mask)))]>;

// movd / movq to XMM register zero-extends
let AddedComplexity = 15 in
  def MOVZDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
                         "movd\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst,
                           (v4i32 (vector_shuffle immAllZerosV,
                                   (v4i32 (scalar_to_vector GR32:$src)),
                                   MOVL_shuffle_mask)))]>;
let AddedComplexity = 20 in
  def MOVZDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
                         "movd\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst,
                           (v4i32 (vector_shuffle immAllZerosV,
                                   (v4i32 (scalar_to_vector (loadi32 addr:$src))),
                                   MOVL_shuffle_mask)))]>;

// Moving from XMM to XMM while still clearing the upper 64 bits.
let AddedComplexity = 15 in
  def MOVZQI2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "movq\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_movl_dq VR128:$src))]>,
                     XS, Requires<[HasSSE2]>;
let AddedComplexity = 20 in
  def MOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                       "movq\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_movl_dq
                                          (bitconvert (memopv2i64 addr:$src))))]>,
                     XS, Requires<[HasSSE2]>;

//===----------------------------------------------------------------------===//
// SSE3 Instructions
//===----------------------------------------------------------------------===//

// Move Instructions
def MOVSHDUPrr : S3SI<0x16, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                      "movshdup\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (v4f32 (vector_shuffle
                                                VR128:$src, (undef),
                                                MOVSHDUP_shuffle_mask)))]>;
def MOVSHDUPrm : S3SI<0x16, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                      "movshdup\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (v4f32 (vector_shuffle
                                                (memopv4f32 addr:$src), (undef),
                                                MOVSHDUP_shuffle_mask)))]>;

def MOVSLDUPrr : S3SI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                      "movsldup\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (v4f32 (vector_shuffle
                                                VR128:$src, (undef),
                                                MOVSLDUP_shuffle_mask)))]>;
def MOVSLDUPrm : S3SI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                      "movsldup\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (v4f32 (vector_shuffle
                                                (memopv4f32 addr:$src), (undef),
                                                MOVSLDUP_shuffle_mask)))]>;

def MOVDDUPrr : S3DI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                     "movddup\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst, (v2f64 (vector_shuffle
                                               VR128:$src, (undef),
                                               SSE_splat_lo_mask)))]>;
def MOVDDUPrm : S3DI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
                     "movddup\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst,
                       (v2f64 (vector_shuffle
                               (scalar_to_vector (loadf64 addr:$src)),
                               (undef),
                               SSE_splat_lo_mask)))]>;

let isTwoAddress = 1 in {
  def ADDSUBPSrr : S3DI<0xD0, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                        "addsubps\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst, (int_x86_sse3_addsub_ps VR128:$src1,
                                           VR128:$src2))]>;
  def ADDSUBPSrm : S3DI<0xD0, MRMSrcMem,
                        (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                        "addsubps\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst, (int_x86_sse3_addsub_ps VR128:$src1,
                                           (load addr:$src2)))]>;
  def ADDSUBPDrr : S3I<0xD0, MRMSrcReg,
                       (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                       "addsubpd\t{$src2, $dst|$dst, $src2}",
                       [(set VR128:$dst, (int_x86_sse3_addsub_pd VR128:$src1,
                                          VR128:$src2))]>;
  def ADDSUBPDrm : S3I<0xD0, MRMSrcMem,
                       (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                       "addsubpd\t{$src2, $dst|$dst, $src2}",
                       [(set VR128:$dst, (int_x86_sse3_addsub_pd VR128:$src1,
                                          (load addr:$src2)))]>;
}

def LDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                   "lddqu\t{$src, $dst|$dst, $src}",
                   [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>;

class S3D_Intrr<bits<8> o, string OpcodeStr, Intrinsic IntId>
  : S3DI<o, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
         !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
         [(set VR128:$dst, (v4f32 (IntId VR128:$src1, VR128:$src2)))]>;
class S3D_Intrm<bits<8> o, string OpcodeStr, Intrinsic IntId>
  : S3DI<o, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
         !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
         [(set VR128:$dst, (v4f32 (IntId VR128:$src1, (load addr:$src2))))]>;
class S3_Intrr<bits<8> o, string OpcodeStr, Intrinsic IntId>
  : S3I<o, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
        !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
        [(set VR128:$dst, (v2f64 (IntId VR128:$src1, VR128:$src2)))]>;
class S3_Intrm<bits<8> o, string OpcodeStr, Intrinsic IntId>
  : S3I<o, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
        !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
        [(set VR128:$dst, (v2f64 (IntId VR128:$src1, (load addr:$src2))))]>;

let isTwoAddress = 1 in {
  def HADDPSrr : S3D_Intrr<0x7C, "haddps", int_x86_sse3_hadd_ps>;
  def HADDPSrm : S3D_Intrm<0x7C, "haddps", int_x86_sse3_hadd_ps>;
  def HADDPDrr : S3_Intrr <0x7C, "haddpd", int_x86_sse3_hadd_pd>;
  def HADDPDrm : S3_Intrm <0x7C, "haddpd", int_x86_sse3_hadd_pd>;
  def HSUBPSrr : S3D_Intrr<0x7D, "hsubps", int_x86_sse3_hsub_ps>;
  def HSUBPSrm : S3D_Intrm<0x7D, "hsubps", int_x86_sse3_hsub_ps>;
  def HSUBPDrr : S3_Intrr <0x7D, "hsubpd", int_x86_sse3_hsub_pd>;
  def HSUBPDrm : S3_Intrm <0x7D, "hsubpd", int_x86_sse3_hsub_pd>;
}

// Thread synchronization
def MONITOR : I<0xC8, RawFrm, (outs), (ins), "monitor",
                [(int_x86_sse3_monitor EAX, ECX, EDX)]>, TB, Requires<[HasSSE3]>;
def MWAIT   : I<0xC9, RawFrm, (outs), (ins), "mwait",
                [(int_x86_sse3_mwait ECX, EAX)]>, TB, Requires<[HasSSE3]>;

// vector_shuffle v1, <undef> <1, 1, 3, 3>
let AddedComplexity = 15 in
def : Pat<(v4i32 (vector_shuffle VR128:$src, (undef),
                  MOVSHDUP_shuffle_mask)),
          (MOVSHDUPrr VR128:$src)>, Requires<[HasSSE3]>;
let AddedComplexity = 20 in
def : Pat<(v4i32 (vector_shuffle (bc_v4i32 (memopv2i64 addr:$src)), (undef),
                  MOVSHDUP_shuffle_mask)),
          (MOVSHDUPrm addr:$src)>, Requires<[HasSSE3]>;

// vector_shuffle v1, <undef> <0, 0, 2, 2>
let AddedComplexity = 15 in
def : Pat<(v4i32 (vector_shuffle VR128:$src, (undef),
                  MOVSLDUP_shuffle_mask)),
          (MOVSLDUPrr VR128:$src)>, Requires<[HasSSE3]>;
let AddedComplexity = 20 in
def : Pat<(v4i32 (vector_shuffle (bc_v4i32 (memopv2i64 addr:$src)), (undef),
                  MOVSLDUP_shuffle_mask)),
          (MOVSLDUPrm addr:$src)>, Requires<[HasSSE3]>;

//===----------------------------------------------------------------------===//
// SSSE3 Instructions
//===----------------------------------------------------------------------===//

// SSSE3 Instruction Templates:
//
//   SS38I - SSSE3 instructions with T8 prefix.
//   SS3AI - SSSE3 instructions with TA prefix.
//
// Note: SSSE3 instructions come in 64-bit and 128-bit versions; the 64-bit
// versions use the MMX registers. We put those instructions here because they
// fit the SSSE3 instruction category better than the MMX category.

class SS38I<bits<8> o, Format F, dag outs, dag ins, string asm,
            list<dag> pattern>
      : I<o, F, outs, ins, asm, pattern>, T8, Requires<[HasSSSE3]>;
class SS3AI<bits<8> o, Format F, dag outs, dag ins, string asm,
            list<dag> pattern>
      : I<o, F, outs, ins, asm, pattern>, TA, Requires<[HasSSSE3]>;

/// SS3I_unop_rm_int_8 - Simple SSSE3 unary operator whose type is v*i8.
let isTwoAddress = 1 in {
  multiclass SS3I_unop_rm_int_8<bits<8> opc, string OpcodeStr,
                                Intrinsic IntId64, Intrinsic IntId128,
                                bit Commutable = 0> {
    def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst), (ins VR64:$src),
                     !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                     [(set VR64:$dst, (IntId64 VR64:$src))]> {
      let isCommutable = Commutable;
    }
    def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst), (ins i64mem:$src),
                     !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                     [(set VR64:$dst,
                       (IntId64 (bitconvert (memopv8i8 addr:$src))))]>;

    def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
                      (ins VR128:$src),
                      !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                      [(set VR128:$dst, (IntId128 VR128:$src))]>,
                OpSize {
      let isCommutable = Commutable;
    }
    def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
                      (ins i128mem:$src),
                      !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                      [(set VR128:$dst,
                        (IntId128
                         (bitconvert (memopv16i8 addr:$src))))]>, OpSize;
  }
}

/// SS3I_unop_rm_int_16 - Simple SSSE3 unary operator whose type is v*i16.
let isTwoAddress = 1 in {
  multiclass SS3I_unop_rm_int_16<bits<8> opc, string OpcodeStr,
                                 Intrinsic IntId64, Intrinsic IntId128,
                                 bit Commutable = 0> {
    def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
                     (ins VR64:$src),
                     !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                     [(set VR64:$dst, (IntId64 VR64:$src))]> {
      let isCommutable = Commutable;
    }
    def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
                     (ins i64mem:$src),
                     !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                     [(set VR64:$dst,
                       (IntId64
                        (bitconvert (memopv4i16 addr:$src))))]>;

    def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
                      (ins VR128:$src),
                      !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                      [(set VR128:$dst, (IntId128 VR128:$src))]>,
                OpSize {
      let isCommutable = Commutable;
    }
    def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
                      (ins i128mem:$src),
                      !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                      [(set VR128:$dst,
                        (IntId128
                         (bitconvert (memopv8i16 addr:$src))))]>, OpSize;
  }
}

/// SS3I_unop_rm_int_32 - Simple SSSE3 unary operator whose type is v*i32.
let isTwoAddress = 1 in {
  multiclass SS3I_unop_rm_int_32<bits<8> opc, string OpcodeStr,
                                 Intrinsic IntId64, Intrinsic IntId128,
                                 bit Commutable = 0> {
    def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
                     (ins VR64:$src),
                     !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                     [(set VR64:$dst, (IntId64 VR64:$src))]> {
      let isCommutable = Commutable;
    }
    def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
                     (ins i64mem:$src),
                     !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                     [(set VR64:$dst,
                       (IntId64
                        (bitconvert (memopv2i32 addr:$src))))]>;

    def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
                      (ins VR128:$src),
                      !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                      [(set VR128:$dst, (IntId128 VR128:$src))]>,
                OpSize {
      let isCommutable = Commutable;
    }
    def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
                      (ins i128mem:$src),
                      !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                      [(set VR128:$dst,
                        (IntId128
                         (bitconvert (memopv4i32 addr:$src))))]>, OpSize;
  }
}

defm PABSB : SS3I_unop_rm_int_8 <0x1C, "pabsb",
                                 int_x86_ssse3_pabs_b,
                                 int_x86_ssse3_pabs_b_128>;
defm PABSW : SS3I_unop_rm_int_16<0x1D, "pabsw",
                                 int_x86_ssse3_pabs_w,
                                 int_x86_ssse3_pabs_w_128>;
defm PABSD : SS3I_unop_rm_int_32<0x1E, "pabsd",
                                 int_x86_ssse3_pabs_d,
                                 int_x86_ssse3_pabs_d_128>;
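
// Illustration only (not part of the build): each defm above creates both an
// MMX-register and an XMM-register variant, e.g. PABSB expands to
// PABSBrr64/PABSBrm64 (VR64, no OpSize prefix) and PABSBrr128/PABSBrm128
// (VR128, with the 0x66 OpSize prefix).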

/// SS3I_binop_rm_int_8 - Simple SSSE3 binary operator whose type is v*i8.
let isTwoAddress = 1 in {
  multiclass SS3I_binop_rm_int_8<bits<8> opc, string OpcodeStr,
                                 Intrinsic IntId64, Intrinsic IntId128,
                                 bit Commutable = 0> {
    def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
                     (ins VR64:$src1, VR64:$src2),
                     !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                     [(set VR64:$dst, (IntId64 VR64:$src1, VR64:$src2))]> {
      let isCommutable = Commutable;
    }
    def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
                     (ins VR64:$src1, i64mem:$src2),
                     !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                     [(set VR64:$dst,
                       (IntId64 VR64:$src1,
                        (bitconvert (memopv8i8 addr:$src2))))]>;

    def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
                      (ins VR128:$src1, VR128:$src2),
                      !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                      [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
                OpSize {
      let isCommutable = Commutable;
    }
    def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
                      (ins VR128:$src1, i128mem:$src2),
                      !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                      [(set VR128:$dst,
                        (IntId128 VR128:$src1,
                         (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
  }
}

/// SS3I_binop_rm_int_16 - Simple SSSE3 binary operator whose type is v*i16.
let isTwoAddress = 1 in {
  multiclass SS3I_binop_rm_int_16<bits<8> opc, string OpcodeStr,
                                  Intrinsic IntId64, Intrinsic IntId128,
                                  bit Commutable = 0> {
    def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
                     (ins VR64:$src1, VR64:$src2),
                     !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                     [(set VR64:$dst, (IntId64 VR64:$src1, VR64:$src2))]> {
      let isCommutable = Commutable;
    }
    def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
                     (ins VR64:$src1, i64mem:$src2),
                     !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                     [(set VR64:$dst,
                       (IntId64 VR64:$src1,
                        (bitconvert (memopv4i16 addr:$src2))))]>;

    def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
                      (ins VR128:$src1, VR128:$src2),
                      !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                      [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
                OpSize {
      let isCommutable = Commutable;
    }
    def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
                      (ins VR128:$src1, i128mem:$src2),
                      !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                      [(set VR128:$dst,
                        (IntId128 VR128:$src1,
                         (bitconvert (memopv8i16 addr:$src2))))]>, OpSize;
  }
}

/// SS3I_binop_rm_int_32 - Simple SSSE3 binary operator whose type is v*i32.
let isTwoAddress = 1 in {
  multiclass SS3I_binop_rm_int_32<bits<8> opc, string OpcodeStr,
                                  Intrinsic IntId64, Intrinsic IntId128,
                                  bit Commutable = 0> {
    def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
                     (ins VR64:$src1, VR64:$src2),
                     !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                     [(set VR64:$dst, (IntId64 VR64:$src1, VR64:$src2))]> {
      let isCommutable = Commutable;
    }
    def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
                     (ins VR64:$src1, i64mem:$src2),
                     !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                     [(set VR64:$dst,
                       (IntId64 VR64:$src1,
                        (bitconvert (memopv2i32 addr:$src2))))]>;

    def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
                      (ins VR128:$src1, VR128:$src2),
                      !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                      [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
                OpSize {
      let isCommutable = Commutable;
    }
    def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
                      (ins VR128:$src1, i128mem:$src2),
                      !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                      [(set VR128:$dst,
                        (IntId128 VR128:$src1,
                         (bitconvert (memopv4i32 addr:$src2))))]>, OpSize;
  }
}

defm PHADDW    : SS3I_binop_rm_int_16<0x01, "phaddw",
                                      int_x86_ssse3_phadd_w,
                                      int_x86_ssse3_phadd_w_128, 1>;
defm PHADDD    : SS3I_binop_rm_int_32<0x02, "phaddd",
                                      int_x86_ssse3_phadd_d,
                                      int_x86_ssse3_phadd_d_128, 1>;
defm PHADDSW   : SS3I_binop_rm_int_16<0x03, "phaddsw",
                                      int_x86_ssse3_phadd_sw,
                                      int_x86_ssse3_phadd_sw_128, 1>;
defm PHSUBW    : SS3I_binop_rm_int_16<0x05, "phsubw",
                                      int_x86_ssse3_phsub_w,
                                      int_x86_ssse3_phsub_w_128>;
defm PHSUBD    : SS3I_binop_rm_int_32<0x06, "phsubd",
                                      int_x86_ssse3_phsub_d,
                                      int_x86_ssse3_phsub_d_128>;
defm PHSUBSW   : SS3I_binop_rm_int_16<0x07, "phsubsw",
                                      int_x86_ssse3_phsub_sw,
                                      int_x86_ssse3_phsub_sw_128>;
defm PMADDUBSW : SS3I_binop_rm_int_8 <0x04, "pmaddubsw",
                                      int_x86_ssse3_pmadd_ub_sw,
                                      int_x86_ssse3_pmadd_ub_sw_128, 1>;
defm PMULHRSW  : SS3I_binop_rm_int_16<0x0B, "pmulhrsw",
                                      int_x86_ssse3_pmul_hr_sw,
                                      int_x86_ssse3_pmul_hr_sw_128, 1>;
defm PSHUFB    : SS3I_binop_rm_int_8 <0x00, "pshufb",
                                      int_x86_ssse3_pshuf_b,
                                      int_x86_ssse3_pshuf_b_128>;
defm PSIGNB    : SS3I_binop_rm_int_8 <0x08, "psignb",
                                      int_x86_ssse3_psign_b,
                                      int_x86_ssse3_psign_b_128>;
defm PSIGNW    : SS3I_binop_rm_int_16<0x09, "psignw",
                                      int_x86_ssse3_psign_w,
                                      int_x86_ssse3_psign_w_128>;
defm PSIGND    : SS3I_binop_rm_int_32<0x0A, "psignd",
                                      int_x86_ssse3_psign_d,
                                      int_x86_ssse3_psign_d_128>;

let isTwoAddress = 1 in {
  def PALIGNR64rr  : SS3AI<0x0F, MRMSrcReg, (outs VR64:$dst),
                           (ins VR64:$src1, VR64:$src2, i16imm:$src3),
                           "palignr\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                           [(set VR64:$dst,
                             (int_x86_ssse3_palign_r
                              VR64:$src1, VR64:$src2,
                              imm:$src3))]>;
  def PALIGNR64rm  : SS3AI<0x0F, MRMSrcMem, (outs VR64:$dst),
                           (ins VR64:$src1, i64mem:$src2, i16imm:$src3),
                           "palignr\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                           [(set VR64:$dst,
                             (int_x86_ssse3_palign_r
                              VR64:$src1,
                              (bitconvert (memopv2i32 addr:$src2)),
                              imm:$src3))]>;

  def PALIGNR128rr : SS3AI<0x0F, MRMSrcReg, (outs VR128:$dst),
                           (ins VR128:$src1, VR128:$src2, i32imm:$src3),
                           "palignr\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                           [(set VR128:$dst,
                             (int_x86_ssse3_palign_r_128
                              VR128:$src1, VR128:$src2,
                              imm:$src3))]>, OpSize;
  def PALIGNR128rm : SS3AI<0x0F, MRMSrcMem, (outs VR128:$dst),
                           (ins VR128:$src1, i128mem:$src2, i32imm:$src3),
                           "palignr\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                           [(set VR128:$dst,
                             (int_x86_ssse3_palign_r_128
                              VR128:$src1,
                              (bitconvert (memopv4i32 addr:$src2)),
                              imm:$src3))]>, OpSize;
}
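
// palignr concatenates $src1 (high half) with $src2 (low half) and extracts a
// register-width field starting $src3 bytes above the bottom of the pair;
// e.g. "palignr $4, %xmm1, %xmm0" yields bytes 4..19 of the 32-byte xmm0:xmm1
// concatenation, with xmm1 supplying the low bytes.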

//===----------------------------------------------------------------------===//
// Non-Instruction Patterns
//===----------------------------------------------------------------------===//

// 128-bit vector undef's.
def : Pat<(v4f32 (undef)), (IMPLICIT_DEF_VR128)>, Requires<[HasSSE2]>;
def : Pat<(v2f64 (undef)), (IMPLICIT_DEF_VR128)>, Requires<[HasSSE2]>;
def : Pat<(v16i8 (undef)), (IMPLICIT_DEF_VR128)>, Requires<[HasSSE2]>;
def : Pat<(v8i16 (undef)), (IMPLICIT_DEF_VR128)>, Requires<[HasSSE2]>;
def : Pat<(v4i32 (undef)), (IMPLICIT_DEF_VR128)>, Requires<[HasSSE2]>;
def : Pat<(v2i64 (undef)), (IMPLICIT_DEF_VR128)>, Requires<[HasSSE2]>;

// Scalar to v8i16 / v16i8. The source may be a GR32, but only the lower
// 8 or 16 bits matter.
def : Pat<(v8i16 (X86s2vec GR32:$src)), (MOVDI2PDIrr GR32:$src)>,
      Requires<[HasSSE2]>;
def : Pat<(v16i8 (X86s2vec GR32:$src)), (MOVDI2PDIrr GR32:$src)>,
      Requires<[HasSSE2]>;

let Predicates = [HasSSE2] in {
  def : Pat<(v2i64 (bitconvert (v4i32 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v2i64 (bitconvert (v8i16 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v2i64 (bitconvert (v16i8 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v2i64 (bitconvert (v2f64 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v2i64 (bitconvert (v4f32 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v2i64 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v8i16 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v16i8 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v2f64 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v4f32 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v2i64 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v4i32 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v16i8 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v2f64 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v4f32 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v2i64 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v4i32 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v8i16 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v2f64 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v4f32 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v2i64 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v4i32 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v8i16 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v16i8 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v2f64 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v2i64 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v4i32 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v8i16 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v16i8 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v4f32 VR128:$src))), (v2f64 VR128:$src)>;
}
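
// These casts are free at run time: every 128-bit vector type lives in the
// XMM register file, so each pattern above just re-types the register and no
// instruction is emitted.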

// Move scalar to XMM zero-extended. (movd to an XMM register zero-extends.)
let AddedComplexity = 15 in {
  // Zero a VR128, then do a MOVS{S|D} into the lower bits.
  def : Pat<(v2f64 (vector_shuffle immAllZerosV_bc,
                    (v2f64 (scalar_to_vector FR64:$src)), MOVL_shuffle_mask)),
            (MOVLSD2PDrr (V_SET0), FR64:$src)>, Requires<[HasSSE2]>;
  def : Pat<(v4f32 (vector_shuffle immAllZerosV_bc,
                    (v4f32 (scalar_to_vector FR32:$src)), MOVL_shuffle_mask)),
            (MOVLSS2PSrr (V_SET0), FR32:$src)>, Requires<[HasSSE2]>;
}

// Splat v2f64 / v2i64
let AddedComplexity = 10 in {
  def : Pat<(vector_shuffle (v2f64 VR128:$src), (undef), SSE_splat_lo_mask:$sm),
            (UNPCKLPDrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
  def : Pat<(vector_shuffle (v2f64 VR128:$src), (undef), UNPCKH_shuffle_mask:$sm),
            (UNPCKHPDrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
  def : Pat<(vector_shuffle (v2i64 VR128:$src), (undef), SSE_splat_lo_mask:$sm),
            (PUNPCKLQDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
  def : Pat<(vector_shuffle (v2i64 VR128:$src), (undef), UNPCKH_shuffle_mask:$sm),
            (PUNPCKHQDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
}
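
// Unpacking a register with itself is a cheap way to broadcast one element:
// unpcklpd x, x gives <x[0],x[0]> and unpckhpd x, x gives <x[1],x[1]>, which
// is exactly the splat the patterns above select.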

// Splat v4f32
def : Pat<(vector_shuffle (v4f32 VR128:$src), (undef), SSE_splat_mask:$sm),
          (SHUFPSrri VR128:$src, VR128:$src, SSE_splat_mask:$sm)>,
      Requires<[HasSSE1]>;

// Special unary SHUFPSrri case.
// FIXME: when we want non-two-address code, we should use PSHUFD?
def : Pat<(vector_shuffle (v4f32 VR128:$src1), (undef),
           SHUFP_unary_shuffle_mask:$sm),
          (SHUFPSrri VR128:$src1, VR128:$src1, SHUFP_unary_shuffle_mask:$sm)>,
      Requires<[HasSSE1]>;
// Special unary SHUFPDrri case.
def : Pat<(vector_shuffle (v2f64 VR128:$src1), (undef),
           SHUFP_unary_shuffle_mask:$sm),
          (SHUFPDrri VR128:$src1, VR128:$src1, SHUFP_unary_shuffle_mask:$sm)>,
      Requires<[HasSSE2]>;
// Unary v4f32 shuffle with PSHUF* in order to fold a load.
def : Pat<(vector_shuffle (memopv4f32 addr:$src1), (undef),
           SHUFP_unary_shuffle_mask:$sm),
          (PSHUFDmi addr:$src1, SHUFP_unary_shuffle_mask:$sm)>,
      Requires<[HasSSE2]>;
// Special binary v4i32 shuffle cases with SHUFPS.
def : Pat<(vector_shuffle (v4i32 VR128:$src1), (v4i32 VR128:$src2),
           PSHUFD_binary_shuffle_mask:$sm),
          (SHUFPSrri VR128:$src1, VR128:$src2, PSHUFD_binary_shuffle_mask:$sm)>,
      Requires<[HasSSE2]>;
def : Pat<(vector_shuffle (v4i32 VR128:$src1),
           (bc_v4i32 (memopv2i64 addr:$src2)), PSHUFD_binary_shuffle_mask:$sm),
          (SHUFPSrmi VR128:$src1, addr:$src2, PSHUFD_binary_shuffle_mask:$sm)>,
      Requires<[HasSSE2]>;

// vector_shuffle v1, <undef>, <0, 0, 1, 1, ...>
let AddedComplexity = 10 in {
  def : Pat<(v4f32 (vector_shuffle VR128:$src, (undef),
                    UNPCKL_v_undef_shuffle_mask)),
            (UNPCKLPSrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
  def : Pat<(v16i8 (vector_shuffle VR128:$src, (undef),
                    UNPCKL_v_undef_shuffle_mask)),
            (PUNPCKLBWrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
  def : Pat<(v8i16 (vector_shuffle VR128:$src, (undef),
                    UNPCKL_v_undef_shuffle_mask)),
            (PUNPCKLWDrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
  def : Pat<(v4i32 (vector_shuffle VR128:$src, (undef),
                    UNPCKL_v_undef_shuffle_mask)),
            (PUNPCKLDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE1]>;
}

// vector_shuffle v1, <undef>, <2, 2, 3, 3, ...>
let AddedComplexity = 10 in {
  def : Pat<(v4f32 (vector_shuffle VR128:$src, (undef),
                    UNPCKH_v_undef_shuffle_mask)),
            (UNPCKHPSrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
  def : Pat<(v16i8 (vector_shuffle VR128:$src, (undef),
                    UNPCKH_v_undef_shuffle_mask)),
            (PUNPCKHBWrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
  def : Pat<(v8i16 (vector_shuffle VR128:$src, (undef),
                    UNPCKH_v_undef_shuffle_mask)),
            (PUNPCKHWDrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
  def : Pat<(v4i32 (vector_shuffle VR128:$src, (undef),
                    UNPCKH_v_undef_shuffle_mask)),
            (PUNPCKHDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE1]>;
}
let AddedComplexity = 15 in {
// vector_shuffle v1, v2 <0, 1, 4, 5> using MOVLHPS
def : Pat<(v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
                  MOVHP_shuffle_mask)),
          (MOVLHPSrr VR128:$src1, VR128:$src2)>;

// vector_shuffle v1, v2 <6, 7, 2, 3> using MOVHLPS
def : Pat<(v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
                  MOVHLPS_shuffle_mask)),
          (MOVHLPSrr VR128:$src1, VR128:$src2)>;

// vector_shuffle v1, undef <2, ?, ?, ?> using MOVHLPS
def : Pat<(v4f32 (vector_shuffle VR128:$src1, (undef),
                  MOVHLPS_v_undef_shuffle_mask)),
          (MOVHLPSrr VR128:$src1, VR128:$src1)>;
def : Pat<(v4i32 (vector_shuffle VR128:$src1, (undef),
                  MOVHLPS_v_undef_shuffle_mask)),
          (MOVHLPSrr VR128:$src1, VR128:$src1)>;
}
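// Note: with both operands tied to $src1, MOVHLPS copies the high quadword
// of $src1 into the low quadword and leaves the high half untouched, so only
// result element 0 is required to be defined; that is exactly the
// <2, ?, ?, ?> mask above.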
let AddedComplexity = 20 in {
// vector_shuffle v1, (load v2) <4, 5, 2, 3> using MOVLPS
// vector_shuffle v1, (load v2) <0, 1, 4, 5> using MOVHPS
def : Pat<(v4f32 (vector_shuffle VR128:$src1, (memopv4f32 addr:$src2),
                  MOVLP_shuffle_mask)),
          (MOVLPSrm VR128:$src1, addr:$src2)>, Requires<[HasSSE1]>;
def : Pat<(v2f64 (vector_shuffle VR128:$src1, (memopv2f64 addr:$src2),
                  MOVLP_shuffle_mask)),
          (MOVLPDrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
def : Pat<(v4f32 (vector_shuffle VR128:$src1, (memopv4f32 addr:$src2),
                  MOVHP_shuffle_mask)),
          (MOVHPSrm VR128:$src1, addr:$src2)>, Requires<[HasSSE1]>;
def : Pat<(v2f64 (vector_shuffle VR128:$src1, (memopv2f64 addr:$src2),
                  MOVHP_shuffle_mask)),
          (MOVHPDrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;

def : Pat<(v4i32 (vector_shuffle VR128:$src1,
                  (bc_v4i32 (memopv2i64 addr:$src2)), MOVLP_shuffle_mask)),
          (MOVLPSrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
def : Pat<(v2i64 (vector_shuffle VR128:$src1, (memopv2i64 addr:$src2),
                  MOVLP_shuffle_mask)),
          (MOVLPDrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
def : Pat<(v4i32 (vector_shuffle VR128:$src1,
                  (bc_v4i32 (memopv2i64 addr:$src2)), MOVHP_shuffle_mask)),
          (MOVHPSrm VR128:$src1, addr:$src2)>, Requires<[HasSSE1]>;
def : Pat<(v2i64 (vector_shuffle VR128:$src1, (memopv2i64 addr:$src2),
                  MOVHP_shuffle_mask)),
          (MOVHPDrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
}
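// MOVLPS/MOVLPD and MOVHPS/MOVHPD load 64 bits into the low or high half of
// the register while preserving the other half, so a 2-element insert from
// memory folds into a single instruction here.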
let AddedComplexity = 15 in {
// Set the lowest element in the vector.
def : Pat<(v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
                  MOVL_shuffle_mask)),
          (MOVLPSrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
def : Pat<(v2i64 (vector_shuffle VR128:$src1, VR128:$src2,
                  MOVL_shuffle_mask)),
          (MOVLPDrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;

// vector_shuffle v1, v2 <4, 5, 2, 3> using MOVLPDrr (movsd)
def : Pat<(v4f32 (vector_shuffle VR128:$src1, VR128:$src2,
                  MOVLP_shuffle_mask)),
          (MOVLPDrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
def : Pat<(v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
                  MOVLP_shuffle_mask)),
          (MOVLPDrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
}
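// Note: register-to-register movsd copies the low quadword from the second
// operand and preserves the high quadword of the first, which is why it also
// implements the <4, 5, 2, 3> shuffle on 4 x 32-bit element types.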
// Set lowest element and zero upper elements.
let AddedComplexity = 20 in
def : Pat<(bc_v2i64 (vector_shuffle immAllZerosV_bc,
                     (v2f64 (scalar_to_vector (loadf64 addr:$src))),
                     MOVL_shuffle_mask)),
          (MOVZQI2PQIrm addr:$src)>, Requires<[HasSSE2]>;
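// movq from memory zero-extends the 64-bit load into the full XMM register,
// which matches shuffling the loaded scalar against an all-zeros vector.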
// FIXME: Temporary workaround since 2-wide shuffle is broken.
def : Pat<(int_x86_sse2_movs_d VR128:$src1, VR128:$src2),
          (v2f64 (MOVLPDrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
def : Pat<(int_x86_sse2_loadh_pd VR128:$src1, addr:$src2),
          (v2f64 (MOVHPDrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
def : Pat<(int_x86_sse2_loadl_pd VR128:$src1, addr:$src2),
          (v2f64 (MOVLPDrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
def : Pat<(int_x86_sse2_shuf_pd VR128:$src1, VR128:$src2, imm:$src3),
          (v2f64 (SHUFPDrri VR128:$src1, VR128:$src2, imm:$src3))>,
      Requires<[HasSSE2]>;
def : Pat<(int_x86_sse2_shuf_pd VR128:$src1, (load addr:$src2), imm:$src3),
          (v2f64 (SHUFPDrmi VR128:$src1, addr:$src2, imm:$src3))>,
      Requires<[HasSSE2]>;
def : Pat<(int_x86_sse2_unpckh_pd VR128:$src1, VR128:$src2),
          (v2f64 (UNPCKHPDrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
def : Pat<(int_x86_sse2_unpckh_pd VR128:$src1, (load addr:$src2)),
          (v2f64 (UNPCKHPDrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
def : Pat<(int_x86_sse2_unpckl_pd VR128:$src1, VR128:$src2),
          (v2f64 (UNPCKLPDrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
def : Pat<(int_x86_sse2_unpckl_pd VR128:$src1, (load addr:$src2)),
          (v2f64 (UNPCKLPDrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
def : Pat<(int_x86_sse2_punpckh_qdq VR128:$src1, VR128:$src2),
          (v2i64 (PUNPCKHQDQrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
def : Pat<(int_x86_sse2_punpckh_qdq VR128:$src1, (load addr:$src2)),
          (v2i64 (PUNPCKHQDQrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
def : Pat<(int_x86_sse2_punpckl_qdq VR128:$src1, VR128:$src2),
          (v2i64 (PUNPCKLQDQrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
def : Pat<(int_x86_sse2_punpckl_qdq VR128:$src1, (load addr:$src2)),
          (v2i64 (PUNPCKLQDQrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
// Some special-case pandn patterns.
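// pandn computes (~op1) & op2, so (and (xor x, all-ones), y) maps straight
// onto it. The all-ones immediate is matched through bc_v2i64 in each of the
// element widths it may have been built with.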
def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v4i32 immAllOnesV))),
                  VR128:$src2)),
          (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v8i16 immAllOnesV))),
                  VR128:$src2)),
          (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v16i8 immAllOnesV))),
                  VR128:$src2)),
          (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;

def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v4i32 immAllOnesV))),
                  (memopv2i64 addr:$src2))),
          (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v8i16 immAllOnesV))),
                  (memopv2i64 addr:$src2))),
          (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v16i8 immAllOnesV))),
                  (memopv2i64 addr:$src2))),
          (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
// vector -> vector casts
def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
          (Int_CVTDQ2PSrr VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
          (Int_CVTTPS2DQrr VR128:$src)>, Requires<[HasSSE2]>;
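// cvtdq2ps / cvttps2dq convert all four lanes at once; the Int_ instruction
// forms are reused here since they already take and produce VR128 operands.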
// Use movaps / movups for SSE integer load / store (one byte shorter).
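// (movaps is 0F 28 while movdqa is 66 0F 6F; dropping the 66h prefix saves
// the byte, and the two are bitwise-equivalent for whole-register moves.)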
def : Pat<(alignedloadv4i32 addr:$src),
          (MOVAPSrm addr:$src)>, Requires<[HasSSE1]>;
def : Pat<(loadv4i32 addr:$src),
          (MOVUPSrm addr:$src)>, Requires<[HasSSE1]>;
def : Pat<(alignedloadv2i64 addr:$src),
          (MOVAPSrm addr:$src)>, Requires<[HasSSE2]>;
def : Pat<(loadv2i64 addr:$src),
          (MOVUPSrm addr:$src)>, Requires<[HasSSE2]>;

def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
          (MOVAPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
          (MOVAPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
          (MOVAPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
          (MOVAPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(store (v2i64 VR128:$src), addr:$dst),
          (MOVUPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(store (v4i32 VR128:$src), addr:$dst),
          (MOVUPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(store (v8i16 VR128:$src), addr:$dst),
          (MOVUPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(store (v16i8 VR128:$src), addr:$dst),
          (MOVUPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;