//====- X86InstrSSE.td - Describe the X86 Instruction Set --*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the X86 SSE instruction set, defining the instructions,
// and properties of the instructions which are needed for code generation,
// machine code emission, and analysis.
//
//===----------------------------------------------------------------------===//


//===----------------------------------------------------------------------===//
// SSE scalar FP Instructions
//===----------------------------------------------------------------------===//

// CMOV* - Used to implement the SSE SELECT DAG operation.  Expanded after
// instruction selection into a branch sequence.
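// Illustrative sketch (not part of the .td semantics): a DAG such as
//   (f32 (X86cmov %t, %f, cond, EFLAGS))
// is selected to CMOV_FR32, and the custom inserter later rewrites it into a
// diamond of basic blocks: a conditional branch on EFLAGS and a PHI in the
// join block that merges $t and $f.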
let Uses = [EFLAGS], usesCustomInserter = 1 in {
  def CMOV_FR32 : I<0, Pseudo,
                    (outs FR32:$dst), (ins FR32:$t, FR32:$f, i8imm:$cond),
                    "#CMOV_FR32 PSEUDO!",
                    [(set FR32:$dst, (X86cmov FR32:$t, FR32:$f, imm:$cond,
                                              EFLAGS))]>;
  def CMOV_FR64 : I<0, Pseudo,
                    (outs FR64:$dst), (ins FR64:$t, FR64:$f, i8imm:$cond),
                    "#CMOV_FR64 PSEUDO!",
                    [(set FR64:$dst, (X86cmov FR64:$t, FR64:$f, imm:$cond,
                                              EFLAGS))]>;
  def CMOV_V4F32 : I<0, Pseudo,
                    (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
                    "#CMOV_V4F32 PSEUDO!",
                    [(set VR128:$dst,
                      (v4f32 (X86cmov VR128:$t, VR128:$f, imm:$cond,
                                      EFLAGS)))]>;
  def CMOV_V2F64 : I<0, Pseudo,
                    (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
                    "#CMOV_V2F64 PSEUDO!",
                    [(set VR128:$dst,
                      (v2f64 (X86cmov VR128:$t, VR128:$f, imm:$cond,
                                      EFLAGS)))]>;
  def CMOV_V2I64 : I<0, Pseudo,
                    (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
                    "#CMOV_V2I64 PSEUDO!",
                    [(set VR128:$dst,
                      (v2i64 (X86cmov VR128:$t, VR128:$f, imm:$cond,
                                      EFLAGS)))]>;
}
//===----------------------------------------------------------------------===//
// SSE 1 & 2 Instruction Classes
//===----------------------------------------------------------------------===//

/// sse12_fp_scalar - SSE 1 & 2 scalar instructions class
multiclass sse12_fp_scalar<bits<8> opc, string OpcodeStr, SDNode OpNode,
                           RegisterClass RC, X86MemOperand x86memop,
                           bit Is2Addr = 1> {
  let isCommutable = 1 in {
    def rr : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
                !if(Is2Addr,
                    !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                    !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
                [(set RC:$dst, (OpNode RC:$src1, RC:$src2))]>;
  }
  def rm : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
              !if(Is2Addr,
                  !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                  !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
              [(set RC:$dst, (OpNode RC:$src1, (load addr:$src2)))]>;
}
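// Hypothetical instantiation, for illustration only (the real arithmetic
// definitions built from this class appear further down in the file):
//   defm VADD : sse12_fp_scalar<0x58, "vadd", fadd, FR32, f32mem,
//                               0 /* Is2Addr */>, XS, VEX_4V;
// would expand to VADDrr and VADDrm carrying three-operand AVX asm strings.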
/// sse12_fp_scalar_int - SSE 1 & 2 scalar instructions intrinsics class
multiclass sse12_fp_scalar_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
                               string asm, string SSEVer, string FPSizeStr,
                               Operand memopr, ComplexPattern mem_cpat,
                               bit Is2Addr = 1> {
  def rr_Int : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (!nameconcat<Intrinsic>("int_x86_sse",
                       !strconcat(SSEVer, !strconcat("_",
                       !strconcat(OpcodeStr, FPSizeStr))))
             RC:$src1, RC:$src2))]>;
  def rm_Int : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, memopr:$src2),
       !if(Is2Addr,
           !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (!nameconcat<Intrinsic>("int_x86_sse",
                       !strconcat(SSEVer, !strconcat("_",
                       !strconcat(OpcodeStr, FPSizeStr))))
             RC:$src1, mem_cpat:$src2))]>;
}
/// sse12_fp_packed - SSE 1 & 2 packed instructions class
multiclass sse12_fp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
                           RegisterClass RC, ValueType vt,
                           X86MemOperand x86memop, PatFrag mem_frag,
                           Domain d, bit Is2Addr = 1> {
  let isCommutable = 1 in
    def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], d>;
  def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (OpNode RC:$src1, (mem_frag addr:$src2)))], d>;
}

/// sse12_fp_packed_logical_rm - SSE 1 & 2 packed instructions class
multiclass sse12_fp_packed_logical_rm<bits<8> opc, RegisterClass RC, Domain d,
                                      string OpcodeStr, X86MemOperand x86memop,
                                      list<dag> pat_rr, list<dag> pat_rm,
                                      bit Is2Addr = 1> {
  let isCommutable = 1 in
    def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       pat_rr, d>;
  def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       pat_rm, d>;
}

/// sse12_fp_packed_int - SSE 1 & 2 packed instructions intrinsics class
multiclass sse12_fp_packed_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
                               string asm, string SSEVer, string FPSizeStr,
                               X86MemOperand x86memop, PatFrag mem_frag,
                               Domain d, bit Is2Addr = 1> {
  def rr_Int : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (!nameconcat<Intrinsic>("int_x86_",
                       !strconcat(SSEVer, !strconcat("_",
                       !strconcat(OpcodeStr, FPSizeStr))))
             RC:$src1, RC:$src2))], d>;
  def rm_Int : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1,x86memop:$src2),
       !if(Is2Addr,
           !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (!nameconcat<Intrinsic>("int_x86_",
                       !strconcat(SSEVer, !strconcat("_",
                       !strconcat(OpcodeStr, FPSizeStr))))
             RC:$src1, (mem_frag addr:$src2)))], d>;
}
//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move Instructions
//===----------------------------------------------------------------------===//

class sse12_move_rr<RegisterClass RC, ValueType vt, string asm> :
      SI<0x10, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, RC:$src2), asm,
      [(set (vt VR128:$dst), (movl VR128:$src1, (scalar_to_vector RC:$src2)))]>;

// Loading from memory automatically zeroes the upper bits.
class sse12_move_rm<RegisterClass RC, X86MemOperand x86memop,
                    PatFrag mem_pat, string OpcodeStr> :
      SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
         !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
         [(set RC:$dst, (mem_pat addr:$src))]>;

// Move Instructions. Register-to-register movss/movsd is not used for FR32/64
// register copies because it's a partial register update; FsMOVAPSrr/FsMOVAPDrr
// is used instead. Register-to-register movss/movsd is not modeled as an
// INSERT_SUBREG because INSERT_SUBREG requires that the insert be implementable
// in terms of a copy, and, as just mentioned, we don't use movss/movsd for
// copies.
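// Illustrative example of the partial-update hazard (assumed AT&T asm):
//   movss %xmm1, %xmm0    ; writes only bits [31:0] of %xmm0, so the result
//                         ; still depends on the old upper bits of %xmm0
//   movaps %xmm1, %xmm0   ; (FsMOVAPSrr) rewrites the whole register and is
//                         ; therefore the preferred form for a plain copy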
let isAsmParserOnly = 1 in {
  def VMOVSSrr : sse12_move_rr<FR32, v4f32,
                 "movss\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XS, VEX_4V;
  def VMOVSDrr : sse12_move_rr<FR64, v2f64,
                 "movsd\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XD, VEX_4V;

  let canFoldAsLoad = 1, isReMaterializable = 1 in {
    def VMOVSSrm : sse12_move_rm<FR32, f32mem, loadf32, "movss">, XS, VEX;

    let AddedComplexity = 20 in
      def VMOVSDrm : sse12_move_rm<FR64, f64mem, loadf64, "movsd">, XD, VEX;
  }
}

let Constraints = "$src1 = $dst" in {
  def MOVSSrr : sse12_move_rr<FR32, v4f32,
                "movss\t{$src2, $dst|$dst, $src2}">, XS;
  def MOVSDrr : sse12_move_rr<FR64, v2f64,
                "movsd\t{$src2, $dst|$dst, $src2}">, XD;
}

let canFoldAsLoad = 1, isReMaterializable = 1 in {
  def MOVSSrm : sse12_move_rm<FR32, f32mem, loadf32, "movss">, XS;

  let AddedComplexity = 20 in
    def MOVSDrm : sse12_move_rm<FR64, f64mem, loadf64, "movsd">, XD;
}

let AddedComplexity = 15 in {
// Extract the low 32-bit value from one vector and insert it into another.
def : Pat<(v4f32 (movl VR128:$src1, VR128:$src2)),
          (MOVSSrr (v4f32 VR128:$src1),
                   (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_ss))>;
// Extract the low 64-bit value from one vector and insert it into another.
def : Pat<(v2f64 (movl VR128:$src1, VR128:$src2)),
          (MOVSDrr (v2f64 VR128:$src1),
                   (EXTRACT_SUBREG (v2f64 VR128:$src2), sub_sd))>;
}

// Implicitly promote a 32-bit scalar to a vector.
def : Pat<(v4f32 (scalar_to_vector FR32:$src)),
          (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src, sub_ss)>;
// Implicitly promote a 64-bit scalar to a vector.
def : Pat<(v2f64 (scalar_to_vector FR64:$src)),
          (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src, sub_sd)>;

let AddedComplexity = 20 in {
// MOVSSrm zeros the high parts of the register; represent this
// with SUBREG_TO_REG.
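// (SUBREG_TO_REG (i32 0), <def>, sub_ss) asserts that the instruction
// producing the subregister value also defines the rest of the full register
// (as zero, here), so no separate zeroing copy needs to be emitted.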
def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
          (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
          (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
          (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
// MOVSDrm zeros the high parts of the register; represent this
// with SUBREG_TO_REG.
def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
          (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
          (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
          (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
          (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
def : Pat<(v2f64 (X86vzload addr:$src)),
          (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
}

// Store scalar value to memory.
def MOVSSmr : SSI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
                  "movss\t{$src, $dst|$dst, $src}",
                  [(store FR32:$src, addr:$dst)]>;
def MOVSDmr : SDI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
                  "movsd\t{$src, $dst|$dst, $src}",
                  [(store FR64:$src, addr:$dst)]>;

let isAsmParserOnly = 1 in {
def VMOVSSmr : SI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
                  "movss\t{$src, $dst|$dst, $src}",
                  [(store FR32:$src, addr:$dst)]>, XS, VEX;
def VMOVSDmr : SI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
                  "movsd\t{$src, $dst|$dst, $src}",
                  [(store FR64:$src, addr:$dst)]>, XD, VEX;
}

// Extract and store.
def : Pat<(store (f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
                 addr:$dst),
          (MOVSSmr addr:$dst,
                   (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
def : Pat<(store (f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
                 addr:$dst),
          (MOVSDmr addr:$dst,
                   (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd))>;

// Move Aligned/Unaligned floating point values
multiclass sse12_mov_packed<bits<8> opc, RegisterClass RC,
                            X86MemOperand x86memop, PatFrag ld_frag,
                            string asm, Domain d,
                            bit IsReMaterializable = 1> {
let neverHasSideEffects = 1 in
  def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
              !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [], d>;
let canFoldAsLoad = 1, isReMaterializable = IsReMaterializable in
  def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
              !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
              [(set RC:$dst, (ld_frag addr:$src))], d>;
}
let isAsmParserOnly = 1 in {
defm VMOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
                                "movaps", SSEPackedSingle>, VEX;
defm VMOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
                                "movapd", SSEPackedDouble>, OpSize, VEX;
defm VMOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
                                "movups", SSEPackedSingle>, VEX;
defm VMOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
                                "movupd", SSEPackedDouble, 0>, OpSize, VEX;

defm VMOVAPSY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv8f32,
                                 "movaps", SSEPackedSingle>, VEX;
defm VMOVAPDY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv4f64,
                                 "movapd", SSEPackedDouble>, OpSize, VEX;
defm VMOVUPSY : sse12_mov_packed<0x10, VR256, f256mem, loadv8f32,
                                 "movups", SSEPackedSingle>, VEX;
defm VMOVUPDY : sse12_mov_packed<0x10, VR256, f256mem, loadv4f64,
                                 "movupd", SSEPackedDouble, 0>, OpSize, VEX;
}
defm MOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
                               "movaps", SSEPackedSingle>, TB;
defm MOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
                               "movapd", SSEPackedDouble>, TB, OpSize;
defm MOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
                               "movups", SSEPackedSingle>, TB;
defm MOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
                               "movupd", SSEPackedDouble, 0>, TB, OpSize;
let isAsmParserOnly = 1 in {
def VMOVAPSmr : VPSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movaps\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v4f32 VR128:$src), addr:$dst)]>, VEX;
def VMOVAPDmr : VPDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movapd\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v2f64 VR128:$src), addr:$dst)]>, VEX;
def VMOVUPSmr : VPSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movups\t{$src, $dst|$dst, $src}",
                   [(store (v4f32 VR128:$src), addr:$dst)]>, VEX;
def VMOVUPDmr : VPDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movupd\t{$src, $dst|$dst, $src}",
                   [(store (v2f64 VR128:$src), addr:$dst)]>, VEX;
def VMOVAPSYmr : VPSI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                   "movaps\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v8f32 VR256:$src), addr:$dst)]>, VEX;
def VMOVAPDYmr : VPDI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                   "movapd\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v4f64 VR256:$src), addr:$dst)]>, VEX;
def VMOVUPSYmr : VPSI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                   "movups\t{$src, $dst|$dst, $src}",
                   [(store (v8f32 VR256:$src), addr:$dst)]>, VEX;
def VMOVUPDYmr : VPDI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                   "movupd\t{$src, $dst|$dst, $src}",
                   [(store (v4f64 VR256:$src), addr:$dst)]>, VEX;
}

def : Pat<(int_x86_avx_loadu_ps_256 addr:$src), (VMOVUPSYrm addr:$src)>;
def : Pat<(int_x86_avx_storeu_ps_256 addr:$dst, VR256:$src),
          (VMOVUPSYmr addr:$dst, VR256:$src)>;

def : Pat<(int_x86_avx_loadu_pd_256 addr:$src), (VMOVUPDYrm addr:$src)>;
def : Pat<(int_x86_avx_storeu_pd_256 addr:$dst, VR256:$src),
          (VMOVUPDYmr addr:$dst, VR256:$src)>;

def MOVAPSmr : PSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movaps\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v4f32 VR128:$src), addr:$dst)]>;
def MOVAPDmr : PDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movapd\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v2f64 VR128:$src), addr:$dst)]>;
def MOVUPSmr : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movups\t{$src, $dst|$dst, $src}",
                   [(store (v4f32 VR128:$src), addr:$dst)]>;
def MOVUPDmr : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movupd\t{$src, $dst|$dst, $src}",
                   [(store (v2f64 VR128:$src), addr:$dst)]>;
// Intrinsic forms of MOVUPS/D load and store
let isAsmParserOnly = 1 in {
  let canFoldAsLoad = 1, isReMaterializable = 1 in
  def VMOVUPSrm_Int : VPSI<0x10, MRMSrcMem, (outs VR128:$dst),
                           (ins f128mem:$src),
             "movups\t{$src, $dst|$dst, $src}",
             [(set VR128:$dst, (int_x86_sse_loadu_ps addr:$src))]>, VEX;
  def VMOVUPDrm_Int : VPDI<0x10, MRMSrcMem, (outs VR128:$dst),
                           (ins f128mem:$src),
             "movupd\t{$src, $dst|$dst, $src}",
             [(set VR128:$dst, (int_x86_sse2_loadu_pd addr:$src))]>, VEX;
  def VMOVUPSmr_Int : VPSI<0x11, MRMDestMem, (outs),
                           (ins f128mem:$dst, VR128:$src),
             "movups\t{$src, $dst|$dst, $src}",
             [(int_x86_sse_storeu_ps addr:$dst, VR128:$src)]>, VEX;
  def VMOVUPDmr_Int : VPDI<0x11, MRMDestMem, (outs),
                           (ins f128mem:$dst, VR128:$src),
             "movupd\t{$src, $dst|$dst, $src}",
             [(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>, VEX;
}
let canFoldAsLoad = 1, isReMaterializable = 1 in
def MOVUPSrm_Int : PSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                       "movups\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse_loadu_ps addr:$src))]>;
def MOVUPDrm_Int : PDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                       "movupd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_loadu_pd addr:$src))]>;

def MOVUPSmr_Int : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                       "movups\t{$src, $dst|$dst, $src}",
                       [(int_x86_sse_storeu_ps addr:$dst, VR128:$src)]>;
def MOVUPDmr_Int : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                       "movupd\t{$src, $dst|$dst, $src}",
                       [(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>;
// Move Low/High packed floating point values
multiclass sse12_mov_hilo_packed<bits<8> opc, RegisterClass RC,
                                 PatFrag mov_frag, string base_opc,
                                 string asm_opr> {
  def PSrm : PI<opc, MRMSrcMem,
         (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
         !strconcat(!strconcat(base_opc,"s"), asm_opr),
     [(set RC:$dst,
       (mov_frag RC:$src1,
          (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))))],
      SSEPackedSingle>, TB;

  def PDrm : PI<opc, MRMSrcMem,
         (outs RC:$dst), (ins RC:$src1, f64mem:$src2),
         !strconcat(!strconcat(base_opc,"d"), asm_opr),
     [(set RC:$dst, (v2f64 (mov_frag RC:$src1,
                            (scalar_to_vector (loadf64 addr:$src2)))))],
      SSEPackedDouble>, TB, OpSize;
}
let isAsmParserOnly = 1, AddedComplexity = 20 in {
  defm VMOVL : sse12_mov_hilo_packed<0x12, VR128, movlp, "movlp",
                     "\t{$src2, $src1, $dst|$dst, $src1, $src2}">, VEX_4V;
  defm VMOVH : sse12_mov_hilo_packed<0x16, VR128, movlhps, "movhp",
                     "\t{$src2, $src1, $dst|$dst, $src1, $src2}">, VEX_4V;
}
let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
  defm MOVL : sse12_mov_hilo_packed<0x12, VR128, movlp, "movlp",
                                    "\t{$src2, $dst|$dst, $src2}">;
  defm MOVH : sse12_mov_hilo_packed<0x16, VR128, movlhps, "movhp",
                                    "\t{$src2, $dst|$dst, $src2}">;
}

let isAsmParserOnly = 1 in {
def VMOVLPSmr : VPSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
                                 (iPTR 0))), addr:$dst)]>, VEX;
def VMOVLPDmr : VPDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlpd\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract (v2f64 VR128:$src),
                                 (iPTR 0))), addr:$dst)]>, VEX;
}
def MOVLPSmr : PSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
                                 (iPTR 0))), addr:$dst)]>;
def MOVLPDmr : PDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlpd\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract (v2f64 VR128:$src),
                                 (iPTR 0))), addr:$dst)]>;

// v2f64 extract element 1 is always custom lowered to unpack high to low
// and extract element 0 so the non-store version isn't too horrible.
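// Sketch of that lowering (illustrative, not a pattern in this file):
//   (f64 (vector_extract (v2f64 V), 1))
// becomes roughly
//   (f64 (vector_extract (unpckh V, V), 0))
// i.e. UNPCKHPD to move the high element into lane 0, then the cheap
// element-0 extract.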
let isAsmParserOnly = 1 in {
def VMOVHPSmr : VPSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract
                                 (unpckh (bc_v2f64 (v4f32 VR128:$src)),
                                         (undef)), (iPTR 0))), addr:$dst)]>,
                   VEX;
def VMOVHPDmr : VPDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhpd\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract
                                 (v2f64 (unpckh VR128:$src, (undef))),
                                 (iPTR 0))), addr:$dst)]>,
                   VEX;
}
def MOVHPSmr : PSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract
                                 (unpckh (bc_v2f64 (v4f32 VR128:$src)),
                                         (undef)), (iPTR 0))), addr:$dst)]>;
def MOVHPDmr : PDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhpd\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract
                                 (v2f64 (unpckh VR128:$src, (undef))),
                                 (iPTR 0))), addr:$dst)]>;

let isAsmParserOnly = 1, AddedComplexity = 20 in {
  def VMOVLHPSrr : VPSI<0x16, MRMSrcReg, (outs VR128:$dst),
                                       (ins VR128:$src1, VR128:$src2),
                      "movlhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                      [(set VR128:$dst,
                        (v4f32 (movlhps VR128:$src1, VR128:$src2)))]>,
                      VEX_4V;
  def VMOVHLPSrr : VPSI<0x12, MRMSrcReg, (outs VR128:$dst),
                                       (ins VR128:$src1, VR128:$src2),
                      "movhlps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                      [(set VR128:$dst,
                        (v4f32 (movhlps VR128:$src1, VR128:$src2)))]>,
                      VEX_4V;
}
let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
  def MOVLHPSrr : PSI<0x16, MRMSrcReg, (outs VR128:$dst),
                                       (ins VR128:$src1, VR128:$src2),
                      "movlhps\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                        (v4f32 (movlhps VR128:$src1, VR128:$src2)))]>;
  def MOVHLPSrr : PSI<0x12, MRMSrcReg, (outs VR128:$dst),
                                       (ins VR128:$src1, VR128:$src2),
                      "movhlps\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                        (v4f32 (movhlps VR128:$src1, VR128:$src2)))]>;
}

def : Pat<(movlhps VR128:$src1, (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
          (MOVHPSrm (v4i32 VR128:$src1), addr:$src2)>;
let AddedComplexity = 20 in {
  def : Pat<(v4f32 (movddup VR128:$src, (undef))),
            (MOVLHPSrr (v4f32 VR128:$src), (v4f32 VR128:$src))>;
  def : Pat<(v2i64 (movddup VR128:$src, (undef))),
            (MOVLHPSrr (v2i64 VR128:$src), (v2i64 VR128:$src))>;
}
//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Conversion Instructions
//===----------------------------------------------------------------------===//

multiclass sse12_cvt_s<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                       SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
                       string asm> {
  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
              [(set DstRC:$dst, (OpNode SrcRC:$src))]>;
  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
              [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))]>;
}

multiclass sse12_cvt_s_np<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                          X86MemOperand x86memop, string asm> {
  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm, []>;
  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm, []>;
}

multiclass sse12_cvt_p<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                       SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
                       string asm, Domain d> {
  def rr : PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
              [(set DstRC:$dst, (OpNode SrcRC:$src))], d>;
  def rm : PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
              [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))], d>;
}

multiclass sse12_vcvt_avx<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                          X86MemOperand x86memop, string asm> {
  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src),
              !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>;
  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
              (ins DstRC:$src1, x86memop:$src),
              !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>;
}

let isAsmParserOnly = 1 in {
defm VCVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
                              "cvttss2si\t{$src, $dst|$dst, $src}">, XS, VEX;
defm VCVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
                                "cvttss2si\t{$src, $dst|$dst, $src}">, XS, VEX,
                                VEX_W;
defm VCVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
                              "cvttsd2si\t{$src, $dst|$dst, $src}">, XD, VEX;
defm VCVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
                                "cvttsd2si\t{$src, $dst|$dst, $src}">, XD,
                                VEX, VEX_W;

// The assembler can recognize rr 64-bit instructions by seeing a rxx
// register, but the same isn't true when only using memory operands;
// provide other assembly "l" and "q" forms to address this explicitly
// where appropriate to do so.
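// For example (illustrative AT&T syntax):
//   vcvtsi2ss %rax, %xmm0, %xmm0     ; 64-bit form inferred from %rax
//   vcvtsi2ssq (%rsp), %xmm0, %xmm0  ; memory source: the explicit "q"
//                                    ; suffix is the only size indication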
defm VCVTSI2SS   : sse12_vcvt_avx<0x2A, GR32, FR32, i32mem, "cvtsi2ss">, XS,
                                  VEX_4V;
defm VCVTSI2SS64 : sse12_vcvt_avx<0x2A, GR64, FR32, i64mem, "cvtsi2ss{q}">, XS,
                                  VEX_4V, VEX_W;
defm VCVTSI2SD   : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sd">, XD,
                                  VEX_4V;
defm VCVTSI2SDL  : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sd{l}">, XD,
                                  VEX_4V;
defm VCVTSI2SD64 : sse12_vcvt_avx<0x2A, GR64, FR64, i64mem, "cvtsi2sd{q}">, XD,
                                  VEX_4V, VEX_W;
}

defm CVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
                      "cvttss2si\t{$src, $dst|$dst, $src}">, XS;
defm CVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
                      "cvttss2si{q}\t{$src, $dst|$dst, $src}">, XS, REX_W;
defm CVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
                      "cvttsd2si\t{$src, $dst|$dst, $src}">, XD;
defm CVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
                      "cvttsd2si{q}\t{$src, $dst|$dst, $src}">, XD, REX_W;
defm CVTSI2SS  : sse12_cvt_s<0x2A, GR32, FR32, sint_to_fp, i32mem, loadi32,
                      "cvtsi2ss\t{$src, $dst|$dst, $src}">, XS;
defm CVTSI2SS64 : sse12_cvt_s<0x2A, GR64, FR32, sint_to_fp, i64mem, loadi64,
                      "cvtsi2ss{q}\t{$src, $dst|$dst, $src}">, XS, REX_W;
defm CVTSI2SD  : sse12_cvt_s<0x2A, GR32, FR64, sint_to_fp, i32mem, loadi32,
                      "cvtsi2sd\t{$src, $dst|$dst, $src}">, XD;
defm CVTSI2SD64 : sse12_cvt_s<0x2A, GR64, FR64, sint_to_fp, i64mem, loadi64,
                      "cvtsi2sd{q}\t{$src, $dst|$dst, $src}">, XD, REX_W;

// Conversion Instructions Intrinsics - Match intrinsics which expect MM
// and/or XMM operand(s).

multiclass sse12_cvt_sint<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                         Intrinsic Int, X86MemOperand x86memop, PatFrag ld_frag,
                          string asm> {
  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
              !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
              [(set DstRC:$dst, (Int SrcRC:$src))]>;
  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
              !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
              [(set DstRC:$dst, (Int (ld_frag addr:$src)))]>;
}

multiclass sse12_cvt_sint_3addr<bits<8> opc, RegisterClass SrcRC,
                    RegisterClass DstRC, Intrinsic Int, X86MemOperand x86memop,
                    PatFrag ld_frag, string asm, bit Is2Addr = 1> {
  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src2),
              !if(Is2Addr,
                  !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
                  !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
              [(set DstRC:$dst, (Int DstRC:$src1, SrcRC:$src2))]>;
  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
              (ins DstRC:$src1, x86memop:$src2),
              !if(Is2Addr,
                  !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
                  !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
              [(set DstRC:$dst, (Int DstRC:$src1, (ld_frag addr:$src2)))]>;
}

let isAsmParserOnly = 1 in {
  defm Int_VCVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si,
                        f32mem, load, "cvtss2si">, XS, VEX;
  defm Int_VCVTSS2SI64 : sse12_cvt_sint<0x2D, VR128, GR64,
                          int_x86_sse_cvtss2si64, f32mem, load, "cvtss2si">,
                          XS, VEX, VEX_W;
  defm Int_VCVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
                        f128mem, load, "cvtsd2si">, XD, VEX;
  defm Int_VCVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64,
                          int_x86_sse2_cvtsd2si64, f128mem, load, "cvtsd2si">,
                          XD, VEX, VEX_W;

  // FIXME: The asm matcher has a hack to ignore instructions with _Int and
  // Int_ prefixes. Get rid of this hack or rename the intrinsics; there are
  // several instructions that only match with the intrinsic form, so why
  // create duplicates just to let the assembler recognize them?
  defm VCVTSD2SI_alt : sse12_cvt_s_np<0x2D, FR64, GR32, f64mem,
                        "cvtsd2si\t{$src, $dst|$dst, $src}">, XD, VEX;
  defm VCVTSD2SI64   : sse12_cvt_s_np<0x2D, FR64, GR64, f64mem,
                        "cvtsd2si\t{$src, $dst|$dst, $src}">, XD, VEX, VEX_W;
}

defm Int_CVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si,
                      f32mem, load, "cvtss2si">, XS;
defm Int_CVTSS2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse_cvtss2si64,
                        f32mem, load, "cvtss2si{q}">, XS, REX_W;
defm CVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
                  f128mem, load, "cvtsd2si{l}">, XD;
defm CVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse2_cvtsd2si64,
                    f128mem, load, "cvtsd2si{q}">, XD, REX_W;

let isAsmParserOnly = 1 in {
  defm Int_VCVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
            int_x86_sse_cvtsi2ss, i32mem, loadi32, "cvtsi2ss", 0>, XS, VEX_4V;
  defm Int_VCVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
            int_x86_sse_cvtsi642ss, i64mem, loadi64, "cvtsi2ss", 0>, XS, VEX_4V,
            VEX_W;
  defm Int_VCVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
            int_x86_sse2_cvtsi2sd, i32mem, loadi32, "cvtsi2sd", 0>, XD, VEX_4V;
  defm Int_VCVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
            int_x86_sse2_cvtsi642sd, i64mem, loadi64, "cvtsi2sd", 0>, XD,
            VEX_4V, VEX_W;
}

let Constraints = "$src1 = $dst" in {
  defm Int_CVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
                        int_x86_sse_cvtsi2ss, i32mem, loadi32,
                        "cvtsi2ss">, XS;
  defm Int_CVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
                          int_x86_sse_cvtsi642ss, i64mem, loadi64,
                          "cvtsi2ss{q}">, XS, REX_W;
  defm Int_CVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
                        int_x86_sse2_cvtsi2sd, i32mem, loadi32,
                        "cvtsi2sd">, XD;
  defm Int_CVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
                          int_x86_sse2_cvtsi642sd, i64mem, loadi64,
                          "cvtsi2sd">, XD, REX_W;
}

// Aliases for intrinsics
let isAsmParserOnly = 1 in {
defm Int_VCVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
                                     f32mem, load, "cvttss2si">, XS, VEX;
defm Int_VCVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
                                       int_x86_sse_cvttss2si64, f32mem, load,
                                       "cvttss2si">, XS, VEX, VEX_W;
defm Int_VCVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
                                     f128mem, load, "cvttsd2si">, XD, VEX;
defm Int_VCVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
                                       int_x86_sse2_cvttsd2si64, f128mem, load,
                                       "cvttsd2si">, XD, VEX, VEX_W;
}
defm Int_CVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
                                    f32mem, load, "cvttss2si">, XS;
defm Int_CVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
                                      int_x86_sse_cvttss2si64, f32mem, load,
                                      "cvttss2si{q}">, XS, REX_W;
defm Int_CVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
                                    f128mem, load, "cvttsd2si">, XD;
defm Int_CVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
                                      int_x86_sse2_cvttsd2si64, f128mem, load,
                                      "cvttsd2si{q}">, XD, REX_W;
let isAsmParserOnly = 1, Pattern = []<dag> in {
defm VCVTSS2SI   : sse12_cvt_s<0x2D, FR32, GR32, undef, f32mem, load,
                               "cvtss2si{l}\t{$src, $dst|$dst, $src}">, XS, VEX;
defm VCVTSS2SI64 : sse12_cvt_s<0x2D, FR32, GR64, undef, f32mem, load,
                               "cvtss2si\t{$src, $dst|$dst, $src}">, XS, VEX,
                               VEX_W;
defm VCVTDQ2PS   : sse12_cvt_p<0x5B, VR128, VR128, undef, i128mem, load,
                               "cvtdq2ps\t{$src, $dst|$dst, $src}",
                               SSEPackedSingle>, TB, VEX;
defm VCVTDQ2PSY  : sse12_cvt_p<0x5B, VR256, VR256, undef, i256mem, load,
                               "cvtdq2ps\t{$src, $dst|$dst, $src}",
                               SSEPackedSingle>, TB, VEX;
}
let Pattern = []<dag> in {
defm CVTSS2SI : sse12_cvt_s<0x2D, FR32, GR32, undef, f32mem, load /*dummy*/,
                          "cvtss2si{l}\t{$src, $dst|$dst, $src}">, XS;
defm CVTSS2SI64 : sse12_cvt_s<0x2D, FR32, GR64, undef, f32mem, load /*dummy*/,
                            "cvtss2si{q}\t{$src, $dst|$dst, $src}">, XS, REX_W;
defm CVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, undef, i128mem, load /*dummy*/,
                            "cvtdq2ps\t{$src, $dst|$dst, $src}",
                            SSEPackedSingle>, TB; /* PD SSE3 form is available */
}
// Convert scalar double to scalar single
let isAsmParserOnly = 1 in {
def VCVTSD2SSrr  : VSDI<0x5A, MRMSrcReg, (outs FR32:$dst),
                       (ins FR64:$src1, FR64:$src2),
                      "cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
                      VEX_4V;
def VCVTSD2SSrm  : I<0x5A, MRMSrcMem, (outs FR32:$dst),
                     (ins FR64:$src1, f64mem:$src2),
                      "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                      []>, XD, Requires<[HasAVX, OptForSize]>, VEX_4V;
}
def CVTSD2SSrr  : SDI<0x5A, MRMSrcReg, (outs FR32:$dst), (ins FR64:$src),
                      "cvtsd2ss\t{$src, $dst|$dst, $src}",
                      [(set FR32:$dst, (fround FR64:$src))]>;
def CVTSD2SSrm  : I<0x5A, MRMSrcMem, (outs FR32:$dst), (ins f64mem:$src),
                      "cvtsd2ss\t{$src, $dst|$dst, $src}",
                      [(set FR32:$dst, (fround (loadf64 addr:$src)))]>, XD,
                      Requires<[HasSSE2, OptForSize]>;

let isAsmParserOnly = 1 in
defm Int_VCVTSD2SS: sse12_cvt_sint_3addr<0x5A, VR128, VR128,
                      int_x86_sse2_cvtsd2ss, f64mem, load, "cvtsd2ss", 0>,
                      XS, VEX_4V;
let Constraints = "$src1 = $dst" in
defm Int_CVTSD2SS: sse12_cvt_sint_3addr<0x5A, VR128, VR128,
                     int_x86_sse2_cvtsd2ss, f64mem, load, "cvtsd2ss">, XS;

// Convert scalar single to scalar double
let isAsmParserOnly = 1 in { // SSE2 instructions with XS prefix
def VCVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst),
                    (ins FR32:$src1, FR32:$src2),
                    "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    []>, XS, Requires<[HasAVX]>, VEX_4V;
def VCVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst),
                    (ins FR32:$src1, f32mem:$src2),
                    "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    []>, XS, VEX_4V, Requires<[HasAVX, OptForSize]>;
}
def CVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst), (ins FR32:$src),
                   "cvtss2sd\t{$src, $dst|$dst, $src}",
                   [(set FR64:$dst, (fextend FR32:$src))]>, XS,
                 Requires<[HasSSE2]>;
def CVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst), (ins f32mem:$src),
                   "cvtss2sd\t{$src, $dst|$dst, $src}",
                   [(set FR64:$dst, (extloadf32 addr:$src))]>, XS,
                 Requires<[HasSSE2, OptForSize]>;

let isAsmParserOnly = 1 in {
def Int_VCVTSS2SDrr: I<0x5A, MRMSrcReg,
                      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                    "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
                                       VR128:$src2))]>, XS, VEX_4V,
                    Requires<[HasAVX]>;
def Int_VCVTSS2SDrm: I<0x5A, MRMSrcMem,
                      (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2),
                    "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
                                       (load addr:$src2)))]>, XS, VEX_4V,
                    Requires<[HasAVX]>;
}
let Constraints = "$src1 = $dst" in { // SSE2 instructions with XS prefix
def Int_CVTSS2SDrr: I<0x5A, MRMSrcReg,
                      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                    "cvtss2sd\t{$src2, $dst|$dst, $src2}",
                    [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
                                       VR128:$src2))]>, XS,
                    Requires<[HasSSE2]>;
def Int_CVTSS2SDrm: I<0x5A, MRMSrcMem,
                      (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2),
                    "cvtss2sd\t{$src2, $dst|$dst, $src2}",
                    [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
                                       (load addr:$src2)))]>, XS,
                    Requires<[HasSSE2]>;
}

def : Pat<(extloadf32 addr:$src),
          (CVTSS2SDrr (MOVSSrm addr:$src))>,
      Requires<[HasSSE2, OptForSpeed]>;
// Convert doubleword to packed single/double fp
let isAsmParserOnly = 1 in { // SSE2 instructions without OpSize prefix
def Int_VCVTDQ2PSrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "vcvtdq2ps\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))]>,
                       TB, VEX, Requires<[HasAVX]>;
def Int_VCVTDQ2PSrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                      "vcvtdq2ps\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (int_x86_sse2_cvtdq2ps
                                        (bitconvert (memopv2i64 addr:$src))))]>,
                      TB, VEX, Requires<[HasAVX]>;
}
def Int_CVTDQ2PSrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtdq2ps\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))]>,
                       TB, Requires<[HasSSE2]>;
def Int_CVTDQ2PSrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                      "cvtdq2ps\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (int_x86_sse2_cvtdq2ps
                                        (bitconvert (memopv2i64 addr:$src))))]>,
                      TB, Requires<[HasSSE2]>;

// FIXME: why is the non-intrinsic version described as SSE3?
let isAsmParserOnly = 1 in { // SSE2 instructions with XS prefix
def Int_VCVTDQ2PDrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "vcvtdq2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))]>,
                     XS, VEX, Requires<[HasAVX]>;
def Int_VCVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                       "vcvtdq2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2pd
                                        (bitconvert (memopv2i64 addr:$src))))]>,
                     XS, VEX, Requires<[HasAVX]>;
}
def Int_CVTDQ2PDrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtdq2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))]>,
                     XS, Requires<[HasSSE2]>;
def Int_CVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                       "cvtdq2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2pd
                                        (bitconvert (memopv2i64 addr:$src))))]>,
                     XS, Requires<[HasSSE2]>;
// Convert packed single/double fp to doubleword
let isAsmParserOnly = 1 in {
def VCVTPS2DQrr  : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPS2DQrm  : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                       "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPS2DQYrr : VPDI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
                       "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPS2DQYrm : VPDI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
                       "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
}
def CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                     "cvtps2dq\t{$src, $dst|$dst, $src}", []>;
def CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                     "cvtps2dq\t{$src, $dst|$dst, $src}", []>;

let isAsmParserOnly = 1 in {
def Int_VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "cvtps2dq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))]>,
                        VEX;
def Int_VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst),
                         (ins f128mem:$src),
                         "cvtps2dq\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst, (int_x86_sse2_cvtps2dq
                                            (memop addr:$src)))]>, VEX;
}
def Int_CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "cvtps2dq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))]>;
def Int_CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                         "cvtps2dq\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst, (int_x86_sse2_cvtps2dq
                                            (memop addr:$src)))]>;

let isAsmParserOnly = 1 in { // SSE2 packed instructions with XD prefix
def Int_VCVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "vcvtpd2dq\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
                     XD, VEX, Requires<[HasAVX]>;
def Int_VCVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                       "vcvtpd2dq\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtpd2dq
                                          (memop addr:$src)))]>,
                     XD, VEX, Requires<[HasAVX]>;
}
def Int_CVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtpd2dq\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
                     XD, Requires<[HasSSE2]>;
def Int_CVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                       "cvtpd2dq\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtpd2dq
                                          (memop addr:$src)))]>,
                     XD, Requires<[HasSSE2]>;

// Convert with truncation packed single/double fp to doubleword
let isAsmParserOnly = 1 in { // SSE2 packed instructions with XS prefix
def VCVTTPS2DQrr : VSSI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                      "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTTPS2DQrm : VSSI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                      "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTTPS2DQYrr : VSSI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
                      "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTTPS2DQYrm : VSSI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
                      "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
}
def CVTTPS2DQrr : SSI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                      "cvttps2dq\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                            (int_x86_sse2_cvttps2dq VR128:$src))]>;
def CVTTPS2DQrm : SSI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                      "cvttps2dq\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                            (int_x86_sse2_cvttps2dq (memop addr:$src)))]>;

let isAsmParserOnly = 1 in {
def Int_VCVTTPS2DQrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "vcvttps2dq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                              (int_x86_sse2_cvttps2dq VR128:$src))]>,
                      XS, VEX, Requires<[HasAVX]>;
def Int_VCVTTPS2DQrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                        "vcvttps2dq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cvttps2dq
                                           (memop addr:$src)))]>,
                      XS, VEX, Requires<[HasAVX]>;
}

let isAsmParserOnly = 1 in {
def Int_VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst),
                            (ins VR128:$src),
                          "cvttpd2dq\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))]>,
                       VEX;
def Int_VCVTTPD2DQrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst),
                            (ins f128mem:$src),
                          "cvttpd2dq\t{$src, $dst|$dst, $src}",
                          [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
                                             (memop addr:$src)))]>, VEX;
}
def CVTTPD2DQrr : PDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                      "cvttpd2dq\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))]>;
def CVTTPD2DQrm : PDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                      "cvttpd2dq\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
                                         (memop addr:$src)))]>;
let isAsmParserOnly = 1 in {
// The assembler can recognize rr 256-bit instructions by seeing a ymm
// register, but the same isn't true when using memory operands instead.
// Provide other assembly rr and rm forms to address this explicitly.
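// For example (illustrative AT&T syntax):
//   vcvttpd2dq %ymm1, %xmm0    ; source size inferred from the ymm register
//   vcvttpd2dqy (%rax), %xmm0  ; memory source: the "y" suffix is needed to
//                              ; distinguish a 256-bit load from a 128-bit one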
def VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvttpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTTPD2DQXrYr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
                       "cvttpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;

// XMM only
def VCVTTPD2DQXrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                      "cvttpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTTPD2DQXrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                      "cvttpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;

// YMM only
def VCVTTPD2DQYrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
                      "cvttpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTTPD2DQYrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
                      "cvttpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
}

// Convert packed single to packed double
let isAsmParserOnly = 1, Predicates = [HasAVX] in {
  // SSE2 instructions without OpSize prefix
def VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                    "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
                    "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPS2PDYrr : I<0x5A, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
                    "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPS2PDYrm : I<0x5A, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
                    "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
}
def CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                   "cvtps2pd\t{$src, $dst|$dst, $src}", []>, TB;
def CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
                   "cvtps2pd\t{$src, $dst|$dst, $src}", []>, TB;

let isAsmParserOnly = 1 in {
def Int_VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "vcvtps2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>,
                     VEX, Requires<[HasAVX]>;
def Int_VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
                       "vcvtps2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtps2pd
                                          (load addr:$src)))]>,
                     VEX, Requires<[HasAVX]>;
}
def Int_CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtps2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>,
                     TB, Requires<[HasSSE2]>;
def Int_CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
                       "cvtps2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtps2pd
                                          (load addr:$src)))]>,
                     TB, Requires<[HasSSE2]>;

// Convert packed double to packed single
let isAsmParserOnly = 1 in {
// The assembler can recognize rr 256-bit instructions by seeing a ymm
// register, but the same isn't true when using memory operands instead.
// Provide other assembly rr and rm forms to address this explicitly.
def VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                     "cvtpd2ps\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPD2PSXrYr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
                       "cvtpd2ps\t{$src, $dst|$dst, $src}", []>, VEX;

// XMM only
def VCVTPD2PSXrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                      "cvtpd2psx\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPD2PSXrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                      "cvtpd2psx\t{$src, $dst|$dst, $src}", []>, VEX;

// YMM only
def VCVTPD2PSYrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
                      "cvtpd2psy\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPD2PSYrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
                      "cvtpd2psy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
}
def CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                     "cvtpd2ps\t{$src, $dst|$dst, $src}", []>;
def CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                     "cvtpd2ps\t{$src, $dst|$dst, $src}", []>;
let isAsmParserOnly = 1 in {
def Int_VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtpd2ps\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>;
def Int_VCVTPD2PSrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst),
                           (ins f128mem:$src),
                         "cvtpd2ps\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst, (int_x86_sse2_cvtpd2ps
                                            (memop addr:$src)))]>;
}
def Int_CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtpd2ps\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>;
def Int_CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                         "cvtpd2ps\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst, (int_x86_sse2_cvtpd2ps
                                            (memop addr:$src)))]>;

// AVX 256-bit register conversion intrinsics
// FIXME: Migrate SSE conversion intrinsics matching to use patterns as below
// whenever possible to avoid declaring two versions of each one.
def : Pat<(int_x86_avx_cvtdq2_ps_256 VR256:$src),
          (VCVTDQ2PSYrr VR256:$src)>;
def : Pat<(int_x86_avx_cvtdq2_ps_256 (memopv8i32 addr:$src)),
          (VCVTDQ2PSYrm addr:$src)>;

def : Pat<(int_x86_avx_cvt_pd2_ps_256 VR256:$src),
          (VCVTPD2PSYrr VR256:$src)>;
def : Pat<(int_x86_avx_cvt_pd2_ps_256 (memopv4f64 addr:$src)),
          (VCVTPD2PSYrm addr:$src)>;

def : Pat<(int_x86_avx_cvt_ps2dq_256 VR256:$src),
          (VCVTPS2DQYrr VR256:$src)>;
def : Pat<(int_x86_avx_cvt_ps2dq_256 (memopv8f32 addr:$src)),
          (VCVTPS2DQYrm addr:$src)>;

def : Pat<(int_x86_avx_cvt_ps2_pd_256 VR128:$src),
          (VCVTPS2PDYrr VR128:$src)>;
def : Pat<(int_x86_avx_cvt_ps2_pd_256 (memopv4f32 addr:$src)),
          (VCVTPS2PDYrm addr:$src)>;

def : Pat<(int_x86_avx_cvtt_pd2dq_256 VR256:$src),
          (VCVTTPD2DQYrr VR256:$src)>;
def : Pat<(int_x86_avx_cvtt_pd2dq_256 (memopv4f64 addr:$src)),
          (VCVTTPD2DQYrm addr:$src)>;

def : Pat<(int_x86_avx_cvtt_ps2dq_256 VR256:$src),
          (VCVTTPS2DQYrr VR256:$src)>;
def : Pat<(int_x86_avx_cvtt_ps2dq_256 (memopv8f32 addr:$src)),
          (VCVTTPS2DQYrm addr:$src)>;
//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Compare Instructions
//===----------------------------------------------------------------------===//

// sse12_cmp_scalar - sse 1 & 2 compare scalar instructions
multiclass sse12_cmp_scalar<RegisterClass RC, X86MemOperand x86memop,
                            string asm, string asm_alt> {
  def rr : SIi8<0xC2, MRMSrcReg,
                (outs RC:$dst), (ins RC:$src1, RC:$src, SSECC:$cc),
                asm, []>;
  def rm : SIi8<0xC2, MRMSrcMem,
                (outs RC:$dst), (ins RC:$src1, x86memop:$src, SSECC:$cc),
                asm, []>;
  // Accept explicit immediate argument form instead of comparison code.
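  // For example (illustrative): "cmpless %xmm1, %xmm0" and
  // "cmpss $2, %xmm1, %xmm0" are two spellings of the same comparison,
  // since immediate 2 encodes the LE predicate.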
  let isAsmParserOnly = 1 in {
    def rr_alt : SIi8<0xC2, MRMSrcReg,
                  (outs RC:$dst), (ins RC:$src1, RC:$src, i8imm:$src2),
                  asm_alt, []>;
    def rm_alt : SIi8<0xC2, MRMSrcMem,
                  (outs RC:$dst), (ins RC:$src1, x86memop:$src, i8imm:$src2),
                  asm_alt, []>;
  }
}

let neverHasSideEffects = 1, isAsmParserOnly = 1 in {
  defm VCMPSS : sse12_cmp_scalar<FR32, f32mem,
                 "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}",
                 "cmpss\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}">,
                 XS, VEX_4V;
  defm VCMPSD : sse12_cmp_scalar<FR64, f64mem,
                 "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}",
                 "cmpsd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}">,
                 XD, VEX_4V;
}

let Constraints = "$src1 = $dst", neverHasSideEffects = 1 in {
  defm CMPSS : sse12_cmp_scalar<FR32, f32mem,
                    "cmp${cc}ss\t{$src, $dst|$dst, $src}",
                    "cmpss\t{$src2, $src, $dst|$dst, $src, $src2}">, XS;
  defm CMPSD : sse12_cmp_scalar<FR64, f64mem,
                    "cmp${cc}sd\t{$src, $dst|$dst, $src}",
                    "cmpsd\t{$src2, $src, $dst|$dst, $src, $src2}">, XD;
}

multiclass sse12_cmp_scalar_int<RegisterClass RC, X86MemOperand x86memop,
                                Intrinsic Int, string asm> {
  def rr : SIi8<0xC2, MRMSrcReg, (outs VR128:$dst),
                (ins VR128:$src1, VR128:$src, SSECC:$cc), asm,
                [(set VR128:$dst, (Int VR128:$src1,
                                       VR128:$src, imm:$cc))]>;
  def rm : SIi8<0xC2, MRMSrcMem, (outs VR128:$dst),
                (ins VR128:$src1, f32mem:$src, SSECC:$cc), asm,
                [(set VR128:$dst, (Int VR128:$src1,
                                       (load addr:$src), imm:$cc))]>;
}

// Aliases to match intrinsics which expect XMM operand(s).
let isAsmParserOnly = 1 in {
  defm Int_VCMPSS : sse12_cmp_scalar_int<VR128, f32mem, int_x86_sse_cmp_ss,
                      "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}">,
                      XS, VEX_4V;
  defm Int_VCMPSD : sse12_cmp_scalar_int<VR128, f64mem, int_x86_sse2_cmp_sd,
                      "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}">,
                      XD, VEX_4V;
}
let Constraints = "$src1 = $dst" in {
  defm Int_CMPSS : sse12_cmp_scalar_int<VR128, f32mem, int_x86_sse_cmp_ss,
                      "cmp${cc}ss\t{$src, $dst|$dst, $src}">, XS;
  defm Int_CMPSD : sse12_cmp_scalar_int<VR128, f64mem, int_x86_sse2_cmp_sd,
                      "cmp${cc}sd\t{$src, $dst|$dst, $src}">, XD;
}


// sse12_ord_cmp - Unordered/Ordered scalar fp compare and set EFLAGS
multiclass sse12_ord_cmp<bits<8> opc, RegisterClass RC, SDNode OpNode,
                         ValueType vt, X86MemOperand x86memop,
                         PatFrag ld_frag, string OpcodeStr, Domain d> {
  def rr: PI<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
             [(set EFLAGS, (OpNode (vt RC:$src1), RC:$src2))], d>;
  def rm: PI<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
             [(set EFLAGS, (OpNode (vt RC:$src1),
                                   (ld_frag addr:$src2)))], d>;
}
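// Architectural note (for illustration): these map to ucomiss/comiss-style
// compares, which set ZF/PF/CF from the result (unordered => ZF=PF=CF=1)
// and clear OF/SF/AF; the "comi" forms differ from "ucomi" only in also
// signaling an invalid-operation exception on quiet NaNs.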
let Defs = [EFLAGS] in {
let isAsmParserOnly = 1 in {
  defm VUCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
                                "ucomiss", SSEPackedSingle>, VEX;
  defm VUCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
                                "ucomisd", SSEPackedDouble>, OpSize, VEX;
  let Pattern = []<dag> in {
    defm VCOMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
                                 "comiss", SSEPackedSingle>, VEX;
    defm VCOMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
                                 "comisd", SSEPackedDouble>, OpSize, VEX;
  }

  defm Int_VUCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
                                    load, "ucomiss", SSEPackedSingle>, VEX;
  defm Int_VUCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
                                    load, "ucomisd", SSEPackedDouble>, OpSize, VEX;

  defm Int_VCOMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem,
                                   load, "comiss", SSEPackedSingle>, VEX;
  defm Int_VCOMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem,
                                   load, "comisd", SSEPackedDouble>, OpSize, VEX;
}
defm UCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
                             "ucomiss", SSEPackedSingle>, TB;
defm UCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
                             "ucomisd", SSEPackedDouble>, TB, OpSize;

let Pattern = []<dag> in {
  defm COMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
                              "comiss", SSEPackedSingle>, TB;
  defm COMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
                              "comisd", SSEPackedDouble>, TB, OpSize;
}

defm Int_UCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
                                 load, "ucomiss", SSEPackedSingle>, TB;
defm Int_UCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
                                 load, "ucomisd", SSEPackedDouble>, TB, OpSize;

defm Int_COMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem, load,
                                "comiss", SSEPackedSingle>, TB;
defm Int_COMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem, load,
                                "comisd", SSEPackedDouble>, TB, OpSize;
} // Defs = [EFLAGS]
1248 // sse12_cmp_packed - sse 1 & 2 compared packed instructions
1249 multiclass sse12_cmp_packed<RegisterClass RC, X86MemOperand x86memop,
1250 Intrinsic Int, string asm, string asm_alt,
1252 def rri : PIi8<0xC2, MRMSrcReg,
1253 (outs RC:$dst), (ins RC:$src1, RC:$src, SSECC:$cc), asm,
1254 [(set RC:$dst, (Int RC:$src1, RC:$src, imm:$cc))], d>;
1255 def rmi : PIi8<0xC2, MRMSrcMem,
1256 (outs RC:$dst), (ins RC:$src1, f128mem:$src, SSECC:$cc), asm,
1257 [(set RC:$dst, (Int RC:$src1, (memop addr:$src), imm:$cc))], d>;
1258 // Accept explicit immediate argument form instead of comparison code.
1259 let isAsmParserOnly = 1 in {
1260 def rri_alt : PIi8<0xC2, MRMSrcReg,
1261 (outs RC:$dst), (ins RC:$src1, RC:$src, i8imm:$src2),
1263 def rmi_alt : PIi8<0xC2, MRMSrcMem,
1264 (outs RC:$dst), (ins RC:$src1, f128mem:$src, i8imm:$src2),
1269 let isAsmParserOnly = 1 in {
1270 defm VCMPPS : sse12_cmp_packed<VR128, f128mem, int_x86_sse_cmp_ps,
1271 "cmp${cc}ps\t{$src, $src1, $dst|$dst, $src1, $src}",
1272 "cmpps\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1273 SSEPackedSingle>, VEX_4V;
1274 defm VCMPPD : sse12_cmp_packed<VR128, f128mem, int_x86_sse2_cmp_pd,
1275 "cmp${cc}pd\t{$src, $src1, $dst|$dst, $src1, $src}",
1276 "cmppd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1277 SSEPackedDouble>, OpSize, VEX_4V;
1278 defm VCMPPSY : sse12_cmp_packed<VR256, f256mem, int_x86_avx_cmp_ps_256,
1279 "cmp${cc}ps\t{$src, $src1, $dst|$dst, $src1, $src}",
1280 "cmpps\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1281 SSEPackedSingle>, VEX_4V;
1282 defm VCMPPDY : sse12_cmp_packed<VR256, f256mem, int_x86_avx_cmp_pd_256,
1283 "cmp${cc}pd\t{$src, $src1, $dst|$dst, $src1, $src}",
1284 "cmppd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1285 SSEPackedDouble>, OpSize, VEX_4V;
1287 let Constraints = "$src1 = $dst" in {
1288 defm CMPPS : sse12_cmp_packed<VR128, f128mem, int_x86_sse_cmp_ps,
1289 "cmp${cc}ps\t{$src, $dst|$dst, $src}",
1290 "cmpps\t{$src2, $src, $dst|$dst, $src, $src2}",
1291 SSEPackedSingle>, TB;
1292 defm CMPPD : sse12_cmp_packed<VR128, f128mem, int_x86_sse2_cmp_pd,
1293 "cmp${cc}pd\t{$src, $dst|$dst, $src}",
1294 "cmppd\t{$src2, $src, $dst|$dst, $src, $src2}",
1295 SSEPackedDouble>, TB, OpSize;
1298 def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
1299 (CMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>;
1300 def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), (memop addr:$src2), imm:$cc)),
1301 (CMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>;
1302 def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
1303 (CMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
1304 def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), (memop addr:$src2), imm:$cc)),
1305 (CMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;
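// These patterns exist because a packed FP compare yields an all-ones /
// all-zeros mask per element, so the integer-typed X86cmpps/X86cmppd nodes
// can be selected directly onto the FP compare instructions.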
1307 //===----------------------------------------------------------------------===//
1308 // SSE 1 & 2 - Shuffle Instructions
1309 //===----------------------------------------------------------------------===//
1311 /// sse12_shuffle - sse 1 & 2 shuffle instructions
1312 multiclass sse12_shuffle<RegisterClass RC, X86MemOperand x86memop,
1313 ValueType vt, string asm, PatFrag mem_frag,
1314 Domain d, bit IsConvertibleToThreeAddress = 0> {
1315 def rmi : PIi8<0xC6, MRMSrcMem, (outs RC:$dst),
1316 (ins RC:$src1, f128mem:$src2, i8imm:$src3), asm,
1317 [(set RC:$dst, (vt (shufp:$src3
1318 RC:$src1, (mem_frag addr:$src2))))], d>;
1319 let isConvertibleToThreeAddress = IsConvertibleToThreeAddress in
1320 def rri : PIi8<0xC6, MRMSrcReg, (outs RC:$dst),
1321 (ins RC:$src1, RC:$src2, i8imm:$src3), asm,
1322 [(set RC:$dst,
1323 (vt (shufp:$src3 RC:$src1, RC:$src2)))], d>;
1324 }
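// As a reminder of the underlying semantics: the shufps immediate selects
// two elements of $src1 (imm bits 1:0 and 3:2) for the low half of $dst and
// two elements of $src2 (bits 5:4 and 7:6) for the high half; shufpd uses
// one selector bit per source. The shufp PatFrag matches vector_shuffle
// masks of exactly this shape.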
1326 let isAsmParserOnly = 1 in {
1327 defm VSHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
1328 "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
1329 memopv4f32, SSEPackedSingle>, VEX_4V;
1330 defm VSHUFPSY : sse12_shuffle<VR256, f256mem, v8f32,
1331 "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
1332 memopv8f32, SSEPackedSingle>, VEX_4V;
1333 defm VSHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
1334 "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src2, $src2, $src3}",
1335 memopv2f64, SSEPackedDouble>, OpSize, VEX_4V;
1336 defm VSHUFPDY : sse12_shuffle<VR256, f256mem, v4f64,
1337 "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src2, $src2, $src3}",
1338 memopv4f64, SSEPackedDouble>, OpSize, VEX_4V;
1339 }
1341 let Constraints = "$src1 = $dst" in {
1342 defm SHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
1343 "shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}",
1344 memopv4f32, SSEPackedSingle, 1 /* cvt to pshufd */>,
1345 TB;
1346 defm SHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
1347 "shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}",
1348 memopv2f64, SSEPackedDouble>, TB, OpSize;
1349 }
1351 //===----------------------------------------------------------------------===//
1352 // SSE 1 & 2 - Unpack Instructions
1353 //===----------------------------------------------------------------------===//
1355 /// sse12_unpack_interleave - sse 1 & 2 unpack and interleave
1356 multiclass sse12_unpack_interleave<bits<8> opc, PatFrag OpNode, ValueType vt,
1357 PatFrag mem_frag, RegisterClass RC,
1358 X86MemOperand x86memop, string asm,
1359 Domain d> {
1360 def rr : PI<opc, MRMSrcReg,
1361 (outs RC:$dst), (ins RC:$src1, RC:$src2),
1362 asm, [(set RC:$dst,
1363 (vt (OpNode RC:$src1, RC:$src2)))], d>;
1364 def rm : PI<opc, MRMSrcMem,
1365 (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
1366 asm, [(set RC:$dst,
1367 (vt (OpNode RC:$src1,
1368 (mem_frag addr:$src2))))], d>;
1369 }
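// unpckh interleaves the high halves of the two sources (for v4f32:
// {src1[2], src2[2], src1[3], src2[3]}) and unpckl the low halves, which is
// why a single multiclass parameterized on the PatFrag covers all the
// UNPCK*P* variants below.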
1371 let AddedComplexity = 10 in {
1372 let isAsmParserOnly = 1 in {
1373 defm VUNPCKHPS: sse12_unpack_interleave<0x15, unpckh, v4f32, memopv4f32,
1374 VR128, f128mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1375 SSEPackedSingle>, VEX_4V;
1376 defm VUNPCKHPD: sse12_unpack_interleave<0x15, unpckh, v2f64, memopv2f64,
1377 VR128, f128mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1378 SSEPackedDouble>, OpSize, VEX_4V;
1379 defm VUNPCKLPS: sse12_unpack_interleave<0x14, unpckl, v4f32, memopv4f32,
1380 VR128, f128mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1381 SSEPackedSingle>, VEX_4V;
1382 defm VUNPCKLPD: sse12_unpack_interleave<0x14, unpckl, v2f64, memopv2f64,
1383 VR128, f128mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1384 SSEPackedDouble>, OpSize, VEX_4V;
1386 defm VUNPCKHPSY: sse12_unpack_interleave<0x15, unpckh, v8f32, memopv8f32,
1387 VR256, f256mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1388 SSEPackedSingle>, VEX_4V;
1389 defm VUNPCKHPDY: sse12_unpack_interleave<0x15, unpckh, v4f64, memopv4f64,
1390 VR256, f256mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1391 SSEPackedDouble>, OpSize, VEX_4V;
1392 defm VUNPCKLPSY: sse12_unpack_interleave<0x14, unpckl, v8f32, memopv8f32,
1393 VR256, f256mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1394 SSEPackedSingle>, VEX_4V;
1395 defm VUNPCKLPDY: sse12_unpack_interleave<0x14, unpckl, v4f64, memopv4f64,
1396 VR256, f256mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1397 SSEPackedDouble>, OpSize, VEX_4V;
1398 }
1400 let Constraints = "$src1 = $dst" in {
1401 defm UNPCKHPS: sse12_unpack_interleave<0x15, unpckh, v4f32, memopv4f32,
1402 VR128, f128mem, "unpckhps\t{$src2, $dst|$dst, $src2}",
1403 SSEPackedSingle>, TB;
1404 defm UNPCKHPD: sse12_unpack_interleave<0x15, unpckh, v2f64, memopv2f64,
1405 VR128, f128mem, "unpckhpd\t{$src2, $dst|$dst, $src2}",
1406 SSEPackedDouble>, TB, OpSize;
1407 defm UNPCKLPS: sse12_unpack_interleave<0x14, unpckl, v4f32, memopv4f32,
1408 VR128, f128mem, "unpcklps\t{$src2, $dst|$dst, $src2}",
1409 SSEPackedSingle>, TB;
1410 defm UNPCKLPD: sse12_unpack_interleave<0x14, unpckl, v2f64, memopv2f64,
1411 VR128, f128mem, "unpcklpd\t{$src2, $dst|$dst, $src2}",
1412 SSEPackedDouble>, TB, OpSize;
1413 } // Constraints = "$src1 = $dst"
1414 } // AddedComplexity
1416 //===----------------------------------------------------------------------===//
1417 // SSE 1 & 2 - Extract Floating-Point Sign Mask
1418 //===----------------------------------------------------------------------===//
1420 /// sse12_extr_sign_mask - sse 1 & 2 packed FP sign mask extraction
1421 multiclass sse12_extr_sign_mask<RegisterClass RC, Intrinsic Int, string asm,
1422 Domain d> {
1423 def rr : PI<0x50, MRMSrcReg, (outs GR32:$dst), (ins RC:$src),
1424 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
1425 [(set GR32:$dst, (Int RC:$src))], d>;
1426 }
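// movmskps copies the sign bit of each f32 lane into bits 3:0 of the GR32
// destination (movmskpd: the two f64 sign bits into bits 1:0) and zeroes
// the remaining bits; the 256-bit AVX forms produce 8 and 4 mask bits
// respectively.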
1429 defm MOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps, "movmskps",
1430 SSEPackedSingle>, TB;
1431 defm MOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd, "movmskpd",
1432 SSEPackedDouble>, TB, OpSize;
1434 let isAsmParserOnly = 1 in {
1435 defm VMOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps,
1436 "movmskps", SSEPackedSingle>, VEX;
1437 defm VMOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd,
1438 "movmskpd", SSEPackedDouble>, OpSize,
1440 defm VMOVMSKPSY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_ps_256,
1441 "movmskps", SSEPackedSingle>, VEX;
1442 defm VMOVMSKPDY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_pd_256,
1443 "movmskpd", SSEPackedDouble>, OpSize,
1447 def VMOVMSKPSr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
1448 "movmskps\t{$src, $dst|$dst, $src}", [], SSEPackedSingle>, VEX;
1449 def VMOVMSKPDr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
1450 "movmskpd\t{$src, $dst|$dst, $src}", [], SSEPackedDouble>, OpSize,
1452 def VMOVMSKPSYr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
1453 "movmskps\t{$src, $dst|$dst, $src}", [], SSEPackedSingle>, VEX;
1454 def VMOVMSKPDYr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
1455 "movmskpd\t{$src, $dst|$dst, $src}", [], SSEPackedDouble>, OpSize,
1459 //===----------------------------------------------------------------------===//
1460 // SSE 1 & 2 - Misc aliasing of packed SSE 1 & 2 instructions
1461 //===----------------------------------------------------------------------===//
1463 // Aliases of packed SSE1 & SSE2 instructions for scalar use. These all have
1464 // names that start with 'Fs'.
1466 // Alias instructions that map fld0 to pxor for sse.
1467 let isReMaterializable = 1, isAsCheapAsAMove = 1, isCodeGenOnly = 1,
1468 canFoldAsLoad = 1 in {
1469 // FIXME: Set encoding to pseudo!
1470 def FsFLD0SS : I<0xEF, MRMInitReg, (outs FR32:$dst), (ins), "",
1471 [(set FR32:$dst, fp32imm0)]>,
1472 Requires<[HasSSE1]>, TB, OpSize;
1473 def FsFLD0SD : I<0xEF, MRMInitReg, (outs FR64:$dst), (ins), "",
1474 [(set FR64:$dst, fpimm0)]>,
1475 Requires<[HasSSE2]>, TB, OpSize;
1476 }
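// Using pxor (opcode 0xEF) to materialize +0.0 exploits the x86 zeroing
// idiom: xor of a register with itself carries no input dependency, which
// is what justifies the isReMaterializable / isAsCheapAsAMove flags above.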
1478 // Alias instruction to do FR32 or FR64 reg-to-reg copy using movaps. Upper
1479 // bits are disregarded.
1480 let neverHasSideEffects = 1 in {
1481 def FsMOVAPSrr : PSI<0x28, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
1482 "movaps\t{$src, $dst|$dst, $src}", []>;
1483 def FsMOVAPDrr : PDI<0x28, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
1484 "movapd\t{$src, $dst|$dst, $src}", []>;
1487 // Alias instruction to load FR32 or FR64 from f128mem using movaps. Upper
1488 // bits are disregarded.
1489 let canFoldAsLoad = 1, isReMaterializable = 1 in {
1490 def FsMOVAPSrm : PSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
1491 "movaps\t{$src, $dst|$dst, $src}",
1492 [(set FR32:$dst, (alignedloadfsf32 addr:$src))]>;
1493 def FsMOVAPDrm : PDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
1494 "movapd\t{$src, $dst|$dst, $src}",
1495 [(set FR64:$dst, (alignedloadfsf64 addr:$src))]>;
1496 }
1498 //===----------------------------------------------------------------------===//
1499 // SSE 1 & 2 - Logical Instructions
1500 //===----------------------------------------------------------------------===//
1502 /// sse12_fp_alias_pack_logical - SSE 1 & 2 aliased packed FP logical ops
1504 multiclass sse12_fp_alias_pack_logical<bits<8> opc, string OpcodeStr,
1505 SDNode OpNode> {
1506 let isAsmParserOnly = 1 in {
1507 defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
1508 FR32, f32, f128mem, memopfsf32, SSEPackedSingle, 0>, VEX_4V;
1510 defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
1511 FR64, f64, f128mem, memopfsf64, SSEPackedDouble, 0>, OpSize, VEX_4V;
1512 }
1514 let Constraints = "$src1 = $dst" in {
1515 defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, FR32,
1516 f32, f128mem, memopfsf32, SSEPackedSingle>, TB;
1518 defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, FR64,
1519 f64, f128mem, memopfsf64, SSEPackedDouble>, TB, OpSize;
1520 }
1521 }
1523 // Alias bitwise logical operations using SSE logical ops on packed FP values.
1524 let mayLoad = 0 in {
1525 defm FsAND : sse12_fp_alias_pack_logical<0x54, "and", X86fand>;
1526 defm FsOR : sse12_fp_alias_pack_logical<0x56, "or", X86for>;
1527 defm FsXOR : sse12_fp_alias_pack_logical<0x57, "xor", X86fxor>;
1528 }
1530 let neverHasSideEffects = 1, Pattern = []<dag>, isCommutable = 0 in
1531 defm FsANDN : sse12_fp_alias_pack_logical<0x55, "andn", undef>;
1533 /// sse12_fp_packed_logical - SSE 1 & 2 packed FP logical ops
1535 multiclass sse12_fp_packed_logical<bits<8> opc, string OpcodeStr,
1536 SDNode OpNode, int HasPat = 0,
1537 list<list<dag>> Pattern = []> {
1538 let isAsmParserOnly = 1, Pattern = []<dag> in {
1539 defm V#NAME#PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
1540 !strconcat(OpcodeStr, "ps"), f128mem,
1541 !if(HasPat, Pattern[0], // rr
1542 [(set VR128:$dst, (v2i64 (OpNode VR128:$src1,
1543 VR128:$src2)))]),
1544 !if(HasPat, Pattern[2], // rm
1545 [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
1546 (memopv2i64 addr:$src2)))]), 0>,
1547 VEX_4V;
1549 defm V#NAME#PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
1550 !strconcat(OpcodeStr, "pd"), f128mem,
1551 !if(HasPat, Pattern[1], // rr
1552 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1553 (bc_v2i64 (v2f64 VR128:$src2))))]),
1555 !if(HasPat, Pattern[3], // rm
1556 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1557 (memopv2i64 addr:$src2)))]), 0>,
1558 OpSize, VEX_4V;
1559 }
1560 let Constraints = "$src1 = $dst" in {
1561 defm PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
1562 !strconcat(OpcodeStr, "ps"), f128mem,
1563 !if(HasPat, Pattern[0], // rr
1564 [(set VR128:$dst, (v2i64 (OpNode VR128:$src1,
1565 VR128:$src2)))]),
1566 !if(HasPat, Pattern[2], // rm
1567 [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
1568 (memopv2i64 addr:$src2)))])>, TB;
1570 defm PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
1571 !strconcat(OpcodeStr, "pd"), f128mem,
1572 !if(HasPat, Pattern[1], // rr
1573 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1574 (bc_v2i64 (v2f64 VR128:$src2))))]),
1576 !if(HasPat, Pattern[3], // rm
1577 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1578 (memopv2i64 addr:$src2)))])>,
1579 TB, OpSize;
1580 }
1581 }
1583 /// sse12_fp_packed_logical_y - AVX 256-bit SSE 1 & 2 logical ops forms
1585 let isAsmParserOnly = 1 in {
1586 multiclass sse12_fp_packed_logical_y<bits<8> opc, string OpcodeStr> {
1587 defm PSY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedSingle,
1588 !strconcat(OpcodeStr, "ps"), f256mem, [], [], 0>, VEX_4V;
1590 defm PDY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedDouble,
1591 !strconcat(OpcodeStr, "pd"), f256mem, [], [], 0>, OpSize, VEX_4V;
1592 }
1593 }
1595 // AVX 256-bit packed logical ops forms
1596 defm VAND : sse12_fp_packed_logical_y<0x54, "and">;
1597 defm VOR : sse12_fp_packed_logical_y<0x56, "or">;
1598 defm VXOR : sse12_fp_packed_logical_y<0x57, "xor">;
1599 let isCommutable = 0 in
1600 defm VANDN : sse12_fp_packed_logical_y<0x55, "andn">;
1602 defm AND : sse12_fp_packed_logical<0x54, "and", and>;
1603 defm OR : sse12_fp_packed_logical<0x56, "or", or>;
1604 defm XOR : sse12_fp_packed_logical<0x57, "xor", xor>;
1605 let isCommutable = 0 in
1606 defm ANDN : sse12_fp_packed_logical<0x55, "andn", undef /* dummy */, 1, [
1607 // single r+r
1608 [(set VR128:$dst, (v2i64 (and (xor VR128:$src1,
1609 (bc_v2i64 (v4i32 immAllOnesV))),
1610 VR128:$src2)))],
1611 // double r+r
1612 [(set VR128:$dst, (and (vnot (bc_v2i64 (v2f64 VR128:$src1))),
1613 (bc_v2i64 (v2f64 VR128:$src2))))],
1614 // single r+m
1615 [(set VR128:$dst, (v2i64 (and (xor (bc_v2i64 (v4f32 VR128:$src1)),
1616 (bc_v2i64 (v4i32 immAllOnesV))),
1617 (memopv2i64 addr:$src2))))],
1618 // double r+m
1619 [(set VR128:$dst, (and (vnot (bc_v2i64 (v2f64 VR128:$src1))),
1620 (memopv2i64 addr:$src2)))]]>;
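// andnps/andnpd compute ~$src1 & $src2 (the complement applies to the first
// operand), so the patterns above spell the single-precision case as
// and(xor(src1, all-ones), src2) and the double-precision case via vnot;
// this is also why ANDN is marked not commutable.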
1622 //===----------------------------------------------------------------------===//
1623 // SSE 1 & 2 - Arithmetic Instructions
1624 //===----------------------------------------------------------------------===//
1626 /// basic_sse12_fp_binop_xxx - SSE 1 & 2 binops come in both scalar and
1627 /// vector forms.
1629 /// In addition, we also have a special variant of the scalar form here to
1630 /// represent the associated intrinsic operation. This form is unlike the
1631 /// plain scalar form, in that it takes an entire vector (instead of a scalar)
1632 /// and leaves the top elements unmodified (therefore these cannot be commuted).
1634 /// These three forms can each be reg+reg or reg+mem.
1637 /// FIXME: once all 256-bit intrinsics are matched, cleanup and refactor those
1639 multiclass basic_sse12_fp_binop_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
1640 bit Is2Addr = 1> {
1641 defm SS : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "ss"),
1642 OpNode, FR32, f32mem, Is2Addr>, XS;
1643 defm SD : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "sd"),
1644 OpNode, FR64, f64mem, Is2Addr>, XD;
1645 }
1647 multiclass basic_sse12_fp_binop_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
1648 bit Is2Addr = 1> {
1649 let mayLoad = 0 in {
1650 defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR128,
1651 v4f32, f128mem, memopv4f32, SSEPackedSingle, Is2Addr>, TB;
1652 defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR128,
1653 v2f64, f128mem, memopv2f64, SSEPackedDouble, Is2Addr>, TB, OpSize;
1654 }
1655 }
1657 multiclass basic_sse12_fp_binop_p_y<bits<8> opc, string OpcodeStr,
1658 SDNode OpNode> {
1659 let mayLoad = 0 in {
1660 defm PSY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR256,
1661 v8f32, f256mem, memopv8f32, SSEPackedSingle, 0>, TB;
1662 defm PDY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR256,
1663 v4f64, f256mem, memopv4f64, SSEPackedDouble, 0>, TB, OpSize;
1664 }
1665 }
1667 multiclass basic_sse12_fp_binop_s_int<bits<8> opc, string OpcodeStr,
1668 bit Is2Addr = 1> {
1669 defm SS : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
1670 !strconcat(OpcodeStr, "ss"), "", "_ss", ssmem, sse_load_f32, Is2Addr>, XS;
1671 defm SD : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
1672 !strconcat(OpcodeStr, "sd"), "2", "_sd", sdmem, sse_load_f64, Is2Addr>, XD;
1673 }
1675 multiclass basic_sse12_fp_binop_p_int<bits<8> opc, string OpcodeStr,
1676 bit Is2Addr = 1> {
1677 defm PS : sse12_fp_packed_int<opc, OpcodeStr, VR128,
1678 !strconcat(OpcodeStr, "ps"), "sse", "_ps", f128mem, memopv4f32,
1679 SSEPackedSingle, Is2Addr>, TB;
1681 defm PD : sse12_fp_packed_int<opc, OpcodeStr, VR128,
1682 !strconcat(OpcodeStr, "pd"), "sse2", "_pd", f128mem, memopv2f64,
1683 SSEPackedDouble, Is2Addr>, TB, OpSize;
1684 }
1686 multiclass basic_sse12_fp_binop_p_y_int<bits<8> opc, string OpcodeStr> {
1687 defm PSY : sse12_fp_packed_int<opc, OpcodeStr, VR256,
1688 !strconcat(OpcodeStr, "ps"), "avx", "_ps_256", f256mem, memopv8f32,
1689 SSEPackedSingle, 0>, TB;
1691 defm PDY : sse12_fp_packed_int<opc, OpcodeStr, VR256,
1692 !strconcat(OpcodeStr, "pd"), "avx", "_pd_256", f256mem, memopv4f64,
1693 SSEPackedDouble, 0>, TB, OpSize;
1694 }
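// How these helpers compose: a single defm below can inherit from several
// multiclasses at once, so e.g. "defm ADD : basic_sse12_fp_binop_s<...>"
// by itself yields ADDSSrr/ADDSSrm and ADDSDrr/ADDSDrm, and chaining the
// _p / _s_int variants adds the packed and intrinsic forms under the same
// ADD prefix.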
1696 // Binary Arithmetic instructions
1697 let isAsmParserOnly = 1 in {
1698 defm VADD : basic_sse12_fp_binop_s<0x58, "add", fadd, 0>,
1699 basic_sse12_fp_binop_s_int<0x58, "add", 0>,
1700 basic_sse12_fp_binop_p<0x58, "add", fadd, 0>,
1701 basic_sse12_fp_binop_p_y<0x58, "add", fadd>, VEX_4V;
1702 defm VMUL : basic_sse12_fp_binop_s<0x59, "mul", fmul, 0>,
1703 basic_sse12_fp_binop_s_int<0x59, "mul", 0>,
1704 basic_sse12_fp_binop_p<0x59, "mul", fmul, 0>,
1705 basic_sse12_fp_binop_p_y<0x59, "mul", fmul>, VEX_4V;
1707 let isCommutable = 0 in {
1708 defm VSUB : basic_sse12_fp_binop_s<0x5C, "sub", fsub, 0>,
1709 basic_sse12_fp_binop_s_int<0x5C, "sub", 0>,
1710 basic_sse12_fp_binop_p<0x5C, "sub", fsub, 0>,
1711 basic_sse12_fp_binop_p_y<0x5C, "sub", fsub>, VEX_4V;
1712 defm VDIV : basic_sse12_fp_binop_s<0x5E, "div", fdiv, 0>,
1713 basic_sse12_fp_binop_s_int<0x5E, "div", 0>,
1714 basic_sse12_fp_binop_p<0x5E, "div", fdiv, 0>,
1715 basic_sse12_fp_binop_p_y<0x5E, "div", fdiv>, VEX_4V;
1716 defm VMAX : basic_sse12_fp_binop_s<0x5F, "max", X86fmax, 0>,
1717 basic_sse12_fp_binop_s_int<0x5F, "max", 0>,
1718 basic_sse12_fp_binop_p<0x5F, "max", X86fmax, 0>,
1719 basic_sse12_fp_binop_p_int<0x5F, "max", 0>,
1720 basic_sse12_fp_binop_p_y<0x5F, "max", X86fmax>,
1721 basic_sse12_fp_binop_p_y_int<0x5F, "max">, VEX_4V;
1722 defm VMIN : basic_sse12_fp_binop_s<0x5D, "min", X86fmin, 0>,
1723 basic_sse12_fp_binop_s_int<0x5D, "min", 0>,
1724 basic_sse12_fp_binop_p<0x5D, "min", X86fmin, 0>,
1725 basic_sse12_fp_binop_p_int<0x5D, "min", 0>,
1726 basic_sse12_fp_binop_p_y_int<0x5D, "min">,
1727 basic_sse12_fp_binop_p_y<0x5D, "min", X86fmin>, VEX_4V;
1728 }
1729 }
1731 let Constraints = "$src1 = $dst" in {
1732 defm ADD : basic_sse12_fp_binop_s<0x58, "add", fadd>,
1733 basic_sse12_fp_binop_p<0x58, "add", fadd>,
1734 basic_sse12_fp_binop_s_int<0x58, "add">;
1735 defm MUL : basic_sse12_fp_binop_s<0x59, "mul", fmul>,
1736 basic_sse12_fp_binop_p<0x59, "mul", fmul>,
1737 basic_sse12_fp_binop_s_int<0x59, "mul">;
1739 let isCommutable = 0 in {
1740 defm SUB : basic_sse12_fp_binop_s<0x5C, "sub", fsub>,
1741 basic_sse12_fp_binop_p<0x5C, "sub", fsub>,
1742 basic_sse12_fp_binop_s_int<0x5C, "sub">;
1743 defm DIV : basic_sse12_fp_binop_s<0x5E, "div", fdiv>,
1744 basic_sse12_fp_binop_p<0x5E, "div", fdiv>,
1745 basic_sse12_fp_binop_s_int<0x5E, "div">;
1746 defm MAX : basic_sse12_fp_binop_s<0x5F, "max", X86fmax>,
1747 basic_sse12_fp_binop_p<0x5F, "max", X86fmax>,
1748 basic_sse12_fp_binop_s_int<0x5F, "max">,
1749 basic_sse12_fp_binop_p_int<0x5F, "max">;
1750 defm MIN : basic_sse12_fp_binop_s<0x5D, "min", X86fmin>,
1751 basic_sse12_fp_binop_p<0x5D, "min", X86fmin>,
1752 basic_sse12_fp_binop_s_int<0x5D, "min">,
1753 basic_sse12_fp_binop_p_int<0x5D, "min">;
1754 }
1755 }
1758 /// In addition, we also have a special variant of the scalar form here to
1759 /// represent the associated intrinsic operation. This form is unlike the
1760 /// plain scalar form, in that it takes an entire vector (instead of a
1761 /// scalar) and leaves the top elements undefined.
1763 /// And, we have a special variant form for a full-vector intrinsic form.
1765 /// sse1_fp_unop_s - SSE1 unops in scalar form.
1766 multiclass sse1_fp_unop_s<bits<8> opc, string OpcodeStr,
1767 SDNode OpNode, Intrinsic F32Int> {
1768 def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
1769 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
1770 [(set FR32:$dst, (OpNode FR32:$src))]>;
1771 // For scalar unary operations, fold a load into the operation
1772 // only in OptForSize mode. It eliminates an instruction, but it also
1773 // eliminates a whole-register clobber (the load), so it introduces a
1774 // partial register update condition.
1775 def SSm : I<opc, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
1776 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
1777 [(set FR32:$dst, (OpNode (load addr:$src)))]>, XS,
1778 Requires<[HasSSE1, OptForSize]>;
1779 def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1780 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
1781 [(set VR128:$dst, (F32Int VR128:$src))]>;
1782 def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins ssmem:$src),
1783 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
1784 [(set VR128:$dst, (F32Int sse_load_f32:$src))]>;
1785 }
1787 /// sse1_fp_unop_s_avx - AVX SSE1 unops in scalar form.
1788 multiclass sse1_fp_unop_s_avx<bits<8> opc, string OpcodeStr,
1789 SDNode OpNode, Intrinsic F32Int> {
1790 def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
1791 !strconcat(OpcodeStr,
1792 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
1793 def SSm : I<opc, MRMSrcMem, (outs FR32:$dst), (ins FR32:$src1, f32mem:$src2),
1794 !strconcat(OpcodeStr,
1795 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1796 []>, XS, Requires<[HasAVX, OptForSize]>;
1797 def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1798 !strconcat(OpcodeStr,
1799 "ss\t{$src, $dst, $dst|$dst, $dst, $src}"),
1800 [(set VR128:$dst, (F32Int VR128:$src))]>;
1801 def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins ssmem:$src),
1802 !strconcat(OpcodeStr,
1803 "ss\t{$src, $dst, $dst|$dst, $dst, $src}"),
1804 [(set VR128:$dst, (F32Int sse_load_f32:$src))]>;
1805 }
1807 /// sse1_fp_unop_p - SSE1 unops in packed form.
1808 multiclass sse1_fp_unop_p<bits<8> opc, string OpcodeStr, SDNode OpNode> {
1809 def PSr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1810 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1811 [(set VR128:$dst, (v4f32 (OpNode VR128:$src)))]>;
1812 def PSm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1813 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1814 [(set VR128:$dst, (OpNode (memopv4f32 addr:$src)))]>;
1815 }
1817 /// sse1_fp_unop_p_y - AVX 256-bit SSE1 unops in packed form.
1818 multiclass sse1_fp_unop_p_y<bits<8> opc, string OpcodeStr, SDNode OpNode> {
1819 def PSYr : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
1820 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1821 [(set VR256:$dst, (v8f32 (OpNode VR256:$src)))]>;
1822 def PSYm : PSI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
1823 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1824 [(set VR256:$dst, (OpNode (memopv8f32 addr:$src)))]>;
1825 }
1827 /// sse1_fp_unop_p_int - SSE1 intrinsics unops in packed forms.
1828 multiclass sse1_fp_unop_p_int<bits<8> opc, string OpcodeStr,
1829 Intrinsic V4F32Int> {
1830 def PSr_Int : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1831 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1832 [(set VR128:$dst, (V4F32Int VR128:$src))]>;
1833 def PSm_Int : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1834 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1835 [(set VR128:$dst, (V4F32Int (memopv4f32 addr:$src)))]>;
1836 }
1838 /// sse1_fp_unop_p_y_int - AVX 256-bit intrinsics unops in packed forms.
1839 multiclass sse1_fp_unop_p_y_int<bits<8> opc, string OpcodeStr,
1840 Intrinsic V4F32Int> {
1841 def PSYr_Int : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
1842 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1843 [(set VR256:$dst, (V4F32Int VR256:$src))]>;
1844 def PSYm_Int : PSI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
1845 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1846 [(set VR256:$dst, (V4F32Int (memopv8f32 addr:$src)))]>;
1847 }
1849 /// sse2_fp_unop_s - SSE2 unops in scalar form.
1850 multiclass sse2_fp_unop_s<bits<8> opc, string OpcodeStr,
1851 SDNode OpNode, Intrinsic F64Int> {
1852 def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
1853 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1854 [(set FR64:$dst, (OpNode FR64:$src))]>;
1855 // See the comments in sse1_fp_unop_s for why this is OptForSize.
1856 def SDm : I<opc, MRMSrcMem, (outs FR64:$dst), (ins f64mem:$src),
1857 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1858 [(set FR64:$dst, (OpNode (load addr:$src)))]>, XD,
1859 Requires<[HasSSE2, OptForSize]>;
1860 def SDr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1861 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1862 [(set VR128:$dst, (F64Int VR128:$src))]>;
1863 def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins sdmem:$src),
1864 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1865 [(set VR128:$dst, (F64Int sse_load_f64:$src))]>;
1866 }
1868 /// sse2_fp_unop_s_avx - AVX SSE2 unops in scalar form.
1869 multiclass sse2_fp_unop_s_avx<bits<8> opc, string OpcodeStr,
1870 SDNode OpNode, Intrinsic F64Int> {
1871 def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
1872 !strconcat(OpcodeStr,
1873 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
1874 def SDm : SDI<opc, MRMSrcMem, (outs FR64:$dst),
1875 (ins FR64:$src1, f64mem:$src2),
1876 !strconcat(OpcodeStr,
1877 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
1878 def SDr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1879 !strconcat(OpcodeStr, "sd\t{$src, $dst, $dst|$dst, $dst, $src}"),
1880 [(set VR128:$dst, (F64Int VR128:$src))]>;
1881 def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins sdmem:$src),
1882 !strconcat(OpcodeStr, "sd\t{$src, $dst, $dst|$dst, $dst, $src}"),
1883 [(set VR128:$dst, (F64Int sse_load_f64:$src))]>;
1884 }
1886 /// sse2_fp_unop_p - SSE2 unops in vector forms.
1887 multiclass sse2_fp_unop_p<bits<8> opc, string OpcodeStr,
1888 SDNode OpNode> {
1889 def PDr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1890 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1891 [(set VR128:$dst, (v2f64 (OpNode VR128:$src)))]>;
1892 def PDm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1893 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1894 [(set VR128:$dst, (OpNode (memopv2f64 addr:$src)))]>;
1895 }
1897 /// sse2_fp_unop_p_y - AVX SSE2 256-bit unops in vector forms.
1898 multiclass sse2_fp_unop_p_y<bits<8> opc, string OpcodeStr, SDNode OpNode> {
1899 def PDYr : PDI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
1900 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1901 [(set VR256:$dst, (v4f64 (OpNode VR256:$src)))]>;
1902 def PDYm : PDI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
1903 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1904 [(set VR256:$dst, (OpNode (memopv4f64 addr:$src)))]>;
1905 }
1907 /// sse2_fp_unop_p_int - SSE2 intrinsic unops in vector forms.
1908 multiclass sse2_fp_unop_p_int<bits<8> opc, string OpcodeStr,
1909 Intrinsic V2F64Int> {
1910 def PDr_Int : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1911 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1912 [(set VR128:$dst, (V2F64Int VR128:$src))]>;
1913 def PDm_Int : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1914 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1915 [(set VR128:$dst, (V2F64Int (memopv2f64 addr:$src)))]>;
1916 }
1918 /// sse2_fp_unop_p_y_int - AVX 256-bit intrinsic unops in vector forms.
1919 multiclass sse2_fp_unop_p_y_int<bits<8> opc, string OpcodeStr,
1920 Intrinsic V2F64Int> {
1921 def PDYr_Int : PDI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
1922 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1923 [(set VR256:$dst, (V2F64Int VR256:$src))]>;
1924 def PDYm_Int : PDI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
1925 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1926 [(set VR256:$dst, (V2F64Int (memopv4f64 addr:$src)))]>;
1927 }
1929 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
1931 defm VSQRT : sse1_fp_unop_s_avx<0x51, "vsqrt", fsqrt, int_x86_sse_sqrt_ss>,
1932 sse2_fp_unop_s_avx<0x51, "vsqrt", fsqrt, int_x86_sse2_sqrt_sd>,
1933 VEX_4V;
1935 defm VSQRT : sse1_fp_unop_p<0x51, "vsqrt", fsqrt>,
1936 sse2_fp_unop_p<0x51, "vsqrt", fsqrt>,
1937 sse1_fp_unop_p_y<0x51, "vsqrt", fsqrt>,
1938 sse2_fp_unop_p_y<0x51, "vsqrt", fsqrt>,
1939 sse1_fp_unop_p_int<0x51, "vsqrt", int_x86_sse_sqrt_ps>,
1940 sse2_fp_unop_p_int<0x51, "vsqrt", int_x86_sse2_sqrt_pd>,
1941 sse1_fp_unop_p_y_int<0x51, "vsqrt", int_x86_avx_sqrt_ps_256>,
1942 sse2_fp_unop_p_y_int<0x51, "vsqrt", int_x86_avx_sqrt_pd_256>,
1943 VEX;
1945 // Reciprocal approximations. Note that these typically require refinement
1946 // in order to obtain suitable precision.
1947 defm VRSQRT : sse1_fp_unop_s_avx<0x52, "vrsqrt", X86frsqrt,
1948 int_x86_sse_rsqrt_ss>, VEX_4V;
1949 defm VRSQRT : sse1_fp_unop_p<0x52, "vrsqrt", X86frsqrt>,
1950 sse1_fp_unop_p_y<0x52, "vrsqrt", X86frsqrt>,
1951 sse1_fp_unop_p_y_int<0x52, "vrsqrt", int_x86_avx_rsqrt_ps_256>,
1952 sse1_fp_unop_p_int<0x52, "vrsqrt", int_x86_sse_rsqrt_ps>, VEX;
1954 defm VRCP : sse1_fp_unop_s_avx<0x53, "vrcp", X86frcp, int_x86_sse_rcp_ss>,
1955 VEX_4V;
1956 defm VRCP : sse1_fp_unop_p<0x53, "vrcp", X86frcp>,
1957 sse1_fp_unop_p_y<0x53, "vrcp", X86frcp>,
1958 sse1_fp_unop_p_y_int<0x53, "vrcp", int_x86_avx_rcp_ps_256>,
1959 sse1_fp_unop_p_int<0x53, "vrcp", int_x86_sse_rcp_ps>, VEX;
1960 }
1963 defm SQRT : sse1_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse_sqrt_ss>,
1964 sse1_fp_unop_p<0x51, "sqrt", fsqrt>,
1965 sse1_fp_unop_p_int<0x51, "sqrt", int_x86_sse_sqrt_ps>,
1966 sse2_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse2_sqrt_sd>,
1967 sse2_fp_unop_p<0x51, "sqrt", fsqrt>,
1968 sse2_fp_unop_p_int<0x51, "sqrt", int_x86_sse2_sqrt_pd>;
1970 // Reciprocal approximations. Note that these typically require refinement
1971 // in order to obtain suitable precision.
1972 defm RSQRT : sse1_fp_unop_s<0x52, "rsqrt", X86frsqrt, int_x86_sse_rsqrt_ss>,
1973 sse1_fp_unop_p<0x52, "rsqrt", X86frsqrt>,
1974 sse1_fp_unop_p_int<0x52, "rsqrt", int_x86_sse_rsqrt_ps>;
1975 defm RCP : sse1_fp_unop_s<0x53, "rcp", X86frcp, int_x86_sse_rcp_ss>,
1976 sse1_fp_unop_p<0x53, "rcp", X86frcp>,
1977 sse1_fp_unop_p_int<0x53, "rcp", int_x86_sse_rcp_ps>;
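// As a reminder (nothing here encodes it): rcpps/rsqrtps deliver only about
// 12 bits of precision, and callers that need more typically apply one
// Newton-Raphson step, e.g. x1 = x0*(2 - a*x0) for 1/a or
// x1 = 0.5*x0*(3 - a*x0*x0) for 1/sqrt(a).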
1979 // There are no f64 versions of the reciprocal approximation instructions.
1981 //===----------------------------------------------------------------------===//
1982 // SSE 1 & 2 - Non-temporal stores
1983 //===----------------------------------------------------------------------===//
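// Non-temporal stores write to memory without allocating the destination
// line in the caches, which suits large streaming writes. They are weakly
// ordered, so producers must issue an sfence before the data can safely be
// observed by another agent.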
1985 let isAsmParserOnly = 1 in {
1986 def VMOVNTPSmr_Int : VPSI<0x2B, MRMDestMem, (outs),
1987 (ins i128mem:$dst, VR128:$src),
1988 "movntps\t{$src, $dst|$dst, $src}",
1989 [(int_x86_sse_movnt_ps addr:$dst, VR128:$src)]>, VEX;
1990 def VMOVNTPDmr_Int : VPDI<0x2B, MRMDestMem, (outs),
1991 (ins i128mem:$dst, VR128:$src),
1992 "movntpd\t{$src, $dst|$dst, $src}",
1993 [(int_x86_sse2_movnt_pd addr:$dst, VR128:$src)]>, VEX;
1995 let ExeDomain = SSEPackedInt in
1996 def VMOVNTDQmr_Int : VPDI<0xE7, MRMDestMem, (outs),
1997 (ins f128mem:$dst, VR128:$src),
1998 "movntdq\t{$src, $dst|$dst, $src}",
1999 [(int_x86_sse2_movnt_dq addr:$dst, VR128:$src)]>, VEX;
2001 let AddedComplexity = 400 in { // Prefer non-temporal versions
2002 def VMOVNTPSmr : VPSI<0x2B, MRMDestMem, (outs),
2003 (ins f128mem:$dst, VR128:$src),
2004 "movntps\t{$src, $dst|$dst, $src}",
2005 [(alignednontemporalstore (v4f32 VR128:$src),
2006 addr:$dst)]>, VEX;
2007 def VMOVNTPDmr : VPDI<0x2B, MRMDestMem, (outs),
2008 (ins f128mem:$dst, VR128:$src),
2009 "movntpd\t{$src, $dst|$dst, $src}",
2010 [(alignednontemporalstore (v2f64 VR128:$src),
2011 addr:$dst)]>, VEX;
2012 def VMOVNTDQ_64mr : VPDI<0xE7, MRMDestMem, (outs),
2013 (ins f128mem:$dst, VR128:$src),
2014 "movntdq\t{$src, $dst|$dst, $src}",
2015 [(alignednontemporalstore (v2f64 VR128:$src),
2016 addr:$dst)]>, VEX;
2017 let ExeDomain = SSEPackedInt in
2018 def VMOVNTDQmr : VPDI<0xE7, MRMDestMem, (outs),
2019 (ins f128mem:$dst, VR128:$src),
2020 "movntdq\t{$src, $dst|$dst, $src}",
2021 [(alignednontemporalstore (v4f32 VR128:$src),
2022 addr:$dst)]>, VEX;
2024 def VMOVNTPSYmr : VPSI<0x2B, MRMDestMem, (outs),
2025 (ins f256mem:$dst, VR256:$src),
2026 "movntps\t{$src, $dst|$dst, $src}",
2027 [(alignednontemporalstore (v8f32 VR256:$src),
2028 addr:$dst)]>, VEX;
2029 def VMOVNTPDYmr : VPDI<0x2B, MRMDestMem, (outs),
2030 (ins f256mem:$dst, VR256:$src),
2031 "movntpd\t{$src, $dst|$dst, $src}",
2032 [(alignednontemporalstore (v4f64 VR256:$src),
2033 addr:$dst)]>, VEX;
2034 def VMOVNTDQY_64mr : VPDI<0xE7, MRMDestMem, (outs),
2035 (ins f256mem:$dst, VR256:$src),
2036 "movntdq\t{$src, $dst|$dst, $src}",
2037 [(alignednontemporalstore (v4f64 VR256:$src),
2038 addr:$dst)]>, VEX;
2039 let ExeDomain = SSEPackedInt in
2040 def VMOVNTDQYmr : VPDI<0xE7, MRMDestMem, (outs),
2041 (ins f256mem:$dst, VR256:$src),
2042 "movntdq\t{$src, $dst|$dst, $src}",
2043 [(alignednontemporalstore (v8f32 VR256:$src),
2044 addr:$dst)]>, VEX;
2045 }
2046 }
2048 def : Pat<(int_x86_avx_movnt_dq_256 addr:$dst, VR256:$src),
2049 (VMOVNTDQYmr addr:$dst, VR256:$src)>;
2050 def : Pat<(int_x86_avx_movnt_pd_256 addr:$dst, VR256:$src),
2051 (VMOVNTPDYmr addr:$dst, VR256:$src)>;
2052 def : Pat<(int_x86_avx_movnt_ps_256 addr:$dst, VR256:$src),
2053 (VMOVNTPSYmr addr:$dst, VR256:$src)>;
2055 def MOVNTPSmr_Int : PSI<0x2B, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2056 "movntps\t{$src, $dst|$dst, $src}",
2057 [(int_x86_sse_movnt_ps addr:$dst, VR128:$src)]>;
2058 def MOVNTPDmr_Int : PDI<0x2B, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2059 "movntpd\t{$src, $dst|$dst, $src}",
2060 [(int_x86_sse2_movnt_pd addr:$dst, VR128:$src)]>;
2062 let ExeDomain = SSEPackedInt in
2063 def MOVNTDQmr_Int : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2064 "movntdq\t{$src, $dst|$dst, $src}",
2065 [(int_x86_sse2_movnt_dq addr:$dst, VR128:$src)]>;
2067 let AddedComplexity = 400 in { // Prefer non-temporal versions
2068 def MOVNTPSmr : PSI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2069 "movntps\t{$src, $dst|$dst, $src}",
2070 [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)]>;
2071 def MOVNTPDmr : PDI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2072 "movntpd\t{$src, $dst|$dst, $src}",
2073 [(alignednontemporalstore (v2f64 VR128:$src), addr:$dst)]>;
2075 def MOVNTDQ_64mr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2076 "movntdq\t{$src, $dst|$dst, $src}",
2077 [(alignednontemporalstore (v2f64 VR128:$src), addr:$dst)]>;
2079 let ExeDomain = SSEPackedInt in
2080 def MOVNTDQmr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2081 "movntdq\t{$src, $dst|$dst, $src}",
2082 [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)]>;
2084 // There is no AVX form for instructions below this point
2085 def MOVNTImr : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
2086 "movnti\t{$src, $dst|$dst, $src}",
2087 [(nontemporalstore (i32 GR32:$src), addr:$dst)]>,
2088 TB, Requires<[HasSSE2]>;
2090 def MOVNTI_64mr : RI<0xC3, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
2091 "movnti\t{$src, $dst|$dst, $src}",
2092 [(nontemporalstore (i64 GR64:$src), addr:$dst)]>,
2093 TB, Requires<[HasSSE2]>;
2094 }
2096 def MOVNTImr_Int : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
2097 "movnti\t{$src, $dst|$dst, $src}",
2098 [(int_x86_sse2_movnt_i addr:$dst, GR32:$src)]>,
2099 TB, Requires<[HasSSE2]>;
2101 //===----------------------------------------------------------------------===//
2102 // SSE 1 & 2 - Misc Instructions (No AVX form)
2103 //===----------------------------------------------------------------------===//
2105 // Prefetch intrinsic.
2106 def PREFETCHT0 : PSI<0x18, MRM1m, (outs), (ins i8mem:$src),
2107 "prefetcht0\t$src", [(prefetch addr:$src, imm, (i32 3))]>;
2108 def PREFETCHT1 : PSI<0x18, MRM2m, (outs), (ins i8mem:$src),
2109 "prefetcht1\t$src", [(prefetch addr:$src, imm, (i32 2))]>;
2110 def PREFETCHT2 : PSI<0x18, MRM3m, (outs), (ins i8mem:$src),
2111 "prefetcht2\t$src", [(prefetch addr:$src, imm, (i32 1))]>;
2112 def PREFETCHNTA : PSI<0x18, MRM0m, (outs), (ins i8mem:$src),
2113 "prefetchnta\t$src", [(prefetch addr:$src, imm, (i32 0))]>;
2115 // Store fence
2116 def SFENCE : I<0xAE, MRM_F8, (outs), (ins), "sfence", [(int_x86_sse_sfence)]>,
2117 TB, Requires<[HasSSE1]>;
2118 def : Pat<(X86SFence), (SFENCE)>;
2120 // Alias instructions that map zero vector to pxor / xorp* for sse.
2121 // We set canFoldAsLoad because this can be converted to a constant-pool
2122 // load of an all-zeros value if folding it would be beneficial.
2123 // FIXME: Change encoding to pseudo! This is blocked right now by the x86
2124 // JIT implementation, which does not expand the instructions below like
2125 // X86MCInstLower does.
2126 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
2127 isCodeGenOnly = 1 in {
2128 def V_SET0PS : PSI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
2129 [(set VR128:$dst, (v4f32 immAllZerosV))]>;
2130 def V_SET0PD : PDI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
2131 [(set VR128:$dst, (v2f64 immAllZerosV))]>;
2132 let ExeDomain = SSEPackedInt in
2133 def V_SET0PI : PDI<0xEF, MRMInitReg, (outs VR128:$dst), (ins), "",
2134 [(set VR128:$dst, (v4i32 immAllZerosV))]>;
2135 }
2137 // The same as done above but for AVX. The 128-bit versions are the
2138 // same, but re-encoded. The 256-bit versions do not support a PI form.
2139 // FIXME: Change encoding to pseudo! This is blocked right now by the x86
2140 // JIT implementation, which does not expand the instructions below like
2141 // X86MCInstLower does.
2142 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
2143 isCodeGenOnly = 1, Predicates = [HasAVX] in {
2144 def AVX_SET0PS : PSI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
2145 [(set VR128:$dst, (v4f32 immAllZerosV))]>, VEX_4V;
2146 def AVX_SET0PD : PDI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
2147 [(set VR128:$dst, (v2f64 immAllZerosV))]>, VEX_4V;
2148 def AVX_SET0PSY : PSI<0x57, MRMInitReg, (outs VR256:$dst), (ins), "",
2149 [(set VR256:$dst, (v8f32 immAllZerosV))]>, VEX_4V;
2150 def AVX_SET0PDY : PDI<0x57, MRMInitReg, (outs VR256:$dst), (ins), "",
2151 [(set VR256:$dst, (v4f64 immAllZerosV))]>, VEX_4V;
2152 let ExeDomain = SSEPackedInt in
2153 def AVX_SET0PI : PDI<0xEF, MRMInitReg, (outs VR128:$dst), (ins), "",
2154 [(set VR128:$dst, (v4i32 immAllZerosV))]>;
2155 }
2157 def : Pat<(v2i64 immAllZerosV), (V_SET0PI)>;
2158 def : Pat<(v8i16 immAllZerosV), (V_SET0PI)>;
2159 def : Pat<(v16i8 immAllZerosV), (V_SET0PI)>;
2161 def : Pat<(f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
2162 (f32 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
2164 //===----------------------------------------------------------------------===//
2165 // SSE 1 & 2 - Load/Store MXCSR register
2166 //===----------------------------------------------------------------------===//
2168 let isAsmParserOnly = 1 in {
2169 def VLDMXCSR : VPSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
2170 "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>, VEX;
2171 def VSTMXCSR : VPSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
2172 "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>, VEX;
2175 def LDMXCSR : PSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
2176 "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>;
2177 def STMXCSR : PSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
2178 "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>;
2180 //===---------------------------------------------------------------------===//
2181 // SSE2 - Move Aligned/Unaligned Packed Integer Instructions
2182 //===---------------------------------------------------------------------===//
2184 let ExeDomain = SSEPackedInt in { // SSE integer instructions
2186 let isAsmParserOnly = 1 in {
2187 let neverHasSideEffects = 1 in {
2188 def VMOVDQArr : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2189 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2190 def VMOVDQAYrr : VPDI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
2191 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2193 def VMOVDQUrr : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2194 "movdqu\t{$src, $dst|$dst, $src}", []>, XS, VEX;
2195 def VMOVDQUYrr : VPDI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
2196 "movdqu\t{$src, $dst|$dst, $src}", []>, XS, VEX;
2198 let canFoldAsLoad = 1, mayLoad = 1 in {
2199 def VMOVDQArm : VPDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2200 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2201 def VMOVDQAYrm : VPDI<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
2202 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2203 let Predicates = [HasAVX] in {
2204 def VMOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2205 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
2206 def VMOVDQUYrm : I<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
2207 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
2211 let mayStore = 1 in {
2212 def VMOVDQAmr : VPDI<0x7F, MRMDestMem, (outs),
2213 (ins i128mem:$dst, VR128:$src),
2214 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2215 def VMOVDQAYmr : VPDI<0x7F, MRMDestMem, (outs),
2216 (ins i256mem:$dst, VR256:$src),
2217 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2218 let Predicates = [HasAVX] in {
2219 def VMOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2220 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
2221 def VMOVDQUYmr : I<0x7F, MRMDestMem, (outs), (ins i256mem:$dst, VR256:$src),
2222 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
2227 let neverHasSideEffects = 1 in
2228 def MOVDQArr : PDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2229 "movdqa\t{$src, $dst|$dst, $src}", []>;
2231 let canFoldAsLoad = 1, mayLoad = 1 in {
2232 def MOVDQArm : PDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2233 "movdqa\t{$src, $dst|$dst, $src}",
2234 [/*(set VR128:$dst, (alignedloadv2i64 addr:$src))*/]>;
2235 def MOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2236 "movdqu\t{$src, $dst|$dst, $src}",
2237 [/*(set VR128:$dst, (loadv2i64 addr:$src))*/]>,
2238 XS, Requires<[HasSSE2]>;
2239 }
2241 let mayStore = 1 in {
2242 def MOVDQAmr : PDI<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2243 "movdqa\t{$src, $dst|$dst, $src}",
2244 [/*(alignedstore (v2i64 VR128:$src), addr:$dst)*/]>;
2245 def MOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2246 "movdqu\t{$src, $dst|$dst, $src}",
2247 [/*(store (v2i64 VR128:$src), addr:$dst)*/]>,
2248 XS, Requires<[HasSSE2]>;
2249 }
2251 // Intrinsic forms of MOVDQU load and store
2252 let isAsmParserOnly = 1 in {
2253 let canFoldAsLoad = 1 in
2254 def VMOVDQUrm_Int : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2255 "vmovdqu\t{$src, $dst|$dst, $src}",
2256 [(set VR128:$dst, (int_x86_sse2_loadu_dq addr:$src))]>,
2257 XS, VEX, Requires<[HasAVX]>;
2258 def VMOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2259 "vmovdqu\t{$src, $dst|$dst, $src}",
2260 [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
2261 XS, VEX, Requires<[HasAVX]>;
2262 }
2264 let canFoldAsLoad = 1 in
2265 def MOVDQUrm_Int : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2266 "movdqu\t{$src, $dst|$dst, $src}",
2267 [(set VR128:$dst, (int_x86_sse2_loadu_dq addr:$src))]>,
2268 XS, Requires<[HasSSE2]>;
2269 def MOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2270 "movdqu\t{$src, $dst|$dst, $src}",
2271 [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
2272 XS, Requires<[HasSSE2]>;
2274 } // ExeDomain = SSEPackedInt
2276 def : Pat<(int_x86_avx_loadu_dq_256 addr:$src), (VMOVDQUYrm addr:$src)>;
2277 def : Pat<(int_x86_avx_storeu_dq_256 addr:$dst, VR256:$src),
2278 (VMOVDQUYmr addr:$dst, VR256:$src)>;
2280 //===---------------------------------------------------------------------===//
2281 // SSE2 - Packed Integer Arithmetic Instructions
2282 //===---------------------------------------------------------------------===//
2284 let ExeDomain = SSEPackedInt in { // SSE integer instructions
2286 multiclass PDI_binop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId,
2287 bit IsCommutable = 0, bit Is2Addr = 1> {
2288 let isCommutable = IsCommutable in
2289 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2290 (ins VR128:$src1, VR128:$src2),
2291 !if(Is2Addr,
2292 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2293 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2294 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
2295 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2296 (ins VR128:$src1, i128mem:$src2),
2297 !if(Is2Addr,
2298 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2299 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2300 [(set VR128:$dst, (IntId VR128:$src1,
2301 (bitconvert (memopv2i64 addr:$src2))))]>;
2302 }
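// Instantiating "defm FOO : PDI_binop_rm_int<...>" therefore produces FOOrr
// and FOOrm, and the !if(Is2Addr, ...) selects the two-address asm string
// (destination tied to $src1) for the legacy SSE encodings versus the
// three-operand string for the VEX encodings.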
2304 multiclass PDI_binop_rmi_int<bits<8> opc, bits<8> opc2, Format ImmForm,
2305 string OpcodeStr, Intrinsic IntId,
2306 Intrinsic IntId2, bit Is2Addr = 1> {
2307 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2308 (ins VR128:$src1, VR128:$src2),
2309 !if(Is2Addr,
2310 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2311 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2312 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
2313 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2314 (ins VR128:$src1, i128mem:$src2),
2315 !if(Is2Addr,
2316 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2317 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2318 [(set VR128:$dst, (IntId VR128:$src1,
2319 (bitconvert (memopv2i64 addr:$src2))))]>;
2320 def ri : PDIi8<opc2, ImmForm, (outs VR128:$dst),
2321 (ins VR128:$src1, i32i8imm:$src2),
2322 !if(Is2Addr,
2323 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2324 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2325 [(set VR128:$dst, (IntId2 VR128:$src1, (i32 imm:$src2)))]>;
2326 }
2328 /// PDI_binop_rm - Simple SSE2 binary operator.
2329 multiclass PDI_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
2330 ValueType OpVT, bit IsCommutable = 0, bit Is2Addr = 1> {
2331 let isCommutable = IsCommutable in
2332 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2333 (ins VR128:$src1, VR128:$src2),
2334 !if(Is2Addr,
2335 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2336 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2337 [(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]>;
2338 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2339 (ins VR128:$src1, i128mem:$src2),
2340 !if(Is2Addr,
2341 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2342 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2343 [(set VR128:$dst, (OpVT (OpNode VR128:$src1,
2344 (bitconvert (memopv2i64 addr:$src2)))))]>;
2345 }
2347 /// PDI_binop_rm_v2i64 - Simple SSE2 binary operator whose type is v2i64.
2349 /// FIXME: we could eliminate this and use PDI_binop_rm instead if tblgen knew
2350 /// to collapse (bitconvert VT to VT) into its operand.
2352 multiclass PDI_binop_rm_v2i64<bits<8> opc, string OpcodeStr, SDNode OpNode,
2353 bit IsCommutable = 0, bit Is2Addr = 1> {
2354 let isCommutable = IsCommutable in
2355 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2356 (ins VR128:$src1, VR128:$src2),
2357 !if(Is2Addr,
2358 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2359 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2360 [(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))]>;
2361 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2362 (ins VR128:$src1, i128mem:$src2),
2363 !if(Is2Addr,
2364 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2365 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2366 [(set VR128:$dst, (OpNode VR128:$src1, (memopv2i64 addr:$src2)))]>;
2367 }
2369 } // ExeDomain = SSEPackedInt
2371 // 128-bit Integer Arithmetic
2373 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
2374 defm VPADDB : PDI_binop_rm<0xFC, "vpaddb", add, v16i8, 1, 0 /*3addr*/>, VEX_4V;
2375 defm VPADDW : PDI_binop_rm<0xFD, "vpaddw", add, v8i16, 1, 0>, VEX_4V;
2376 defm VPADDD : PDI_binop_rm<0xFE, "vpaddd", add, v4i32, 1, 0>, VEX_4V;
2377 defm VPADDQ : PDI_binop_rm_v2i64<0xD4, "vpaddq", add, 1, 0>, VEX_4V;
2378 defm VPMULLW : PDI_binop_rm<0xD5, "vpmullw", mul, v8i16, 1, 0>, VEX_4V;
2379 defm VPSUBB : PDI_binop_rm<0xF8, "vpsubb", sub, v16i8, 0, 0>, VEX_4V;
2380 defm VPSUBW : PDI_binop_rm<0xF9, "vpsubw", sub, v8i16, 0, 0>, VEX_4V;
2381 defm VPSUBD : PDI_binop_rm<0xFA, "vpsubd", sub, v4i32, 0, 0>, VEX_4V;
2382 defm VPSUBQ : PDI_binop_rm_v2i64<0xFB, "vpsubq", sub, 0, 0>, VEX_4V;
2385 defm VPSUBSB : PDI_binop_rm_int<0xE8, "vpsubsb" , int_x86_sse2_psubs_b, 0, 0>,
2386 VEX_4V;
2387 defm VPSUBSW : PDI_binop_rm_int<0xE9, "vpsubsw" , int_x86_sse2_psubs_w, 0, 0>,
2388 VEX_4V;
2389 defm VPSUBUSB : PDI_binop_rm_int<0xD8, "vpsubusb", int_x86_sse2_psubus_b, 0, 0>,
2390 VEX_4V;
2391 defm VPSUBUSW : PDI_binop_rm_int<0xD9, "vpsubusw", int_x86_sse2_psubus_w, 0, 0>,
2392 VEX_4V;
2393 defm VPADDSB : PDI_binop_rm_int<0xEC, "vpaddsb" , int_x86_sse2_padds_b, 1, 0>,
2394 VEX_4V;
2395 defm VPADDSW : PDI_binop_rm_int<0xED, "vpaddsw" , int_x86_sse2_padds_w, 1, 0>,
2396 VEX_4V;
2397 defm VPADDUSB : PDI_binop_rm_int<0xDC, "vpaddusb", int_x86_sse2_paddus_b, 1, 0>,
2398 VEX_4V;
2399 defm VPADDUSW : PDI_binop_rm_int<0xDD, "vpaddusw", int_x86_sse2_paddus_w, 1, 0>,
2400 VEX_4V;
2401 defm VPMULHUW : PDI_binop_rm_int<0xE4, "vpmulhuw", int_x86_sse2_pmulhu_w, 1, 0>,
2402 VEX_4V;
2403 defm VPMULHW : PDI_binop_rm_int<0xE5, "vpmulhw" , int_x86_sse2_pmulh_w, 1, 0>,
2404 VEX_4V;
2405 defm VPMULUDQ : PDI_binop_rm_int<0xF4, "vpmuludq", int_x86_sse2_pmulu_dq, 1, 0>,
2406 VEX_4V;
2407 defm VPMADDWD : PDI_binop_rm_int<0xF5, "vpmaddwd", int_x86_sse2_pmadd_wd, 1, 0>,
2408 VEX_4V;
2409 defm VPAVGB : PDI_binop_rm_int<0xE0, "vpavgb", int_x86_sse2_pavg_b, 1, 0>,
2410 VEX_4V;
2411 defm VPAVGW : PDI_binop_rm_int<0xE3, "vpavgw", int_x86_sse2_pavg_w, 1, 0>,
2412 VEX_4V;
2413 defm VPMINUB : PDI_binop_rm_int<0xDA, "vpminub", int_x86_sse2_pminu_b, 1, 0>,
2414 VEX_4V;
2415 defm VPMINSW : PDI_binop_rm_int<0xEA, "vpminsw", int_x86_sse2_pmins_w, 1, 0>,
2416 VEX_4V;
2417 defm VPMAXUB : PDI_binop_rm_int<0xDE, "vpmaxub", int_x86_sse2_pmaxu_b, 1, 0>,
2418 VEX_4V;
2419 defm VPMAXSW : PDI_binop_rm_int<0xEE, "vpmaxsw", int_x86_sse2_pmaxs_w, 1, 0>,
2420 VEX_4V;
2421 defm VPSADBW : PDI_binop_rm_int<0xF6, "vpsadbw", int_x86_sse2_psad_bw, 1, 0>,
2422 VEX_4V;
2423 }
2425 let Constraints = "$src1 = $dst" in {
2426 defm PADDB : PDI_binop_rm<0xFC, "paddb", add, v16i8, 1>;
2427 defm PADDW : PDI_binop_rm<0xFD, "paddw", add, v8i16, 1>;
2428 defm PADDD : PDI_binop_rm<0xFE, "paddd", add, v4i32, 1>;
2429 defm PADDQ : PDI_binop_rm_v2i64<0xD4, "paddq", add, 1>;
2430 defm PMULLW : PDI_binop_rm<0xD5, "pmullw", mul, v8i16, 1>;
2431 defm PSUBB : PDI_binop_rm<0xF8, "psubb", sub, v16i8>;
2432 defm PSUBW : PDI_binop_rm<0xF9, "psubw", sub, v8i16>;
2433 defm PSUBD : PDI_binop_rm<0xFA, "psubd", sub, v4i32>;
2434 defm PSUBQ : PDI_binop_rm_v2i64<0xFB, "psubq", sub>;
2437 defm PSUBSB : PDI_binop_rm_int<0xE8, "psubsb" , int_x86_sse2_psubs_b>;
2438 defm PSUBSW : PDI_binop_rm_int<0xE9, "psubsw" , int_x86_sse2_psubs_w>;
2439 defm PSUBUSB : PDI_binop_rm_int<0xD8, "psubusb", int_x86_sse2_psubus_b>;
2440 defm PSUBUSW : PDI_binop_rm_int<0xD9, "psubusw", int_x86_sse2_psubus_w>;
2441 defm PADDSB : PDI_binop_rm_int<0xEC, "paddsb" , int_x86_sse2_padds_b, 1>;
2442 defm PADDSW : PDI_binop_rm_int<0xED, "paddsw" , int_x86_sse2_padds_w, 1>;
2443 defm PADDUSB : PDI_binop_rm_int<0xDC, "paddusb", int_x86_sse2_paddus_b, 1>;
2444 defm PADDUSW : PDI_binop_rm_int<0xDD, "paddusw", int_x86_sse2_paddus_w, 1>;
2445 defm PMULHUW : PDI_binop_rm_int<0xE4, "pmulhuw", int_x86_sse2_pmulhu_w, 1>;
2446 defm PMULHW : PDI_binop_rm_int<0xE5, "pmulhw" , int_x86_sse2_pmulh_w, 1>;
2447 defm PMULUDQ : PDI_binop_rm_int<0xF4, "pmuludq", int_x86_sse2_pmulu_dq, 1>;
2448 defm PMADDWD : PDI_binop_rm_int<0xF5, "pmaddwd", int_x86_sse2_pmadd_wd, 1>;
2449 defm PAVGB : PDI_binop_rm_int<0xE0, "pavgb", int_x86_sse2_pavg_b, 1>;
2450 defm PAVGW : PDI_binop_rm_int<0xE3, "pavgw", int_x86_sse2_pavg_w, 1>;
2451 defm PMINUB : PDI_binop_rm_int<0xDA, "pminub", int_x86_sse2_pminu_b, 1>;
2452 defm PMINSW : PDI_binop_rm_int<0xEA, "pminsw", int_x86_sse2_pmins_w, 1>;
2453 defm PMAXUB : PDI_binop_rm_int<0xDE, "pmaxub", int_x86_sse2_pmaxu_b, 1>;
2454 defm PMAXSW : PDI_binop_rm_int<0xEE, "pmaxsw", int_x86_sse2_pmaxs_w, 1>;
2455 defm PSADBW : PDI_binop_rm_int<0xF6, "psadbw", int_x86_sse2_psad_bw, 1>;
2457 } // Constraints = "$src1 = $dst"
2459 //===---------------------------------------------------------------------===//
2460 // SSE2 - Packed Integer Logical Instructions
2461 //===---------------------------------------------------------------------===//
2463 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
2464 defm VPSLLW : PDI_binop_rmi_int<0xF1, 0x71, MRM6r, "vpsllw",
2465 int_x86_sse2_psll_w, int_x86_sse2_pslli_w, 0>,
2466 VEX_4V;
2467 defm VPSLLD : PDI_binop_rmi_int<0xF2, 0x72, MRM6r, "vpslld",
2468 int_x86_sse2_psll_d, int_x86_sse2_pslli_d, 0>,
2469 VEX_4V;
2470 defm VPSLLQ : PDI_binop_rmi_int<0xF3, 0x73, MRM6r, "vpsllq",
2471 int_x86_sse2_psll_q, int_x86_sse2_pslli_q, 0>,
2472 VEX_4V;
2474 defm VPSRLW : PDI_binop_rmi_int<0xD1, 0x71, MRM2r, "vpsrlw",
2475 int_x86_sse2_psrl_w, int_x86_sse2_psrli_w, 0>,
2476 VEX_4V;
2477 defm VPSRLD : PDI_binop_rmi_int<0xD2, 0x72, MRM2r, "vpsrld",
2478 int_x86_sse2_psrl_d, int_x86_sse2_psrli_d, 0>,
2479 VEX_4V;
2480 defm VPSRLQ : PDI_binop_rmi_int<0xD3, 0x73, MRM2r, "vpsrlq",
2481 int_x86_sse2_psrl_q, int_x86_sse2_psrli_q, 0>,
2482 VEX_4V;
2484 defm VPSRAW : PDI_binop_rmi_int<0xE1, 0x71, MRM4r, "vpsraw",
2485 int_x86_sse2_psra_w, int_x86_sse2_psrai_w, 0>,
2486 VEX_4V;
2487 defm VPSRAD : PDI_binop_rmi_int<0xE2, 0x72, MRM4r, "vpsrad",
2488 int_x86_sse2_psra_d, int_x86_sse2_psrai_d, 0>,
2489 VEX_4V;
2491 defm VPAND : PDI_binop_rm_v2i64<0xDB, "vpand", and, 1, 0>, VEX_4V;
2492 defm VPOR : PDI_binop_rm_v2i64<0xEB, "vpor" , or, 1, 0>, VEX_4V;
2493 defm VPXOR : PDI_binop_rm_v2i64<0xEF, "vpxor", xor, 1, 0>, VEX_4V;
2495 let ExeDomain = SSEPackedInt in {
2496 let neverHasSideEffects = 1 in {
2497 // 128-bit logical shifts.
2498 def VPSLLDQri : PDIi8<0x73, MRM7r,
2499 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2500 "vpslldq\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
2502 def VPSRLDQri : PDIi8<0x73, MRM3r,
2503 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2504 "vpsrldq\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
2506 // PSRADQri doesn't exist in SSE[1-3].
2508 def VPANDNrr : PDI<0xDF, MRMSrcReg,
2509 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2510 "vpandn\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2511 [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
2512 VR128:$src2)))]>, VEX_4V;
2514 def VPANDNrm : PDI<0xDF, MRMSrcMem,
2515 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2516 "vpandn\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2517 [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
2518 (memopv2i64 addr:$src2))))]>,
2519 VEX_4V;
2520 }
2521 }
2523 let Constraints = "$src1 = $dst" in {
2524 defm PSLLW : PDI_binop_rmi_int<0xF1, 0x71, MRM6r, "psllw",
2525 int_x86_sse2_psll_w, int_x86_sse2_pslli_w>;
2526 defm PSLLD : PDI_binop_rmi_int<0xF2, 0x72, MRM6r, "pslld",
2527 int_x86_sse2_psll_d, int_x86_sse2_pslli_d>;
2528 defm PSLLQ : PDI_binop_rmi_int<0xF3, 0x73, MRM6r, "psllq",
2529 int_x86_sse2_psll_q, int_x86_sse2_pslli_q>;
2531 defm PSRLW : PDI_binop_rmi_int<0xD1, 0x71, MRM2r, "psrlw",
2532 int_x86_sse2_psrl_w, int_x86_sse2_psrli_w>;
2533 defm PSRLD : PDI_binop_rmi_int<0xD2, 0x72, MRM2r, "psrld",
2534 int_x86_sse2_psrl_d, int_x86_sse2_psrli_d>;
2535 defm PSRLQ : PDI_binop_rmi_int<0xD3, 0x73, MRM2r, "psrlq",
2536 int_x86_sse2_psrl_q, int_x86_sse2_psrli_q>;
2538 defm PSRAW : PDI_binop_rmi_int<0xE1, 0x71, MRM4r, "psraw",
2539 int_x86_sse2_psra_w, int_x86_sse2_psrai_w>;
2540 defm PSRAD : PDI_binop_rmi_int<0xE2, 0x72, MRM4r, "psrad",
2541 int_x86_sse2_psra_d, int_x86_sse2_psrai_d>;
2543 defm PAND : PDI_binop_rm_v2i64<0xDB, "pand", and, 1>;
2544 defm POR : PDI_binop_rm_v2i64<0xEB, "por" , or, 1>;
2545 defm PXOR : PDI_binop_rm_v2i64<0xEF, "pxor", xor, 1>;
2547 let ExeDomain = SSEPackedInt in {
2548 let neverHasSideEffects = 1 in {
2549 // 128-bit logical shifts.
2550 def PSLLDQri : PDIi8<0x73, MRM7r,
2551 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2552 "pslldq\t{$src2, $dst|$dst, $src2}", []>;
2553 def PSRLDQri : PDIi8<0x73, MRM3r,
2554 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2555 "psrldq\t{$src2, $dst|$dst, $src2}", []>;
2556 // PSRADQri doesn't exist in SSE[1-3].
2557 }
2558 def PANDNrr : PDI<0xDF, MRMSrcReg,
2559 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2560 "pandn\t{$src2, $dst|$dst, $src2}",
2561 [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
2562 VR128:$src2)))]>;
2564 def PANDNrm : PDI<0xDF, MRMSrcMem,
2565 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2566 "pandn\t{$src2, $dst|$dst, $src2}",
2567 [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
2568 (memopv2i64 addr:$src2))))]>;
2569 }
2570 } // Constraints = "$src1 = $dst"
2572 let Predicates = [HasAVX] in {
2573 def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
2574 (v2i64 (VPSLLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2575 def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
2576 (v2i64 (VPSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2577 def : Pat<(int_x86_sse2_psll_dq_bs VR128:$src1, imm:$src2),
2578 (v2i64 (VPSLLDQri VR128:$src1, imm:$src2))>;
2579 def : Pat<(int_x86_sse2_psrl_dq_bs VR128:$src1, imm:$src2),
2580 (v2i64 (VPSRLDQri VR128:$src1, imm:$src2))>;
2581 def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
2582 (v2f64 (VPSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2584 // Shift up / down and insert zeros.
2585 def : Pat<(v2i64 (X86vshl VR128:$src, (i8 imm:$amt))),
2586 (v2i64 (VPSLLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
2587 def : Pat<(v2i64 (X86vshr VR128:$src, (i8 imm:$amt))),
2588 (v2i64 (VPSRLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
2589 }
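// Added note (not from the original source): the psll_dq/psrl_dq intrinsics
// express the shift amount in bits, while PSLLDQ/PSRLDQ shift whole bytes;
// BYTE_imm rescales the immediate (bits -> bytes, i.e. divide by 8) before
// encoding. The *_dq_bs ("byte shift") intrinsic forms are already
// byte-scaled and pass the immediate through unchanged, so e.g. a 32-bit
// shift request ends up as "vpslldq $4, %xmm1, %xmm0".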
2591 let Predicates = [HasSSE2] in {
2592 def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
2593 (v2i64 (PSLLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2594 def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
2595 (v2i64 (PSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2596 def : Pat<(int_x86_sse2_psll_dq_bs VR128:$src1, imm:$src2),
2597 (v2i64 (PSLLDQri VR128:$src1, imm:$src2))>;
2598 def : Pat<(int_x86_sse2_psrl_dq_bs VR128:$src1, imm:$src2),
2599 (v2i64 (PSRLDQri VR128:$src1, imm:$src2))>;
2600 def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
2601 (v2f64 (PSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2603 // Shift up / down and insert zeros.
2604 def : Pat<(v2i64 (X86vshl VR128:$src, (i8 imm:$amt))),
2605 (v2i64 (PSLLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
2606 def : Pat<(v2i64 (X86vshr VR128:$src, (i8 imm:$amt))),
2607 (v2i64 (PSRLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
2608 }
2610 //===---------------------------------------------------------------------===//
2611 // SSE2 - Packed Integer Comparison Instructions
2612 //===---------------------------------------------------------------------===//
2614 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
2615 defm VPCMPEQB : PDI_binop_rm_int<0x74, "vpcmpeqb", int_x86_sse2_pcmpeq_b, 1,
2616 0>, VEX_4V;
2617 defm VPCMPEQW : PDI_binop_rm_int<0x75, "vpcmpeqw", int_x86_sse2_pcmpeq_w, 1,
2618 0>, VEX_4V;
2619 defm VPCMPEQD : PDI_binop_rm_int<0x76, "vpcmpeqd", int_x86_sse2_pcmpeq_d, 1,
2620 0>, VEX_4V;
2621 defm VPCMPGTB : PDI_binop_rm_int<0x64, "vpcmpgtb", int_x86_sse2_pcmpgt_b, 0,
2622 0>, VEX_4V;
2623 defm VPCMPGTW : PDI_binop_rm_int<0x65, "vpcmpgtw", int_x86_sse2_pcmpgt_w, 0,
2624 0>, VEX_4V;
2625 defm VPCMPGTD : PDI_binop_rm_int<0x66, "vpcmpgtd", int_x86_sse2_pcmpgt_d, 0,
2626 0>, VEX_4V;
2627 }
2629 let Constraints = "$src1 = $dst" in {
2630 defm PCMPEQB : PDI_binop_rm_int<0x74, "pcmpeqb", int_x86_sse2_pcmpeq_b, 1>;
2631 defm PCMPEQW : PDI_binop_rm_int<0x75, "pcmpeqw", int_x86_sse2_pcmpeq_w, 1>;
2632 defm PCMPEQD : PDI_binop_rm_int<0x76, "pcmpeqd", int_x86_sse2_pcmpeq_d, 1>;
2633 defm PCMPGTB : PDI_binop_rm_int<0x64, "pcmpgtb", int_x86_sse2_pcmpgt_b>;
2634 defm PCMPGTW : PDI_binop_rm_int<0x65, "pcmpgtw", int_x86_sse2_pcmpgt_w>;
2635 defm PCMPGTD : PDI_binop_rm_int<0x66, "pcmpgtd", int_x86_sse2_pcmpgt_d>;
2636 } // Constraints = "$src1 = $dst"
2638 def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, VR128:$src2)),
2639 (PCMPEQBrr VR128:$src1, VR128:$src2)>;
2640 def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, (memop addr:$src2))),
2641 (PCMPEQBrm VR128:$src1, addr:$src2)>;
2642 def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, VR128:$src2)),
2643 (PCMPEQWrr VR128:$src1, VR128:$src2)>;
2644 def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, (memop addr:$src2))),
2645 (PCMPEQWrm VR128:$src1, addr:$src2)>;
2646 def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, VR128:$src2)),
2647 (PCMPEQDrr VR128:$src1, VR128:$src2)>;
2648 def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, (memop addr:$src2))),
2649 (PCMPEQDrm VR128:$src1, addr:$src2)>;
2651 def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, VR128:$src2)),
2652 (PCMPGTBrr VR128:$src1, VR128:$src2)>;
2653 def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, (memop addr:$src2))),
2654 (PCMPGTBrm VR128:$src1, addr:$src2)>;
2655 def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, VR128:$src2)),
2656 (PCMPGTWrr VR128:$src1, VR128:$src2)>;
2657 def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, (memop addr:$src2))),
2658 (PCMPGTWrm VR128:$src1, addr:$src2)>;
2659 def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, VR128:$src2)),
2660 (PCMPGTDrr VR128:$src1, VR128:$src2)>;
2661 def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, (memop addr:$src2))),
2662 (PCMPGTDrm VR128:$src1, addr:$src2)>;
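// Added note: each pcmpeq*/pcmpgt* lane compare produces an all-ones or
// all-zero mask element rather than a boolean, roughly:
//   for i in lanes: dst[i] = (src1[i] cmp src2[i]) ? ~0 : 0
// which is why these map naturally onto vector select/mask idioms.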
2664 //===---------------------------------------------------------------------===//
2665 // SSE2 - Packed Integer Pack Instructions
2666 //===---------------------------------------------------------------------===//
2668 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
2669 defm VPACKSSWB : PDI_binop_rm_int<0x63, "vpacksswb", int_x86_sse2_packsswb_128,
2670 0, 0>, VEX_4V;
2671 defm VPACKSSDW : PDI_binop_rm_int<0x6B, "vpackssdw", int_x86_sse2_packssdw_128,
2672 0, 0>, VEX_4V;
2673 defm VPACKUSWB : PDI_binop_rm_int<0x67, "vpackuswb", int_x86_sse2_packuswb_128,
2674 0, 0>, VEX_4V;
2675 }
2677 let Constraints = "$src1 = $dst" in {
2678 defm PACKSSWB : PDI_binop_rm_int<0x63, "packsswb", int_x86_sse2_packsswb_128>;
2679 defm PACKSSDW : PDI_binop_rm_int<0x6B, "packssdw", int_x86_sse2_packssdw_128>;
2680 defm PACKUSWB : PDI_binop_rm_int<0x67, "packuswb", int_x86_sse2_packuswb_128>;
2681 } // Constraints = "$src1 = $dst"
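// Added note: the pack instructions narrow two source vectors into one with
// saturation, e.g. packsswb takes 8 x i16 from each operand and produces
// 16 x i8, clamping each value to [-128, 127] on the way down (packuswb
// clamps to [0, 255]).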
2683 //===---------------------------------------------------------------------===//
2684 // SSE2 - Packed Integer Shuffle Instructions
2685 //===---------------------------------------------------------------------===//
2687 let ExeDomain = SSEPackedInt in {
2688 multiclass sse2_pshuffle<string OpcodeStr, ValueType vt, PatFrag pshuf_frag,
2689 PatFrag bc_frag> {
2690 def ri : Ii8<0x70, MRMSrcReg,
2691 (outs VR128:$dst), (ins VR128:$src1, i8imm:$src2),
2692 !strconcat(OpcodeStr,
2693 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2694 [(set VR128:$dst, (vt (pshuf_frag:$src2 VR128:$src1,
2695 (undef))))]>;
2696 def mi : Ii8<0x70, MRMSrcMem,
2697 (outs VR128:$dst), (ins i128mem:$src1, i8imm:$src2),
2698 !strconcat(OpcodeStr,
2699 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2700 [(set VR128:$dst, (vt (pshuf_frag:$src2
2701 (bc_frag (memopv2i64 addr:$src1)),
2702 (undef))))]>;
2703 }
2704 } // ExeDomain = SSEPackedInt
2706 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
2707 let AddedComplexity = 5 in
2708 defm VPSHUFD : sse2_pshuffle<"vpshufd", v4i32, pshufd, bc_v4i32>, OpSize,
2709 VEX;
2711 // SSE2 with ImmT == Imm8 and XS prefix.
2712 defm VPSHUFHW : sse2_pshuffle<"vpshufhw", v8i16, pshufhw, bc_v8i16>, XS,
2713 VEX;
2715 // SSE2 with ImmT == Imm8 and XD prefix.
2716 defm VPSHUFLW : sse2_pshuffle<"vpshuflw", v8i16, pshuflw, bc_v8i16>, XD,
2717 VEX;
2718 }
2720 let Predicates = [HasSSE2] in {
2721 let AddedComplexity = 5 in
2722 defm PSHUFD : sse2_pshuffle<"pshufd", v4i32, pshufd, bc_v4i32>, TB, OpSize;
2724 // SSE2 with ImmT == Imm8 and XS prefix.
2725 defm PSHUFHW : sse2_pshuffle<"pshufhw", v8i16, pshufhw, bc_v8i16>, XS;
2727 // SSE2 with ImmT == Imm8 and XD prefix.
2728 defm PSHUFLW : sse2_pshuffle<"pshuflw", v8i16, pshuflw, bc_v8i16>, XD;
2729 }
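// Added note: the pshuf* immediate selects one source element per 2-bit
// field, e.g. "pshufd $0x1B, %xmm0, %xmm1" (0x1B = 0b00011011) reverses the
// four dwords. pshuflw/pshufhw apply the same scheme to only the low/high
// four words and copy the other half through unchanged.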
2731 //===---------------------------------------------------------------------===//
2732 // SSE2 - Packed Integer Unpack Instructions
2733 //===---------------------------------------------------------------------===//
2735 let ExeDomain = SSEPackedInt in {
2736 multiclass sse2_unpack<bits<8> opc, string OpcodeStr, ValueType vt,
2737 PatFrag unp_frag, PatFrag bc_frag, bit Is2Addr = 1> {
2738 def rr : PDI<opc, MRMSrcReg,
2739 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2740 !if(Is2Addr,
2741 !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
2742 !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2743 [(set VR128:$dst, (vt (unp_frag VR128:$src1, VR128:$src2)))]>;
2744 def rm : PDI<opc, MRMSrcMem,
2745 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2746 !if(Is2Addr,
2747 !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
2748 !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2749 [(set VR128:$dst, (unp_frag VR128:$src1,
2750 (bc_frag (memopv2i64
2751 addr:$src2))))]>;
2752 }
2754 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
2755 defm VPUNPCKLBW : sse2_unpack<0x60, "vpunpcklbw", v16i8, unpckl, bc_v16i8,
2756 0>, VEX_4V;
2757 defm VPUNPCKLWD : sse2_unpack<0x61, "vpunpcklwd", v8i16, unpckl, bc_v8i16,
2758 0>, VEX_4V;
2759 defm VPUNPCKLDQ : sse2_unpack<0x62, "vpunpckldq", v4i32, unpckl, bc_v4i32,
2760 0>, VEX_4V;
2762 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
2763 /// knew to collapse (bitconvert VT to VT) into its operand.
2764 def VPUNPCKLQDQrr : PDI<0x6C, MRMSrcReg,
2765 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2766 "vpunpcklqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2767 [(set VR128:$dst,
2768 (v2i64 (unpckl VR128:$src1, VR128:$src2)))]>, VEX_4V;
2769 def VPUNPCKLQDQrm : PDI<0x6C, MRMSrcMem,
2770 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2771 "vpunpcklqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2772 [(set VR128:$dst,
2773 (v2i64 (unpckl VR128:$src1,
2774 (memopv2i64 addr:$src2))))]>, VEX_4V;
2776 defm VPUNPCKHBW : sse2_unpack<0x68, "vpunpckhbw", v16i8, unpckh, bc_v16i8,
2777 0>, VEX_4V;
2778 defm VPUNPCKHWD : sse2_unpack<0x69, "vpunpckhwd", v8i16, unpckh, bc_v8i16,
2779 0>, VEX_4V;
2780 defm VPUNPCKHDQ : sse2_unpack<0x6A, "vpunpckhdq", v4i32, unpckh, bc_v4i32,
2781 0>, VEX_4V;
2783 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
2784 /// knew to collapse (bitconvert VT to VT) into its operand.
2785 def VPUNPCKHQDQrr : PDI<0x6D, MRMSrcReg,
2786 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2787 "vpunpckhqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2788 [(set VR128:$dst,
2789 (v2i64 (unpckh VR128:$src1, VR128:$src2)))]>, VEX_4V;
2790 def VPUNPCKHQDQrm : PDI<0x6D, MRMSrcMem,
2791 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2792 "vpunpckhqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2793 [(set VR128:$dst,
2794 (v2i64 (unpckh VR128:$src1,
2795 (memopv2i64 addr:$src2))))]>, VEX_4V;
2796 }
2798 let Constraints = "$src1 = $dst" in {
2799 defm PUNPCKLBW : sse2_unpack<0x60, "punpcklbw", v16i8, unpckl, bc_v16i8>;
2800 defm PUNPCKLWD : sse2_unpack<0x61, "punpcklwd", v8i16, unpckl, bc_v8i16>;
2801 defm PUNPCKLDQ : sse2_unpack<0x62, "punpckldq", v4i32, unpckl, bc_v4i32>;
2803 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
2804 /// knew to collapse (bitconvert VT to VT) into its operand.
2805 def PUNPCKLQDQrr : PDI<0x6C, MRMSrcReg,
2806 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2807 "punpcklqdq\t{$src2, $dst|$dst, $src2}",
2808 [(set VR128:$dst,
2809 (v2i64 (unpckl VR128:$src1, VR128:$src2)))]>;
2810 def PUNPCKLQDQrm : PDI<0x6C, MRMSrcMem,
2811 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2812 "punpcklqdq\t{$src2, $dst|$dst, $src2}",
2813 [(set VR128:$dst,
2814 (v2i64 (unpckl VR128:$src1,
2815 (memopv2i64 addr:$src2))))]>;
2817 defm PUNPCKHBW : sse2_unpack<0x68, "punpckhbw", v16i8, unpckh, bc_v16i8>;
2818 defm PUNPCKHWD : sse2_unpack<0x69, "punpckhwd", v8i16, unpckh, bc_v8i16>;
2819 defm PUNPCKHDQ : sse2_unpack<0x6A, "punpckhdq", v4i32, unpckh, bc_v4i32>;
2821 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
2822 /// knew to collapse (bitconvert VT to VT) into its operand.
2823 def PUNPCKHQDQrr : PDI<0x6D, MRMSrcReg,
2824 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2825 "punpckhqdq\t{$src2, $dst|$dst, $src2}",
2826 [(set VR128:$dst,
2827 (v2i64 (unpckh VR128:$src1, VR128:$src2)))]>;
2828 def PUNPCKHQDQrm : PDI<0x6D, MRMSrcMem,
2829 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2830 "punpckhqdq\t{$src2, $dst|$dst, $src2}",
2831 [(set VR128:$dst,
2832 (v2i64 (unpckh VR128:$src1,
2833 (memopv2i64 addr:$src2))))]>;
2834 }
2836 } // ExeDomain = SSEPackedInt
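// Added note: punpckl* interleaves the low halves of the two sources and
// punpckh* the high halves, e.g. for punpcklbw:
//   dst = { a0,b0, a1,b1, a2,b2, a3,b3, a4,b4, a5,b5, a6,b6, a7,b7 }
// so unpacking a register with itself is the usual widening/splat idiom.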
2838 //===---------------------------------------------------------------------===//
2839 // SSE2 - Packed Integer Extract and Insert
2840 //===---------------------------------------------------------------------===//
2842 let ExeDomain = SSEPackedInt in {
2843 multiclass sse2_pinsrw<bit Is2Addr = 1> {
2844 def rri : Ii8<0xC4, MRMSrcReg,
2845 (outs VR128:$dst), (ins VR128:$src1,
2846 GR32:$src2, i32i8imm:$src3),
2847 !if(Is2Addr,
2848 "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
2849 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
2850 [(set VR128:$dst,
2851 (X86pinsrw VR128:$src1, GR32:$src2, imm:$src3))]>;
2852 def rmi : Ii8<0xC4, MRMSrcMem,
2853 (outs VR128:$dst), (ins VR128:$src1,
2854 i16mem:$src2, i32i8imm:$src3),
2855 !if(Is2Addr,
2856 "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
2857 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
2858 [(set VR128:$dst,
2859 (X86pinsrw VR128:$src1, (extloadi16 addr:$src2),
2860 imm:$src3))]>;
2861 }
2864 let isAsmParserOnly = 1, Predicates = [HasAVX] in
2865 def VPEXTRWri : Ii8<0xC5, MRMSrcReg,
2866 (outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
2867 "vpextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2868 [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
2869 imm:$src2))]>, OpSize, VEX;
2870 def PEXTRWri : PDIi8<0xC5, MRMSrcReg,
2871 (outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
2872 "pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2873 [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
2874 imm:$src2))]>, OpSize;
2877 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
2878 defm VPINSRW : sse2_pinsrw<0>, OpSize, VEX_4V;
2879 def VPINSRWrr64i : Ii8<0xC4, MRMSrcReg, (outs VR128:$dst),
2880 (ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
2881 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
2882 []>, OpSize, VEX_4V;
2883 }
2885 let Constraints = "$src1 = $dst" in
2886 defm PINSRW : sse2_pinsrw, TB, OpSize, Requires<[HasSSE2]>;
2888 } // ExeDomain = SSEPackedInt
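// Added note: pinsrw replaces a single 16-bit lane, selected by the
// immediate, with the low word of a GR32 (or a loaded i16, hence the
// extloadi16 pattern above); pextrw does the inverse and zero-extends the
// selected word into the 32-bit destination.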
2890 //===---------------------------------------------------------------------===//
2891 // SSE2 - Packed Mask Creation
2892 //===---------------------------------------------------------------------===//
2894 let ExeDomain = SSEPackedInt in {
2896 let isAsmParserOnly = 1 in {
2897 def VPMOVMSKBrr : VPDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
2898 "pmovmskb\t{$src, $dst|$dst, $src}",
2899 [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>, VEX;
2900 def VPMOVMSKBr64r : VPDI<0xD7, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
2901 "pmovmskb\t{$src, $dst|$dst, $src}", []>, VEX;
2903 def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
2904 "pmovmskb\t{$src, $dst|$dst, $src}",
2905 [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>;
2907 } // ExeDomain = SSEPackedInt
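// Added note: pmovmskb gathers the sign bit of each of the 16 bytes into
// the low 16 bits of the GR32 destination (upper bits zero), e.g. after a
// pcmpeqb this yields a bitmask of which byte lanes matched.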
2909 //===---------------------------------------------------------------------===//
2910 // SSE2 - Conditional Store
2911 //===---------------------------------------------------------------------===//
2913 let ExeDomain = SSEPackedInt in {
2915 let isAsmParserOnly = 1 in {
2917 def VMASKMOVDQU : VPDI<0xF7, MRMSrcReg, (outs),
2918 (ins VR128:$src, VR128:$mask),
2919 "maskmovdqu\t{$mask, $src|$src, $mask}",
2920 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>, VEX;
2922 def VMASKMOVDQU64 : VPDI<0xF7, MRMSrcReg, (outs),
2923 (ins VR128:$src, VR128:$mask),
2924 "maskmovdqu\t{$mask, $src|$src, $mask}",
2925 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)]>, VEX;
2926 }
2929 def MASKMOVDQU : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
2930 "maskmovdqu\t{$mask, $src|$src, $mask}",
2931 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>;
2933 def MASKMOVDQU64 : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
2934 "maskmovdqu\t{$mask, $src|$src, $mask}",
2935 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)]>;
2937 } // ExeDomain = SSEPackedInt
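// Added note: maskmovdqu has an implicit destination pointer in EDI (RDI in
// 64-bit mode), which is why the register shows up as a pattern operand but
// not in the ins list; only bytes whose mask element has its high bit set
// are written, making this a byte-granular conditional store.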
2939 //===---------------------------------------------------------------------===//
2940 // SSE2 - Move Doubleword
2941 //===---------------------------------------------------------------------===//
2943 // Move Int Doubleword to Packed Double Int
2944 let isAsmParserOnly = 1 in {
2945 def VMOVDI2PDIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
2946 "movd\t{$src, $dst|$dst, $src}",
2948 (v4i32 (scalar_to_vector GR32:$src)))]>, VEX;
2949 def VMOVDI2PDIrm : VPDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
2950 "movd\t{$src, $dst|$dst, $src}",
2952 (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>,
2955 def MOVDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
2956 "movd\t{$src, $dst|$dst, $src}",
2958 (v4i32 (scalar_to_vector GR32:$src)))]>;
2959 def MOVDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
2960 "movd\t{$src, $dst|$dst, $src}",
2962 (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>;
2965 // Move Int Doubleword to Single Scalar
2966 let isAsmParserOnly = 1 in {
2967 def VMOVDI2SSrr : VPDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
2968 "movd\t{$src, $dst|$dst, $src}",
2969 [(set FR32:$dst, (bitconvert GR32:$src))]>, VEX;
2971 def VMOVDI2SSrm : VPDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
2972 "movd\t{$src, $dst|$dst, $src}",
2973 [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>,
2974 VEX;
2975 }
2976 def MOVDI2SSrr : PDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
2977 "movd\t{$src, $dst|$dst, $src}",
2978 [(set FR32:$dst, (bitconvert GR32:$src))]>;
2980 def MOVDI2SSrm : PDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
2981 "movd\t{$src, $dst|$dst, $src}",
2982 [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>;
2984 // Move Packed Double Int to Doubleword Int
2985 let isAsmParserOnly = 1 in {
2986 def VMOVPDI2DIrr : VPDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
2987 "movd\t{$src, $dst|$dst, $src}",
2988 [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
2989 (iPTR 0)))]>, VEX;
2990 def VMOVPDI2DImr : VPDI<0x7E, MRMDestMem, (outs),
2991 (ins i32mem:$dst, VR128:$src),
2992 "movd\t{$src, $dst|$dst, $src}",
2993 [(store (i32 (vector_extract (v4i32 VR128:$src),
2994 (iPTR 0))), addr:$dst)]>, VEX;
2995 }
2996 def MOVPDI2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
2997 "movd\t{$src, $dst|$dst, $src}",
2998 [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
2999 (iPTR 0)))]>;
3000 def MOVPDI2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR128:$src),
3001 "movd\t{$src, $dst|$dst, $src}",
3002 [(store (i32 (vector_extract (v4i32 VR128:$src),
3003 (iPTR 0))), addr:$dst)]>;
3005 // Move Scalar Single to Double Int
3006 let isAsmParserOnly = 1 in {
3007 def VMOVSS2DIrr : VPDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
3008 "movd\t{$src, $dst|$dst, $src}",
3009 [(set GR32:$dst, (bitconvert FR32:$src))]>, VEX;
3010 def VMOVSS2DImr : VPDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
3011 "movd\t{$src, $dst|$dst, $src}",
3012 [(store (i32 (bitconvert FR32:$src)), addr:$dst)]>, VEX;
3013 }
3014 def MOVSS2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
3015 "movd\t{$src, $dst|$dst, $src}",
3016 [(set GR32:$dst, (bitconvert FR32:$src))]>;
3017 def MOVSS2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
3018 "movd\t{$src, $dst|$dst, $src}",
3019 [(store (i32 (bitconvert FR32:$src)), addr:$dst)]>;
3021 // movd / movq to XMM register zero-extends
3022 let AddedComplexity = 15, isAsmParserOnly = 1 in {
3023 def VMOVZDI2PDIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
3024 "movd\t{$src, $dst|$dst, $src}",
3025 [(set VR128:$dst, (v4i32 (X86vzmovl
3026 (v4i32 (scalar_to_vector GR32:$src)))))]>,
3027 VEX;
3028 def VMOVZQI2PQIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
3029 "mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only
3030 [(set VR128:$dst, (v2i64 (X86vzmovl
3031 (v2i64 (scalar_to_vector GR64:$src)))))]>,
3032 VEX, VEX_W;
3033 }
3034 let AddedComplexity = 15 in {
3035 def MOVZDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
3036 "movd\t{$src, $dst|$dst, $src}",
3037 [(set VR128:$dst, (v4i32 (X86vzmovl
3038 (v4i32 (scalar_to_vector GR32:$src)))))]>;
3039 def MOVZQI2PQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
3040 "mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only
3041 [(set VR128:$dst, (v2i64 (X86vzmovl
3042 (v2i64 (scalar_to_vector GR64:$src)))))]>;
3043 }
3045 let AddedComplexity = 20 in {
3046 let isAsmParserOnly = 1 in
3047 def VMOVZDI2PDIrm : VPDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
3048 "movd\t{$src, $dst|$dst, $src}",
3050 (v4i32 (X86vzmovl (v4i32 (scalar_to_vector
3051 (loadi32 addr:$src))))))]>,
3053 def MOVZDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
3054 "movd\t{$src, $dst|$dst, $src}",
3056 (v4i32 (X86vzmovl (v4i32 (scalar_to_vector
3057 (loadi32 addr:$src))))))]>;
3059 def : Pat<(v4i32 (X86vzmovl (loadv4i32 addr:$src))),
3060 (MOVZDI2PDIrm addr:$src)>;
3061 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
3062 (MOVZDI2PDIrm addr:$src)>;
3063 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
3064 (MOVZDI2PDIrm addr:$src)>;
3065 }
3067 //===---------------------------------------------------------------------===//
3068 // SSE2 - Move Quadword
3069 //===---------------------------------------------------------------------===//
3071 // Move Quadword Int to Packed Quadword Int
3072 let isAsmParserOnly = 1 in
3073 def VMOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3074 "vmovq\t{$src, $dst|$dst, $src}",
3076 (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
3077 VEX, Requires<[HasAVX]>;
3078 def MOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3079 "movq\t{$src, $dst|$dst, $src}",
3081 (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
3082 Requires<[HasSSE2]>; // SSE2 instruction with XS Prefix
3084 // Move Packed Quadword Int to Quadword Int
3085 let isAsmParserOnly = 1 in
3086 def VMOVPQI2QImr : VPDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3087 "movq\t{$src, $dst|$dst, $src}",
3088 [(store (i64 (vector_extract (v2i64 VR128:$src),
3089 (iPTR 0))), addr:$dst)]>, VEX;
3090 def MOVPQI2QImr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3091 "movq\t{$src, $dst|$dst, $src}",
3092 [(store (i64 (vector_extract (v2i64 VR128:$src),
3093 (iPTR 0))), addr:$dst)]>;
3095 def : Pat<(f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
3096 (f64 (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd))>;
3098 // Store / copy the lower 64 bits of an XMM register.
3099 let isAsmParserOnly = 1 in
3100 def VMOVLQ128mr : VPDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3101 "movq\t{$src, $dst|$dst, $src}",
3102 [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>, VEX;
3103 def MOVLQ128mr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3104 "movq\t{$src, $dst|$dst, $src}",
3105 [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>;
3107 let AddedComplexity = 20, isAsmParserOnly = 1 in
3108 def VMOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3109 "vmovq\t{$src, $dst|$dst, $src}",
3111 (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
3112 (loadi64 addr:$src))))))]>,
3113 XS, VEX, Requires<[HasAVX]>;
3115 let AddedComplexity = 20 in {
3116 def MOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3117 "movq\t{$src, $dst|$dst, $src}",
3119 (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
3120 (loadi64 addr:$src))))))]>,
3121 XS, Requires<[HasSSE2]>;
3123 def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
3124 (MOVZQI2PQIrm addr:$src)>;
3125 def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4f32 addr:$src)))),
3126 (MOVZQI2PQIrm addr:$src)>;
3127 def : Pat<(v2i64 (X86vzload addr:$src)), (MOVZQI2PQIrm addr:$src)>;
3130 // Moving from XMM to XMM and clearing the upper 64 bits. Note: there is a
3131 // bug in the IA-32 documentation; movq xmm1, xmm2 does clear the high bits.
3132 let isAsmParserOnly = 1, AddedComplexity = 15 in
3133 def VMOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3134 "vmovq\t{$src, $dst|$dst, $src}",
3135 [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
3136 XS, VEX, Requires<[HasAVX]>;
3137 let AddedComplexity = 15 in
3138 def MOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3139 "movq\t{$src, $dst|$dst, $src}",
3140 [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
3141 XS, Requires<[HasSSE2]>;
3143 let AddedComplexity = 20, isAsmParserOnly = 1 in
3144 def VMOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3145 "vmovq\t{$src, $dst|$dst, $src}",
3146 [(set VR128:$dst, (v2i64 (X86vzmovl
3147 (loadv2i64 addr:$src))))]>,
3148 XS, VEX, Requires<[HasAVX]>;
3149 let AddedComplexity = 20 in {
3150 def MOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3151 "movq\t{$src, $dst|$dst, $src}",
3152 [(set VR128:$dst, (v2i64 (X86vzmovl
3153 (loadv2i64 addr:$src))))]>,
3154 XS, Requires<[HasSSE2]>;
3156 def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4i32 addr:$src)))),
3157 (MOVZPQILo2PQIrm addr:$src)>;
3158 }
3160 // Instructions to match in the assembler
3161 let isAsmParserOnly = 1 in {
3162 def VMOVQs64rr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
3163 "movq\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
3164 def VMOVQd64rr : VPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
3165 "movq\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
3166 // Recognize "movd" with GR64 destination, but encode as a "movq"
3167 def VMOVQd64rr_alt : VPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
3168 "movd\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
3171 // Instructions for the disassembler
3172 // xr = XMM register
3173 // xm = mem64
3175 let isAsmParserOnly = 1, Predicates = [HasAVX] in
3176 def VMOVQxrxr: I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3177 "vmovq\t{$src, $dst|$dst, $src}", []>, VEX, XS;
3178 def MOVQxrxr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3179 "movq\t{$src, $dst|$dst, $src}", []>, XS;
3181 //===---------------------------------------------------------------------===//
3182 // SSE2 - Misc Instructions
3183 //===---------------------------------------------------------------------===//
3185 // Flush cache
3186 def CLFLUSH : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
3187 "clflush\t$src", [(int_x86_sse2_clflush addr:$src)]>,
3188 TB, Requires<[HasSSE2]>;
3190 // Load, store, and memory fence
3191 def LFENCE : I<0xAE, MRM_E8, (outs), (ins),
3192 "lfence", [(int_x86_sse2_lfence)]>, TB, Requires<[HasSSE2]>;
3193 def MFENCE : I<0xAE, MRM_F0, (outs), (ins),
3194 "mfence", [(int_x86_sse2_mfence)]>, TB, Requires<[HasSSE2]>;
3195 def : Pat<(X86LFence), (LFENCE)>;
3196 def : Pat<(X86MFence), (MFENCE)>;
3199 // Pause. This "instruction" is encoded as "rep; nop", so even though it
3200 // was introduced with SSE2, it's backward compatible.
3201 def PAUSE : I<0x90, RawFrm, (outs), (ins), "pause", []>, REP;
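// (Added) Concretely, "rep; nop" is the byte sequence F3 90; CPUs without
// SSE2 simply execute it as a NOP, which is why PAUSE needs no predicate.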
3203 // Alias instruction that maps an all-ones vector to pcmpeqd.
3204 // We set canFoldAsLoad because this can be converted to a constant-pool
3205 // load of an all-ones value if folding it would be beneficial.
3206 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
3207 isCodeGenOnly = 1, ExeDomain = SSEPackedInt in
3208 // FIXME: Change encoding to pseudo.
3209 def V_SETALLONES : PDI<0x76, MRMInitReg, (outs VR128:$dst), (ins), "",
3210 [(set VR128:$dst, (v4i32 immAllOnesV))]>;
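// Added note: the encoding chosen here is "pcmpeqd %xmmN, %xmmN" (opcode
// 0x76 with matching source and destination), which yields all ones in
// every lane regardless of the register's previous contents.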
3212 //===---------------------------------------------------------------------===//
3213 // SSE3 - Conversion Instructions
3214 //===---------------------------------------------------------------------===//
3216 // Convert Packed Double FP to Packed DW Integers
3217 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
3218 // The assembler can recognize rr 256-bit instructions by seeing a ymm
3219 // register, but the same isn't true when using memory operands instead.
3220 // Provide other assembly rr and rm forms to address this explicitly.
3221 def VCVTPD2DQrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3222 "vcvtpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
3223 def VCVTPD2DQXrYr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
3224 "vcvtpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
3227 def VCVTPD2DQXrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3228 "vcvtpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
3229 def VCVTPD2DQXrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3230 "vcvtpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
3233 def VCVTPD2DQYrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
3234 "vcvtpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX;
3235 def VCVTPD2DQYrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
3236 "vcvtpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
3239 def CVTPD2DQrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3240 "cvtpd2dq\t{$src, $dst|$dst, $src}", []>;
3241 def CVTPD2DQrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3242 "cvtpd2dq\t{$src, $dst|$dst, $src}", []>;
3244 // Convert Packed DW Integers to Packed Double FP
3245 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
3246 def VCVTDQ2PDrm : S3SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3247 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
3248 def VCVTDQ2PDrr : S3SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3249 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
3250 def VCVTDQ2PDYrm : S3SI<0xE6, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
3251 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
3252 def VCVTDQ2PDYrr : S3SI<0xE6, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
3253 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
3256 def CVTDQ2PDrm : S3SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3257 "cvtdq2pd\t{$src, $dst|$dst, $src}", []>;
3258 def CVTDQ2PDrr : S3SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3259 "cvtdq2pd\t{$src, $dst|$dst, $src}", []>;
3261 // AVX 256-bit register conversion intrinsics
3262 def : Pat<(int_x86_avx_cvtdq2_pd_256 VR128:$src),
3263 (VCVTDQ2PDYrr VR128:$src)>;
3264 def : Pat<(int_x86_avx_cvtdq2_pd_256 (memopv4i32 addr:$src)),
3265 (VCVTDQ2PDYrm addr:$src)>;
3267 def : Pat<(int_x86_avx_cvt_pd2dq_256 VR256:$src),
3268 (VCVTPD2DQYrr VR256:$src)>;
3269 def : Pat<(int_x86_avx_cvt_pd2dq_256 (memopv4f64 addr:$src)),
3270 (VCVTPD2DQYrm addr:$src)>;
3272 //===---------------------------------------------------------------------===//
3273 // SSE3 - Move Instructions
3274 //===---------------------------------------------------------------------===//
3276 // Replicate Single FP
3277 multiclass sse3_replicate_sfp<bits<8> op, PatFrag rep_frag, string OpcodeStr> {
3278 def rr : S3SI<op, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3279 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3280 [(set VR128:$dst, (v4f32 (rep_frag
3281 VR128:$src, (undef))))]>;
3282 def rm : S3SI<op, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3283 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3284 [(set VR128:$dst, (rep_frag
3285 (memopv4f32 addr:$src), (undef)))]>;
3286 }
3288 multiclass sse3_replicate_sfp_y<bits<8> op, PatFrag rep_frag,
3289 string OpcodeStr> {
3290 def rr : S3SI<op, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
3291 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>;
3292 def rm : S3SI<op, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
3293 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>;
3294 }
3296 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
3297 // FIXME: Merge above classes when we have patterns for the ymm version
3298 defm VMOVSHDUP : sse3_replicate_sfp<0x16, movshdup, "vmovshdup">, VEX;
3299 defm VMOVSLDUP : sse3_replicate_sfp<0x12, movsldup, "vmovsldup">, VEX;
3300 defm VMOVSHDUPY : sse3_replicate_sfp_y<0x16, movshdup, "vmovshdup">, VEX;
3301 defm VMOVSLDUPY : sse3_replicate_sfp_y<0x12, movsldup, "vmovsldup">, VEX;
3302 }
3303 defm MOVSHDUP : sse3_replicate_sfp<0x16, movshdup, "movshdup">;
3304 defm MOVSLDUP : sse3_replicate_sfp<0x12, movsldup, "movsldup">;
3306 // Replicate Double FP
3307 multiclass sse3_replicate_dfp<string OpcodeStr> {
3308 def rr : S3DI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3309 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3310 [(set VR128:$dst,(v2f64 (movddup VR128:$src, (undef))))]>;
3311 def rm : S3DI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
3312 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3313 [(set VR128:$dst,
3314 (v2f64 (movddup (scalar_to_vector (loadf64 addr:$src)),
3315 (undef))))]>;
3316 }
3318 multiclass sse3_replicate_dfp_y<string OpcodeStr> {
3319 def rr : S3DI<0x12, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
3320 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3321 []>;
3322 def rm : S3DI<0x12, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
3323 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3324 []>;
3325 }
3327 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
3328 // FIXME: Merge above classes when we have patterns for the ymm version
3329 defm VMOVDDUP : sse3_replicate_dfp<"vmovddup">, VEX;
3330 defm VMOVDDUPY : sse3_replicate_dfp_y<"vmovddup">, VEX;
3331 }
3332 defm MOVDDUP : sse3_replicate_dfp<"movddup">;
3334 // Move Unaligned Integer
3335 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
3336 def VLDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3337 "vlddqu\t{$src, $dst|$dst, $src}",
3338 [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>, VEX;
3339 def VLDDQUYrm : S3DI<0xF0, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
3340 "vlddqu\t{$src, $dst|$dst, $src}",
3341 [(set VR256:$dst, (int_x86_avx_ldu_dq_256 addr:$src))]>, VEX;
3342 }
3343 def LDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3344 "lddqu\t{$src, $dst|$dst, $src}",
3345 [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>;
3347 def : Pat<(movddup (bc_v2f64 (v2i64 (scalar_to_vector (loadi64 addr:$src)))),
3348 (undef)),
3349 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3351 // Several Move patterns
3352 let AddedComplexity = 5 in {
3353 def : Pat<(movddup (memopv2f64 addr:$src), (undef)),
3354 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3355 def : Pat<(movddup (bc_v4f32 (memopv2f64 addr:$src)), (undef)),
3356 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3357 def : Pat<(movddup (memopv2i64 addr:$src), (undef)),
3358 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3359 def : Pat<(movddup (bc_v4i32 (memopv2i64 addr:$src)), (undef)),
3360 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3361 }
3363 // vector_shuffle v1, <undef> <1, 1, 3, 3>
3364 let AddedComplexity = 15 in
3365 def : Pat<(v4i32 (movshdup VR128:$src, (undef))),
3366 (MOVSHDUPrr VR128:$src)>, Requires<[HasSSE3]>;
3367 let AddedComplexity = 20 in
3368 def : Pat<(v4i32 (movshdup (bc_v4i32 (memopv2i64 addr:$src)), (undef))),
3369 (MOVSHDUPrm addr:$src)>, Requires<[HasSSE3]>;
3371 // vector_shuffle v1, <undef> <0, 0, 2, 2>
3372 let AddedComplexity = 15 in
3373 def : Pat<(v4i32 (movsldup VR128:$src, (undef))),
3374 (MOVSLDUPrr VR128:$src)>, Requires<[HasSSE3]>;
3375 let AddedComplexity = 20 in
3376 def : Pat<(v4i32 (movsldup (bc_v4i32 (memopv2i64 addr:$src)), (undef))),
3377 (MOVSLDUPrm addr:$src)>, Requires<[HasSSE3]>;
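// Added note on the duplication shuffles, e.g. for x = [x0,x1,x2,x3]:
//   movshdup x -> [x1,x1,x3,x3]   (duplicate odd elements, mask <1,1,3,3>)
//   movsldup x -> [x0,x0,x2,x2]   (duplicate even elements, mask <0,0,2,2>)
// matching the vector_shuffle masks in the patterns above.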
3379 //===---------------------------------------------------------------------===//
3380 // SSE3 - Arithmetic
3381 //===---------------------------------------------------------------------===//
3383 multiclass sse3_addsub<Intrinsic Int, string OpcodeStr, RegisterClass RC,
3384 X86MemOperand x86memop, bit Is2Addr = 1> {
3385 def rr : I<0xD0, MRMSrcReg,
3386 (outs RC:$dst), (ins RC:$src1, RC:$src2),
3387 !if(Is2Addr,
3388 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3389 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3390 [(set RC:$dst, (Int RC:$src1, RC:$src2))]>;
3391 def rm : I<0xD0, MRMSrcMem,
3392 (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
3393 !if(Is2Addr,
3394 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3395 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3396 [(set RC:$dst, (Int RC:$src1, (memop addr:$src2)))]>;
3397 }
3399 let isAsmParserOnly = 1, Predicates = [HasAVX],
3400 ExeDomain = SSEPackedDouble in {
3401 defm VADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "vaddsubps", VR128,
3402 f128mem, 0>, XD, VEX_4V;
3403 defm VADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "vaddsubpd", VR128,
3404 f128mem, 0>, OpSize, VEX_4V;
3405 defm VADDSUBPSY : sse3_addsub<int_x86_avx_addsub_ps_256, "vaddsubps", VR256,
3406 f256mem, 0>, XD, VEX_4V;
3407 defm VADDSUBPDY : sse3_addsub<int_x86_avx_addsub_pd_256, "vaddsubpd", VR256,
3408 f256mem, 0>, OpSize, VEX_4V;
3409 }
3410 let Constraints = "$src1 = $dst", Predicates = [HasSSE3],
3411 ExeDomain = SSEPackedDouble in {
3412 defm ADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "addsubps", VR128,
3413 f128mem>, TB, XD;
3414 defm ADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "addsubpd", VR128,
3415 f128mem>, TB, OpSize;
3416 }
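// Added note: addsubps/addsubpd alternate subtract and add across lanes,
// e.g. addsubps gives [a0-b0, a1+b1, a2-b2, a3+b3]; even lanes subtract and
// odd lanes add, which is the complex-arithmetic helper these intrinsics
// exist for.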
3418 //===---------------------------------------------------------------------===//
3419 // SSE3 Instructions
3420 //===---------------------------------------------------------------------===//
3422 // Horizontal ops
3423 multiclass S3D_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
3424 X86MemOperand x86memop, Intrinsic IntId, bit Is2Addr = 1> {
3425 def rr : S3DI<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
3426 !if(Is2Addr,
3427 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3428 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3429 [(set RC:$dst, (vt (IntId RC:$src1, RC:$src2)))]>;
3431 def rm : S3DI<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
3432 !if(Is2Addr,
3433 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3434 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3435 [(set RC:$dst, (vt (IntId RC:$src1, (memop addr:$src2))))]>;
3436 }
3437 multiclass S3_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
3438 X86MemOperand x86memop, Intrinsic IntId, bit Is2Addr = 1> {
3439 def rr : S3I<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
3440 !if(Is2Addr,
3441 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3442 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3443 [(set RC:$dst, (vt (IntId RC:$src1, RC:$src2)))]>;
3445 def rm : S3I<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
3446 !if(Is2Addr,
3447 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3448 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3449 [(set RC:$dst, (vt (IntId RC:$src1, (memop addr:$src2))))]>;
3450 }
3452 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
3453 defm VHADDPS : S3D_Int<0x7C, "vhaddps", v4f32, VR128, f128mem,
3454 int_x86_sse3_hadd_ps, 0>, VEX_4V;
3455 defm VHADDPD : S3_Int <0x7C, "vhaddpd", v2f64, VR128, f128mem,
3456 int_x86_sse3_hadd_pd, 0>, VEX_4V;
3457 defm VHSUBPS : S3D_Int<0x7D, "vhsubps", v4f32, VR128, f128mem,
3458 int_x86_sse3_hsub_ps, 0>, VEX_4V;
3459 defm VHSUBPD : S3_Int <0x7D, "vhsubpd", v2f64, VR128, f128mem,
3460 int_x86_sse3_hsub_pd, 0>, VEX_4V;
3461 defm VHADDPSY : S3D_Int<0x7C, "vhaddps", v8f32, VR256, f256mem,
3462 int_x86_avx_hadd_ps_256, 0>, VEX_4V;
3463 defm VHADDPDY : S3_Int <0x7C, "vhaddpd", v4f64, VR256, f256mem,
3464 int_x86_avx_hadd_pd_256, 0>, VEX_4V;
3465 defm VHSUBPSY : S3D_Int<0x7D, "vhsubps", v8f32, VR256, f256mem,
3466 int_x86_avx_hsub_ps_256, 0>, VEX_4V;
3467 defm VHSUBPDY : S3_Int <0x7D, "vhsubpd", v4f64, VR256, f256mem,
3468 int_x86_avx_hsub_pd_256, 0>, VEX_4V;
3469 }
3471 let Constraints = "$src1 = $dst" in {
3472 defm HADDPS : S3D_Int<0x7C, "haddps", v4f32, VR128, f128mem,
3473 int_x86_sse3_hadd_ps>;
3474 defm HADDPD : S3_Int<0x7C, "haddpd", v2f64, VR128, f128mem,
3475 int_x86_sse3_hadd_pd>;
3476 defm HSUBPS : S3D_Int<0x7D, "hsubps", v4f32, VR128, f128mem,
3477 int_x86_sse3_hsub_ps>;
3478 defm HSUBPD : S3_Int<0x7D, "hsubpd", v2f64, VR128, f128mem,
3479 int_x86_sse3_hsub_pd>;
3480 }
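// Added note: the horizontal ops reduce adjacent pairs within each source,
// e.g. haddps produces [a0+a1, a2+a3, b0+b1, b2+b3]; the hsub forms compute
// the pairwise differences instead.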
3482 //===---------------------------------------------------------------------===//
3483 // SSSE3 - Packed Absolute Instructions
3484 //===---------------------------------------------------------------------===//
3487 /// SS3I_unop_rm_int - Simple SSSE3 unary op whose type can be v*{i8,i16,i32}.
3488 multiclass SS3I_unop_rm_int<bits<8> opc, string OpcodeStr,
3489 PatFrag mem_frag128, Intrinsic IntId128> {
3490 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
3491 (ins VR128:$src),
3492 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3493 [(set VR128:$dst, (IntId128 VR128:$src))]>,
3494 OpSize;
3496 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
3497 (ins i128mem:$src),
3498 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3499 [(set VR128:$dst,
3500 (IntId128
3501 (bitconvert (mem_frag128 addr:$src))))]>, OpSize;
3502 }
3504 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
3505 defm VPABSB : SS3I_unop_rm_int<0x1C, "vpabsb", memopv16i8,
3506 int_x86_ssse3_pabs_b_128>, VEX;
3507 defm VPABSW : SS3I_unop_rm_int<0x1D, "vpabsw", memopv8i16,
3508 int_x86_ssse3_pabs_w_128>, VEX;
3509 defm VPABSD : SS3I_unop_rm_int<0x1E, "vpabsd", memopv4i32,
3510 int_x86_ssse3_pabs_d_128>, VEX;
3511 }
3513 defm PABSB : SS3I_unop_rm_int<0x1C, "pabsb", memopv16i8,
3514 int_x86_ssse3_pabs_b_128>;
3515 defm PABSW : SS3I_unop_rm_int<0x1D, "pabsw", memopv8i16,
3516 int_x86_ssse3_pabs_w_128>;
3517 defm PABSD : SS3I_unop_rm_int<0x1E, "pabsd", memopv4i32,
3518 int_x86_ssse3_pabs_d_128>;
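// Added note: pabs{b,w,d} compute a lane-wise absolute value, e.g.
// pabsw(-5) = 5. As with scalar abs, the most negative input (e.g. -32768
// for pabsw) wraps to itself.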
3520 //===---------------------------------------------------------------------===//
3521 // SSSE3 - Packed Binary Operator Instructions
3522 //===---------------------------------------------------------------------===//
3524 /// SS3I_binop_rm_int - Simple SSSE3 bin op whose type can be v*{i8,i16,i32}.
3525 multiclass SS3I_binop_rm_int<bits<8> opc, string OpcodeStr,
3526 PatFrag mem_frag128, Intrinsic IntId128,
3527 bit Is2Addr = 1> {
3528 let isCommutable = 1 in
3529 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
3530 (ins VR128:$src1, VR128:$src2),
3531 !if(Is2Addr,
3532 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3533 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3534 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
3535 OpSize;
3536 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
3537 (ins VR128:$src1, i128mem:$src2),
3538 !if(Is2Addr,
3539 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3540 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3541 [(set VR128:$dst,
3542 (IntId128 VR128:$src1,
3543 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
3544 }
3546 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
3547 let isCommutable = 0 in {
3548 defm VPHADDW : SS3I_binop_rm_int<0x01, "vphaddw", memopv8i16,
3549 int_x86_ssse3_phadd_w_128, 0>, VEX_4V;
3550 defm VPHADDD : SS3I_binop_rm_int<0x02, "vphaddd", memopv4i32,
3551 int_x86_ssse3_phadd_d_128, 0>, VEX_4V;
3552 defm VPHADDSW : SS3I_binop_rm_int<0x03, "vphaddsw", memopv8i16,
3553 int_x86_ssse3_phadd_sw_128, 0>, VEX_4V;
3554 defm VPHSUBW : SS3I_binop_rm_int<0x05, "vphsubw", memopv8i16,
3555 int_x86_ssse3_phsub_w_128, 0>, VEX_4V;
3556 defm VPHSUBD : SS3I_binop_rm_int<0x06, "vphsubd", memopv4i32,
3557 int_x86_ssse3_phsub_d_128, 0>, VEX_4V;
3558 defm VPHSUBSW : SS3I_binop_rm_int<0x07, "vphsubsw", memopv8i16,
3559 int_x86_ssse3_phsub_sw_128, 0>, VEX_4V;
3560 defm VPMADDUBSW : SS3I_binop_rm_int<0x04, "vpmaddubsw", memopv16i8,
3561 int_x86_ssse3_pmadd_ub_sw_128, 0>, VEX_4V;
3562 defm VPSHUFB : SS3I_binop_rm_int<0x00, "vpshufb", memopv16i8,
3563 int_x86_ssse3_pshuf_b_128, 0>, VEX_4V;
3564 defm VPSIGNB : SS3I_binop_rm_int<0x08, "vpsignb", memopv16i8,
3565 int_x86_ssse3_psign_b_128, 0>, VEX_4V;
3566 defm VPSIGNW : SS3I_binop_rm_int<0x09, "vpsignw", memopv8i16,
3567 int_x86_ssse3_psign_w_128, 0>, VEX_4V;
3568 defm VPSIGND : SS3I_binop_rm_int<0x0A, "vpsignd", memopv4i32,
3569 int_x86_ssse3_psign_d_128, 0>, VEX_4V;
3570 }
3571 defm VPMULHRSW : SS3I_binop_rm_int<0x0B, "vpmulhrsw", memopv8i16,
3572 int_x86_ssse3_pmul_hr_sw_128, 0>, VEX_4V;
3573 }
3575 // None of these have i8 immediate fields.
3576 let ImmT = NoImm, Constraints = "$src1 = $dst" in {
3577 let isCommutable = 0 in {
3578 defm PHADDW : SS3I_binop_rm_int<0x01, "phaddw", memopv8i16,
3579 int_x86_ssse3_phadd_w_128>;
3580 defm PHADDD : SS3I_binop_rm_int<0x02, "phaddd", memopv4i32,
3581 int_x86_ssse3_phadd_d_128>;
3582 defm PHADDSW : SS3I_binop_rm_int<0x03, "phaddsw", memopv8i16,
3583 int_x86_ssse3_phadd_sw_128>;
3584 defm PHSUBW : SS3I_binop_rm_int<0x05, "phsubw", memopv8i16,
3585 int_x86_ssse3_phsub_w_128>;
3586 defm PHSUBD : SS3I_binop_rm_int<0x06, "phsubd", memopv4i32,
3587 int_x86_ssse3_phsub_d_128>;
3588 defm PHSUBSW : SS3I_binop_rm_int<0x07, "phsubsw", memopv8i16,
3589 int_x86_ssse3_phsub_sw_128>;
3590 defm PMADDUBSW : SS3I_binop_rm_int<0x04, "pmaddubsw", memopv16i8,
3591 int_x86_ssse3_pmadd_ub_sw_128>;
3592 defm PSHUFB : SS3I_binop_rm_int<0x00, "pshufb", memopv16i8,
3593 int_x86_ssse3_pshuf_b_128>;
3594 defm PSIGNB : SS3I_binop_rm_int<0x08, "psignb", memopv16i8,
3595 int_x86_ssse3_psign_b_128>;
3596 defm PSIGNW : SS3I_binop_rm_int<0x09, "psignw", memopv8i16,
3597 int_x86_ssse3_psign_w_128>;
3598 defm PSIGND : SS3I_binop_rm_int<0x0A, "psignd", memopv4i32,
3599 int_x86_ssse3_psign_d_128>;
3600 }
3601 defm PMULHRSW : SS3I_binop_rm_int<0x0B, "pmulhrsw", memopv8i16,
3602 int_x86_ssse3_pmul_hr_sw_128>;
3603 }
3605 def : Pat<(X86pshufb VR128:$src, VR128:$mask),
3606 (PSHUFBrr128 VR128:$src, VR128:$mask)>, Requires<[HasSSSE3]>;
3607 def : Pat<(X86pshufb VR128:$src, (bc_v16i8 (memopv2i64 addr:$mask))),
3608 (PSHUFBrm128 VR128:$src, addr:$mask)>, Requires<[HasSSSE3]>;
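// Added note: pshufb is a byte-table lookup driven by the mask operand,
// roughly: dst[i] = (mask[i] & 0x80) ? 0 : src[mask[i] & 0x0F]
// so the high bit of each mask byte forces a zero and the low four bits
// pick the source byte.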
3610 //===---------------------------------------------------------------------===//
3611 // SSSE3 - Packed Align Instruction Patterns
3612 //===---------------------------------------------------------------------===//
3614 multiclass ssse3_palign<string asm, bit Is2Addr = 1> {
3615 def R128rr : SS3AI<0x0F, MRMSrcReg, (outs VR128:$dst),
3616 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
3617 !if(Is2Addr,
3618 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3619 !strconcat(asm,
3620 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
3621 []>, OpSize;
3622 def R128rm : SS3AI<0x0F, MRMSrcMem, (outs VR128:$dst),
3623 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
3624 !if(Is2Addr,
3625 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3626 !strconcat(asm,
3627 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
3628 []>, OpSize;
3629 }
3631 let isAsmParserOnly = 1, Predicates = [HasAVX] in
3632 defm VPALIGN : ssse3_palign<"vpalignr", 0>, VEX_4V;
3633 let Constraints = "$src1 = $dst" in
3634 defm PALIGN : ssse3_palign<"palignr">;
3636 let AddedComplexity = 5 in {
3637 def : Pat<(v4i32 (palign:$src3 VR128:$src1, VR128:$src2)),
3638 (PALIGNR128rr VR128:$src2, VR128:$src1,
3639 (SHUFFLE_get_palign_imm VR128:$src3))>,
3640 Requires<[HasSSSE3]>;
3641 def : Pat<(v4f32 (palign:$src3 VR128:$src1, VR128:$src2)),
3642 (PALIGNR128rr VR128:$src2, VR128:$src1,
3643 (SHUFFLE_get_palign_imm VR128:$src3))>,
3644 Requires<[HasSSSE3]>;
3645 def : Pat<(v8i16 (palign:$src3 VR128:$src1, VR128:$src2)),
3646 (PALIGNR128rr VR128:$src2, VR128:$src1,
3647 (SHUFFLE_get_palign_imm VR128:$src3))>,
3648 Requires<[HasSSSE3]>;
3649 def : Pat<(v16i8 (palign:$src3 VR128:$src1, VR128:$src2)),
3650 (PALIGNR128rr VR128:$src2, VR128:$src1,
3651 (SHUFFLE_get_palign_imm VR128:$src3))>,
3652 Requires<[HasSSSE3]>;
3653 }
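// Added note: palignr concatenates its first (high) and second (low)
// operands and extracts 16 bytes starting at the byte offset in the
// immediate, which is why the patterns above swap the operands and convert
// the shuffle mask to a byte count via SHUFFLE_get_palign_imm.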
3655 //===---------------------------------------------------------------------===//
3656 // SSSE3 Misc Instructions
3657 //===---------------------------------------------------------------------===//
3659 // Thread synchronization
3660 def MONITOR : I<0x01, MRM_C8, (outs), (ins), "monitor",
3661 [(int_x86_sse3_monitor EAX, ECX, EDX)]>, TB, Requires<[HasSSE3]>;
3662 def MWAIT : I<0x01, MRM_C9, (outs), (ins), "mwait",
3663 [(int_x86_sse3_mwait ECX, EAX)]>, TB, Requires<[HasSSE3]>;
3665 //===---------------------------------------------------------------------===//
3666 // Non-Instruction Patterns
3667 //===---------------------------------------------------------------------===//
3669 // extload f32 -> f64. This matches load+fextend because we have a hack in
3670 // the isel (PreprocessForFPConvert) that can introduce loads after dag
3671 // combine.
3672 // Since these loads aren't folded into the fextend, we have to match it
3673 // explicitly here.
3674 let Predicates = [HasSSE2] in
3675 def : Pat<(fextend (loadf32 addr:$src)),
3676 (CVTSS2SDrm addr:$src)>;
3678 // bit_convert
3679 let Predicates = [HasSSE2] in {
3680 def : Pat<(v2i64 (bitconvert (v4i32 VR128:$src))), (v2i64 VR128:$src)>;
3681 def : Pat<(v2i64 (bitconvert (v8i16 VR128:$src))), (v2i64 VR128:$src)>;
3682 def : Pat<(v2i64 (bitconvert (v16i8 VR128:$src))), (v2i64 VR128:$src)>;
3683 def : Pat<(v2i64 (bitconvert (v2f64 VR128:$src))), (v2i64 VR128:$src)>;
3684 def : Pat<(v2i64 (bitconvert (v4f32 VR128:$src))), (v2i64 VR128:$src)>;
3685 def : Pat<(v4i32 (bitconvert (v2i64 VR128:$src))), (v4i32 VR128:$src)>;
3686 def : Pat<(v4i32 (bitconvert (v8i16 VR128:$src))), (v4i32 VR128:$src)>;
3687 def : Pat<(v4i32 (bitconvert (v16i8 VR128:$src))), (v4i32 VR128:$src)>;
3688 def : Pat<(v4i32 (bitconvert (v2f64 VR128:$src))), (v4i32 VR128:$src)>;
3689 def : Pat<(v4i32 (bitconvert (v4f32 VR128:$src))), (v4i32 VR128:$src)>;
3690 def : Pat<(v8i16 (bitconvert (v2i64 VR128:$src))), (v8i16 VR128:$src)>;
3691 def : Pat<(v8i16 (bitconvert (v4i32 VR128:$src))), (v8i16 VR128:$src)>;
3692 def : Pat<(v8i16 (bitconvert (v16i8 VR128:$src))), (v8i16 VR128:$src)>;
3693 def : Pat<(v8i16 (bitconvert (v2f64 VR128:$src))), (v8i16 VR128:$src)>;
3694 def : Pat<(v8i16 (bitconvert (v4f32 VR128:$src))), (v8i16 VR128:$src)>;
3695 def : Pat<(v16i8 (bitconvert (v2i64 VR128:$src))), (v16i8 VR128:$src)>;
3696 def : Pat<(v16i8 (bitconvert (v4i32 VR128:$src))), (v16i8 VR128:$src)>;
3697 def : Pat<(v16i8 (bitconvert (v8i16 VR128:$src))), (v16i8 VR128:$src)>;
3698 def : Pat<(v16i8 (bitconvert (v2f64 VR128:$src))), (v16i8 VR128:$src)>;
3699 def : Pat<(v16i8 (bitconvert (v4f32 VR128:$src))), (v16i8 VR128:$src)>;
3700 def : Pat<(v4f32 (bitconvert (v2i64 VR128:$src))), (v4f32 VR128:$src)>;
3701 def : Pat<(v4f32 (bitconvert (v4i32 VR128:$src))), (v4f32 VR128:$src)>;
3702 def : Pat<(v4f32 (bitconvert (v8i16 VR128:$src))), (v4f32 VR128:$src)>;
3703 def : Pat<(v4f32 (bitconvert (v16i8 VR128:$src))), (v4f32 VR128:$src)>;
3704 def : Pat<(v4f32 (bitconvert (v2f64 VR128:$src))), (v4f32 VR128:$src)>;
3705 def : Pat<(v2f64 (bitconvert (v2i64 VR128:$src))), (v2f64 VR128:$src)>;
3706 def : Pat<(v2f64 (bitconvert (v4i32 VR128:$src))), (v2f64 VR128:$src)>;
3707 def : Pat<(v2f64 (bitconvert (v8i16 VR128:$src))), (v2f64 VR128:$src)>;
3708 def : Pat<(v2f64 (bitconvert (v16i8 VR128:$src))), (v2f64 VR128:$src)>;
3709 def : Pat<(v2f64 (bitconvert (v4f32 VR128:$src))), (v2f64 VR128:$src)>;
3710 }
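// Added note: these patterns make bitcasts between any two 128-bit vector
// types free; XMM registers are untyped, so (bitconvert X) simply reuses
// X's register and no instruction is emitted.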
3712 // Move scalar to XMM zero-extended
3713 // movd to XMM register zero-extends
3714 let AddedComplexity = 15 in {
3715 // Zeroing a VR128 then do a MOVS{S|D} to the lower bits.
3716 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64:$src)))),
3717 (MOVSDrr (v2f64 (V_SET0PS)), FR64:$src)>;
3718 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32:$src)))),
3719 (MOVSSrr (v4f32 (V_SET0PS)), FR32:$src)>;
3720 def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
3721 (MOVSSrr (v4f32 (V_SET0PS)),
3722 (f32 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss)))>;
3723 def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
3724 (MOVSSrr (v4i32 (V_SET0PI)),
3725 (EXTRACT_SUBREG (v4i32 VR128:$src), sub_ss))>;
3726 }
3728 // Splat v2f64 / v2i64
3729 let AddedComplexity = 10 in {
3730 def : Pat<(splat_lo (v2f64 VR128:$src), (undef)),
3731 (UNPCKLPDrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
3732 def : Pat<(unpckh (v2f64 VR128:$src), (undef)),
3733 (UNPCKHPDrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
3734 def : Pat<(splat_lo (v2i64 VR128:$src), (undef)),
3735 (PUNPCKLQDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
3736 def : Pat<(unpckh (v2i64 VR128:$src), (undef)),
3737 (PUNPCKHQDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
3738 }
3740 // Special unary SHUFPSrri case.
3741 def : Pat<(v4f32 (pshufd:$src3 VR128:$src1, (undef))),
3742 (SHUFPSrri VR128:$src1, VR128:$src1,
3743 (SHUFFLE_get_shuf_imm VR128:$src3))>;
3744 let AddedComplexity = 5 in
3745 def : Pat<(v4f32 (pshufd:$src2 VR128:$src1, (undef))),
3746 (PSHUFDri VR128:$src1, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3747 Requires<[HasSSE2]>;
3748 // Special unary SHUFPDrri case.
3749 def : Pat<(v2i64 (pshufd:$src3 VR128:$src1, (undef))),
3750 (SHUFPDrri VR128:$src1, VR128:$src1,
3751 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3752 Requires<[HasSSE2]>;
3753 // Special unary SHUFPDrri case.
3754 def : Pat<(v2f64 (pshufd:$src3 VR128:$src1, (undef))),
3755 (SHUFPDrri VR128:$src1, VR128:$src1,
3756 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3757 Requires<[HasSSE2]>;
3758 // Unary v4f32 shuffle with PSHUF* in order to fold a load.
3759 def : Pat<(pshufd:$src2 (bc_v4i32 (memopv4f32 addr:$src1)), (undef)),
3760 (PSHUFDmi addr:$src1, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3761 Requires<[HasSSE2]>;
3763 // Special binary v4i32 shuffle cases with SHUFPS.
3764 def : Pat<(v4i32 (shufp:$src3 VR128:$src1, (v4i32 VR128:$src2))),
3765 (SHUFPSrri VR128:$src1, VR128:$src2,
3766 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3767 Requires<[HasSSE2]>;
3768 def : Pat<(v4i32 (shufp:$src3 VR128:$src1, (bc_v4i32 (memopv2i64 addr:$src2)))),
3769 (SHUFPSrmi VR128:$src1, addr:$src2,
3770 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3771 Requires<[HasSSE2]>;
3772 // Special binary v2i64 shuffle cases using SHUFPDrri.
3773 def : Pat<(v2i64 (shufp:$src3 VR128:$src1, VR128:$src2)),
3774 (SHUFPDrri VR128:$src1, VR128:$src2,
3775 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3776 Requires<[HasSSE2]>;
3778 // vector_shuffle v1, <undef>, <0, 0, 1, 1, ...>
3779 let AddedComplexity = 15 in {
3780 def : Pat<(v4i32 (unpckl_undef:$src2 VR128:$src, (undef))),
3781 (PSHUFDri VR128:$src, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3782 Requires<[OptForSpeed, HasSSE2]>;
3783 def : Pat<(v4f32 (unpckl_undef:$src2 VR128:$src, (undef))),
3784 (PSHUFDri VR128:$src, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3785 Requires<[OptForSpeed, HasSSE2]>;
3786 }
3787 let AddedComplexity = 10 in {
3788 def : Pat<(v4f32 (unpckl_undef VR128:$src, (undef))),
3789 (UNPCKLPSrr VR128:$src, VR128:$src)>;
3790 def : Pat<(v16i8 (unpckl_undef VR128:$src, (undef))),
3791 (PUNPCKLBWrr VR128:$src, VR128:$src)>;
3792 def : Pat<(v8i16 (unpckl_undef VR128:$src, (undef))),
3793 (PUNPCKLWDrr VR128:$src, VR128:$src)>;
3794 def : Pat<(v4i32 (unpckl_undef VR128:$src, (undef))),
3795 (PUNPCKLDQrr VR128:$src, VR128:$src)>;
3796 }
3798 // vector_shuffle v1, <undef>, <2, 2, 3, 3, ...>
3799 let AddedComplexity = 15 in {
3800 def : Pat<(v4i32 (unpckh_undef:$src2 VR128:$src, (undef))),
3801 (PSHUFDri VR128:$src, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3802 Requires<[OptForSpeed, HasSSE2]>;
3803 def : Pat<(v4f32 (unpckh_undef:$src2 VR128:$src, (undef))),
3804 (PSHUFDri VR128:$src, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3805 Requires<[OptForSpeed, HasSSE2]>;
3806 }
3807 let AddedComplexity = 10 in {
3808 def : Pat<(v4f32 (unpckh_undef VR128:$src, (undef))),
3809 (UNPCKHPSrr VR128:$src, VR128:$src)>;
3810 def : Pat<(v16i8 (unpckh_undef VR128:$src, (undef))),
3811 (PUNPCKHBWrr VR128:$src, VR128:$src)>;
3812 def : Pat<(v8i16 (unpckh_undef VR128:$src, (undef))),
3813 (PUNPCKHWDrr VR128:$src, VR128:$src)>;
3814 def : Pat<(v4i32 (unpckh_undef VR128:$src, (undef))),
3815 (PUNPCKHDQrr VR128:$src, VR128:$src)>;
3816 }
3818 let AddedComplexity = 20 in {
3819 // vector_shuffle v1, v2 <0, 1, 4, 5> using MOVLHPS
3820 def : Pat<(v4i32 (movlhps VR128:$src1, VR128:$src2)),
3821 (MOVLHPSrr VR128:$src1, VR128:$src2)>;
3823 // vector_shuffle v1, v2 <6, 7, 2, 3> using MOVHLPS
3824 def : Pat<(v4i32 (movhlps VR128:$src1, VR128:$src2)),
3825 (MOVHLPSrr VR128:$src1, VR128:$src2)>;
3827 // vector_shuffle v1, undef <2, ?, ?, ?> using MOVHLPS
3828 def : Pat<(v4f32 (movhlps_undef VR128:$src1, (undef))),
3829 (MOVHLPSrr VR128:$src1, VR128:$src1)>;
3830 def : Pat<(v4i32 (movhlps_undef VR128:$src1, (undef))),
3831 (MOVHLPSrr VR128:$src1, VR128:$src1)>;
3832 }
3834 let AddedComplexity = 20 in {
3835 // vector_shuffle v1, (load v2) <4, 5, 2, 3> using MOVLPS
3836 def : Pat<(v4f32 (movlp VR128:$src1, (load addr:$src2))),
3837 (MOVLPSrm VR128:$src1, addr:$src2)>;
3838 def : Pat<(v2f64 (movlp VR128:$src1, (load addr:$src2))),
3839 (MOVLPDrm VR128:$src1, addr:$src2)>;
3840 def : Pat<(v4i32 (movlp VR128:$src1, (load addr:$src2))),
3841 (MOVLPSrm VR128:$src1, addr:$src2)>;
3842 def : Pat<(v2i64 (movlp VR128:$src1, (load addr:$src2))),
3843 (MOVLPDrm VR128:$src1, addr:$src2)>;
3844 }
3846 // (store (vector_shuffle (load addr), v2, <4, 5, 2, 3>), addr) using MOVLPS
3847 def : Pat<(store (v4f32 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
3848 (MOVLPSmr addr:$src1, VR128:$src2)>;
3849 def : Pat<(store (v2f64 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
3850 (MOVLPDmr addr:$src1, VR128:$src2)>;
3851 def : Pat<(store (v4i32 (movlp (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2)),
3852 addr:$src1),
3853 (MOVLPSmr addr:$src1, VR128:$src2)>;
3854 def : Pat<(store (v2i64 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
3855 (MOVLPDmr addr:$src1, VR128:$src2)>;
3857 let AddedComplexity = 15 in {
3858 // Setting the lowest element in the vector.
3859 def : Pat<(v4i32 (movl VR128:$src1, VR128:$src2)),
3860 (MOVSSrr (v4i32 VR128:$src1),
3861 (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_ss))>;
3862 def : Pat<(v2i64 (movl VR128:$src1, VR128:$src2)),
3863 (MOVSDrr (v2i64 VR128:$src1),
3864 (EXTRACT_SUBREG (v2i64 VR128:$src2), sub_sd))>;
3866 // vector_shuffle v1, v2 <4, 5, 2, 3> using movsd
3867 def : Pat<(v4f32 (movlp VR128:$src1, VR128:$src2)),
3868 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG VR128:$src2, sub_sd))>,
3869 Requires<[HasSSE2]>;
3870 def : Pat<(v4i32 (movlp VR128:$src1, VR128:$src2)),
3871 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG VR128:$src2, sub_sd))>,
3872 Requires<[HasSSE2]>;
3873 }
3875 // vector_shuffle v1, v2 <4, 5, 2, 3> using SHUFPSrri (we prefer movsd, but
3876 // fall back to this for SSE1)
3877 def : Pat<(v4f32 (movlp:$src3 VR128:$src1, (v4f32 VR128:$src2))),
3878 (SHUFPSrri VR128:$src2, VR128:$src1,
3879 (SHUFFLE_get_shuf_imm VR128:$src3))>;
3881 // Set lowest element and zero upper elements.
3882 def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
3883 (MOVZPQILo2PQIrr VR128:$src)>, Requires<[HasSSE2]>;
3885 // Some special case pandn patterns.
3886 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v4i32 immAllOnesV))),
3888 (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
3889 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v8i16 immAllOnesV))),
3890 VR128:$src2)),
3891 (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
3892 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v16i8 immAllOnesV))),
3893 VR128:$src2)),
3894 (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
3896 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v4i32 immAllOnesV))),
3897 (memop addr:$src2))),
3898 (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
3899 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v8i16 immAllOnesV))),
3900 (memop addr:$src2))),
3901 (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
3902 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v16i8 immAllOnesV))),
3903 (memop addr:$src2))),
3904 (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
3906 // vector -> vector casts
3907 def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
3908 (Int_CVTDQ2PSrr VR128:$src)>, Requires<[HasSSE2]>;
3909 def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
3910 (CVTTPS2DQrr VR128:$src)>, Requires<[HasSSE2]>;
3912 // Use movaps / movups for SSE integer load / store (one byte shorter).
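// (The PS form lacks the 0x66 operand-size prefix: e.g. "movaps (%eax), %xmm0"
// encodes as 0F 28 00, while the equivalent movdqa needs 66 0F 6F 00.)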
3913 let Predicates = [HasSSE1] in {
3914 def : Pat<(alignedloadv4i32 addr:$src),
3915 (MOVAPSrm addr:$src)>;
3916 def : Pat<(loadv4i32 addr:$src),
3917 (MOVUPSrm addr:$src)>;
3918 def : Pat<(alignedloadv2i64 addr:$src),
3919 (MOVAPSrm addr:$src)>;
3920 def : Pat<(loadv2i64 addr:$src),
3921 (MOVUPSrm addr:$src)>;
3923 def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
3924 (MOVAPSmr addr:$dst, VR128:$src)>;
3925 def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
3926 (MOVAPSmr addr:$dst, VR128:$src)>;
3927 def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
3928 (MOVAPSmr addr:$dst, VR128:$src)>;
3929 def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
3930 (MOVAPSmr addr:$dst, VR128:$src)>;
3931 def : Pat<(store (v2i64 VR128:$src), addr:$dst),
3932 (MOVUPSmr addr:$dst, VR128:$src)>;
3933 def : Pat<(store (v4i32 VR128:$src), addr:$dst),
3934 (MOVUPSmr addr:$dst, VR128:$src)>;
3935 def : Pat<(store (v8i16 VR128:$src), addr:$dst),
3936 (MOVUPSmr addr:$dst, VR128:$src)>;
3937 def : Pat<(store (v16i8 VR128:$src), addr:$dst),
3938 (MOVUPSmr addr:$dst, VR128:$src)>;
3941 // Use vmovaps/vmovups for AVX 128-bit integer load/store (one byte shorter).
3942 let Predicates = [HasAVX] in {
3943 def : Pat<(alignedloadv4i32 addr:$src),
3944 (VMOVAPSrm addr:$src)>;
3945 def : Pat<(loadv4i32 addr:$src),
3946 (VMOVUPSrm addr:$src)>;
3947 def : Pat<(alignedloadv2i64 addr:$src),
3948 (VMOVAPSrm addr:$src)>;
3949 def : Pat<(loadv2i64 addr:$src),
3950 (VMOVUPSrm addr:$src)>;
3952 def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
3953 (VMOVAPSmr addr:$dst, VR128:$src)>;
3954 def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
3955 (VMOVAPSmr addr:$dst, VR128:$src)>;
3956 def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
3957 (VMOVAPSmr addr:$dst, VR128:$src)>;
3958 def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
3959 (VMOVAPSmr addr:$dst, VR128:$src)>;
3960 def : Pat<(store (v2i64 VR128:$src), addr:$dst),
3961 (VMOVUPSmr addr:$dst, VR128:$src)>;
3962 def : Pat<(store (v4i32 VR128:$src), addr:$dst),
3963 (VMOVUPSmr addr:$dst, VR128:$src)>;
3964 def : Pat<(store (v8i16 VR128:$src), addr:$dst),
3965 (VMOVUPSmr addr:$dst, VR128:$src)>;
3966 def : Pat<(store (v16i8 VR128:$src), addr:$dst),
3967 (VMOVUPSmr addr:$dst, VR128:$src)>;
3970 //===----------------------------------------------------------------------===//
3971 // SSE4.1 - Packed Move with Sign/Zero Extend
3972 //===----------------------------------------------------------------------===//
3974 multiclass SS41I_binop_rm_int8<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
3975 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3976 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3977 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
3979 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3980 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3982 (IntId (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))]>,
3986 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
3987 defm VPMOVSXBW : SS41I_binop_rm_int8<0x20, "vpmovsxbw", int_x86_sse41_pmovsxbw>,
3988 VEX;
3989 defm VPMOVSXWD : SS41I_binop_rm_int8<0x23, "vpmovsxwd", int_x86_sse41_pmovsxwd>,
3990 VEX;
3991 defm VPMOVSXDQ : SS41I_binop_rm_int8<0x25, "vpmovsxdq", int_x86_sse41_pmovsxdq>,
3992 VEX;
3993 defm VPMOVZXBW : SS41I_binop_rm_int8<0x30, "vpmovzxbw", int_x86_sse41_pmovzxbw>,
3994 VEX;
3995 defm VPMOVZXWD : SS41I_binop_rm_int8<0x33, "vpmovzxwd", int_x86_sse41_pmovzxwd>,
3996 VEX;
3997 defm VPMOVZXDQ : SS41I_binop_rm_int8<0x35, "vpmovzxdq", int_x86_sse41_pmovzxdq>,
3998 VEX;
3999 }
4001 defm PMOVSXBW : SS41I_binop_rm_int8<0x20, "pmovsxbw", int_x86_sse41_pmovsxbw>;
4002 defm PMOVSXWD : SS41I_binop_rm_int8<0x23, "pmovsxwd", int_x86_sse41_pmovsxwd>;
4003 defm PMOVSXDQ : SS41I_binop_rm_int8<0x25, "pmovsxdq", int_x86_sse41_pmovsxdq>;
4004 defm PMOVZXBW : SS41I_binop_rm_int8<0x30, "pmovzxbw", int_x86_sse41_pmovzxbw>;
4005 defm PMOVZXWD : SS41I_binop_rm_int8<0x33, "pmovzxwd", int_x86_sse41_pmovzxwd>;
4006 defm PMOVZXDQ : SS41I_binop_rm_int8<0x35, "pmovzxdq", int_x86_sse41_pmovzxdq>;
4008 // Common patterns involving scalar load.
4009 def : Pat<(int_x86_sse41_pmovsxbw (vzmovl_v2i64 addr:$src)),
4010 (PMOVSXBWrm addr:$src)>, Requires<[HasSSE41]>;
4011 def : Pat<(int_x86_sse41_pmovsxbw (vzload_v2i64 addr:$src)),
4012 (PMOVSXBWrm addr:$src)>, Requires<[HasSSE41]>;
4014 def : Pat<(int_x86_sse41_pmovsxwd (vzmovl_v2i64 addr:$src)),
4015 (PMOVSXWDrm addr:$src)>, Requires<[HasSSE41]>;
4016 def : Pat<(int_x86_sse41_pmovsxwd (vzload_v2i64 addr:$src)),
4017 (PMOVSXWDrm addr:$src)>, Requires<[HasSSE41]>;
4019 def : Pat<(int_x86_sse41_pmovsxdq (vzmovl_v2i64 addr:$src)),
4020 (PMOVSXDQrm addr:$src)>, Requires<[HasSSE41]>;
4021 def : Pat<(int_x86_sse41_pmovsxdq (vzload_v2i64 addr:$src)),
4022 (PMOVSXDQrm addr:$src)>, Requires<[HasSSE41]>;
4024 def : Pat<(int_x86_sse41_pmovzxbw (vzmovl_v2i64 addr:$src)),
4025 (PMOVZXBWrm addr:$src)>, Requires<[HasSSE41]>;
4026 def : Pat<(int_x86_sse41_pmovzxbw (vzload_v2i64 addr:$src)),
4027 (PMOVZXBWrm addr:$src)>, Requires<[HasSSE41]>;
4029 def : Pat<(int_x86_sse41_pmovzxwd (vzmovl_v2i64 addr:$src)),
4030 (PMOVZXWDrm addr:$src)>, Requires<[HasSSE41]>;
4031 def : Pat<(int_x86_sse41_pmovzxwd (vzload_v2i64 addr:$src)),
4032 (PMOVZXWDrm addr:$src)>, Requires<[HasSSE41]>;
4034 def : Pat<(int_x86_sse41_pmovzxdq (vzmovl_v2i64 addr:$src)),
4035 (PMOVZXDQrm addr:$src)>, Requires<[HasSSE41]>;
4036 def : Pat<(int_x86_sse41_pmovzxdq (vzload_v2i64 addr:$src)),
4037 (PMOVZXDQrm addr:$src)>, Requires<[HasSSE41]>;
4040 multiclass SS41I_binop_rm_int4<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
4041 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
4042 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4043 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
4045 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
4046 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4048 (IntId (bitconvert (v4i32 (scalar_to_vector (loadi32 addr:$src))))))]>,
4052 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
4053 defm VPMOVSXBD : SS41I_binop_rm_int4<0x21, "vpmovsxbd", int_x86_sse41_pmovsxbd>,
4054 VEX;
4055 defm VPMOVSXWQ : SS41I_binop_rm_int4<0x24, "vpmovsxwq", int_x86_sse41_pmovsxwq>,
4056 VEX;
4057 defm VPMOVZXBD : SS41I_binop_rm_int4<0x31, "vpmovzxbd", int_x86_sse41_pmovzxbd>,
4058 VEX;
4059 defm VPMOVZXWQ : SS41I_binop_rm_int4<0x34, "vpmovzxwq", int_x86_sse41_pmovzxwq>,
4060 VEX;
4061 }
4063 defm PMOVSXBD : SS41I_binop_rm_int4<0x21, "pmovsxbd", int_x86_sse41_pmovsxbd>;
4064 defm PMOVSXWQ : SS41I_binop_rm_int4<0x24, "pmovsxwq", int_x86_sse41_pmovsxwq>;
4065 defm PMOVZXBD : SS41I_binop_rm_int4<0x31, "pmovzxbd", int_x86_sse41_pmovzxbd>;
4066 defm PMOVZXWQ : SS41I_binop_rm_int4<0x34, "pmovzxwq", int_x86_sse41_pmovzxwq>;
4068 // Common patterns involving scalar load
4069 def : Pat<(int_x86_sse41_pmovsxbd (vzmovl_v4i32 addr:$src)),
4070 (PMOVSXBDrm addr:$src)>, Requires<[HasSSE41]>;
4071 def : Pat<(int_x86_sse41_pmovsxwq (vzmovl_v4i32 addr:$src)),
4072 (PMOVSXWQrm addr:$src)>, Requires<[HasSSE41]>;
4074 def : Pat<(int_x86_sse41_pmovzxbd (vzmovl_v4i32 addr:$src)),
4075 (PMOVZXBDrm addr:$src)>, Requires<[HasSSE41]>;
4076 def : Pat<(int_x86_sse41_pmovzxwq (vzmovl_v4i32 addr:$src)),
4077 (PMOVZXWQrm addr:$src)>, Requires<[HasSSE41]>;
4080 multiclass SS41I_binop_rm_int2<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
4081 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
4082 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4083 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
4085 // Expecting an i16 load any-extended to an i32 value.
4086 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i16mem:$src),
4087 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4088 [(set VR128:$dst, (IntId (bitconvert
4089 (v4i32 (scalar_to_vector (loadi16_anyext addr:$src))))))]>,
4093 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
4094 defm VPMOVSXBQ : SS41I_binop_rm_int2<0x22, "vpmovsxbq", int_x86_sse41_pmovsxbq>,
4095 VEX;
4096 defm VPMOVZXBQ : SS41I_binop_rm_int2<0x32, "vpmovzxbq", int_x86_sse41_pmovzxbq>,
4097 VEX;
4098 }
4099 defm PMOVSXBQ : SS41I_binop_rm_int2<0x22, "pmovsxbq", int_x86_sse41_pmovsxbq>;
4100 defm PMOVZXBQ : SS41I_binop_rm_int2<0x32, "pmovzxbq", int_x86_sse41_pmovzxbq>;
4102 // Common patterns involving scalar load
4103 def : Pat<(int_x86_sse41_pmovsxbq
4104 (bitconvert (v4i32 (X86vzmovl
4105 (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
4106 (PMOVSXBQrm addr:$src)>, Requires<[HasSSE41]>;
4108 def : Pat<(int_x86_sse41_pmovzxbq
4109 (bitconvert (v4i32 (X86vzmovl
4110 (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
4111 (PMOVZXBQrm addr:$src)>, Requires<[HasSSE41]>;
4113 //===----------------------------------------------------------------------===//
4114 // SSE4.1 - Extract Instructions
4115 //===----------------------------------------------------------------------===//
4117 /// SS41I_extract8 - SSE 4.1 extract 8 bits to a 32-bit reg or 8-bit mem
4118 multiclass SS41I_extract8<bits<8> opc, string OpcodeStr> {
4119 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
4120 (ins VR128:$src1, i32i8imm:$src2),
4121 !strconcat(OpcodeStr,
4122 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4123 [(set GR32:$dst, (X86pextrb (v16i8 VR128:$src1), imm:$src2))]>,
4125 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4126 (ins i8mem:$dst, VR128:$src1, i32i8imm:$src2),
4127 !strconcat(OpcodeStr,
4128 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4131 // There's an AssertZext in the way of writing the store pattern
4132 // (store (i8 (trunc (X86pextrb (v16i8 VR128:$src1), imm:$src2))), addr:$dst)
4135 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
4136 defm VPEXTRB : SS41I_extract8<0x14, "vpextrb">, VEX;
4137 def VPEXTRBrr64 : SS4AIi8<0x14, MRMDestReg, (outs GR64:$dst),
4138 (ins VR128:$src1, i32i8imm:$src2),
4139 "vpextrb\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>, OpSize, VEX;
4142 defm PEXTRB : SS41I_extract8<0x14, "pextrb">;
4145 /// SS41I_extract16 - SSE 4.1 extract 16 bits to memory destination
4146 multiclass SS41I_extract16<bits<8> opc, string OpcodeStr> {
4147 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4148 (ins i16mem:$dst, VR128:$src1, i32i8imm:$src2),
4149 !strconcat(OpcodeStr,
4150 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4153 // There's an AssertZext in the way of writing the store pattern
4154 // (store (i16 (trunc (X86pextrw (v8i16 VR128:$src1), imm:$src2))), addr:$dst)
4157 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4158 defm VPEXTRW : SS41I_extract16<0x15, "vpextrw">, VEX;
4160 defm PEXTRW : SS41I_extract16<0x15, "pextrw">;
4163 /// SS41I_extract32 - SSE 4.1 extract 32 bits to int reg or memory destination
4164 multiclass SS41I_extract32<bits<8> opc, string OpcodeStr> {
4165 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
4166 (ins VR128:$src1, i32i8imm:$src2),
4167 !strconcat(OpcodeStr,
4168 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4170 (extractelt (v4i32 VR128:$src1), imm:$src2))]>, OpSize;
4171 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4172 (ins i32mem:$dst, VR128:$src1, i32i8imm:$src2),
4173 !strconcat(OpcodeStr,
4174 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4175 [(store (extractelt (v4i32 VR128:$src1), imm:$src2),
4176 addr:$dst)]>, OpSize;
4179 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4180 defm VPEXTRD : SS41I_extract32<0x16, "vpextrd">, VEX;
4182 defm PEXTRD : SS41I_extract32<0x16, "pextrd">;
4184 /// SS41I_extract64 - SSE 4.1 extract 64 bits to int reg or memory destination
4185 multiclass SS41I_extract64<bits<8> opc, string OpcodeStr> {
4186 def rr : SS4AIi8<opc, MRMDestReg, (outs GR64:$dst),
4187 (ins VR128:$src1, i32i8imm:$src2),
4188 !strconcat(OpcodeStr,
4189 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4191 (extractelt (v2i64 VR128:$src1), imm:$src2))]>, OpSize, REX_W;
4192 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4193 (ins i64mem:$dst, VR128:$src1, i32i8imm:$src2),
4194 !strconcat(OpcodeStr,
4195 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4196 [(store (extractelt (v2i64 VR128:$src1), imm:$src2),
4197 addr:$dst)]>, OpSize, REX_W;
4200 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4201 defm VPEXTRQ : SS41I_extract64<0x16, "vpextrq">, VEX, VEX_W;
4203 defm PEXTRQ : SS41I_extract64<0x16, "pextrq">;
4205 /// SS41I_extractf32 - SSE 4.1 extract a 32-bit fp value to an int reg or
4206 /// memory destination
4207 multiclass SS41I_extractf32<bits<8> opc, string OpcodeStr> {
4208 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
4209 (ins VR128:$src1, i32i8imm:$src2),
4210 !strconcat(OpcodeStr,
4211 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4213 (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2))]>,
4215 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4216 (ins f32mem:$dst, VR128:$src1, i32i8imm:$src2),
4217 !strconcat(OpcodeStr,
4218 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4219 [(store (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2),
4220 addr:$dst)]>, OpSize;
4223 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
4224 defm VEXTRACTPS : SS41I_extractf32<0x17, "vextractps">, VEX;
4225 def VEXTRACTPSrr64 : SS4AIi8<0x17, MRMDestReg, (outs GR64:$dst),
4226 (ins VR128:$src1, i32i8imm:$src2),
4227 "vextractps \t{$src2, $src1, $dst|$dst, $src1, $src2}",
4230 defm EXTRACTPS : SS41I_extractf32<0x17, "extractps">;
4232 // Also match an EXTRACTPS store when the store is done as f32 instead of i32.
4233 def : Pat<(store (f32 (bitconvert (extractelt (bc_v4i32 (v4f32 VR128:$src1)),
4234 imm:$src2))),
4235 addr:$dst),
4236 (EXTRACTPSmr addr:$dst, VR128:$src1, imm:$src2)>,
4237 Requires<[HasSSE41]>;
4239 //===----------------------------------------------------------------------===//
4240 // SSE4.1 - Insert Instructions
4241 //===----------------------------------------------------------------------===//
4243 multiclass SS41I_insert8<bits<8> opc, string asm, bit Is2Addr = 1> {
4244 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4245 (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
4247 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4249 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4251 (X86pinsrb VR128:$src1, GR32:$src2, imm:$src3))]>, OpSize;
4252 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4253 (ins VR128:$src1, i8mem:$src2, i32i8imm:$src3),
4255 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4257 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4259 (X86pinsrb VR128:$src1, (extloadi8 addr:$src2),
4260 imm:$src3))]>, OpSize;
4263 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4264 defm VPINSRB : SS41I_insert8<0x20, "vpinsrb", 0>, VEX_4V;
4265 let Constraints = "$src1 = $dst" in
4266 defm PINSRB : SS41I_insert8<0x20, "pinsrb">;
4268 multiclass SS41I_insert32<bits<8> opc, string asm, bit Is2Addr = 1> {
4269 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4270 (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
4272 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4274 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4276 (v4i32 (insertelt VR128:$src1, GR32:$src2, imm:$src3)))]>,
4278 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4279 (ins VR128:$src1, i32mem:$src2, i32i8imm:$src3),
4281 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4283 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4285 (v4i32 (insertelt VR128:$src1, (loadi32 addr:$src2),
4286 imm:$src3)))]>, OpSize;
4289 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4290 defm VPINSRD : SS41I_insert32<0x22, "vpinsrd", 0>, VEX_4V;
4291 let Constraints = "$src1 = $dst" in
4292 defm PINSRD : SS41I_insert32<0x22, "pinsrd">;
4294 multiclass SS41I_insert64<bits<8> opc, string asm, bit Is2Addr = 1> {
4295 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4296 (ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
4298 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4300 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4302 (v2i64 (insertelt VR128:$src1, GR64:$src2, imm:$src3)))]>,
4304 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4305 (ins VR128:$src1, i64mem:$src2, i32i8imm:$src3),
4307 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4309 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4311 (v2i64 (insertelt VR128:$src1, (loadi64 addr:$src2),
4312 imm:$src3)))]>, OpSize;
4315 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4316 defm VPINSRQ : SS41I_insert64<0x22, "vpinsrq", 0>, VEX_4V, VEX_W;
4317 let Constraints = "$src1 = $dst" in
4318 defm PINSRQ : SS41I_insert64<0x22, "pinsrq">, REX_W;
4320 // insertps has a few different modes: the first two below are optimized
4321 // inserts that won't zero arbitrary elements in the destination vector,
4322 // while the intrinsic form that follows may zero arbitrary elements in the
4323 // target vector.
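// As a reminder of the encoding: bits 7:6 of the insertps immediate select the
// source element, bits 5:4 the destination element, and bits 3:0 a zero mask;
// e.g. "insertps $0x10, %xmm1, %xmm0" copies element 0 of %xmm1 into element 1
// of %xmm0 without zeroing anything.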
4324 multiclass SS41I_insertf32<bits<8> opc, string asm, bit Is2Addr = 1> {
4325 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4326 (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4328 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4330 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4332 (X86insrtps VR128:$src1, VR128:$src2, imm:$src3))]>,
4334 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4335 (ins VR128:$src1, f32mem:$src2, i32i8imm:$src3),
4337 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4339 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4341 (X86insrtps VR128:$src1,
4342 (v4f32 (scalar_to_vector (loadf32 addr:$src2))),
4343 imm:$src3))]>, OpSize;
4346 let Constraints = "$src1 = $dst" in
4347 defm INSERTPS : SS41I_insertf32<0x21, "insertps">;
4348 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4349 defm VINSERTPS : SS41I_insertf32<0x21, "vinsertps", 0>, VEX_4V;
4351 def : Pat<(int_x86_sse41_insertps VR128:$src1, VR128:$src2, imm:$src3),
4352 (VINSERTPSrr VR128:$src1, VR128:$src2, imm:$src3)>,
4354 def : Pat<(int_x86_sse41_insertps VR128:$src1, VR128:$src2, imm:$src3),
4355 (INSERTPSrr VR128:$src1, VR128:$src2, imm:$src3)>,
4356 Requires<[HasSSE41]>;
4358 //===----------------------------------------------------------------------===//
4359 // SSE4.1 - Round Instructions
4360 //===----------------------------------------------------------------------===//
4362 multiclass sse41_fp_unop_rm<bits<8> opcps, bits<8> opcpd, string OpcodeStr,
4363 X86MemOperand x86memop, RegisterClass RC,
4364 PatFrag mem_frag32, PatFrag mem_frag64,
4365 Intrinsic V4F32Int, Intrinsic V2F64Int> {
4366 // Vector intrinsic operation, reg
4368 def PSr : SS4AIi8<opcps, MRMSrcReg,
4369 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
4370 !strconcat(OpcodeStr,
4371 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4372 [(set RC:$dst, (V4F32Int RC:$src1, imm:$src2))]>,
4375 // Vector intrinsic operation, mem
4376 def PSm : Ii8<opcps, MRMSrcMem,
4377 (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
4378 !strconcat(OpcodeStr,
4379 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4381 (V4F32Int (mem_frag32 addr:$src1),imm:$src2))]>,
4383 Requires<[HasSSE41]>;
4385 // Vector intrinsic operation, reg
4386 def PDr : SS4AIi8<opcpd, MRMSrcReg,
4387 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
4388 !strconcat(OpcodeStr,
4389 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4390 [(set RC:$dst, (V2F64Int RC:$src1, imm:$src2))]>,
4393 // Vector intrinsic operation, mem
4394 def PDm : SS4AIi8<opcpd, MRMSrcMem,
4395 (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
4396 !strconcat(OpcodeStr,
4397 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4399 (V2F64Int (mem_frag64 addr:$src1),imm:$src2))]>,
4403 multiclass sse41_fp_unop_rm_avx_p<bits<8> opcps, bits<8> opcpd,
4404 RegisterClass RC, X86MemOperand x86memop, string OpcodeStr> {
4405 // Vector intrinsic operation, reg
4407 def PSr_AVX : SS4AIi8<opcps, MRMSrcReg,
4408 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
4409 !strconcat(OpcodeStr,
4410 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4413 // Vector intrinsic operation, mem
4414 def PSm_AVX : Ii8<opcps, MRMSrcMem,
4415 (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
4416 !strconcat(OpcodeStr,
4417 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4418 []>, TA, OpSize, Requires<[HasSSE41]>;
4420 // Vector intrinsic operation, reg
4421 def PDr_AVX : SS4AIi8<opcpd, MRMSrcReg,
4422 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
4423 !strconcat(OpcodeStr,
4424 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4427 // Vector intrinsic operation, mem
4428 def PDm_AVX : SS4AIi8<opcpd, MRMSrcMem,
4429 (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
4430 !strconcat(OpcodeStr,
4431 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4435 multiclass sse41_fp_binop_rm<bits<8> opcss, bits<8> opcsd,
4438 Intrinsic F64Int, bit Is2Addr = 1> {
4439 // Intrinsic operation, reg.
4440 def SSr : SS4AIi8<opcss, MRMSrcReg,
4441 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4443 !strconcat(OpcodeStr,
4444 "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4445 !strconcat(OpcodeStr,
4446 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4447 [(set VR128:$dst, (F32Int VR128:$src1, VR128:$src2, imm:$src3))]>,
4450 // Intrinsic operation, mem.
4451 def SSm : SS4AIi8<opcss, MRMSrcMem,
4452 (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2, i32i8imm:$src3),
4454 !strconcat(OpcodeStr,
4455 "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4456 !strconcat(OpcodeStr,
4457 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4459 (F32Int VR128:$src1, sse_load_f32:$src2, imm:$src3))]>,
4462 // Intrinsic operation, reg.
4463 def SDr : SS4AIi8<opcsd, MRMSrcReg,
4464 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4466 !strconcat(OpcodeStr,
4467 "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4468 !strconcat(OpcodeStr,
4469 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4470 [(set VR128:$dst, (F64Int VR128:$src1, VR128:$src2, imm:$src3))]>,
4473 // Intrinsic operation, mem.
4474 def SDm : SS4AIi8<opcsd, MRMSrcMem,
4475 (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2, i32i8imm:$src3),
4477 !strconcat(OpcodeStr,
4478 "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4479 !strconcat(OpcodeStr,
4480 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4482 (F64Int VR128:$src1, sse_load_f64:$src2, imm:$src3))]>,
4486 multiclass sse41_fp_binop_rm_avx_s<bits<8> opcss, bits<8> opcsd,
4488 // Intrinsic operation, reg.
4489 def SSr_AVX : SS4AIi8<opcss, MRMSrcReg,
4490 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4491 !strconcat(OpcodeStr,
4492 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4495 // Intrinsic operation, mem.
4496 def SSm_AVX : SS4AIi8<opcss, MRMSrcMem,
4497 (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2, i32i8imm:$src3),
4498 !strconcat(OpcodeStr,
4499 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4502 // Intrinsic operation, reg.
4503 def SDr_AVX : SS4AIi8<opcsd, MRMSrcReg,
4504 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4505 !strconcat(OpcodeStr,
4506 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4509 // Intrinsic operation, mem.
4510 def SDm_AVX : SS4AIi8<opcsd, MRMSrcMem,
4511 (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2, i32i8imm:$src3),
4512 !strconcat(OpcodeStr,
4513 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4517 // FP round - roundss, roundps, roundsd, roundpd
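// The round immediate encodes the rounding mode in bits 1:0 (00 = nearest,
// 01 = down, 10 = up, 11 = truncate); setting bit 2 uses MXCSR.RC instead, and
// bit 3 suppresses the precision (inexact) exception. For example,
// "roundss $0x1, %xmm1, %xmm0" computes the floor of a scalar single.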
4518 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
4520 defm VROUND : sse41_fp_unop_rm<0x08, 0x09, "vround", f128mem, VR128,
4521 memopv4f32, memopv2f64,
4522 int_x86_sse41_round_ps,
4523 int_x86_sse41_round_pd>, VEX;
4524 defm VROUNDY : sse41_fp_unop_rm<0x08, 0x09, "vround", f256mem, VR256,
4525 memopv8f32, memopv4f64,
4526 int_x86_avx_round_ps_256,
4527 int_x86_avx_round_pd_256>, VEX;
4528 defm VROUND : sse41_fp_binop_rm<0x0A, 0x0B, "vround",
4529 int_x86_sse41_round_ss,
4530 int_x86_sse41_round_sd, 0>, VEX_4V;
4532 // Instructions for the assembler
4533 defm VROUND : sse41_fp_unop_rm_avx_p<0x08, 0x09, VR128, f128mem, "vround">,
4535 defm VROUNDY : sse41_fp_unop_rm_avx_p<0x08, 0x09, VR256, f256mem, "vround">,
4537 defm VROUND : sse41_fp_binop_rm_avx_s<0x0A, 0x0B, "vround">, VEX_4V;
4540 defm ROUND : sse41_fp_unop_rm<0x08, 0x09, "round", f128mem, VR128,
4541 memopv4f32, memopv2f64,
4542 int_x86_sse41_round_ps, int_x86_sse41_round_pd>;
4543 let Constraints = "$src1 = $dst" in
4544 defm ROUND : sse41_fp_binop_rm<0x0A, 0x0B, "round",
4545 int_x86_sse41_round_ss, int_x86_sse41_round_sd>;
4547 //===----------------------------------------------------------------------===//
4548 // SSE4.1 - Packed Bit Test
4549 //===----------------------------------------------------------------------===//
4551 // The ptest instruction. We lower to this in X86ISelLowering, primarily
4552 // from the Intel intrinsic that corresponds to it.
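// For reference, ptest sets ZF when (src1 & src2) has no bits set and CF when
// (~src1 & src2) has none, which is how the testz/testc intrinsic variants
// read their result back out of EFLAGS.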
4553 let Defs = [EFLAGS], isAsmParserOnly = 1, Predicates = [HasAVX] in {
4554 def VPTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
4555 "vptest\t{$src2, $src1|$src1, $src2}",
4556 [(set EFLAGS, (X86ptest VR128:$src1, (v4f32 VR128:$src2)))]>,
4558 def VPTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
4559 "vptest\t{$src2, $src1|$src1, $src2}",
4560 [(set EFLAGS,(X86ptest VR128:$src1, (memopv4f32 addr:$src2)))]>,
4563 def VPTESTYrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR256:$src1, VR256:$src2),
4564 "vptest\t{$src2, $src1|$src1, $src2}",
4565 [(set EFLAGS, (X86ptest VR256:$src1, (v4i64 VR256:$src2)))]>,
4567 def VPTESTYrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR256:$src1, i256mem:$src2),
4568 "vptest\t{$src2, $src1|$src1, $src2}",
4569 [(set EFLAGS,(X86ptest VR256:$src1, (memopv4i64 addr:$src2)))]>,
4573 let Defs = [EFLAGS] in {
4574 def PTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
4575 "ptest \t{$src2, $src1|$src1, $src2}",
4576 [(set EFLAGS, (X86ptest VR128:$src1, (v4f32 VR128:$src2)))]>,
4578 def PTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
4579 "ptest \t{$src2, $src1|$src1, $src2}",
4580 [(set EFLAGS, (X86ptest VR128:$src1, (memopv4f32 addr:$src2)))]>,
4584 // The bit test instructions below are AVX-only.
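// vtestps/vtestpd behave like ptest but examine only the sign bit of each
// packed element when computing ZF and CF.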
4585 multiclass avx_bittest<bits<8> opc, string OpcodeStr, RegisterClass RC,
4586 X86MemOperand x86memop, PatFrag mem_frag, ValueType vt> {
4587 def rr : SS48I<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
4588 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
4589 [(set EFLAGS, (X86testp RC:$src1, (vt RC:$src2)))]>, OpSize, VEX;
4590 def rm : SS48I<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
4591 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
4592 [(set EFLAGS, (X86testp RC:$src1, (mem_frag addr:$src2)))]>,
4596 let Defs = [EFLAGS], isAsmParserOnly = 1, Predicates = [HasAVX] in {
4597 defm VTESTPS : avx_bittest<0x0E, "vtestps", VR128, f128mem, memopv4f32, v4f32>;
4598 defm VTESTPSY : avx_bittest<0x0E, "vtestps", VR256, f256mem, memopv8f32, v8f32>;
4599 defm VTESTPD : avx_bittest<0x0F, "vtestpd", VR128, f128mem, memopv2f64, v2f64>;
4600 defm VTESTPDY : avx_bittest<0x0F, "vtestpd", VR256, f256mem, memopv4f64, v4f64>;
4603 //===----------------------------------------------------------------------===//
4604 // SSE4.1 - Misc Instructions
4605 //===----------------------------------------------------------------------===//
4607 // SS41I_unop_rm_int_v16 - SSE 4.1 unary operator whose type is v8i16.
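// phminposuw finds the minimum of the eight unsigned words in the source,
// returning the value in bits 15:0 of the result, its index in bits 18:16,
// and zeroing the rest.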
4608 multiclass SS41I_unop_rm_int_v16<bits<8> opc, string OpcodeStr,
4609 Intrinsic IntId128> {
4610 def rr128 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
4612 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4613 [(set VR128:$dst, (IntId128 VR128:$src))]>, OpSize;
4614 def rm128 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
4616 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4619 (bitconvert (memopv8i16 addr:$src))))]>, OpSize;
4622 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4623 defm VPHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "vphminposuw",
4624 int_x86_sse41_phminposuw>, VEX;
4625 defm PHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "phminposuw",
4626 int_x86_sse41_phminposuw>;
4628 /// SS41I_binop_rm_int - Simple SSE 4.1 binary operator
4629 multiclass SS41I_binop_rm_int<bits<8> opc, string OpcodeStr,
4630 Intrinsic IntId128, bit Is2Addr = 1> {
4631 let isCommutable = 1 in
4632 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
4633 (ins VR128:$src1, VR128:$src2),
4635 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4636 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4637 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>, OpSize;
4638 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
4639 (ins VR128:$src1, i128mem:$src2),
4641 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4642 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4644 (IntId128 VR128:$src1,
4645 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
4648 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
4649 let isCommutable = 0 in
4650 defm VPACKUSDW : SS41I_binop_rm_int<0x2B, "vpackusdw", int_x86_sse41_packusdw,
4651 0>, VEX_4V;
4652 defm VPCMPEQQ : SS41I_binop_rm_int<0x29, "vpcmpeqq", int_x86_sse41_pcmpeqq,
4653 0>, VEX_4V;
4654 defm VPMINSB : SS41I_binop_rm_int<0x38, "vpminsb", int_x86_sse41_pminsb,
4655 0>, VEX_4V;
4656 defm VPMINSD : SS41I_binop_rm_int<0x39, "vpminsd", int_x86_sse41_pminsd,
4657 0>, VEX_4V;
4658 defm VPMINUD : SS41I_binop_rm_int<0x3B, "vpminud", int_x86_sse41_pminud,
4659 0>, VEX_4V;
4660 defm VPMINUW : SS41I_binop_rm_int<0x3A, "vpminuw", int_x86_sse41_pminuw,
4661 0>, VEX_4V;
4662 defm VPMAXSB : SS41I_binop_rm_int<0x3C, "vpmaxsb", int_x86_sse41_pmaxsb,
4663 0>, VEX_4V;
4664 defm VPMAXSD : SS41I_binop_rm_int<0x3D, "vpmaxsd", int_x86_sse41_pmaxsd,
4665 0>, VEX_4V;
4666 defm VPMAXUD : SS41I_binop_rm_int<0x3F, "vpmaxud", int_x86_sse41_pmaxud,
4667 0>, VEX_4V;
4668 defm VPMAXUW : SS41I_binop_rm_int<0x3E, "vpmaxuw", int_x86_sse41_pmaxuw,
4669 0>, VEX_4V;
4670 defm VPMULDQ : SS41I_binop_rm_int<0x28, "vpmuldq", int_x86_sse41_pmuldq,
4671 0>, VEX_4V;
4672 }
4674 let Constraints = "$src1 = $dst" in {
4675 let isCommutable = 0 in
4676 defm PACKUSDW : SS41I_binop_rm_int<0x2B, "packusdw", int_x86_sse41_packusdw>;
4677 defm PCMPEQQ : SS41I_binop_rm_int<0x29, "pcmpeqq", int_x86_sse41_pcmpeqq>;
4678 defm PMINSB : SS41I_binop_rm_int<0x38, "pminsb", int_x86_sse41_pminsb>;
4679 defm PMINSD : SS41I_binop_rm_int<0x39, "pminsd", int_x86_sse41_pminsd>;
4680 defm PMINUD : SS41I_binop_rm_int<0x3B, "pminud", int_x86_sse41_pminud>;
4681 defm PMINUW : SS41I_binop_rm_int<0x3A, "pminuw", int_x86_sse41_pminuw>;
4682 defm PMAXSB : SS41I_binop_rm_int<0x3C, "pmaxsb", int_x86_sse41_pmaxsb>;
4683 defm PMAXSD : SS41I_binop_rm_int<0x3D, "pmaxsd", int_x86_sse41_pmaxsd>;
4684 defm PMAXUD : SS41I_binop_rm_int<0x3F, "pmaxud", int_x86_sse41_pmaxud>;
4685 defm PMAXUW : SS41I_binop_rm_int<0x3E, "pmaxuw", int_x86_sse41_pmaxuw>;
4686 defm PMULDQ : SS41I_binop_rm_int<0x28, "pmuldq", int_x86_sse41_pmuldq>;
4689 def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, VR128:$src2)),
4690 (PCMPEQQrr VR128:$src1, VR128:$src2)>;
4691 def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, (memop addr:$src2))),
4692 (PCMPEQQrm VR128:$src1, addr:$src2)>;
4694 /// SS48I_binop_rm - Simple SSE41 binary operator.
4695 multiclass SS48I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
4696 ValueType OpVT, bit Is2Addr = 1> {
4697 let isCommutable = 1 in
4698 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
4699 (ins VR128:$src1, VR128:$src2),
4701 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4702 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4703 [(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]>,
4705 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
4706 (ins VR128:$src1, i128mem:$src2),
4708 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4709 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4710 [(set VR128:$dst, (OpNode VR128:$src1,
4711 (bc_v4i32 (memopv2i64 addr:$src2))))]>,
4715 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4716 defm VPMULLD : SS48I_binop_rm<0x40, "vpmulld", mul, v4i32, 0>, VEX_4V;
4717 let Constraints = "$src1 = $dst" in
4718 defm PMULLD : SS48I_binop_rm<0x40, "pmulld", mul, v4i32>;
4720 /// SS41I_binop_rmi_int - SSE 4.1 binary operator with 8-bit immediate
4721 multiclass SS41I_binop_rmi_int<bits<8> opc, string OpcodeStr,
4722 Intrinsic IntId, RegisterClass RC, PatFrag memop_frag,
4723 X86MemOperand x86memop, bit Is2Addr = 1> {
4724 let isCommutable = 1 in
4725 def rri : SS4AIi8<opc, MRMSrcReg, (outs RC:$dst),
4726 (ins RC:$src1, RC:$src2, i32i8imm:$src3),
4728 !strconcat(OpcodeStr,
4729 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4730 !strconcat(OpcodeStr,
4731 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4732 [(set RC:$dst, (IntId RC:$src1, RC:$src2, imm:$src3))]>,
4734 def rmi : SS4AIi8<opc, MRMSrcMem, (outs RC:$dst),
4735 (ins RC:$src1, x86memop:$src2, i32i8imm:$src3),
4737 !strconcat(OpcodeStr,
4738 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4739 !strconcat(OpcodeStr,
4740 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4743 (bitconvert (memop_frag addr:$src2)), imm:$src3))]>,
4747 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
4748 let isCommutable = 0 in {
4749 defm VBLENDPS : SS41I_binop_rmi_int<0x0C, "vblendps", int_x86_sse41_blendps,
4750 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4751 defm VBLENDPD : SS41I_binop_rmi_int<0x0D, "vblendpd", int_x86_sse41_blendpd,
4752 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4753 defm VBLENDPSY : SS41I_binop_rmi_int<0x0C, "vblendps",
4754 int_x86_avx_blend_ps_256, VR256, memopv32i8, i256mem, 0>, VEX_4V;
4755 defm VBLENDPDY : SS41I_binop_rmi_int<0x0D, "vblendpd",
4756 int_x86_avx_blend_pd_256, VR256, memopv32i8, i256mem, 0>, VEX_4V;
4757 defm VPBLENDW : SS41I_binop_rmi_int<0x0E, "vpblendw", int_x86_sse41_pblendw,
4758 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4759 defm VMPSADBW : SS41I_binop_rmi_int<0x42, "vmpsadbw", int_x86_sse41_mpsadbw,
4760 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4762 defm VDPPS : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_sse41_dpps,
4763 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4764 defm VDPPD : SS41I_binop_rmi_int<0x41, "vdppd", int_x86_sse41_dppd,
4765 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4766 defm VDPPSY : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_avx_dp_ps_256,
4767 VR256, memopv32i8, i256mem, 0>, VEX_4V;
4770 let Constraints = "$src1 = $dst" in {
4771 let isCommutable = 0 in {
4772 defm BLENDPS : SS41I_binop_rmi_int<0x0C, "blendps", int_x86_sse41_blendps,
4773 VR128, memopv16i8, i128mem>;
4774 defm BLENDPD : SS41I_binop_rmi_int<0x0D, "blendpd", int_x86_sse41_blendpd,
4775 VR128, memopv16i8, i128mem>;
4776 defm PBLENDW : SS41I_binop_rmi_int<0x0E, "pblendw", int_x86_sse41_pblendw,
4777 VR128, memopv16i8, i128mem>;
4778 defm MPSADBW : SS41I_binop_rmi_int<0x42, "mpsadbw", int_x86_sse41_mpsadbw,
4779 VR128, memopv16i8, i128mem>;
4781 defm DPPS : SS41I_binop_rmi_int<0x40, "dpps", int_x86_sse41_dpps,
4782 VR128, memopv16i8, i128mem>;
4783 defm DPPD : SS41I_binop_rmi_int<0x41, "dppd", int_x86_sse41_dppd,
4784 VR128, memopv16i8, i128mem>;
4787 /// SS41I_quaternary_int_avx - AVX SSE 4.1 operation with four operands
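/// The fourth operand is a blend mask: each result element comes from $src2
/// when the most significant bit of the corresponding $src3 element is set,
/// and from $src1 otherwise.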
4788 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
4789 multiclass SS41I_quaternary_int_avx<bits<8> opc, string OpcodeStr,
4790 RegisterClass RC, X86MemOperand x86memop,
4791 PatFrag mem_frag, Intrinsic IntId> {
4792 def rr : I<opc, MRMSrcReg, (outs RC:$dst),
4793 (ins RC:$src1, RC:$src2, RC:$src3),
4794 !strconcat(OpcodeStr,
4795 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4796 [(set RC:$dst, (IntId RC:$src1, RC:$src2, RC:$src3))],
4797 SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM;
4799 def rm : I<opc, MRMSrcMem, (outs RC:$dst),
4800 (ins RC:$src1, x86memop:$src2, RC:$src3),
4801 !strconcat(OpcodeStr,
4802 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4804 (IntId RC:$src1, (bitconvert (mem_frag addr:$src2)),
4806 SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM;
4810 defm VBLENDVPD : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR128, i128mem,
4811 memopv16i8, int_x86_sse41_blendvpd>;
4812 defm VBLENDVPS : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR128, i128mem,
4813 memopv16i8, int_x86_sse41_blendvps>;
4814 defm VPBLENDVB : SS41I_quaternary_int_avx<0x4C, "vpblendvb", VR128, i128mem,
4815 memopv16i8, int_x86_sse41_pblendvb>;
4816 defm VBLENDVPDY : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR256, i256mem,
4817 memopv32i8, int_x86_avx_blendv_pd_256>;
4818 defm VBLENDVPSY : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR256, i256mem,
4819 memopv32i8, int_x86_avx_blendv_ps_256>;
4821 /// SS41I_ternary_int - SSE 4.1 ternary operator
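/// These differ from the AVX quaternary forms above only in that the blend
/// mask is fixed: the sign bit of each XMM0 element selects between $src1 and
/// $src2, hence the implicit use of XMM0.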
4822 let Uses = [XMM0], Constraints = "$src1 = $dst" in {
4823 multiclass SS41I_ternary_int<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
4824 def rr0 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
4825 (ins VR128:$src1, VR128:$src2),
4826 !strconcat(OpcodeStr,
4827 "\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}"),
4828 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2, XMM0))]>,
4831 def rm0 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
4832 (ins VR128:$src1, i128mem:$src2),
4833 !strconcat(OpcodeStr,
4834 "\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}"),
4837 (bitconvert (memopv16i8 addr:$src2)), XMM0))]>, OpSize;
4841 defm BLENDVPD : SS41I_ternary_int<0x15, "blendvpd", int_x86_sse41_blendvpd>;
4842 defm BLENDVPS : SS41I_ternary_int<0x14, "blendvps", int_x86_sse41_blendvps>;
4843 defm PBLENDVB : SS41I_ternary_int<0x10, "pblendvb", int_x86_sse41_pblendvb>;
4845 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4846 def VMOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
4847 "vmovntdqa\t{$src, $dst|$dst, $src}",
4848 [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
4850 def MOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
4851 "movntdqa\t{$src, $dst|$dst, $src}",
4852 [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
4855 //===----------------------------------------------------------------------===//
4856 // SSE4.2 - Compare Instructions
4857 //===----------------------------------------------------------------------===//
4859 /// SS42I_binop_rm_int - Simple SSE 4.2 binary operator
4860 multiclass SS42I_binop_rm_int<bits<8> opc, string OpcodeStr,
4861 Intrinsic IntId128, bit Is2Addr = 1> {
4862 def rr : SS428I<opc, MRMSrcReg, (outs VR128:$dst),
4863 (ins VR128:$src1, VR128:$src2),
4865 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4866 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4867 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
4869 def rm : SS428I<opc, MRMSrcMem, (outs VR128:$dst),
4870 (ins VR128:$src1, i128mem:$src2),
4872 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4873 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4875 (IntId128 VR128:$src1,
4876 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
4879 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4880 defm VPCMPGTQ : SS42I_binop_rm_int<0x37, "vpcmpgtq", int_x86_sse42_pcmpgtq,
4881 0>, VEX_4V;
4882 let Constraints = "$src1 = $dst" in
4883 defm PCMPGTQ : SS42I_binop_rm_int<0x37, "pcmpgtq", int_x86_sse42_pcmpgtq>;
4885 def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, VR128:$src2)),
4886 (PCMPGTQrr VR128:$src1, VR128:$src2)>;
4887 def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, (memop addr:$src2))),
4888 (PCMPGTQrm VR128:$src1, addr:$src2)>;
4890 //===----------------------------------------------------------------------===//
4891 // SSE4.2 - String/text Processing Instructions
4892 //===----------------------------------------------------------------------===//
4894 // Packed Compare Implicit Length Strings, Return Mask
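// The implicit-length forms treat each operand as a NUL-terminated string and
// return the comparison result as a mask in XMM0; the imm8 control byte picks
// the element size, the aggregation operation, the polarity, and whether the
// result is a bit mask or an element mask.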
4895 multiclass pseudo_pcmpistrm<string asm> {
4896 def REG : Ii8<0, Pseudo, (outs VR128:$dst),
4897 (ins VR128:$src1, VR128:$src2, i8imm:$src3), !strconcat(asm, "rr PSEUDO"),
4898 [(set VR128:$dst, (int_x86_sse42_pcmpistrm128 VR128:$src1, VR128:$src2,
4900 def MEM : Ii8<0, Pseudo, (outs VR128:$dst),
4901 (ins VR128:$src1, i128mem:$src2, i8imm:$src3), !strconcat(asm, "rm PSEUDO"),
4902 [(set VR128:$dst, (int_x86_sse42_pcmpistrm128
4903 VR128:$src1, (load addr:$src2), imm:$src3))]>;
4906 let Defs = [EFLAGS], usesCustomInserter = 1 in {
4907 defm PCMPISTRM128 : pseudo_pcmpistrm<"#PCMPISTRM128">, Requires<[HasSSE42]>;
4908 defm VPCMPISTRM128 : pseudo_pcmpistrm<"#VPCMPISTRM128">, Requires<[HasAVX]>;
4911 let Defs = [XMM0, EFLAGS], isAsmParserOnly = 1,
4912 Predicates = [HasAVX] in {
4913 def VPCMPISTRM128rr : SS42AI<0x62, MRMSrcReg, (outs),
4914 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
4915 "vpcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize, VEX;
4916 def VPCMPISTRM128rm : SS42AI<0x62, MRMSrcMem, (outs),
4917 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
4918 "vpcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize, VEX;
4921 let Defs = [XMM0, EFLAGS] in {
4922 def PCMPISTRM128rr : SS42AI<0x62, MRMSrcReg, (outs),
4923 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
4924 "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize;
4925 def PCMPISTRM128rm : SS42AI<0x62, MRMSrcMem, (outs),
4926 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
4927 "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize;
4930 // Packed Compare Explicit Length Strings, Return Mask
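// The explicit-length forms are identical except that the lengths of the two
// strings are passed in EAX and EDX rather than implied by NUL terminators.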
4931 multiclass pseudo_pcmpestrm<string asm> {
4932 def REG : Ii8<0, Pseudo, (outs VR128:$dst),
4933 (ins VR128:$src1, VR128:$src3, i8imm:$src5), !strconcat(asm, "rr PSEUDO"),
4934 [(set VR128:$dst, (int_x86_sse42_pcmpestrm128
4935 VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5))]>;
4936 def MEM : Ii8<0, Pseudo, (outs VR128:$dst),
4937 (ins VR128:$src1, i128mem:$src3, i8imm:$src5), !strconcat(asm, "rm PSEUDO"),
4938 [(set VR128:$dst, (int_x86_sse42_pcmpestrm128
4939 VR128:$src1, EAX, (load addr:$src3), EDX, imm:$src5))]>;
4942 let Defs = [EFLAGS], Uses = [EAX, EDX], usesCustomInserter = 1 in {
4943 defm PCMPESTRM128 : pseudo_pcmpestrm<"#PCMPESTRM128">, Requires<[HasSSE42]>;
4944 defm VPCMPESTRM128 : pseudo_pcmpestrm<"#VPCMPESTRM128">, Requires<[HasAVX]>;
4947 let isAsmParserOnly = 1, Predicates = [HasAVX],
4948 Defs = [XMM0, EFLAGS], Uses = [EAX, EDX] in {
4949 def VPCMPESTRM128rr : SS42AI<0x60, MRMSrcReg, (outs),
4950 (ins VR128:$src1, VR128:$src3, i8imm:$src5),
4951 "vpcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize, VEX;
4952 def VPCMPESTRM128rm : SS42AI<0x60, MRMSrcMem, (outs),
4953 (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
4954 "vpcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize, VEX;
4957 let Defs = [XMM0, EFLAGS], Uses = [EAX, EDX] in {
4958 def PCMPESTRM128rr : SS42AI<0x60, MRMSrcReg, (outs),
4959 (ins VR128:$src1, VR128:$src3, i8imm:$src5),
4960 "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize;
4961 def PCMPESTRM128rm : SS42AI<0x60, MRMSrcMem, (outs),
4962 (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
4963 "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize;
4966 // Packed Compare Implicit Length Strings, Return Index
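// These return the index of the first matching element in ECX (the last one
// if bit 6 of the immediate is set); if there is no match, ECX is set to the
// element count (16 for bytes, 8 for words).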
4967 let Defs = [ECX, EFLAGS] in {
4968 multiclass SS42AI_pcmpistri<Intrinsic IntId128, string asm = "pcmpistri"> {
4969 def rr : SS42AI<0x63, MRMSrcReg, (outs),
4970 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
4971 !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
4972 [(set ECX, (IntId128 VR128:$src1, VR128:$src2, imm:$src3)),
4973 (implicit EFLAGS)]>, OpSize;
4974 def rm : SS42AI<0x63, MRMSrcMem, (outs),
4975 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
4976 !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
4977 [(set ECX, (IntId128 VR128:$src1, (load addr:$src2), imm:$src3)),
4978 (implicit EFLAGS)]>, OpSize;
4982 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
4983 defm VPCMPISTRI : SS42AI_pcmpistri<int_x86_sse42_pcmpistri128, "vpcmpistri">,
4984 VEX;
4985 defm VPCMPISTRIA : SS42AI_pcmpistri<int_x86_sse42_pcmpistria128, "vpcmpistri">,
4986 VEX;
4987 defm VPCMPISTRIC : SS42AI_pcmpistri<int_x86_sse42_pcmpistric128, "vpcmpistri">,
4988 VEX;
4989 defm VPCMPISTRIO : SS42AI_pcmpistri<int_x86_sse42_pcmpistrio128, "vpcmpistri">,
4990 VEX;
4991 defm VPCMPISTRIS : SS42AI_pcmpistri<int_x86_sse42_pcmpistris128, "vpcmpistri">,
4992 VEX;
4993 defm VPCMPISTRIZ : SS42AI_pcmpistri<int_x86_sse42_pcmpistriz128, "vpcmpistri">,
4994 VEX;
4995 }
4997 defm PCMPISTRI : SS42AI_pcmpistri<int_x86_sse42_pcmpistri128>;
4998 defm PCMPISTRIA : SS42AI_pcmpistri<int_x86_sse42_pcmpistria128>;
4999 defm PCMPISTRIC : SS42AI_pcmpistri<int_x86_sse42_pcmpistric128>;
5000 defm PCMPISTRIO : SS42AI_pcmpistri<int_x86_sse42_pcmpistrio128>;
5001 defm PCMPISTRIS : SS42AI_pcmpistri<int_x86_sse42_pcmpistris128>;
5002 defm PCMPISTRIZ : SS42AI_pcmpistri<int_x86_sse42_pcmpistriz128>;
5004 // Packed Compare Explicit Length Strings, Return Index
5005 let Defs = [ECX, EFLAGS], Uses = [EAX, EDX] in {
5006 multiclass SS42AI_pcmpestri<Intrinsic IntId128, string asm = "pcmpestri"> {
5007 def rr : SS42AI<0x61, MRMSrcReg, (outs),
5008 (ins VR128:$src1, VR128:$src3, i8imm:$src5),
5009 !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
5010 [(set ECX, (IntId128 VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5)),
5011 (implicit EFLAGS)]>, OpSize;
5012 def rm : SS42AI<0x61, MRMSrcMem, (outs),
5013 (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
5014 !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
5016 (IntId128 VR128:$src1, EAX, (load addr:$src3), EDX, imm:$src5)),
5017 (implicit EFLAGS)]>, OpSize;
5021 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
5022 defm VPCMPESTRI : SS42AI_pcmpestri<int_x86_sse42_pcmpestri128, "vpcmpestri">,
5023 VEX;
5024 defm VPCMPESTRIA : SS42AI_pcmpestri<int_x86_sse42_pcmpestria128, "vpcmpestri">,
5025 VEX;
5026 defm VPCMPESTRIC : SS42AI_pcmpestri<int_x86_sse42_pcmpestric128, "vpcmpestri">,
5027 VEX;
5028 defm VPCMPESTRIO : SS42AI_pcmpestri<int_x86_sse42_pcmpestrio128, "vpcmpestri">,
5029 VEX;
5030 defm VPCMPESTRIS : SS42AI_pcmpestri<int_x86_sse42_pcmpestris128, "vpcmpestri">,
5031 VEX;
5032 defm VPCMPESTRIZ : SS42AI_pcmpestri<int_x86_sse42_pcmpestriz128, "vpcmpestri">,
5033 VEX;
5034 }
5036 defm PCMPESTRI : SS42AI_pcmpestri<int_x86_sse42_pcmpestri128>;
5037 defm PCMPESTRIA : SS42AI_pcmpestri<int_x86_sse42_pcmpestria128>;
5038 defm PCMPESTRIC : SS42AI_pcmpestri<int_x86_sse42_pcmpestric128>;
5039 defm PCMPESTRIO : SS42AI_pcmpestri<int_x86_sse42_pcmpestrio128>;
5040 defm PCMPESTRIS : SS42AI_pcmpestri<int_x86_sse42_pcmpestris128>;
5041 defm PCMPESTRIZ : SS42AI_pcmpestri<int_x86_sse42_pcmpestriz128>;
5043 //===----------------------------------------------------------------------===//
5044 // SSE4.2 - CRC Instructions
5045 //===----------------------------------------------------------------------===//
5047 // No CRC instructions have AVX equivalents
5049 // CRC intrinsic instructions.
5050 // This set of instructions exists only in rr/rm form; the only difference
5051 // between them is the size of the r and m operands.
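// Note that crc32 computes CRC-32C (the Castagnoli polynomial 0x1EDC6F41 used
// by iSCSI and others), not the CRC-32 of gzip/zlib, and that $src1 serves as
// both the running checksum input and the result.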
5052 let Constraints = "$src1 = $dst" in {
5053 def CRC32m8 : SS42FI<0xF0, MRMSrcMem, (outs GR32:$dst),
5054 (ins GR32:$src1, i8mem:$src2),
5055 "crc32{b} \t{$src2, $src1|$src1, $src2}",
5057 (int_x86_sse42_crc32_8 GR32:$src1,
5058 (load addr:$src2)))]>;
5059 def CRC32r8 : SS42FI<0xF0, MRMSrcReg, (outs GR32:$dst),
5060 (ins GR32:$src1, GR8:$src2),
5061 "crc32{b} \t{$src2, $src1|$src1, $src2}",
5063 (int_x86_sse42_crc32_8 GR32:$src1, GR8:$src2))]>;
5064 def CRC32m16 : SS42FI<0xF1, MRMSrcMem, (outs GR32:$dst),
5065 (ins GR32:$src1, i16mem:$src2),
5066 "crc32{w} \t{$src2, $src1|$src1, $src2}",
5068 (int_x86_sse42_crc32_16 GR32:$src1,
5069 (load addr:$src2)))]>,
5071 def CRC32r16 : SS42FI<0xF1, MRMSrcReg, (outs GR32:$dst),
5072 (ins GR32:$src1, GR16:$src2),
5073 "crc32{w} \t{$src2, $src1|$src1, $src2}",
5075 (int_x86_sse42_crc32_16 GR32:$src1, GR16:$src2))]>,
5077 def CRC32m32 : SS42FI<0xF1, MRMSrcMem, (outs GR32:$dst),
5078 (ins GR32:$src1, i32mem:$src2),
5079 "crc32{l} \t{$src2, $src1|$src1, $src2}",
5081 (int_x86_sse42_crc32_32 GR32:$src1,
5082 (load addr:$src2)))]>;
5083 def CRC32r32 : SS42FI<0xF1, MRMSrcReg, (outs GR32:$dst),
5084 (ins GR32:$src1, GR32:$src2),
5085 "crc32{l} \t{$src2, $src1|$src1, $src2}",
5087 (int_x86_sse42_crc32_32 GR32:$src1, GR32:$src2))]>;
5088 def CRC64m8 : SS42FI<0xF0, MRMSrcMem, (outs GR64:$dst),
5089 (ins GR64:$src1, i8mem:$src2),
5090 "crc32{b} \t{$src2, $src1|$src1, $src2}",
5092 (int_x86_sse42_crc64_8 GR64:$src1,
5093 (load addr:$src2)))]>,
5095 def CRC64r8 : SS42FI<0xF0, MRMSrcReg, (outs GR64:$dst),
5096 (ins GR64:$src1, GR8:$src2),
5097 "crc32{b} \t{$src2, $src1|$src1, $src2}",
5099 (int_x86_sse42_crc64_8 GR64:$src1, GR8:$src2))]>,
5101 def CRC64m64 : SS42FI<0xF1, MRMSrcMem, (outs GR64:$dst),
5102 (ins GR64:$src1, i64mem:$src2),
5103 "crc32{q} \t{$src2, $src1|$src1, $src2}",
5105 (int_x86_sse42_crc64_64 GR64:$src1,
5106 (load addr:$src2)))]>,
5108 def CRC64r64 : SS42FI<0xF1, MRMSrcReg, (outs GR64:$dst),
5109 (ins GR64:$src1, GR64:$src2),
5110 "crc32{q} \t{$src2, $src1|$src1, $src2}",
5112 (int_x86_sse42_crc64_64 GR64:$src1, GR64:$src2))]>,
5116 //===----------------------------------------------------------------------===//
5117 // AES-NI Instructions
5118 //===----------------------------------------------------------------------===//
5120 multiclass AESI_binop_rm_int<bits<8> opc, string OpcodeStr,
5121 Intrinsic IntId128, bit Is2Addr = 1> {
5122 def rr : AES8I<opc, MRMSrcReg, (outs VR128:$dst),
5123 (ins VR128:$src1, VR128:$src2),
5125 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5126 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5127 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
5129 def rm : AES8I<opc, MRMSrcMem, (outs VR128:$dst),
5130 (ins VR128:$src1, i128mem:$src2),
5132 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5133 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5135 (IntId128 VR128:$src1,
5136 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
5139 // Perform One Round of an AES Encryption/Decryption Flow
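// aesenc runs ShiftRows, SubBytes and MixColumns on $src1 and then XORs in the
// round key from $src2; aesenclast omits MixColumns, matching the final cipher
// round. The aesdec/aesdeclast forms apply the inverse transformations.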
5140 let isAsmParserOnly = 1, Predicates = [HasAVX, HasAES] in {
5141 defm VAESENC : AESI_binop_rm_int<0xDC, "vaesenc",
5142 int_x86_aesni_aesenc, 0>, VEX_4V;
5143 defm VAESENCLAST : AESI_binop_rm_int<0xDD, "vaesenclast",
5144 int_x86_aesni_aesenclast, 0>, VEX_4V;
5145 defm VAESDEC : AESI_binop_rm_int<0xDE, "vaesdec",
5146 int_x86_aesni_aesdec, 0>, VEX_4V;
5147 defm VAESDECLAST : AESI_binop_rm_int<0xDF, "vaesdeclast",
5148 int_x86_aesni_aesdeclast, 0>, VEX_4V;
5151 let Constraints = "$src1 = $dst" in {
5152 defm AESENC : AESI_binop_rm_int<0xDC, "aesenc",
5153 int_x86_aesni_aesenc>;
5154 defm AESENCLAST : AESI_binop_rm_int<0xDD, "aesenclast",
5155 int_x86_aesni_aesenclast>;
5156 defm AESDEC : AESI_binop_rm_int<0xDE, "aesdec",
5157 int_x86_aesni_aesdec>;
5158 defm AESDECLAST : AESI_binop_rm_int<0xDF, "aesdeclast",
5159 int_x86_aesni_aesdeclast>;
5162 def : Pat<(v2i64 (int_x86_aesni_aesenc VR128:$src1, VR128:$src2)),
5163 (AESENCrr VR128:$src1, VR128:$src2)>;
5164 def : Pat<(v2i64 (int_x86_aesni_aesenc VR128:$src1, (memop addr:$src2))),
5165 (AESENCrm VR128:$src1, addr:$src2)>;
5166 def : Pat<(v2i64 (int_x86_aesni_aesenclast VR128:$src1, VR128:$src2)),
5167 (AESENCLASTrr VR128:$src1, VR128:$src2)>;
5168 def : Pat<(v2i64 (int_x86_aesni_aesenclast VR128:$src1, (memop addr:$src2))),
5169 (AESENCLASTrm VR128:$src1, addr:$src2)>;
5170 def : Pat<(v2i64 (int_x86_aesni_aesdec VR128:$src1, VR128:$src2)),
5171 (AESDECrr VR128:$src1, VR128:$src2)>;
5172 def : Pat<(v2i64 (int_x86_aesni_aesdec VR128:$src1, (memop addr:$src2))),
5173 (AESDECrm VR128:$src1, addr:$src2)>;
5174 def : Pat<(v2i64 (int_x86_aesni_aesdeclast VR128:$src1, VR128:$src2)),
5175 (AESDECLASTrr VR128:$src1, VR128:$src2)>;
5176 def : Pat<(v2i64 (int_x86_aesni_aesdeclast VR128:$src1, (memop addr:$src2))),
5177 (AESDECLASTrm VR128:$src1, addr:$src2)>;
5179 // Perform the AES InvMixColumn Transformation
5180 let isAsmParserOnly = 1, Predicates = [HasAVX, HasAES] in {
5181 def VAESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
5183 "vaesimc\t{$src1, $dst|$dst, $src1}",
5185 (int_x86_aesni_aesimc VR128:$src1))]>,
5187 def VAESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
5188 (ins i128mem:$src1),
5189 "vaesimc\t{$src1, $dst|$dst, $src1}",
5191 (int_x86_aesni_aesimc (bitconvert (memopv2i64 addr:$src1))))]>,
5194 def AESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
5196 "aesimc\t{$src1, $dst|$dst, $src1}",
5198 (int_x86_aesni_aesimc VR128:$src1))]>,
5200 def AESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
5201 (ins i128mem:$src1),
5202 "aesimc\t{$src1, $dst|$dst, $src1}",
5204 (int_x86_aesni_aesimc (bitconvert (memopv2i64 addr:$src1))))]>,
5207 // AES Round Key Generation Assist
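// aeskeygenassist applies the key-expansion SubWord/RotWord steps to the
// source and XORs in the round constant given by the immediate; software uses
// it to expand a cipher key into the round-key schedule.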
5208 let isAsmParserOnly = 1, Predicates = [HasAVX, HasAES] in {
5209 def VAESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
5210 (ins VR128:$src1, i8imm:$src2),
5211 "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5213 (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
5215 def VAESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
5216 (ins i128mem:$src1, i8imm:$src2),
5217 "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5218 [(set VR128:$dst,
5219 (int_x86_aesni_aeskeygenassist (bitconvert (memopv2i64 addr:$src1)),
5220 imm:$src2))]>,
5221 OpSize, VEX;
5222 }
5223 def AESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
5224 (ins VR128:$src1, i8imm:$src2),
5225 "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5227 (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
5229 def AESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
5230 (ins i128mem:$src1, i8imm:$src2),
5231 "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5232 [(set VR128:$dst,
5233 (int_x86_aesni_aeskeygenassist (bitconvert (memopv2i64 addr:$src1)),
5234 imm:$src2))]>,
5235 OpSize;
5237 //===----------------------------------------------------------------------===//
5238 // CLMUL Instructions
5239 //===----------------------------------------------------------------------===//
5241 // Only the AVX versions of the CLMUL instructions are described here.
5243 // Carry-less Multiplication instructions
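// pclmulqdq multiplies one 64-bit half of each source carry-lessly (i.e. in
// GF(2)[x]) into a 128-bit product; imm8 bit 0 selects the half of the first
// source and bit 4 the half of the second. The pseudo forms defined below are
// assembler aliases that bake common immediates into the mnemonic.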
5244 let isAsmParserOnly = 1 in {
5245 def VPCLMULQDQrr : CLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
5246 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
5247 "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
5250 def VPCLMULQDQrm : CLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
5251 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
5252 "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
5256 multiclass avx_vpclmul<string asm> {
5257 def rr : I<0, Pseudo, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
5258 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5261 def rm : I<0, Pseudo, (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
5262 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5265 defm VPCLMULHQHQDQ : avx_vpclmul<"vpclmulhqhqdq">;
5266 defm VPCLMULHQLQDQ : avx_vpclmul<"vpclmulhqlqdq">;
5267 defm VPCLMULLQHQDQ : avx_vpclmul<"vpclmullqhqdq">;
5268 defm VPCLMULLQLQDQ : avx_vpclmul<"vpclmullqlqdq">;
5270 } // isAsmParserOnly
5272 //===----------------------------------------------------------------------===//
5273 // AVX Instructions
5274 //===----------------------------------------------------------------------===//
5276 let isAsmParserOnly = 1 in {
5278 // Load from memory and broadcast to all elements of the destination operand
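// In AVX1 these broadcasts exist only with a memory source; e.g.
// "vbroadcastss (%eax), %ymm0" replicates a single f32 into all eight lanes.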
5279 class avx_broadcast<bits<8> opc, string OpcodeStr, RegisterClass RC,
5280 X86MemOperand x86memop, Intrinsic Int> :
5281 AVX8I<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
5282 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5283 [(set RC:$dst, (Int addr:$src))]>, VEX;
5285 def VBROADCASTSS : avx_broadcast<0x18, "vbroadcastss", VR128, f32mem,
5286 int_x86_avx_vbroadcastss>;
5287 def VBROADCASTSSY : avx_broadcast<0x18, "vbroadcastss", VR256, f32mem,
5288 int_x86_avx_vbroadcastss_256>;
5289 def VBROADCASTSD : avx_broadcast<0x19, "vbroadcastsd", VR256, f64mem,
5290 int_x86_avx_vbroadcast_sd_256>;
5291 def VBROADCASTF128 : avx_broadcast<0x1A, "vbroadcastf128", VR256, f128mem,
5292 int_x86_avx_vbroadcastf128_pd_256>;

// Insert packed floating-point values
def VINSERTF128rr : AVXAIi8<0x18, MRMSrcReg, (outs VR256:$dst),
          (ins VR256:$src1, VR128:$src2, i8imm:$src3),
          "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, VEX_4V;
def VINSERTF128rm : AVXAIi8<0x18, MRMSrcMem, (outs VR256:$dst),
          (ins VR256:$src1, f128mem:$src2, i8imm:$src3),
          "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, VEX_4V;

// Extract packed floating-point values
def VEXTRACTF128rr : AVXAIi8<0x19, MRMDestReg, (outs VR128:$dst),
          (ins VR256:$src1, i8imm:$src2),
          "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
          []>, VEX;
def VEXTRACTF128mr : AVXAIi8<0x19, MRMDestMem, (outs),
          (ins f128mem:$dst, VR256:$src1, i8imm:$src2),
          "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
          []>, VEX;
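
// For example, "vinsertf128 $1, %xmm1, %ymm2, %ymm0" copies %ymm2 into %ymm0
// with its upper 128-bit half replaced by %xmm1, and "vextractf128 $1, %ymm1,
// %xmm0" reads that upper half back out; bit 0 of the immediate selects the
// half.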

// Conditional SIMD Packed Loads and Stores
multiclass avx_movmask_rm<bits<8> opc_rm, bits<8> opc_mr, string OpcodeStr,
                          Intrinsic IntLd, Intrinsic IntLd256,
                          Intrinsic IntSt, Intrinsic IntSt256,
                          PatFrag pf128, PatFrag pf256> {
  def rm  : AVX8I<opc_rm, MRMSrcMem, (outs VR128:$dst),
             (ins VR128:$src1, f128mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR128:$dst, (IntLd addr:$src2, VR128:$src1))]>,
             VEX_4V;
  def Yrm : AVX8I<opc_rm, MRMSrcMem, (outs VR256:$dst),
             (ins VR256:$src1, f256mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR256:$dst, (IntLd256 addr:$src2, VR256:$src1))]>,
             VEX_4V;
  def mr  : AVX8I<opc_mr, MRMDestMem, (outs),
             (ins f128mem:$dst, VR128:$src1, VR128:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(IntSt addr:$dst, VR128:$src1, VR128:$src2)]>, VEX_4V;
  def Ymr : AVX8I<opc_mr, MRMDestMem, (outs),
             (ins f256mem:$dst, VR256:$src1, VR256:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(IntSt256 addr:$dst, VR256:$src1, VR256:$src2)]>, VEX_4V;
}

defm VMASKMOVPS : avx_movmask_rm<0x2C, 0x2E, "vmaskmovps",
                                 int_x86_avx_maskload_ps,
                                 int_x86_avx_maskload_ps_256,
                                 int_x86_avx_maskstore_ps,
                                 int_x86_avx_maskstore_ps_256,
                                 memopv4f32, memopv8f32>;
defm VMASKMOVPD : avx_movmask_rm<0x2D, 0x2F, "vmaskmovpd",
                                 int_x86_avx_maskload_pd,
                                 int_x86_avx_maskload_pd_256,
                                 int_x86_avx_maskstore_pd,
                                 int_x86_avx_maskstore_pd_256,
                                 memopv2f64, memopv4f64>;
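
// Note the operand roles above: $src1 is the mask. A masked load reads only
// the elements whose mask sign bit is set and zeroes the remaining elements;
// a masked store writes only the selected elements and leaves the rest of
// memory untouched.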

// Permute Floating-Point Values
multiclass avx_permil<bits<8> opc_rm, bits<8> opc_rmi, string OpcodeStr,
                      RegisterClass RC, X86MemOperand x86memop_f,
                      X86MemOperand x86memop_i, PatFrag f_frag, PatFrag i_frag,
                      Intrinsic IntVar, Intrinsic IntImm> {
  def rr  : AVX8I<opc_rm, MRMSrcReg, (outs RC:$dst),
             (ins RC:$src1, RC:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst, (IntVar RC:$src1, RC:$src2))]>, VEX_4V;
  def rm  : AVX8I<opc_rm, MRMSrcMem, (outs RC:$dst),
             (ins RC:$src1, x86memop_i:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst, (IntVar RC:$src1, (i_frag addr:$src2)))]>, VEX_4V;

  def ri  : AVXAIi8<opc_rmi, MRMSrcReg, (outs RC:$dst),
             (ins RC:$src1, i8imm:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst, (IntImm RC:$src1, imm:$src2))]>, VEX;
  def mi  : AVXAIi8<opc_rmi, MRMSrcMem, (outs RC:$dst),
             (ins x86memop_f:$src1, i8imm:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst, (IntImm (f_frag addr:$src1), imm:$src2))]>, VEX;
}

defm VPERMILPS  : avx_permil<0x0C, 0x04, "vpermilps", VR128, f128mem, i128mem,
                             memopv4f32, memopv4i32,
                             int_x86_avx_vpermilvar_ps,
                             int_x86_avx_vpermil_ps>;
defm VPERMILPSY : avx_permil<0x0C, 0x04, "vpermilps", VR256, f256mem, i256mem,
                             memopv8f32, memopv8i32,
                             int_x86_avx_vpermilvar_ps_256,
                             int_x86_avx_vpermil_ps_256>;
defm VPERMILPD  : avx_permil<0x0D, 0x05, "vpermilpd", VR128, f128mem, i128mem,
                             memopv2f64, memopv2i64,
                             int_x86_avx_vpermilvar_pd,
                             int_x86_avx_vpermil_pd>;
defm VPERMILPDY : avx_permil<0x0D, 0x05, "vpermilpd", VR256, f256mem, i256mem,
                             memopv4f64, memopv4i64,
                             int_x86_avx_vpermilvar_pd_256,
                             int_x86_avx_vpermil_pd_256>;
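
// The "var" forms take per-element selectors in a second vector operand,
// while the immediate forms encode the selectors in imm8. For example,
// "vpermilps $0x1b, %xmm1, %xmm0" reverses the four f32 elements, since 0x1b
// encodes the selectors 3,2,1,0.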

def VPERM2F128rr : AVXAIi8<0x06, MRMSrcReg, (outs VR256:$dst),
          (ins VR256:$src1, VR256:$src2, i8imm:$src3),
          "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, VEX_4V;
def VPERM2F128rm : AVXAIi8<0x06, MRMSrcMem, (outs VR256:$dst),
          (ins VR256:$src1, f256mem:$src2, i8imm:$src3),
          "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, VEX_4V;
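
// In vperm2f128, imm8[1:0] selects the source of the result's low 128-bit
// half (0/1 = low/high half of $src1, 2/3 = low/high half of $src2) and
// imm8[5:4] selects the high half; setting bit 3 or bit 7 zeroes the
// corresponding half instead.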

// Zero all YMM registers
def VZEROALL : I<0x77, RawFrm, (outs), (ins), "vzeroall",
                 [(int_x86_avx_vzeroall)]>, VEX, VEX_L, Requires<[HasAVX]>;

// Zero the upper 128 bits of all YMM registers
def VZEROUPPER : I<0x77, RawFrm, (outs), (ins), "vzeroupper",
                   [(int_x86_avx_vzeroupper)]>, VEX, Requires<[HasAVX]>;
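
// Mixing VEX-encoded code with legacy SSE code while the upper halves of the
// YMM registers are dirty incurs a transition penalty, so vzeroupper is
// emitted before calls into (or returns to) code that may use legacy SSE.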

} // isAsmParserOnly

def : Pat<(int_x86_avx_vinsertf128_pd_256 VR256:$src1, VR128:$src2, imm:$src3),
          (VINSERTF128rr VR256:$src1, VR128:$src2, imm:$src3)>;
def : Pat<(int_x86_avx_vinsertf128_ps_256 VR256:$src1, VR128:$src2, imm:$src3),
          (VINSERTF128rr VR256:$src1, VR128:$src2, imm:$src3)>;
def : Pat<(int_x86_avx_vinsertf128_si_256 VR256:$src1, VR128:$src2, imm:$src3),
          (VINSERTF128rr VR256:$src1, VR128:$src2, imm:$src3)>;

def : Pat<(int_x86_avx_vextractf128_pd_256 VR256:$src1, imm:$src2),
          (VEXTRACTF128rr VR256:$src1, imm:$src2)>;
def : Pat<(int_x86_avx_vextractf128_ps_256 VR256:$src1, imm:$src2),
          (VEXTRACTF128rr VR256:$src1, imm:$src2)>;
def : Pat<(int_x86_avx_vextractf128_si_256 VR256:$src1, imm:$src2),
          (VEXTRACTF128rr VR256:$src1, imm:$src2)>;

def : Pat<(int_x86_avx_vbroadcastf128_ps_256 addr:$src),
          (VBROADCASTF128 addr:$src)>;

def : Pat<(int_x86_avx_vperm2f128_ps_256 VR256:$src1, VR256:$src2, imm:$src3),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$src3)>;
def : Pat<(int_x86_avx_vperm2f128_pd_256 VR256:$src1, VR256:$src2, imm:$src3),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$src3)>;
def : Pat<(int_x86_avx_vperm2f128_si_256 VR256:$src1, VR256:$src2, imm:$src3),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$src3)>;

def : Pat<(int_x86_avx_vperm2f128_ps_256
                  VR256:$src1, (memopv8f32 addr:$src2), imm:$src3),
          (VPERM2F128rm VR256:$src1, addr:$src2, imm:$src3)>;
def : Pat<(int_x86_avx_vperm2f128_pd_256
                  VR256:$src1, (memopv4f64 addr:$src2), imm:$src3),
          (VPERM2F128rm VR256:$src1, addr:$src2, imm:$src3)>;
def : Pat<(int_x86_avx_vperm2f128_si_256
                  VR256:$src1, (memopv8i32 addr:$src2), imm:$src3),
          (VPERM2F128rm VR256:$src1, addr:$src2, imm:$src3)>;

//===----------------------------------------------------------------------===//
// SSE Shuffle pattern fragments
//===----------------------------------------------------------------------===//

// This is part of a "work in progress" refactoring. The idea is that all
// vector shuffles are going to be translated into target specific nodes and
// directly matched by the patterns below (which can be changed along the way).
// The AVX versions of some but not all of them are described here, and more
// should come in the near future.

// Shuffle with PSHUFD instruction folding loads. The first two patterns match
// SSE2 loads, which are always promoted to v2i64. The last one should match
// the SSE1 case, where the only legal load is v4f32, but there is no PSHUFD
// in SSE1, so how did it ever work? Anyway, the pattern will remain here until
// we investigate further.
def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv2i64 addr:$src1)),
                                   (i8 imm:$imm))),
          (VPSHUFDmi addr:$src1, imm:$imm)>, Requires<[HasAVX]>;
def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv2i64 addr:$src1)),
                                   (i8 imm:$imm))),
          (PSHUFDmi addr:$src1, imm:$imm)>;
def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv4f32 addr:$src1)),
                                   (i8 imm:$imm))),
          (PSHUFDmi addr:$src1, imm:$imm)>; // FIXME: has this ever worked?

// Shuffle with PSHUFD instruction.
def : Pat<(v4f32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
          (VPSHUFDri VR128:$src1, imm:$imm)>, Requires<[HasAVX]>;
def : Pat<(v4f32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
          (PSHUFDri VR128:$src1, imm:$imm)>;

def : Pat<(v4i32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
          (VPSHUFDri VR128:$src1, imm:$imm)>, Requires<[HasAVX]>;
def : Pat<(v4i32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
          (PSHUFDri VR128:$src1, imm:$imm)>;
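
// The PSHUFD immediate packs four 2-bit source-element selectors, one per
// destination element. For example, "pshufd $0x1b, %xmm1, %xmm0" uses the
// selectors 3,2,1,0 and so reverses the four dword elements.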

// Shuffle with SHUFPD instruction.
def : Pat<(v2f64 (X86Shufps VR128:$src1,
                     (memopv2f64 addr:$src2), (i8 imm:$imm))),
          (VSHUFPDrmi VR128:$src1, addr:$src2, imm:$imm)>, Requires<[HasAVX]>;
def : Pat<(v2f64 (X86Shufps VR128:$src1,
                     (memopv2f64 addr:$src2), (i8 imm:$imm))),
          (SHUFPDrmi VR128:$src1, addr:$src2, imm:$imm)>;

def : Pat<(v2i64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (VSHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>, Requires<[HasAVX]>;
def : Pat<(v2i64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (SHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;

def : Pat<(v2f64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (VSHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>, Requires<[HasAVX]>;
def : Pat<(v2f64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (SHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;

// Shuffle with SHUFPS instruction.
def : Pat<(v4f32 (X86Shufps VR128:$src1,
                     (memopv4f32 addr:$src2), (i8 imm:$imm))),
          (VSHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>, Requires<[HasAVX]>;
def : Pat<(v4f32 (X86Shufps VR128:$src1,
                     (memopv4f32 addr:$src2), (i8 imm:$imm))),
          (SHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;

def : Pat<(v4f32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (VSHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>, Requires<[HasAVX]>;
def : Pat<(v4f32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (SHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;

def : Pat<(v4i32 (X86Shufps VR128:$src1,
                     (bc_v4i32 (memopv2i64 addr:$src2)), (i8 imm:$imm))),
          (VSHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>, Requires<[HasAVX]>;
def : Pat<(v4i32 (X86Shufps VR128:$src1,
                     (bc_v4i32 (memopv2i64 addr:$src2)), (i8 imm:$imm))),
          (SHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;

def : Pat<(v4i32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (VSHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>, Requires<[HasAVX]>;
def : Pat<(v4i32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (SHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
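
// In SHUFPS, imm[1:0] and imm[3:2] select the two low result elements from
// $src1, and imm[5:4] and imm[7:6] select the two high result elements from
// $src2. For example, "shufps $0xe4, %xmm1, %xmm0" keeps elements 0,1 of
// %xmm0 and takes elements 2,3 of %xmm1, all in their original positions.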

// Shuffle with MOVHLPS instruction
def : Pat<(v4f32 (X86Movhlps VR128:$src1, VR128:$src2)),
          (MOVHLPSrr VR128:$src1, VR128:$src2)>;
def : Pat<(v4i32 (X86Movhlps VR128:$src1, VR128:$src2)),
          (MOVHLPSrr VR128:$src1, VR128:$src2)>;

// Shuffle with MOVDDUP instruction
def : Pat<(X86Movddup (memopv2f64 addr:$src)),
          (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
def : Pat<(X86Movddup (memopv2f64 addr:$src)),
          (MOVDDUPrm addr:$src)>;

def : Pat<(X86Movddup (bc_v4f32 (memopv2f64 addr:$src))),
          (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
def : Pat<(X86Movddup (bc_v4f32 (memopv2f64 addr:$src))),
          (MOVDDUPrm addr:$src)>;

def : Pat<(X86Movddup (memopv2i64 addr:$src)),
          (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
def : Pat<(X86Movddup (memopv2i64 addr:$src)),
          (MOVDDUPrm addr:$src)>;

def : Pat<(X86Movddup (bc_v4i32 (memopv2i64 addr:$src))),
          (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
def : Pat<(X86Movddup (bc_v4i32 (memopv2i64 addr:$src))),
          (MOVDDUPrm addr:$src)>;

def : Pat<(X86Movddup (v2f64 (scalar_to_vector (loadf64 addr:$src)))),
          (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
def : Pat<(X86Movddup (v2f64 (scalar_to_vector (loadf64 addr:$src)))),
          (MOVDDUPrm addr:$src)>;

def : Pat<(X86Movddup (bc_v2f64
                       (v2i64 (scalar_to_vector (loadi64 addr:$src))))),
          (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
def : Pat<(X86Movddup (bc_v2f64
                       (v2i64 (scalar_to_vector (loadi64 addr:$src))))),
          (MOVDDUPrm addr:$src)>;
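
// MOVDDUP loads or reads a single f64 and duplicates it into both elements
// of the result; the bitcast variants above describe the same load reached
// through the different types the DAG may canonicalize it to.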

// Shuffle with UNPCKLPS
def : Pat<(v4f32 (X86Unpcklps VR128:$src1, (memopv4f32 addr:$src2))),
          (VUNPCKLPSrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
def : Pat<(v4f32 (X86Unpcklps VR128:$src1, (memopv4f32 addr:$src2))),
          (UNPCKLPSrm VR128:$src1, addr:$src2)>;

def : Pat<(v4f32 (X86Unpcklps VR128:$src1, VR128:$src2)),
          (VUNPCKLPSrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>;
def : Pat<(v4f32 (X86Unpcklps VR128:$src1, VR128:$src2)),
          (UNPCKLPSrr VR128:$src1, VR128:$src2)>;

// Shuffle with UNPCKHPS
def : Pat<(v4f32 (X86Unpckhps VR128:$src1, (memopv4f32 addr:$src2))),
          (VUNPCKHPSrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
def : Pat<(v4f32 (X86Unpckhps VR128:$src1, (memopv4f32 addr:$src2))),
          (UNPCKHPSrm VR128:$src1, addr:$src2)>;

def : Pat<(v4f32 (X86Unpckhps VR128:$src1, VR128:$src2)),
          (VUNPCKHPSrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>;
def : Pat<(v4f32 (X86Unpckhps VR128:$src1, VR128:$src2)),
          (UNPCKHPSrr VR128:$src1, VR128:$src2)>;

// Shuffle with UNPCKLPD
def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, (memopv2f64 addr:$src2))),
          (VUNPCKLPDrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, (memopv2f64 addr:$src2))),
          (UNPCKLPDrm VR128:$src1, addr:$src2)>;

def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, VR128:$src2)),
          (VUNPCKLPDrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>;
def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, VR128:$src2)),
          (UNPCKLPDrr VR128:$src1, VR128:$src2)>;

// Shuffle with UNPCKHPD
def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, (memopv2f64 addr:$src2))),
          (VUNPCKHPDrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, (memopv2f64 addr:$src2))),
          (UNPCKHPDrm VR128:$src1, addr:$src2)>;

def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, VR128:$src2)),
          (VUNPCKHPDrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>;
def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, VR128:$src2)),
          (UNPCKHPDrr VR128:$src1, VR128:$src2)>;

// Shuffle with PUNPCKLBW
def : Pat<(v16i8 (X86Punpcklbw VR128:$src1,
                               (bc_v16i8 (memopv2i64 addr:$src2)))),
          (PUNPCKLBWrm VR128:$src1, addr:$src2)>;
def : Pat<(v16i8 (X86Punpcklbw VR128:$src1, VR128:$src2)),
          (PUNPCKLBWrr VR128:$src1, VR128:$src2)>;

// Shuffle with PUNPCKLWD
def : Pat<(v8i16 (X86Punpcklwd VR128:$src1,
                               (bc_v8i16 (memopv2i64 addr:$src2)))),
          (PUNPCKLWDrm VR128:$src1, addr:$src2)>;
def : Pat<(v8i16 (X86Punpcklwd VR128:$src1, VR128:$src2)),
          (PUNPCKLWDrr VR128:$src1, VR128:$src2)>;

// Shuffle with PUNPCKLDQ
def : Pat<(v4i32 (X86Punpckldq VR128:$src1,
                               (bc_v4i32 (memopv2i64 addr:$src2)))),
          (PUNPCKLDQrm VR128:$src1, addr:$src2)>;
def : Pat<(v4i32 (X86Punpckldq VR128:$src1, VR128:$src2)),
          (PUNPCKLDQrr VR128:$src1, VR128:$src2)>;

// Shuffle with PUNPCKLQDQ
def : Pat<(v2i64 (X86Punpcklqdq VR128:$src1, (memopv2i64 addr:$src2))),
          (PUNPCKLQDQrm VR128:$src1, addr:$src2)>;
def : Pat<(v2i64 (X86Punpcklqdq VR128:$src1, VR128:$src2)),
          (PUNPCKLQDQrr VR128:$src1, VR128:$src2)>;

// Shuffle with PUNPCKHBW
def : Pat<(v16i8 (X86Punpckhbw VR128:$src1,
                               (bc_v16i8 (memopv2i64 addr:$src2)))),
          (PUNPCKHBWrm VR128:$src1, addr:$src2)>;
def : Pat<(v16i8 (X86Punpckhbw VR128:$src1, VR128:$src2)),
          (PUNPCKHBWrr VR128:$src1, VR128:$src2)>;

// Shuffle with PUNPCKHWD
def : Pat<(v8i16 (X86Punpckhwd VR128:$src1,
                               (bc_v8i16 (memopv2i64 addr:$src2)))),
          (PUNPCKHWDrm VR128:$src1, addr:$src2)>;
def : Pat<(v8i16 (X86Punpckhwd VR128:$src1, VR128:$src2)),
          (PUNPCKHWDrr VR128:$src1, VR128:$src2)>;

// Shuffle with PUNPCKHDQ
def : Pat<(v4i32 (X86Punpckhdq VR128:$src1,
                               (bc_v4i32 (memopv2i64 addr:$src2)))),
          (PUNPCKHDQrm VR128:$src1, addr:$src2)>;
def : Pat<(v4i32 (X86Punpckhdq VR128:$src1, VR128:$src2)),
          (PUNPCKHDQrr VR128:$src1, VR128:$src2)>;

// Shuffle with PUNPCKHQDQ
def : Pat<(v2i64 (X86Punpckhqdq VR128:$src1, (memopv2i64 addr:$src2))),
          (PUNPCKHQDQrm VR128:$src1, addr:$src2)>;
def : Pat<(v2i64 (X86Punpckhqdq VR128:$src1, VR128:$src2)),
          (PUNPCKHQDQrr VR128:$src1, VR128:$src2)>;

// Shuffle with MOVLHPS
def : Pat<(X86Movlhps VR128:$src1,
                    (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
          (MOVHPSrm VR128:$src1, addr:$src2)>;
def : Pat<(X86Movlhps VR128:$src1,
                    (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
          (MOVHPSrm VR128:$src1, addr:$src2)>;
def : Pat<(v4f32 (X86Movlhps VR128:$src1, VR128:$src2)),
          (MOVLHPSrr VR128:$src1, VR128:$src2)>;
def : Pat<(v4i32 (X86Movlhps VR128:$src1, VR128:$src2)),
          (MOVLHPSrr VR128:$src1, VR128:$src2)>;
def : Pat<(v2i64 (X86Movlhps VR128:$src1, VR128:$src2)),
          (MOVLHPSrr (v2i64 VR128:$src1), VR128:$src2)>;
// FIXME: Instead of X86Movddup, there should be an X86Movlhps here. The
// problem is during lowering, where it's not possible to recognize the load
// fold because it has two uses through a bitcast. One use disappears at isel
// time and the fold opportunity reappears.
def : Pat<(v2i64 (X86Movddup VR128:$src)),
          (MOVLHPSrr VR128:$src, VR128:$src)>;
def : Pat<(v4f32 (X86Movddup VR128:$src)),
          (MOVLHPSrr VR128:$src, VR128:$src)>;
def : Pat<(v2f64 (X86Movddup VR128:$src)),
          (UNPCKLPDrr VR128:$src, VR128:$src)>;

// Shuffle with MOVLHPD
def : Pat<(v2f64 (X86Movlhpd VR128:$src1,
                    (scalar_to_vector (loadf64 addr:$src2)))),
          (MOVHPDrm VR128:$src1, addr:$src2)>;
// FIXME: Instead of X86Unpcklpd, there should be an X86Movlhpd here. The
// problem is during lowering, where it's not possible to recognize the load
// fold because it has two uses through a bitcast. One use disappears at isel
// time and the fold opportunity reappears.
def : Pat<(v2f64 (X86Unpcklpd VR128:$src1,
                    (scalar_to_vector (loadf64 addr:$src2)))),
          (MOVHPDrm VR128:$src1, addr:$src2)>;

// Shuffle with MOVSS
def : Pat<(v4f32 (X86Movss VR128:$src1, (scalar_to_vector FR32:$src2))),
          (MOVSSrr VR128:$src1, FR32:$src2)>;
def : Pat<(v4i32 (X86Movss VR128:$src1, VR128:$src2)),
          (MOVSSrr (v4i32 VR128:$src1),
                   (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_ss))>;
def : Pat<(v4f32 (X86Movss VR128:$src1, VR128:$src2)),
          (MOVSSrr (v4f32 VR128:$src1),
                   (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_ss))>;
// FIXME: Instead of an X86Movss there should be an X86Movlps here. The
// problem is during lowering, where it's not possible to recognize the load
// fold because it has two uses through a bitcast. One use disappears at isel
// time and the fold opportunity reappears.
def : Pat<(X86Movss VR128:$src1,
                    (bc_v4i32 (v2i64 (load addr:$src2)))),
          (MOVLPSrm VR128:$src1, addr:$src2)>;

// Shuffle with MOVSD
def : Pat<(v2f64 (X86Movsd VR128:$src1, (scalar_to_vector FR64:$src2))),
          (MOVSDrr VR128:$src1, FR64:$src2)>;
def : Pat<(v2i64 (X86Movsd VR128:$src1, VR128:$src2)),
          (MOVSDrr (v2i64 VR128:$src1),
                   (EXTRACT_SUBREG (v2i64 VR128:$src2), sub_sd))>;
def : Pat<(v2f64 (X86Movsd VR128:$src1, VR128:$src2)),
          (MOVSDrr (v2f64 VR128:$src1),
                   (EXTRACT_SUBREG (v2f64 VR128:$src2), sub_sd))>;
def : Pat<(v4f32 (X86Movsd VR128:$src1, VR128:$src2)),
          (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_sd))>;
def : Pat<(v4i32 (X86Movsd VR128:$src1, VR128:$src2)),
          (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_sd))>;
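
// MOVSS and MOVSD keep the upper elements of $src1 and take only element 0
// from $src2, which is why the register-to-register forms above first pull
// the scalar subregister (sub_ss/sub_sd) out of the second vector.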

// Shuffle with MOVSHDUP
def : Pat<(v4i32 (X86Movshdup VR128:$src)),
          (MOVSHDUPrr VR128:$src)>;
def : Pat<(X86Movshdup (bc_v4i32 (memopv2i64 addr:$src))),
          (MOVSHDUPrm addr:$src)>;

def : Pat<(v4f32 (X86Movshdup VR128:$src)),
          (MOVSHDUPrr VR128:$src)>;
def : Pat<(X86Movshdup (memopv4f32 addr:$src)),
          (MOVSHDUPrm addr:$src)>;

// Shuffle with MOVSLDUP
def : Pat<(v4i32 (X86Movsldup VR128:$src)),
          (MOVSLDUPrr VR128:$src)>;
def : Pat<(X86Movsldup (bc_v4i32 (memopv2i64 addr:$src))),
          (MOVSLDUPrm addr:$src)>;

def : Pat<(v4f32 (X86Movsldup VR128:$src)),
          (MOVSLDUPrr VR128:$src)>;
def : Pat<(X86Movsldup (memopv4f32 addr:$src)),
          (MOVSLDUPrm addr:$src)>;
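
// MOVSHDUP duplicates the odd elements of the source (giving s1,s1,s3,s3)
// and MOVSLDUP the even ones (giving s0,s0,s2,s2); the pattern pairs above
// only differ in the type of the operand being matched.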

// Shuffle with PSHUFHW
def : Pat<(v8i16 (X86PShufhw VR128:$src, (i8 imm:$imm))),
          (PSHUFHWri VR128:$src, imm:$imm)>;
def : Pat<(v8i16 (X86PShufhw (bc_v8i16 (memopv2i64 addr:$src)), (i8 imm:$imm))),
          (PSHUFHWmi addr:$src, imm:$imm)>;

// Shuffle with PSHUFLW
def : Pat<(v8i16 (X86PShuflw VR128:$src, (i8 imm:$imm))),
          (PSHUFLWri VR128:$src, imm:$imm)>;
def : Pat<(v8i16 (X86PShuflw (bc_v8i16 (memopv2i64 addr:$src)), (i8 imm:$imm))),
          (PSHUFLWmi addr:$src, imm:$imm)>;

// Shuffle with PALIGNR
def : Pat<(v4i32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
def : Pat<(v4f32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
def : Pat<(v8i16 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
def : Pat<(v16i8 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
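
// PALIGNR concatenates its two register operands and extracts a 16-byte
// window starting imm bytes into the pair, so the operands here are
// intentionally swapped relative to the X86PAlign node to get the intended
// byte order.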

// Shuffle with MOVLPS
def : Pat<(v4f32 (X86Movlps VR128:$src1, (load addr:$src2))),
          (MOVLPSrm VR128:$src1, addr:$src2)>;
def : Pat<(v4i32 (X86Movlps VR128:$src1, (load addr:$src2))),
          (MOVLPSrm VR128:$src1, addr:$src2)>;
def : Pat<(X86Movlps VR128:$src1,
                    (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
          (MOVLPSrm VR128:$src1, addr:$src2)>;

// Shuffle with MOVLPD
def : Pat<(v2f64 (X86Movlpd VR128:$src1, (load addr:$src2))),
          (MOVLPDrm VR128:$src1, addr:$src2)>;
def : Pat<(v2i64 (X86Movlpd VR128:$src1, (load addr:$src2))),
          (MOVLPDrm VR128:$src1, addr:$src2)>;
def : Pat<(v2f64 (X86Movlpd VR128:$src1,
                    (scalar_to_vector (loadf64 addr:$src2)))),
          (MOVLPDrm VR128:$src1, addr:$src2)>;

// Extra patterns to match stores with MOVHPS/PD and MOVLPS/PD
def : Pat<(store (f64 (vector_extract
          (v2f64 (X86Unpckhps VR128:$src, (undef))), (iPTR 0))), addr:$dst),
          (MOVHPSmr addr:$dst, VR128:$src)>;
def : Pat<(store (f64 (vector_extract
          (v2f64 (X86Unpckhpd VR128:$src, (undef))), (iPTR 0))), addr:$dst),
          (MOVHPDmr addr:$dst, VR128:$src)>;

def : Pat<(store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)), addr:$src1),
          (MOVLPSmr addr:$src1, VR128:$src2)>;
def : Pat<(store (v4i32 (X86Movlps
                 (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2)), addr:$src1),
          (MOVLPSmr addr:$src1, VR128:$src2)>;

def : Pat<(store (v2f64 (X86Movlpd (load addr:$src1), VR128:$src2)), addr:$src1),
          (MOVLPDmr addr:$src1, VR128:$src2)>;
def : Pat<(store (v2i64 (X86Movlpd (load addr:$src1), VR128:$src2)), addr:$src1),
          (MOVLPDmr addr:$src1, VR128:$src2)>;