//===-- X86InstrSSE.td - SSE Instruction Set ---------------*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the X86 SSE instruction set, defining the instructions,
// and properties of the instructions which are needed for code generation,
// machine code emission, and analysis.
//
//===----------------------------------------------------------------------===//

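// Each OpndItins value bundles the itinerary classes for the register-register
// (rr) and register-memory (rm) forms of an operation, together with the
// scheduler write type used by the newer per-CPU scheduling models. The
// WriteFAdd default below is overridden by the "let Sched = ..." blocks that
// follow.
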
class OpndItins<InstrItinClass arg_rr, InstrItinClass arg_rm> {
  InstrItinClass rr = arg_rr;
  InstrItinClass rm = arg_rm;
  // InstrSchedModel info.
  X86FoldableSchedWrite Sched = WriteFAdd;
}

class SizeItins<OpndItins arg_s, OpndItins arg_d> {
  OpndItins s = arg_s;
  OpndItins d = arg_d;
}

class ShiftOpndItins<InstrItinClass arg_rr, InstrItinClass arg_rm,
                     InstrItinClass arg_ri> {
  InstrItinClass rr = arg_rr;
  InstrItinClass rm = arg_rm;
  InstrItinClass ri = arg_ri;
}

let Sched = WriteFAdd in {
def SSE_ALU_F32S : OpndItins<
  IIC_SSE_ALU_F32S_RR, IIC_SSE_ALU_F32S_RM
>;

def SSE_ALU_F64S : OpndItins<
  IIC_SSE_ALU_F64S_RR, IIC_SSE_ALU_F64S_RM
>;
}

def SSE_ALU_ITINS_S : SizeItins<
  SSE_ALU_F32S, SSE_ALU_F64S
>;

let Sched = WriteFMul in {
def SSE_MUL_F32S : OpndItins<
  IIC_SSE_MUL_F32S_RR, IIC_SSE_MUL_F32S_RM
>;

def SSE_MUL_F64S : OpndItins<
  IIC_SSE_MUL_F64S_RR, IIC_SSE_MUL_F64S_RM
>;
}

def SSE_MUL_ITINS_S : SizeItins<
  SSE_MUL_F32S, SSE_MUL_F64S
>;

let Sched = WriteFDiv in {
def SSE_DIV_F32S : OpndItins<
  IIC_SSE_DIV_F32S_RR, IIC_SSE_DIV_F32S_RM
>;

def SSE_DIV_F64S : OpndItins<
  IIC_SSE_DIV_F64S_RR, IIC_SSE_DIV_F64S_RM
>;
}

def SSE_DIV_ITINS_S : SizeItins<
  SSE_DIV_F32S, SSE_DIV_F64S
>;

let Sched = WriteFAdd in {
def SSE_ALU_F32P : OpndItins<
  IIC_SSE_ALU_F32P_RR, IIC_SSE_ALU_F32P_RM
>;

def SSE_ALU_F64P : OpndItins<
  IIC_SSE_ALU_F64P_RR, IIC_SSE_ALU_F64P_RM
>;
}

def SSE_ALU_ITINS_P : SizeItins<
  SSE_ALU_F32P, SSE_ALU_F64P
>;

let Sched = WriteFMul in {
def SSE_MUL_F32P : OpndItins<
  IIC_SSE_MUL_F32P_RR, IIC_SSE_MUL_F32P_RM
>;

def SSE_MUL_F64P : OpndItins<
  IIC_SSE_MUL_F64P_RR, IIC_SSE_MUL_F64P_RM
>;
}

def SSE_MUL_ITINS_P : SizeItins<
  SSE_MUL_F32P, SSE_MUL_F64P
>;

let Sched = WriteFDiv in {
def SSE_DIV_F32P : OpndItins<
  IIC_SSE_DIV_F32P_RR, IIC_SSE_DIV_F32P_RM
>;

def SSE_DIV_F64P : OpndItins<
  IIC_SSE_DIV_F64P_RR, IIC_SSE_DIV_F64P_RM
>;
}

def SSE_DIV_ITINS_P : SizeItins<
  SSE_DIV_F32P, SSE_DIV_F64P
>;

let Sched = WriteVecLogic in
def SSE_VEC_BIT_ITINS_P : OpndItins<
  IIC_SSE_BIT_P_RR, IIC_SSE_BIT_P_RM
>;

def SSE_BIT_ITINS_P : OpndItins<
  IIC_SSE_BIT_P_RR, IIC_SSE_BIT_P_RM
>;

let Sched = WriteVecALU in {
def SSE_INTALU_ITINS_P : OpndItins<
  IIC_SSE_INTALU_P_RR, IIC_SSE_INTALU_P_RM
>;

def SSE_INTALUQ_ITINS_P : OpndItins<
  IIC_SSE_INTALUQ_P_RR, IIC_SSE_INTALUQ_P_RM
>;
}

let Sched = WriteVecIMul in
def SSE_INTMUL_ITINS_P : OpndItins<
  IIC_SSE_INTMUL_P_RR, IIC_SSE_INTMUL_P_RM
>;

def SSE_INTSHIFT_ITINS_P : ShiftOpndItins<
  IIC_SSE_INTSH_P_RR, IIC_SSE_INTSH_P_RM, IIC_SSE_INTSH_P_RI
>;

def SSE_MOVA_ITINS : OpndItins<
  IIC_SSE_MOVA_P_RR, IIC_SSE_MOVA_P_RM
>;

def SSE_MOVU_ITINS : OpndItins<
  IIC_SSE_MOVU_P_RR, IIC_SSE_MOVU_P_RM
>;

def SSE_DPPD_ITINS : OpndItins<
  IIC_SSE_DPPD_RR, IIC_SSE_DPPD_RM
>;

def SSE_DPPS_ITINS : OpndItins<
  IIC_SSE_DPPS_RR, IIC_SSE_DPPS_RM
>;

def DEFAULT_ITINS : OpndItins<
  IIC_ALU_NONMEM, IIC_ALU_MEM
>;

def SSE_EXTRACT_ITINS : OpndItins<
  IIC_SSE_EXTRACTPS_RR, IIC_SSE_EXTRACTPS_RM
>;

def SSE_INSERT_ITINS : OpndItins<
  IIC_SSE_INSERTPS_RR, IIC_SSE_INSERTPS_RM
>;

let Sched = WriteMPSAD in
def SSE_MPSADBW_ITINS : OpndItins<
  IIC_SSE_MPSADBW_RR, IIC_SSE_MPSADBW_RM
>;

let Sched = WriteVecIMul in
def SSE_PMULLD_ITINS : OpndItins<
  IIC_SSE_PMULLD_RR, IIC_SSE_PMULLD_RM
>;

// Definitions for backward compatibility.
// The instructions mapped onto these definitions use a different itinerary
// than the actual scheduling model.
let Sched = WriteShuffle in
def DEFAULT_ITINS_SHUFFLESCHED : OpndItins<
  IIC_ALU_NONMEM, IIC_ALU_MEM
>;

let Sched = WriteVecIMul in
def DEFAULT_ITINS_VECIMULSCHED : OpndItins<
  IIC_ALU_NONMEM, IIC_ALU_MEM
>;

let Sched = WriteShuffle in
def SSE_INTALU_ITINS_SHUFF_P : OpndItins<
  IIC_SSE_INTALU_P_RR, IIC_SSE_INTALU_P_RM
>;

let Sched = WriteMPSAD in
def DEFAULT_ITINS_MPSADSCHED : OpndItins<
  IIC_ALU_NONMEM, IIC_ALU_MEM
>;

let Sched = WriteFBlend in
def DEFAULT_ITINS_FBLENDSCHED : OpndItins<
  IIC_ALU_NONMEM, IIC_ALU_MEM
>;

let Sched = WriteBlend in
def DEFAULT_ITINS_BLENDSCHED : OpndItins<
  IIC_ALU_NONMEM, IIC_ALU_MEM
>;

let Sched = WriteVarBlend in
def DEFAULT_ITINS_VARBLENDSCHED : OpndItins<
  IIC_ALU_NONMEM, IIC_ALU_MEM
>;

let Sched = WriteFBlend in
def SSE_INTALU_ITINS_FBLEND_P : OpndItins<
  IIC_SSE_INTALU_P_RR, IIC_SSE_INTALU_P_RM
>;

let Sched = WriteBlend in
def SSE_INTALU_ITINS_BLEND_P : OpndItins<
  IIC_SSE_INTALU_P_RR, IIC_SSE_INTALU_P_RM
>;

//===----------------------------------------------------------------------===//
// SSE 1 & 2 Instructions Classes
//===----------------------------------------------------------------------===//

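// Note on the Is2Addr parameter used throughout these multiclasses: when set
// (the SSE encoding), $src1 is tied to $dst and the asm string mentions only
// one source; when clear (the VEX/AVX encoding), a separate destination is
// encoded and the asm string takes both sources. The !if(Is2Addr, ...) below
// selects between the two asm strings accordingly.
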
/// sse12_fp_scalar - SSE 1 & 2 scalar instructions class
multiclass sse12_fp_scalar<bits<8> opc, string OpcodeStr, SDNode OpNode,
                           RegisterClass RC, X86MemOperand x86memop,
                           Domain d, OpndItins itins, bit Is2Addr = 1> {
  let isCommutable = 1 in {
    def rr : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (OpNode RC:$src1, RC:$src2))], itins.rr, d>,
       Sched<[itins.Sched]>;
  }
  def rm : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
     !if(Is2Addr,
         !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
         !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
     [(set RC:$dst, (OpNode RC:$src1, (load addr:$src2)))], itins.rm, d>,
     Sched<[itins.Sched.Folded, ReadAfterLd]>;
}

/// sse12_fp_scalar_int - SSE 1 & 2 scalar instructions intrinsics class
multiclass sse12_fp_scalar_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
                               string asm, string SSEVer, string FPSizeStr,
                               Operand memopr, ComplexPattern mem_cpat,
                               Domain d, OpndItins itins, bit Is2Addr = 1> {
  let isCodeGenOnly = 1 in {
    def rr_Int : SI_Int<opc, MRMSrcReg, (outs RC:$dst),
                        (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (!cast<Intrinsic>(
                 !strconcat("int_x86_sse", SSEVer, "_", OpcodeStr, FPSizeStr))
             RC:$src1, RC:$src2))], itins.rr, d>,
       Sched<[itins.Sched]>;
    def rm_Int : SI_Int<opc, MRMSrcMem, (outs RC:$dst),
                        (ins RC:$src1, memopr:$src2),
       !if(Is2Addr,
           !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (!cast<Intrinsic>(!strconcat("int_x86_sse",
                                          SSEVer, "_", OpcodeStr, FPSizeStr))
             RC:$src1, mem_cpat:$src2))], itins.rm, d>,
       Sched<[itins.Sched.Folded, ReadAfterLd]>;
  }
}

/// sse12_fp_packed - SSE 1 & 2 packed instructions class
multiclass sse12_fp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
                           RegisterClass RC, ValueType vt,
                           X86MemOperand x86memop, PatFrag mem_frag,
                           Domain d, OpndItins itins, bit Is2Addr = 1> {
  let isCommutable = 1 in
    def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], itins.rr, d>,
       Sched<[itins.Sched]>;
  def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
     !if(Is2Addr,
         !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
         !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
     [(set RC:$dst, (OpNode RC:$src1, (mem_frag addr:$src2)))],
        itins.rm, d>,
     Sched<[itins.Sched.Folded, ReadAfterLd]>;
}

/// sse12_fp_packed_logical_rm - SSE 1 & 2 packed instructions class
multiclass sse12_fp_packed_logical_rm<bits<8> opc, RegisterClass RC, Domain d,
                                      string OpcodeStr, X86MemOperand x86memop,
                                      list<dag> pat_rr, list<dag> pat_rm,
                                      bit Is2Addr = 1> {
  let isCommutable = 1, hasSideEffects = 0 in
    def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       pat_rr, NoItinerary, d>,
       Sched<[WriteVecLogic]>;
  def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
     !if(Is2Addr,
         !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
         !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
     pat_rm, NoItinerary, d>,
     Sched<[WriteVecLogicLd, ReadAfterLd]>;
}

//===----------------------------------------------------------------------===//
// Non-instruction patterns
//===----------------------------------------------------------------------===//

// A vector extract of the first f32/f64 position is a subregister copy
def : Pat<(f32 (extractelt (v4f32 VR128:$src), (iPTR 0))),
          (COPY_TO_REGCLASS (v4f32 VR128:$src), FR32)>;
def : Pat<(f64 (extractelt (v2f64 VR128:$src), (iPTR 0))),
          (COPY_TO_REGCLASS (v2f64 VR128:$src), FR64)>;

// A 128-bit subvector extract from the first 256-bit vector position
// is a subregister copy that needs no instruction.
def : Pat<(v4i32 (extract_subvector (v8i32 VR256:$src), (iPTR 0))),
          (v4i32 (EXTRACT_SUBREG (v8i32 VR256:$src), sub_xmm))>;
def : Pat<(v4f32 (extract_subvector (v8f32 VR256:$src), (iPTR 0))),
          (v4f32 (EXTRACT_SUBREG (v8f32 VR256:$src), sub_xmm))>;

def : Pat<(v2i64 (extract_subvector (v4i64 VR256:$src), (iPTR 0))),
          (v2i64 (EXTRACT_SUBREG (v4i64 VR256:$src), sub_xmm))>;
def : Pat<(v2f64 (extract_subvector (v4f64 VR256:$src), (iPTR 0))),
          (v2f64 (EXTRACT_SUBREG (v4f64 VR256:$src), sub_xmm))>;

def : Pat<(v8i16 (extract_subvector (v16i16 VR256:$src), (iPTR 0))),
          (v8i16 (EXTRACT_SUBREG (v16i16 VR256:$src), sub_xmm))>;
def : Pat<(v16i8 (extract_subvector (v32i8 VR256:$src), (iPTR 0))),
          (v16i8 (EXTRACT_SUBREG (v32i8 VR256:$src), sub_xmm))>;

// A 128-bit subvector insert to the first 256-bit vector position
// is a subregister copy that needs no instruction.
let AddedComplexity = 25 in { // to give priority over vinsertf128rm
def : Pat<(insert_subvector undef, (v2i64 VR128:$src), (iPTR 0)),
          (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
def : Pat<(insert_subvector undef, (v2f64 VR128:$src), (iPTR 0)),
          (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
def : Pat<(insert_subvector undef, (v4i32 VR128:$src), (iPTR 0)),
          (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
def : Pat<(insert_subvector undef, (v4f32 VR128:$src), (iPTR 0)),
          (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
def : Pat<(insert_subvector undef, (v8i16 VR128:$src), (iPTR 0)),
          (INSERT_SUBREG (v16i16 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
def : Pat<(insert_subvector undef, (v16i8 VR128:$src), (iPTR 0)),
          (INSERT_SUBREG (v32i8 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
}

// Implicitly promote a 32-bit scalar to a vector.
def : Pat<(v4f32 (scalar_to_vector FR32:$src)),
          (COPY_TO_REGCLASS FR32:$src, VR128)>;
def : Pat<(v8f32 (scalar_to_vector FR32:$src)),
          (COPY_TO_REGCLASS FR32:$src, VR128)>;
// Implicitly promote a 64-bit scalar to a vector.
def : Pat<(v2f64 (scalar_to_vector FR64:$src)),
          (COPY_TO_REGCLASS FR64:$src, VR128)>;
def : Pat<(v4f64 (scalar_to_vector FR64:$src)),
          (COPY_TO_REGCLASS FR64:$src, VR128)>;

// Bitcasts between 128-bit vector types. Return the original type since
// no instruction is needed for the conversion.
let Predicates = [HasSSE2] in {
  def : Pat<(v2i64 (bitconvert (v4i32 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v2i64 (bitconvert (v8i16 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v2i64 (bitconvert (v16i8 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v2i64 (bitconvert (v2f64 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v2i64 (bitconvert (v4f32 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v2i64 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v8i16 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v16i8 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v2f64 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v4f32 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v2i64 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v4i32 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v16i8 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v2f64 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v4f32 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v2i64 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v4i32 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v8i16 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v2f64 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v4f32 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v2i64 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v4i32 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v8i16 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v16i8 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v2f64 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v2i64 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v4i32 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v8i16 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v16i8 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v4f32 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(f128  (bitconvert (i128 FR128:$src))), (f128 FR128:$src)>;
  def : Pat<(i128  (bitconvert (f128 FR128:$src))), (i128 FR128:$src)>;
}

// Bitcasts between 256-bit vector types. Return the original type since
// no instruction is needed for the conversion.
let Predicates = [HasAVX] in {
  def : Pat<(v4f64  (bitconvert (v8f32 VR256:$src))),  (v4f64 VR256:$src)>;
  def : Pat<(v4f64  (bitconvert (v8i32 VR256:$src))),  (v4f64 VR256:$src)>;
  def : Pat<(v4f64  (bitconvert (v4i64 VR256:$src))),  (v4f64 VR256:$src)>;
  def : Pat<(v4f64  (bitconvert (v16i16 VR256:$src))), (v4f64 VR256:$src)>;
  def : Pat<(v4f64  (bitconvert (v32i8 VR256:$src))),  (v4f64 VR256:$src)>;
  def : Pat<(v8f32  (bitconvert (v8i32 VR256:$src))),  (v8f32 VR256:$src)>;
  def : Pat<(v8f32  (bitconvert (v4i64 VR256:$src))),  (v8f32 VR256:$src)>;
  def : Pat<(v8f32  (bitconvert (v4f64 VR256:$src))),  (v8f32 VR256:$src)>;
  def : Pat<(v8f32  (bitconvert (v32i8 VR256:$src))),  (v8f32 VR256:$src)>;
  def : Pat<(v8f32  (bitconvert (v16i16 VR256:$src))), (v8f32 VR256:$src)>;
  def : Pat<(v4i64  (bitconvert (v8f32 VR256:$src))),  (v4i64 VR256:$src)>;
  def : Pat<(v4i64  (bitconvert (v8i32 VR256:$src))),  (v4i64 VR256:$src)>;
  def : Pat<(v4i64  (bitconvert (v4f64 VR256:$src))),  (v4i64 VR256:$src)>;
  def : Pat<(v4i64  (bitconvert (v32i8 VR256:$src))),  (v4i64 VR256:$src)>;
  def : Pat<(v4i64  (bitconvert (v16i16 VR256:$src))), (v4i64 VR256:$src)>;
  def : Pat<(v32i8  (bitconvert (v4f64 VR256:$src))),  (v32i8 VR256:$src)>;
  def : Pat<(v32i8  (bitconvert (v4i64 VR256:$src))),  (v32i8 VR256:$src)>;
  def : Pat<(v32i8  (bitconvert (v8f32 VR256:$src))),  (v32i8 VR256:$src)>;
  def : Pat<(v32i8  (bitconvert (v8i32 VR256:$src))),  (v32i8 VR256:$src)>;
  def : Pat<(v32i8  (bitconvert (v16i16 VR256:$src))), (v32i8 VR256:$src)>;
  def : Pat<(v8i32  (bitconvert (v32i8 VR256:$src))),  (v8i32 VR256:$src)>;
  def : Pat<(v8i32  (bitconvert (v16i16 VR256:$src))), (v8i32 VR256:$src)>;
  def : Pat<(v8i32  (bitconvert (v8f32 VR256:$src))),  (v8i32 VR256:$src)>;
  def : Pat<(v8i32  (bitconvert (v4i64 VR256:$src))),  (v8i32 VR256:$src)>;
  def : Pat<(v8i32  (bitconvert (v4f64 VR256:$src))),  (v8i32 VR256:$src)>;
  def : Pat<(v16i16 (bitconvert (v8f32 VR256:$src))),  (v16i16 VR256:$src)>;
  def : Pat<(v16i16 (bitconvert (v8i32 VR256:$src))),  (v16i16 VR256:$src)>;
  def : Pat<(v16i16 (bitconvert (v4i64 VR256:$src))),  (v16i16 VR256:$src)>;
  def : Pat<(v16i16 (bitconvert (v4f64 VR256:$src))),  (v16i16 VR256:$src)>;
  def : Pat<(v16i16 (bitconvert (v32i8 VR256:$src))),  (v16i16 VR256:$src)>;
}

// Alias instructions that map fld0 to xorps for sse or vxorps for avx.
// This is expanded by ExpandPostRAPseudos.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
    isPseudo = 1, SchedRW = [WriteZero] in {
  def FsFLD0SS : I<0, Pseudo, (outs FR32:$dst), (ins), "",
                   [(set FR32:$dst, fp32imm0)]>, Requires<[HasSSE1]>;
  def FsFLD0SD : I<0, Pseudo, (outs FR64:$dst), (ins), "",
                   [(set FR64:$dst, fpimm0)]>, Requires<[HasSSE2]>;
}

//===----------------------------------------------------------------------===//
// AVX & SSE - Zero/One Vectors
//===----------------------------------------------------------------------===//

// Alias instruction that maps zero vector to pxor / xorp* for sse.
// This is expanded by ExpandPostRAPseudos to an xorps / vxorps, and then
// swizzled by ExecutionDepsFix to pxor.
// We set canFoldAsLoad because this can be converted to a constant-pool
// load of an all-zeros value if folding it would be beneficial.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
    isPseudo = 1, SchedRW = [WriteZero] in {
def V_SET0 : I<0, Pseudo, (outs VR128:$dst), (ins), "",
               [(set VR128:$dst, (v4f32 immAllZerosV))]>;
}

def : Pat<(v2f64 immAllZerosV), (V_SET0)>;
def : Pat<(v4i32 immAllZerosV), (V_SET0)>;
def : Pat<(v2i64 immAllZerosV), (V_SET0)>;
def : Pat<(v8i16 immAllZerosV), (V_SET0)>;
def : Pat<(v16i8 immAllZerosV), (V_SET0)>;

// The same as done above but for AVX. The 256-bit AVX1 ISA doesn't support PI,
// and doesn't need it because on Sandy Bridge the register is set to zero
// at the rename stage without using any execution unit, so SET0PSY
// and SET0PDY can be used for vector int instructions without penalty.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
    isPseudo = 1, Predicates = [HasAVX], SchedRW = [WriteZero] in {
def AVX_SET0 : I<0, Pseudo, (outs VR256:$dst), (ins), "",
                 [(set VR256:$dst, (v8f32 immAllZerosV))]>;
}

let Predicates = [HasAVX] in
  def : Pat<(v4f64 immAllZerosV), (AVX_SET0)>;

let Predicates = [HasAVX2] in {
  def : Pat<(v4i64 immAllZerosV), (AVX_SET0)>;
  def : Pat<(v8i32 immAllZerosV), (AVX_SET0)>;
  def : Pat<(v16i16 immAllZerosV), (AVX_SET0)>;
  def : Pat<(v32i8 immAllZerosV), (AVX_SET0)>;
}

// AVX1 has no support for 256-bit integer instructions, but since the 128-bit
// VPXOR instruction writes zero to its upper part, it's safe to build zeros.
let Predicates = [HasAVX1Only] in {
def : Pat<(v32i8 immAllZerosV), (SUBREG_TO_REG (i8 0), (V_SET0), sub_xmm)>;
def : Pat<(bc_v32i8 (v8f32 immAllZerosV)),
          (SUBREG_TO_REG (i8 0), (V_SET0), sub_xmm)>;

def : Pat<(v16i16 immAllZerosV), (SUBREG_TO_REG (i16 0), (V_SET0), sub_xmm)>;
def : Pat<(bc_v16i16 (v8f32 immAllZerosV)),
          (SUBREG_TO_REG (i16 0), (V_SET0), sub_xmm)>;

def : Pat<(v8i32 immAllZerosV), (SUBREG_TO_REG (i32 0), (V_SET0), sub_xmm)>;
def : Pat<(bc_v8i32 (v8f32 immAllZerosV)),
          (SUBREG_TO_REG (i32 0), (V_SET0), sub_xmm)>;

def : Pat<(v4i64 immAllZerosV), (SUBREG_TO_REG (i64 0), (V_SET0), sub_xmm)>;
def : Pat<(bc_v4i64 (v8f32 immAllZerosV)),
          (SUBREG_TO_REG (i64 0), (V_SET0), sub_xmm)>;
}

// We set canFoldAsLoad because this can be converted to a constant-pool
// load of an all-ones value if folding it would be beneficial.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
    isPseudo = 1, SchedRW = [WriteZero] in {
  def V_SETALLONES : I<0, Pseudo, (outs VR128:$dst), (ins), "",
                       [(set VR128:$dst, (v4i32 immAllOnesV))]>;
  let Predicates = [HasAVX2] in
  def AVX2_SETALLONES : I<0, Pseudo, (outs VR256:$dst), (ins), "",
                          [(set VR256:$dst, (v8i32 immAllOnesV))]>;
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move FP Scalar Instructions
//
// Move Instructions. Register-to-register movss/movsd is not used for FR32/64
// register copies because it's a partial register update; Register-to-register
// movss/movsd is not modeled as an INSERT_SUBREG because INSERT_SUBREG requires
// that the insert be implementable in terms of a copy, and, as just mentioned,
// we don't use movss/movsd for copies.
//===----------------------------------------------------------------------===//

multiclass sse12_move_rr<RegisterClass RC, SDNode OpNode, ValueType vt,
                         X86MemOperand x86memop, string base_opc,
                         string asm_opr, Domain d = GenericDomain> {
  def rr : SI<0x10, MRMSrcReg, (outs VR128:$dst),
              (ins VR128:$src1, RC:$src2),
              !strconcat(base_opc, asm_opr),
              [(set VR128:$dst, (vt (OpNode VR128:$src1,
                                     (scalar_to_vector RC:$src2))))],
              IIC_SSE_MOV_S_RR, d>, Sched<[WriteFShuffle]>;

  // For the disassembler
  let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in
  def rr_REV : SI<0x11, MRMDestReg, (outs VR128:$dst),
                  (ins VR128:$src1, RC:$src2),
                  !strconcat(base_opc, asm_opr),
                  [], IIC_SSE_MOV_S_RR>, Sched<[WriteFShuffle]>;
}

multiclass sse12_move<RegisterClass RC, SDNode OpNode, ValueType vt,
                      X86MemOperand x86memop, string OpcodeStr,
                      Domain d = GenericDomain> {
  // AVX
  defm V#NAME : sse12_move_rr<RC, OpNode, vt, x86memop, OpcodeStr,
                              "\t{$src2, $src1, $dst|$dst, $src1, $src2}", d>,
                              VEX_4V, VEX_LIG;

  def V#NAME#mr : SI<0x11, MRMDestMem, (outs), (ins x86memop:$dst, RC:$src),
                     !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                     [(store RC:$src, addr:$dst)], IIC_SSE_MOV_S_MR, d>,
                     VEX, VEX_LIG, Sched<[WriteStore]>;
  // SSE1 & 2
  let Constraints = "$src1 = $dst" in {
    defm NAME : sse12_move_rr<RC, OpNode, vt, x86memop, OpcodeStr,
                              "\t{$src2, $dst|$dst, $src2}", d>;
  }

  def NAME#mr : SI<0x11, MRMDestMem, (outs), (ins x86memop:$dst, RC:$src),
                   !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                   [(store RC:$src, addr:$dst)], IIC_SSE_MOV_S_MR, d>,
                   Sched<[WriteStore]>;
}

// Loading from memory automatically zeroing upper bits.
multiclass sse12_move_rm<RegisterClass RC, X86MemOperand x86memop,
                         PatFrag mem_pat, string OpcodeStr,
                         Domain d = GenericDomain> {
  def V#NAME#rm : SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
                     !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                     [(set RC:$dst, (mem_pat addr:$src))],
                     IIC_SSE_MOV_S_RM, d>, VEX, VEX_LIG, Sched<[WriteLoad]>;
  def NAME#rm : SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
                   !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                   [(set RC:$dst, (mem_pat addr:$src))],
                   IIC_SSE_MOV_S_RM, d>, Sched<[WriteLoad]>;
}

defm MOVSS : sse12_move<FR32, X86Movss, v4f32, f32mem, "movss",
                        SSEPackedSingle>, XS;
defm MOVSD : sse12_move<FR64, X86Movsd, v2f64, f64mem, "movsd",
                        SSEPackedDouble>, XD;

let canFoldAsLoad = 1, isReMaterializable = 1 in {
  defm MOVSS : sse12_move_rm<FR32, f32mem, loadf32, "movss",
                             SSEPackedSingle>, XS;

  let AddedComplexity = 20 in
    defm MOVSD : sse12_move_rm<FR64, f64mem, loadf64, "movsd",
                               SSEPackedDouble>, XD;
}

let Predicates = [UseAVX] in {
  let AddedComplexity = 20 in {
  // MOVSSrm zeros the high parts of the register; represent this
  // with SUBREG_TO_REG. The AVX versions also write: DST[255:128] <- 0
  def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
            (COPY_TO_REGCLASS (VMOVSSrm addr:$src), VR128)>;
  def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
            (COPY_TO_REGCLASS (VMOVSSrm addr:$src), VR128)>;
  def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
            (COPY_TO_REGCLASS (VMOVSSrm addr:$src), VR128)>;

  // MOVSDrm zeros the high parts of the register; represent this
  // with SUBREG_TO_REG. The AVX versions also write: DST[255:128] <- 0
  def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
            (COPY_TO_REGCLASS (VMOVSDrm addr:$src), VR128)>;
  def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
            (COPY_TO_REGCLASS (VMOVSDrm addr:$src), VR128)>;
  def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
            (COPY_TO_REGCLASS (VMOVSDrm addr:$src), VR128)>;
  def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
            (COPY_TO_REGCLASS (VMOVSDrm addr:$src), VR128)>;
  def : Pat<(v2f64 (X86vzload addr:$src)),
            (COPY_TO_REGCLASS (VMOVSDrm addr:$src), VR128)>;

  // Represent the same patterns above but in the form they appear for
  // 256-bit types
  def : Pat<(v8f32 (X86vzmovl (insert_subvector undef,
                   (v4f32 (scalar_to_vector (loadf32 addr:$src))), (iPTR 0)))),
            (SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_xmm)>;
  def : Pat<(v4f64 (X86vzmovl (insert_subvector undef,
                   (v2f64 (scalar_to_vector (loadf64 addr:$src))), (iPTR 0)))),
            (SUBREG_TO_REG (i32 0), (VMOVSDrm addr:$src), sub_xmm)>;
  }

  // Extract and store.
  def : Pat<(store (f32 (extractelt (v4f32 VR128:$src), (iPTR 0))),
                   addr:$dst),
            (VMOVSSmr addr:$dst, (COPY_TO_REGCLASS (v4f32 VR128:$src), FR32))>;
  def : Pat<(store (f64 (extractelt (v2f64 VR128:$src), (iPTR 0))),
                   addr:$dst),
            (VMOVSDmr addr:$dst, (COPY_TO_REGCLASS (v2f64 VR128:$src), FR64))>;

  // Shuffle with VMOVSS
  def : Pat<(v4i32 (X86Movss VR128:$src1, VR128:$src2)),
            (VMOVSSrr (v4i32 VR128:$src1),
                      (COPY_TO_REGCLASS (v4i32 VR128:$src2), FR32))>;
  def : Pat<(v4f32 (X86Movss VR128:$src1, VR128:$src2)),
            (VMOVSSrr (v4f32 VR128:$src1),
                      (COPY_TO_REGCLASS (v4f32 VR128:$src2), FR32))>;

  // 256-bit variants
  def : Pat<(v8i32 (X86Movss VR256:$src1, VR256:$src2)),
            (SUBREG_TO_REG (i32 0),
              (VMOVSSrr (EXTRACT_SUBREG (v8i32 VR256:$src1), sub_xmm),
                        (EXTRACT_SUBREG (v8i32 VR256:$src2), sub_xmm)),
              sub_xmm)>;
  def : Pat<(v8f32 (X86Movss VR256:$src1, VR256:$src2)),
            (SUBREG_TO_REG (i32 0),
              (VMOVSSrr (EXTRACT_SUBREG (v8f32 VR256:$src1), sub_xmm),
                        (EXTRACT_SUBREG (v8f32 VR256:$src2), sub_xmm)),
              sub_xmm)>;

  // Shuffle with VMOVSD
  def : Pat<(v2i64 (X86Movsd VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v2f64 (X86Movsd VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v4f32 (X86Movsd VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v4i32 (X86Movsd VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;

  // 256-bit variants
  def : Pat<(v4i64 (X86Movsd VR256:$src1, VR256:$src2)),
            (SUBREG_TO_REG (i32 0),
              (VMOVSDrr (EXTRACT_SUBREG (v4i64 VR256:$src1), sub_xmm),
                        (EXTRACT_SUBREG (v4i64 VR256:$src2), sub_xmm)),
              sub_xmm)>;
  def : Pat<(v4f64 (X86Movsd VR256:$src1, VR256:$src2)),
            (SUBREG_TO_REG (i32 0),
              (VMOVSDrr (EXTRACT_SUBREG (v4f64 VR256:$src1), sub_xmm),
                        (EXTRACT_SUBREG (v4f64 VR256:$src2), sub_xmm)),
              sub_xmm)>;

  // FIXME: Instead of a X86Movlps there should be a X86Movsd here, the problem
  // is during lowering, where it's not possible to recognize the fold because
  // it has two uses through a bitcast. One use disappears at isel time and the
  // fold opportunity reappears.
  def : Pat<(v2f64 (X86Movlpd VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v2i64 (X86Movlpd VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v4f32 (X86Movlps VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v4i32 (X86Movlps VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
}

let Predicates = [UseSSE1] in {
  let Predicates = [NoSSE41], AddedComplexity = 15 in {
  // Move scalar to XMM zero-extended, zeroing a VR128 then do a
  // MOVSS to the lower bits.
  def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32:$src)))),
            (MOVSSrr (v4f32 (V_SET0)), FR32:$src)>;
  def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
            (MOVSSrr (v4f32 (V_SET0)), (COPY_TO_REGCLASS VR128:$src, FR32))>;
  def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
            (MOVSSrr (v4i32 (V_SET0)), (COPY_TO_REGCLASS VR128:$src, FR32))>;
  }

  let AddedComplexity = 20 in {
  // MOVSSrm already zeros the high parts of the register.
  def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
            (COPY_TO_REGCLASS (MOVSSrm addr:$src), VR128)>;
  def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
            (COPY_TO_REGCLASS (MOVSSrm addr:$src), VR128)>;
  def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
            (COPY_TO_REGCLASS (MOVSSrm addr:$src), VR128)>;
  }

  // Extract and store.
  def : Pat<(store (f32 (extractelt (v4f32 VR128:$src), (iPTR 0))),
                   addr:$dst),
            (MOVSSmr addr:$dst, (COPY_TO_REGCLASS VR128:$src, FR32))>;

  // Shuffle with MOVSS
  def : Pat<(v4i32 (X86Movss VR128:$src1, VR128:$src2)),
            (MOVSSrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR32))>;
  def : Pat<(v4f32 (X86Movss VR128:$src1, VR128:$src2)),
            (MOVSSrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR32))>;
}

let Predicates = [UseSSE2] in {
  let Predicates = [NoSSE41], AddedComplexity = 15 in {
  // Move scalar to XMM zero-extended, zeroing a VR128 then do a
  // MOVSD to the lower bits.
  def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64:$src)))),
            (MOVSDrr (v2f64 (V_SET0)), FR64:$src)>;
  }

  let AddedComplexity = 20 in {
  // MOVSDrm already zeros the high parts of the register.
  def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
            (COPY_TO_REGCLASS (MOVSDrm addr:$src), VR128)>;
  def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
            (COPY_TO_REGCLASS (MOVSDrm addr:$src), VR128)>;
  def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
            (COPY_TO_REGCLASS (MOVSDrm addr:$src), VR128)>;
  def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
            (COPY_TO_REGCLASS (MOVSDrm addr:$src), VR128)>;
  def : Pat<(v2f64 (X86vzload addr:$src)),
            (COPY_TO_REGCLASS (MOVSDrm addr:$src), VR128)>;
  }

  // Extract and store.
  def : Pat<(store (f64 (extractelt (v2f64 VR128:$src), (iPTR 0))),
                   addr:$dst),
            (MOVSDmr addr:$dst, (COPY_TO_REGCLASS VR128:$src, FR64))>;

  // Shuffle with MOVSD
  def : Pat<(v2i64 (X86Movsd VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v2f64 (X86Movsd VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v4f32 (X86Movsd VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v4i32 (X86Movsd VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;

  // FIXME: Instead of a X86Movlps there should be a X86Movsd here, the problem
  // is during lowering, where it's not possible to recognize the fold because
  // it has two uses through a bitcast. One use disappears at isel time and the
  // fold opportunity reappears.
  def : Pat<(v2f64 (X86Movlpd VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v2i64 (X86Movlpd VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v4f32 (X86Movlps VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v4i32 (X86Movlps VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move Aligned/Unaligned FP Instructions
//===----------------------------------------------------------------------===//

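// movaps/movapd require their memory operand to be 16-byte aligned and fault
// otherwise, while movups/movupd accept any alignment; the patterns below
// therefore select the aligned opcodes only for alignedload/alignedstore
// nodes.
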
multiclass sse12_mov_packed<bits<8> opc, RegisterClass RC,
                            X86MemOperand x86memop, PatFrag ld_frag,
                            string asm, Domain d,
                            OpndItins itins,
                            bit IsReMaterializable = 1> {
let hasSideEffects = 0 in
  def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
              !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [], itins.rr, d>,
           Sched<[WriteFShuffle]>;
let canFoldAsLoad = 1, isReMaterializable = IsReMaterializable in
  def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
              !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
              [(set RC:$dst, (ld_frag addr:$src))], itins.rm, d>,
           Sched<[WriteLoad]>;
}

let Predicates = [HasAVX, NoVLX] in {
defm VMOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
                                "movaps", SSEPackedSingle, SSE_MOVA_ITINS>,
                                PS, VEX;
defm VMOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
                                "movapd", SSEPackedDouble, SSE_MOVA_ITINS>,
                                PD, VEX;
defm VMOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
                                "movups", SSEPackedSingle, SSE_MOVU_ITINS>,
                                PS, VEX;
defm VMOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
                                "movupd", SSEPackedDouble, SSE_MOVU_ITINS, 0>,
                                PD, VEX;

defm VMOVAPSY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv8f32,
                                 "movaps", SSEPackedSingle, SSE_MOVA_ITINS>,
                                 PS, VEX, VEX_L;
defm VMOVAPDY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv4f64,
                                 "movapd", SSEPackedDouble, SSE_MOVA_ITINS>,
                                 PD, VEX, VEX_L;
defm VMOVUPSY : sse12_mov_packed<0x10, VR256, f256mem, loadv8f32,
                                 "movups", SSEPackedSingle, SSE_MOVU_ITINS>,
                                 PS, VEX, VEX_L;
defm VMOVUPDY : sse12_mov_packed<0x10, VR256, f256mem, loadv4f64,
                                 "movupd", SSEPackedDouble, SSE_MOVU_ITINS, 0>,
                                 PD, VEX, VEX_L;
}

let Predicates = [UseSSE1] in {
defm MOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
                               "movaps", SSEPackedSingle, SSE_MOVA_ITINS>,
                               PS;
defm MOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
                               "movups", SSEPackedSingle, SSE_MOVU_ITINS>,
                               PS;
}
let Predicates = [UseSSE2] in {
defm MOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
                               "movapd", SSEPackedDouble, SSE_MOVA_ITINS>,
                               PD;
defm MOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
                               "movupd", SSEPackedDouble, SSE_MOVU_ITINS, 0>,
                               PD;
}

let SchedRW = [WriteStore], Predicates = [HasAVX, NoVLX] in {
def VMOVAPSmr : VPSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                     "movaps\t{$src, $dst|$dst, $src}",
                     [(alignedstore (v4f32 VR128:$src), addr:$dst)],
                     IIC_SSE_MOVA_P_MR>, VEX;
def VMOVAPDmr : VPDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                     "movapd\t{$src, $dst|$dst, $src}",
                     [(alignedstore (v2f64 VR128:$src), addr:$dst)],
                     IIC_SSE_MOVA_P_MR>, VEX;
def VMOVUPSmr : VPSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                     "movups\t{$src, $dst|$dst, $src}",
                     [(store (v4f32 VR128:$src), addr:$dst)],
                     IIC_SSE_MOVU_P_MR>, VEX;
def VMOVUPDmr : VPDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                     "movupd\t{$src, $dst|$dst, $src}",
                     [(store (v2f64 VR128:$src), addr:$dst)],
                     IIC_SSE_MOVU_P_MR>, VEX;
def VMOVAPSYmr : VPSI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                      "movaps\t{$src, $dst|$dst, $src}",
                      [(alignedstore256 (v8f32 VR256:$src), addr:$dst)],
                      IIC_SSE_MOVA_P_MR>, VEX, VEX_L;
def VMOVAPDYmr : VPDI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                      "movapd\t{$src, $dst|$dst, $src}",
                      [(alignedstore256 (v4f64 VR256:$src), addr:$dst)],
                      IIC_SSE_MOVA_P_MR>, VEX, VEX_L;
def VMOVUPSYmr : VPSI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                      "movups\t{$src, $dst|$dst, $src}",
                      [(store (v8f32 VR256:$src), addr:$dst)],
                      IIC_SSE_MOVU_P_MR>, VEX, VEX_L;
def VMOVUPDYmr : VPDI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                      "movupd\t{$src, $dst|$dst, $src}",
                      [(store (v4f64 VR256:$src), addr:$dst)],
                      IIC_SSE_MOVU_P_MR>, VEX, VEX_L;
} // SchedRW

// For disassembler
let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0,
    SchedRW = [WriteFShuffle] in {
  def VMOVAPSrr_REV : VPSI<0x29, MRMDestReg, (outs VR128:$dst),
                           (ins VR128:$src),
                           "movaps\t{$src, $dst|$dst, $src}", [],
                           IIC_SSE_MOVA_P_RR>, VEX;
  def VMOVAPDrr_REV : VPDI<0x29, MRMDestReg, (outs VR128:$dst),
                           (ins VR128:$src),
                           "movapd\t{$src, $dst|$dst, $src}", [],
                           IIC_SSE_MOVA_P_RR>, VEX;
  def VMOVUPSrr_REV : VPSI<0x11, MRMDestReg, (outs VR128:$dst),
                           (ins VR128:$src),
                           "movups\t{$src, $dst|$dst, $src}", [],
                           IIC_SSE_MOVU_P_RR>, VEX;
  def VMOVUPDrr_REV : VPDI<0x11, MRMDestReg, (outs VR128:$dst),
                           (ins VR128:$src),
                           "movupd\t{$src, $dst|$dst, $src}", [],
                           IIC_SSE_MOVU_P_RR>, VEX;
  def VMOVAPSYrr_REV : VPSI<0x29, MRMDestReg, (outs VR256:$dst),
                            (ins VR256:$src),
                            "movaps\t{$src, $dst|$dst, $src}", [],
                            IIC_SSE_MOVA_P_RR>, VEX, VEX_L;
  def VMOVAPDYrr_REV : VPDI<0x29, MRMDestReg, (outs VR256:$dst),
                            (ins VR256:$src),
                            "movapd\t{$src, $dst|$dst, $src}", [],
                            IIC_SSE_MOVA_P_RR>, VEX, VEX_L;
  def VMOVUPSYrr_REV : VPSI<0x11, MRMDestReg, (outs VR256:$dst),
                            (ins VR256:$src),
                            "movups\t{$src, $dst|$dst, $src}", [],
                            IIC_SSE_MOVU_P_RR>, VEX, VEX_L;
  def VMOVUPDYrr_REV : VPDI<0x11, MRMDestReg, (outs VR256:$dst),
                            (ins VR256:$src),
                            "movupd\t{$src, $dst|$dst, $src}", [],
                            IIC_SSE_MOVU_P_RR>, VEX, VEX_L;
}

def : Pat<(int_x86_avx_storeu_ps_256 addr:$dst, VR256:$src),
          (VMOVUPSYmr addr:$dst, VR256:$src)>;
def : Pat<(int_x86_avx_storeu_pd_256 addr:$dst, VR256:$src),
          (VMOVUPDYmr addr:$dst, VR256:$src)>;

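// The two patterns above lower the 256-bit unaligned-store intrinsics
// directly onto the vmovups/vmovupd store instructions defined above; no
// separate instruction definition is needed for them.
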
let SchedRW = [WriteStore] in {
def MOVAPSmr : PSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movaps\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v4f32 VR128:$src), addr:$dst)],
                   IIC_SSE_MOVA_P_MR>;
def MOVAPDmr : PDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movapd\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v2f64 VR128:$src), addr:$dst)],
                   IIC_SSE_MOVA_P_MR>;
def MOVUPSmr : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movups\t{$src, $dst|$dst, $src}",
                   [(store (v4f32 VR128:$src), addr:$dst)],
                   IIC_SSE_MOVU_P_MR>;
def MOVUPDmr : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movupd\t{$src, $dst|$dst, $src}",
                   [(store (v2f64 VR128:$src), addr:$dst)],
                   IIC_SSE_MOVU_P_MR>;
} // SchedRW

// For disassembler
let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0,
    SchedRW = [WriteFShuffle] in {
  def MOVAPSrr_REV : PSI<0x29, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
                         "movaps\t{$src, $dst|$dst, $src}", [],
                         IIC_SSE_MOVA_P_RR>;
  def MOVAPDrr_REV : PDI<0x29, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
                         "movapd\t{$src, $dst|$dst, $src}", [],
                         IIC_SSE_MOVA_P_RR>;
  def MOVUPSrr_REV : PSI<0x11, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
                         "movups\t{$src, $dst|$dst, $src}", [],
                         IIC_SSE_MOVU_P_RR>;
  def MOVUPDrr_REV : PDI<0x11, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
                         "movupd\t{$src, $dst|$dst, $src}", [],
                         IIC_SSE_MOVU_P_RR>;
}

let Predicates = [HasAVX] in {
  def : Pat<(int_x86_sse_storeu_ps addr:$dst, VR128:$src),
            (VMOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(int_x86_sse2_storeu_pd addr:$dst, VR128:$src),
            (VMOVUPDmr addr:$dst, VR128:$src)>;
}

let Predicates = [UseSSE1] in
  def : Pat<(int_x86_sse_storeu_ps addr:$dst, VR128:$src),
            (MOVUPSmr addr:$dst, VR128:$src)>;
let Predicates = [UseSSE2] in
  def : Pat<(int_x86_sse2_storeu_pd addr:$dst, VR128:$src),
            (MOVUPDmr addr:$dst, VR128:$src)>;

// Use vmovaps/vmovups for AVX integer load/store.
let Predicates = [HasAVX, NoVLX] in {
  // 128-bit load/store
  def : Pat<(alignedloadv2i64 addr:$src),
            (VMOVAPSrm addr:$src)>;
  def : Pat<(loadv2i64 addr:$src),
            (VMOVUPSrm addr:$src)>;

  def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
            (VMOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
            (VMOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
            (VMOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
            (VMOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v2i64 VR128:$src), addr:$dst),
            (VMOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v4i32 VR128:$src), addr:$dst),
            (VMOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v8i16 VR128:$src), addr:$dst),
            (VMOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v16i8 VR128:$src), addr:$dst),
            (VMOVUPSmr addr:$dst, VR128:$src)>;

  // 256-bit load/store
  def : Pat<(alignedloadv4i64 addr:$src),
            (VMOVAPSYrm addr:$src)>;
  def : Pat<(loadv4i64 addr:$src),
            (VMOVUPSYrm addr:$src)>;
  def : Pat<(alignedstore256 (v4i64 VR256:$src), addr:$dst),
            (VMOVAPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(alignedstore256 (v8i32 VR256:$src), addr:$dst),
            (VMOVAPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(alignedstore256 (v16i16 VR256:$src), addr:$dst),
            (VMOVAPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(alignedstore256 (v32i8 VR256:$src), addr:$dst),
            (VMOVAPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(store (v4i64 VR256:$src), addr:$dst),
            (VMOVUPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(store (v8i32 VR256:$src), addr:$dst),
            (VMOVUPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(store (v16i16 VR256:$src), addr:$dst),
            (VMOVUPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(store (v32i8 VR256:$src), addr:$dst),
            (VMOVUPSYmr addr:$dst, VR256:$src)>;

  // Special patterns for storing subvector extracts of lower 128-bits
  // It's cheaper to just use VMOVAPS/VMOVUPS instead of VEXTRACTF128mr
  def : Pat<(alignedstore (v2f64 (extract_subvector
                                  (v4f64 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVAPDmr addr:$dst, (v2f64 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
  def : Pat<(alignedstore (v4f32 (extract_subvector
                                  (v8f32 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVAPSmr addr:$dst, (v4f32 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
  def : Pat<(alignedstore (v2i64 (extract_subvector
                                  (v4i64 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVAPDmr addr:$dst, (v2i64 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
  def : Pat<(alignedstore (v4i32 (extract_subvector
                                  (v8i32 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVAPSmr addr:$dst, (v4i32 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
  def : Pat<(alignedstore (v8i16 (extract_subvector
                                  (v16i16 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVAPSmr addr:$dst, (v8i16 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
  def : Pat<(alignedstore (v16i8 (extract_subvector
                                  (v32i8 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVAPSmr addr:$dst, (v16i8 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;

  def : Pat<(store (v2f64 (extract_subvector
                           (v4f64 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVUPDmr addr:$dst, (v2f64 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
  def : Pat<(store (v4f32 (extract_subvector
                           (v8f32 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVUPSmr addr:$dst, (v4f32 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
  def : Pat<(store (v2i64 (extract_subvector
                           (v4i64 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVUPDmr addr:$dst, (v2i64 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
  def : Pat<(store (v4i32 (extract_subvector
                           (v8i32 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVUPSmr addr:$dst, (v4i32 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
  def : Pat<(store (v8i16 (extract_subvector
                           (v16i16 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVUPSmr addr:$dst, (v8i16 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
  def : Pat<(store (v16i8 (extract_subvector
                           (v32i8 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVUPSmr addr:$dst, (v16i8 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
}

// Use movaps / movups for SSE integer load / store (one byte shorter).
// The instructions selected below are then converted to MOVDQA/MOVDQU
// during the SSE domain pass.
let Predicates = [UseSSE1] in {
  def : Pat<(alignedloadv2i64 addr:$src),
            (MOVAPSrm addr:$src)>;
  def : Pat<(loadv2i64 addr:$src),
            (MOVUPSrm addr:$src)>;

  def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
            (MOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
            (MOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
            (MOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
            (MOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v2i64 VR128:$src), addr:$dst),
            (MOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v4i32 VR128:$src), addr:$dst),
            (MOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v8i16 VR128:$src), addr:$dst),
            (MOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v16i8 VR128:$src), addr:$dst),
            (MOVUPSmr addr:$dst, VR128:$src)>;
}

// Alias instruction to load FR32 or FR64 from f128mem using movaps. Upper
// bits are disregarded. FIXME: Set encoding to pseudo!
let canFoldAsLoad = 1, isReMaterializable = 1, SchedRW = [WriteLoad] in {
let isCodeGenOnly = 1 in {
  def FsVMOVAPSrm : VPSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
                         "movaps\t{$src, $dst|$dst, $src}",
                         [(set FR32:$dst, (alignedloadfsf32 addr:$src))],
                         IIC_SSE_MOVA_P_RM>, VEX;
  def FsVMOVAPDrm : VPDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
                         "movapd\t{$src, $dst|$dst, $src}",
                         [(set FR64:$dst, (alignedloadfsf64 addr:$src))],
                         IIC_SSE_MOVA_P_RM>, VEX;
  def FsMOVAPSrm : PSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
                       "movaps\t{$src, $dst|$dst, $src}",
                       [(set FR32:$dst, (alignedloadfsf32 addr:$src))],
                       IIC_SSE_MOVA_P_RM>;
  def FsMOVAPDrm : PDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
                       "movapd\t{$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (alignedloadfsf64 addr:$src))],
                       IIC_SSE_MOVA_P_RM>;
}
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move Low packed FP Instructions
//===----------------------------------------------------------------------===//

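// movlps/movlpd move 64 bits between memory and the low quadword of an XMM
// register, leaving the high quadword of the destination unchanged; the
// multiclasses below instantiate both the PS and PD forms from one template.
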
multiclass sse12_mov_hilo_packed_base<bits<8>opc, SDNode psnode, SDNode pdnode,
                                      string base_opc, string asm_opr,
                                      InstrItinClass itin> {
  def PSrm : PI<opc, MRMSrcMem,
         (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
         !strconcat(base_opc, "s", asm_opr),
     [(set VR128:$dst,
       (psnode VR128:$src1,
              (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))))],
              itin, SSEPackedSingle>, PS,
     Sched<[WriteFShuffleLd, ReadAfterLd]>;

  def PDrm : PI<opc, MRMSrcMem,
         (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
         !strconcat(base_opc, "d", asm_opr),
     [(set VR128:$dst, (v2f64 (pdnode VR128:$src1,
                              (scalar_to_vector (loadf64 addr:$src2)))))],
              itin, SSEPackedDouble>, PD,
     Sched<[WriteFShuffleLd, ReadAfterLd]>;
}

multiclass sse12_mov_hilo_packed<bits<8>opc, SDNode psnode, SDNode pdnode,
                                 string base_opc, InstrItinClass itin> {
  let Predicates = [UseAVX] in
    defm V#NAME : sse12_mov_hilo_packed_base<opc, psnode, pdnode, base_opc,
                                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                                  itin>, VEX_4V;

  let Constraints = "$src1 = $dst" in
    defm NAME : sse12_mov_hilo_packed_base<opc, psnode, pdnode, base_opc,
                                  "\t{$src2, $dst|$dst, $src2}",
                                  itin>;
}

let AddedComplexity = 20 in {
  defm MOVL : sse12_mov_hilo_packed<0x12, X86Movlps, X86Movlpd, "movlp",
                                    IIC_SSE_MOV_LH>;
}

let SchedRW = [WriteStore] in {
let Predicates = [UseAVX] in {
def VMOVLPSmr : VPSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                     "movlps\t{$src, $dst|$dst, $src}",
                     [(store (f64 (extractelt (bc_v2f64 (v4f32 VR128:$src)),
                                   (iPTR 0))), addr:$dst)],
                     IIC_SSE_MOV_LH>, VEX;
def VMOVLPDmr : VPDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                     "movlpd\t{$src, $dst|$dst, $src}",
                     [(store (f64 (extractelt (v2f64 VR128:$src),
                                   (iPTR 0))), addr:$dst)],
                     IIC_SSE_MOV_LH>, VEX;
}
def MOVLPSmr : PSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (extractelt (bc_v2f64 (v4f32 VR128:$src)),
                                 (iPTR 0))), addr:$dst)],
                   IIC_SSE_MOV_LH>;
def MOVLPDmr : PDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlpd\t{$src, $dst|$dst, $src}",
                   [(store (f64 (extractelt (v2f64 VR128:$src),
                                 (iPTR 0))), addr:$dst)],
                   IIC_SSE_MOV_LH>;
} // SchedRW

let Predicates = [UseAVX] in {
  // Shuffle with VMOVLPS
  def : Pat<(v4f32 (X86Movlps VR128:$src1, (load addr:$src2))),
            (VMOVLPSrm VR128:$src1, addr:$src2)>;
  def : Pat<(v4i32 (X86Movlps VR128:$src1, (load addr:$src2))),
            (VMOVLPSrm VR128:$src1, addr:$src2)>;

  // Shuffle with VMOVLPD
  def : Pat<(v2f64 (X86Movlpd VR128:$src1, (load addr:$src2))),
            (VMOVLPDrm VR128:$src1, addr:$src2)>;
  def : Pat<(v2i64 (X86Movlpd VR128:$src1, (load addr:$src2))),
            (VMOVLPDrm VR128:$src1, addr:$src2)>;
  def : Pat<(v2f64 (X86Movsd VR128:$src1,
                    (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
            (VMOVLPDrm VR128:$src1, addr:$src2)>;

  // Store patterns
  def : Pat<(store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)),
                   addr:$src1),
            (VMOVLPSmr addr:$src1, VR128:$src2)>;
  def : Pat<(store (v4i32 (X86Movlps
                   (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2)), addr:$src1),
            (VMOVLPSmr addr:$src1, VR128:$src2)>;
  def : Pat<(store (v2f64 (X86Movlpd (load addr:$src1), VR128:$src2)),
                   addr:$src1),
            (VMOVLPDmr addr:$src1, VR128:$src2)>;
  def : Pat<(store (v2i64 (X86Movlpd (load addr:$src1), VR128:$src2)),
                   addr:$src1),
            (VMOVLPDmr addr:$src1, VR128:$src2)>;
}

let Predicates = [UseSSE1] in {
  // (store (vector_shuffle (load addr), v2, <4, 5, 2, 3>), addr) using MOVLPS
  def : Pat<(store (i64 (extractelt (bc_v2i64 (v4f32 VR128:$src2)),
                                    (iPTR 0))), addr:$src1),
            (MOVLPSmr addr:$src1, VR128:$src2)>;

  // Shuffle with MOVLPS
  def : Pat<(v4f32 (X86Movlps VR128:$src1, (load addr:$src2))),
            (MOVLPSrm VR128:$src1, addr:$src2)>;
  def : Pat<(v4i32 (X86Movlps VR128:$src1, (load addr:$src2))),
            (MOVLPSrm VR128:$src1, addr:$src2)>;
  def : Pat<(X86Movlps VR128:$src1,
                  (bc_v4f32 (v2i64 (scalar_to_vector (loadi64 addr:$src2))))),
            (MOVLPSrm VR128:$src1, addr:$src2)>;

  // Store patterns
  def : Pat<(store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)),
                   addr:$src1),
            (MOVLPSmr addr:$src1, VR128:$src2)>;
  def : Pat<(store (v4i32 (X86Movlps
                   (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2)),
                   addr:$src1),
            (MOVLPSmr addr:$src1, VR128:$src2)>;
}

let Predicates = [UseSSE2] in {
  // Shuffle with MOVLPD
  def : Pat<(v2f64 (X86Movlpd VR128:$src1, (load addr:$src2))),
            (MOVLPDrm VR128:$src1, addr:$src2)>;
  def : Pat<(v2i64 (X86Movlpd VR128:$src1, (load addr:$src2))),
            (MOVLPDrm VR128:$src1, addr:$src2)>;
  def : Pat<(v2f64 (X86Movsd VR128:$src1,
                    (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
            (MOVLPDrm VR128:$src1, addr:$src2)>;

  // Store patterns
  def : Pat<(store (v2f64 (X86Movlpd (load addr:$src1), VR128:$src2)),
                   addr:$src1),
            (MOVLPDmr addr:$src1, VR128:$src2)>;
  def : Pat<(store (v2i64 (X86Movlpd (load addr:$src1), VR128:$src2)),
                   addr:$src1),
            (MOVLPDmr addr:$src1, VR128:$src2)>;
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move Hi packed FP Instructions
//===----------------------------------------------------------------------===//

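// movhps/movhpd are the high-quadword counterparts of movlps/movlpd: they
// move 64 bits between memory and bits [127:64] of an XMM register while
// preserving the low quadword of the destination.
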
let AddedComplexity = 20 in {
  defm MOVH : sse12_mov_hilo_packed<0x16, X86Movlhps, X86Movlhpd, "movhp",
                                    IIC_SSE_MOV_LH>;
}

let SchedRW = [WriteStore] in {
// v2f64 extract element 1 is always custom lowered to unpack high to low
// and extract element 0 so the non-store version isn't too horrible.
let Predicates = [UseAVX] in {
def VMOVHPSmr : VPSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                     "movhps\t{$src, $dst|$dst, $src}",
                     [(store (f64 (extractelt
                                   (X86Unpckh (bc_v2f64 (v4f32 VR128:$src)),
                                              (bc_v2f64 (v4f32 VR128:$src))),
                                   (iPTR 0))), addr:$dst)], IIC_SSE_MOV_LH>, VEX;
def VMOVHPDmr : VPDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                     "movhpd\t{$src, $dst|$dst, $src}",
                     [(store (f64 (extractelt
                                   (v2f64 (X86Unpckh VR128:$src, VR128:$src)),
                                   (iPTR 0))), addr:$dst)], IIC_SSE_MOV_LH>, VEX;
}
def MOVHPSmr : PSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (extractelt
                                 (X86Unpckh (bc_v2f64 (v4f32 VR128:$src)),
                                            (bc_v2f64 (v4f32 VR128:$src))),
                                 (iPTR 0))), addr:$dst)], IIC_SSE_MOV_LH>;
def MOVHPDmr : PDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhpd\t{$src, $dst|$dst, $src}",
                   [(store (f64 (extractelt
                                 (v2f64 (X86Unpckh VR128:$src, VR128:$src)),
                                 (iPTR 0))), addr:$dst)], IIC_SSE_MOV_LH>;
} // SchedRW

let Predicates = [UseAVX] in {
  // VMOVHPS patterns
  def : Pat<(X86Movlhps VR128:$src1,
                 (bc_v4f32 (v2i64 (scalar_to_vector (loadi64 addr:$src2))))),
            (VMOVHPSrm VR128:$src1, addr:$src2)>;
  def : Pat<(X86Movlhps VR128:$src1,
                 (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
            (VMOVHPSrm VR128:$src1, addr:$src2)>;

  // VMOVHPD patterns

  // FIXME: Instead of X86Unpckl, there should be a X86Movlhpd here, the problem
  // is during lowering, where it's not possible to recognize the load fold
  // because it has two uses through a bitcast. One use disappears at isel time
  // and the fold opportunity reappears.
  def : Pat<(v2f64 (X86Unpckl VR128:$src1,
                    (scalar_to_vector (loadf64 addr:$src2)))),
            (VMOVHPDrm VR128:$src1, addr:$src2)>;
  // Also handle an i64 load because that may get selected as a faster way to
  // load the scalar.
  def : Pat<(v2f64 (X86Unpckl VR128:$src1,
                    (bc_v2f64 (v2i64 (scalar_to_vector (loadi64 addr:$src2)))))),
            (VMOVHPDrm VR128:$src1, addr:$src2)>;

  def : Pat<(store (f64 (extractelt
                          (v2f64 (X86VPermilpi VR128:$src, (i8 1))),
                          (iPTR 0))), addr:$dst),
            (VMOVHPDmr addr:$dst, VR128:$src)>;
}

let Predicates = [UseSSE1] in {
  // MOVHPS patterns
  def : Pat<(X86Movlhps VR128:$src1,
                 (bc_v4f32 (v2i64 (scalar_to_vector (loadi64 addr:$src2))))),
            (MOVHPSrm VR128:$src1, addr:$src2)>;
  def : Pat<(X86Movlhps VR128:$src1,
                 (bc_v4f32 (v2i64 (X86vzload addr:$src2)))),
            (MOVHPSrm VR128:$src1, addr:$src2)>;
}

let Predicates = [UseSSE2] in {
  // MOVHPD patterns

  // FIXME: Instead of X86Unpckl, there should be a X86Movlhpd here, the problem
  // is during lowering, where it's not possible to recognize the load fold
  // because it has two uses through a bitcast. One use disappears at isel time
  // and the fold opportunity reappears.
  def : Pat<(v2f64 (X86Unpckl VR128:$src1,
                    (scalar_to_vector (loadf64 addr:$src2)))),
            (MOVHPDrm VR128:$src1, addr:$src2)>;
  // Also handle an i64 load because that may get selected as a faster way to
  // load the scalar.
  def : Pat<(v2f64 (X86Unpckl VR128:$src1,
                    (bc_v2f64 (v2i64 (scalar_to_vector (loadi64 addr:$src2)))))),
            (MOVHPDrm VR128:$src1, addr:$src2)>;

  def : Pat<(store (f64 (extractelt
                          (v2f64 (X86Shufp VR128:$src, VR128:$src, (i8 1))),
                          (iPTR 0))), addr:$dst),
            (MOVHPDmr addr:$dst, VR128:$src)>;
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move Low to High and High to Low packed FP Instructions
//===----------------------------------------------------------------------===//

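// movlhps copies the low 64 bits of the second source into the high half of
// the destination; movhlps copies the high 64 bits of the second source into
// the low half. Both leave the other half of the destination unchanged.
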
let AddedComplexity = 20, Predicates = [UseAVX] in {
  def VMOVLHPSrr : VPSI<0x16, MRMSrcReg, (outs VR128:$dst),
                        (ins VR128:$src1, VR128:$src2),
                        "movlhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                        [(set VR128:$dst,
                          (v4f32 (X86Movlhps VR128:$src1, VR128:$src2)))],
                        IIC_SSE_MOV_LH>,
                        VEX_4V, Sched<[WriteFShuffle]>;
  def VMOVHLPSrr : VPSI<0x12, MRMSrcReg, (outs VR128:$dst),
                        (ins VR128:$src1, VR128:$src2),
                        "movhlps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                        [(set VR128:$dst,
                          (v4f32 (X86Movhlps VR128:$src1, VR128:$src2)))],
                        IIC_SSE_MOV_LH>,
                        VEX_4V, Sched<[WriteFShuffle]>;
}

let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
  def MOVLHPSrr : PSI<0x16, MRMSrcReg, (outs VR128:$dst),
                      (ins VR128:$src1, VR128:$src2),
                      "movlhps\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                        (v4f32 (X86Movlhps VR128:$src1, VR128:$src2)))],
                      IIC_SSE_MOV_LH>, Sched<[WriteFShuffle]>;
  def MOVHLPSrr : PSI<0x12, MRMSrcReg, (outs VR128:$dst),
                      (ins VR128:$src1, VR128:$src2),
                      "movhlps\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                        (v4f32 (X86Movhlps VR128:$src1, VR128:$src2)))],
                      IIC_SSE_MOV_LH>, Sched<[WriteFShuffle]>;
}

let Predicates = [UseAVX] in {
  // MOVLHPS patterns
  def : Pat<(v4i32 (X86Movlhps VR128:$src1, VR128:$src2)),
            (VMOVLHPSrr VR128:$src1, VR128:$src2)>;
  def : Pat<(v2i64 (X86Movlhps VR128:$src1, VR128:$src2)),
            (VMOVLHPSrr (v2i64 VR128:$src1), VR128:$src2)>;

  // MOVHLPS patterns
  def : Pat<(v4i32 (X86Movhlps VR128:$src1, VR128:$src2)),
            (VMOVHLPSrr VR128:$src1, VR128:$src2)>;
}

1424 let Predicates = [UseSSE1] in {
1426 def : Pat<(v4i32 (X86Movlhps VR128:$src1, VR128:$src2)),
1427 (MOVLHPSrr VR128:$src1, VR128:$src2)>;
1428 def : Pat<(v2i64 (X86Movlhps VR128:$src1, VR128:$src2)),
1429 (MOVLHPSrr (v2i64 VR128:$src1), VR128:$src2)>;
1432 def : Pat<(v4i32 (X86Movhlps VR128:$src1, VR128:$src2)),
1433 (MOVHLPSrr VR128:$src1, VR128:$src2)>;
1436 //===----------------------------------------------------------------------===//
1437 // SSE 1 & 2 - Conversion Instructions
1438 //===----------------------------------------------------------------------===//
1440 def SSE_CVT_PD : OpndItins<
1441 IIC_SSE_CVT_PD_RR, IIC_SSE_CVT_PD_RM
1444 let Sched = WriteCvtI2F in
1445 def SSE_CVT_PS : OpndItins<
1446 IIC_SSE_CVT_PS_RR, IIC_SSE_CVT_PS_RM
1449 let Sched = WriteCvtI2F in
1450 def SSE_CVT_Scalar : OpndItins<
1451 IIC_SSE_CVT_Scalar_RR, IIC_SSE_CVT_Scalar_RM
1454 let Sched = WriteCvtF2I in
1455 def SSE_CVT_SS2SI_32 : OpndItins<
1456 IIC_SSE_CVT_SS2SI32_RR, IIC_SSE_CVT_SS2SI32_RM
1459 let Sched = WriteCvtF2I in
1460 def SSE_CVT_SS2SI_64 : OpndItins<
1461 IIC_SSE_CVT_SS2SI64_RR, IIC_SSE_CVT_SS2SI64_RM
1464 let Sched = WriteCvtF2I in
1465 def SSE_CVT_SD2SI : OpndItins<
1466 IIC_SSE_CVT_SD2SI_RR, IIC_SSE_CVT_SD2SI_RM
// FIXME: We probably want to match the rm form only when optimizing for
// size, to avoid false dependencies (see sse_fp_unop_s for details).
1471 multiclass sse12_cvt_s<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
1472 SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
1473 string asm, OpndItins itins> {
1474 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
1475 [(set DstRC:$dst, (OpNode SrcRC:$src))],
1476 itins.rr>, Sched<[itins.Sched]>;
1477 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
1478 [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))],
1479 itins.rm>, Sched<[itins.Sched.Folded]>;
1482 multiclass sse12_cvt_p<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
1483 X86MemOperand x86memop, string asm, Domain d,
1485 let hasSideEffects = 0 in {
1486 def rr : I<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
1487 [], itins.rr, d>, Sched<[itins.Sched]>;
1489 def rm : I<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
1490 [], itins.rm, d>, Sched<[itins.Sched.Folded]>;
// FIXME: We probably want to match the rm form only when optimizing for
// size, to avoid false dependencies (see sse_fp_unop_s for details).
1496 multiclass sse12_vcvt_avx<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
1497 X86MemOperand x86memop, string asm> {
1498 let hasSideEffects = 0, Predicates = [UseAVX] in {
1499 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src),
1500 !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>,
1501 Sched<[WriteCvtI2F]>;
1503 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
1504 (ins DstRC:$src1, x86memop:$src),
1505 !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>,
1506 Sched<[WriteCvtI2FLd, ReadAfterLd]>;
1507 } // hasSideEffects = 0
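// Illustrative note (AT&T syntax, hedged): the extra $src1 operand makes the
// AVX scalar converts three-operand, so the upper bits of the result come
// from an explicitly named register rather than from a stale $dst value, e.g.
//   vcvtsi2ss %eax, %xmm7, %xmm0   ; upper 96 bits of %xmm0 copied from %xmm7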
1510 let Predicates = [UseAVX] in {
1511 defm VCVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
1512 "cvttss2si\t{$src, $dst|$dst, $src}",
1515 defm VCVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
1516 "cvttss2si\t{$src, $dst|$dst, $src}",
1518 XS, VEX, VEX_W, VEX_LIG;
1519 defm VCVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
1520 "cvttsd2si\t{$src, $dst|$dst, $src}",
1523 defm VCVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
1524 "cvttsd2si\t{$src, $dst|$dst, $src}",
1526 XD, VEX, VEX_W, VEX_LIG;
1528 def : InstAlias<"vcvttss2si{l}\t{$src, $dst|$dst, $src}",
1529 (VCVTTSS2SIrr GR32:$dst, FR32:$src), 0>;
1530 def : InstAlias<"vcvttss2si{l}\t{$src, $dst|$dst, $src}",
1531 (VCVTTSS2SIrm GR32:$dst, f32mem:$src), 0>;
1532 def : InstAlias<"vcvttsd2si{l}\t{$src, $dst|$dst, $src}",
1533 (VCVTTSD2SIrr GR32:$dst, FR64:$src), 0>;
1534 def : InstAlias<"vcvttsd2si{l}\t{$src, $dst|$dst, $src}",
1535 (VCVTTSD2SIrm GR32:$dst, f64mem:$src), 0>;
1536 def : InstAlias<"vcvttss2si{q}\t{$src, $dst|$dst, $src}",
1537 (VCVTTSS2SI64rr GR64:$dst, FR32:$src), 0>;
1538 def : InstAlias<"vcvttss2si{q}\t{$src, $dst|$dst, $src}",
1539 (VCVTTSS2SI64rm GR64:$dst, f32mem:$src), 0>;
1540 def : InstAlias<"vcvttsd2si{q}\t{$src, $dst|$dst, $src}",
1541 (VCVTTSD2SI64rr GR64:$dst, FR64:$src), 0>;
1542 def : InstAlias<"vcvttsd2si{q}\t{$src, $dst|$dst, $src}",
1543 (VCVTTSD2SI64rm GR64:$dst, f64mem:$src), 0>;
// The assembler can recognize rr 64-bit instructions by seeing an rxx
// register, but the same isn't true when only memory operands are used, so
// provide explicit "l" and "q" assembly forms to address this where
// appropriate.
1549 defm VCVTSI2SS : sse12_vcvt_avx<0x2A, GR32, FR32, i32mem, "cvtsi2ss{l}">,
1550 XS, VEX_4V, VEX_LIG;
1551 defm VCVTSI2SS64 : sse12_vcvt_avx<0x2A, GR64, FR32, i64mem, "cvtsi2ss{q}">,
1552 XS, VEX_4V, VEX_W, VEX_LIG;
1553 defm VCVTSI2SD : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sd{l}">,
1554 XD, VEX_4V, VEX_LIG;
1555 defm VCVTSI2SD64 : sse12_vcvt_avx<0x2A, GR64, FR64, i64mem, "cvtsi2sd{q}">,
1556 XD, VEX_4V, VEX_W, VEX_LIG;
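// Example (illustrative only): a register source implies the width, but
// "vcvtsi2ss (%rax), %xmm0, %xmm0" would be ambiguous, so the suffixed
// spellings make the memory width explicit:
//   vcvtsi2ssl (%rax), %xmm0, %xmm0   ; 32-bit memory operand
//   vcvtsi2ssq (%rax), %xmm0, %xmm0   ; 64-bit memory operand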
1558 let Predicates = [UseAVX] in {
1559 def : InstAlias<"vcvtsi2ss\t{$src, $src1, $dst|$dst, $src1, $src}",
1560 (VCVTSI2SSrm FR64:$dst, FR64:$src1, i32mem:$src), 0>;
1561 def : InstAlias<"vcvtsi2sd\t{$src, $src1, $dst|$dst, $src1, $src}",
1562 (VCVTSI2SDrm FR64:$dst, FR64:$src1, i32mem:$src), 0>;
1564 def : Pat<(f32 (sint_to_fp (loadi32 addr:$src))),
1565 (VCVTSI2SSrm (f32 (IMPLICIT_DEF)), addr:$src)>;
1566 def : Pat<(f32 (sint_to_fp (loadi64 addr:$src))),
1567 (VCVTSI2SS64rm (f32 (IMPLICIT_DEF)), addr:$src)>;
1568 def : Pat<(f64 (sint_to_fp (loadi32 addr:$src))),
1569 (VCVTSI2SDrm (f64 (IMPLICIT_DEF)), addr:$src)>;
1570 def : Pat<(f64 (sint_to_fp (loadi64 addr:$src))),
1571 (VCVTSI2SD64rm (f64 (IMPLICIT_DEF)), addr:$src)>;
1573 def : Pat<(f32 (sint_to_fp GR32:$src)),
1574 (VCVTSI2SSrr (f32 (IMPLICIT_DEF)), GR32:$src)>;
1575 def : Pat<(f32 (sint_to_fp GR64:$src)),
1576 (VCVTSI2SS64rr (f32 (IMPLICIT_DEF)), GR64:$src)>;
1577 def : Pat<(f64 (sint_to_fp GR32:$src)),
1578 (VCVTSI2SDrr (f64 (IMPLICIT_DEF)), GR32:$src)>;
1579 def : Pat<(f64 (sint_to_fp GR64:$src)),
1580 (VCVTSI2SD64rr (f64 (IMPLICIT_DEF)), GR64:$src)>;
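// Illustrative note: the IMPLICIT_DEF above supplies the pass-through $src1
// required by the three-operand AVX form, so a plain scalar conversion does
// not pick up a false dependency on whatever was last in $dst; e.g.
//   (f32 (sint_to_fp GR32:$src))
// selects to vcvtsi2ss with an arbitrary (undefined) pass-through register.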
1583 defm CVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
1584 "cvttss2si\t{$src, $dst|$dst, $src}",
1585 SSE_CVT_SS2SI_32>, XS;
1586 defm CVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
1587 "cvttss2si\t{$src, $dst|$dst, $src}",
1588 SSE_CVT_SS2SI_64>, XS, REX_W;
1589 defm CVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
1590 "cvttsd2si\t{$src, $dst|$dst, $src}",
1592 defm CVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
1593 "cvttsd2si\t{$src, $dst|$dst, $src}",
1594 SSE_CVT_SD2SI>, XD, REX_W;
1595 defm CVTSI2SS : sse12_cvt_s<0x2A, GR32, FR32, sint_to_fp, i32mem, loadi32,
1596 "cvtsi2ss{l}\t{$src, $dst|$dst, $src}",
1597 SSE_CVT_Scalar>, XS;
1598 defm CVTSI2SS64 : sse12_cvt_s<0x2A, GR64, FR32, sint_to_fp, i64mem, loadi64,
1599 "cvtsi2ss{q}\t{$src, $dst|$dst, $src}",
1600 SSE_CVT_Scalar>, XS, REX_W;
1601 defm CVTSI2SD : sse12_cvt_s<0x2A, GR32, FR64, sint_to_fp, i32mem, loadi32,
1602 "cvtsi2sd{l}\t{$src, $dst|$dst, $src}",
1603 SSE_CVT_Scalar>, XD;
1604 defm CVTSI2SD64 : sse12_cvt_s<0x2A, GR64, FR64, sint_to_fp, i64mem, loadi64,
1605 "cvtsi2sd{q}\t{$src, $dst|$dst, $src}",
1606 SSE_CVT_Scalar>, XD, REX_W;
1608 def : InstAlias<"cvttss2si{l}\t{$src, $dst|$dst, $src}",
1609 (CVTTSS2SIrr GR32:$dst, FR32:$src), 0>;
1610 def : InstAlias<"cvttss2si{l}\t{$src, $dst|$dst, $src}",
1611 (CVTTSS2SIrm GR32:$dst, f32mem:$src), 0>;
1612 def : InstAlias<"cvttsd2si{l}\t{$src, $dst|$dst, $src}",
1613 (CVTTSD2SIrr GR32:$dst, FR64:$src), 0>;
1614 def : InstAlias<"cvttsd2si{l}\t{$src, $dst|$dst, $src}",
1615 (CVTTSD2SIrm GR32:$dst, f64mem:$src), 0>;
1616 def : InstAlias<"cvttss2si{q}\t{$src, $dst|$dst, $src}",
1617 (CVTTSS2SI64rr GR64:$dst, FR32:$src), 0>;
1618 def : InstAlias<"cvttss2si{q}\t{$src, $dst|$dst, $src}",
1619 (CVTTSS2SI64rm GR64:$dst, f32mem:$src), 0>;
1620 def : InstAlias<"cvttsd2si{q}\t{$src, $dst|$dst, $src}",
1621 (CVTTSD2SI64rr GR64:$dst, FR64:$src), 0>;
1622 def : InstAlias<"cvttsd2si{q}\t{$src, $dst|$dst, $src}",
1623 (CVTTSD2SI64rm GR64:$dst, f64mem:$src), 0>;
1625 def : InstAlias<"cvtsi2ss\t{$src, $dst|$dst, $src}",
1626 (CVTSI2SSrm FR64:$dst, i32mem:$src), 0>;
1627 def : InstAlias<"cvtsi2sd\t{$src, $dst|$dst, $src}",
1628 (CVTSI2SDrm FR64:$dst, i32mem:$src), 0>;
1630 // Conversion Instructions Intrinsics - Match intrinsics which expect MM
1631 // and/or XMM operand(s).
// FIXME: We probably want to match the rm form only when optimizing for
// size, to avoid false dependencies (see sse_fp_unop_s for details).
1635 multiclass sse12_cvt_sint<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
1636 Intrinsic Int, Operand memop, ComplexPattern mem_cpat,
1637 string asm, OpndItins itins> {
1638 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
1639 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
1640 [(set DstRC:$dst, (Int SrcRC:$src))], itins.rr>,
1641 Sched<[itins.Sched]>;
1642 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins memop:$src),
1643 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
1644 [(set DstRC:$dst, (Int mem_cpat:$src))], itins.rm>,
1645 Sched<[itins.Sched.Folded]>;
1648 multiclass sse12_cvt_sint_3addr<bits<8> opc, RegisterClass SrcRC,
1649 RegisterClass DstRC, Intrinsic Int, X86MemOperand x86memop,
1650 PatFrag ld_frag, string asm, OpndItins itins,
1652 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src2),
1654 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
1655 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
1656 [(set DstRC:$dst, (Int DstRC:$src1, SrcRC:$src2))],
1657 itins.rr>, Sched<[itins.Sched]>;
1658 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
1659 (ins DstRC:$src1, x86memop:$src2),
1661 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
1662 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
1663 [(set DstRC:$dst, (Int DstRC:$src1, (ld_frag addr:$src2)))],
1664 itins.rm>, Sched<[itins.Sched.Folded, ReadAfterLd]>;
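// For orientation (illustrative sketch): one defm of this multiclass, such as
//   defm Int_CVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128, ...>;
// expands to an rr and an rm instruction; the rm form folds the load and adds
// a ReadAfterLd scheduling edge so $src1 is read only after the load.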
1667 let Predicates = [UseAVX] in {
1668 defm VCVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32,
1669 int_x86_sse2_cvtsd2si, sdmem, sse_load_f64, "cvtsd2si",
1670 SSE_CVT_SD2SI>, XD, VEX, VEX_LIG;
1671 defm VCVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64,
1672 int_x86_sse2_cvtsd2si64, sdmem, sse_load_f64, "cvtsd2si",
1673 SSE_CVT_SD2SI>, XD, VEX, VEX_W, VEX_LIG;
1675 defm CVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
1676 sdmem, sse_load_f64, "cvtsd2si", SSE_CVT_SD2SI>, XD;
1677 defm CVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse2_cvtsd2si64,
1678 sdmem, sse_load_f64, "cvtsd2si", SSE_CVT_SD2SI>, XD, REX_W;
1681 let isCodeGenOnly = 1 in {
1682 let Predicates = [UseAVX] in {
1683 defm Int_VCVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
1684 int_x86_sse_cvtsi2ss, i32mem, loadi32, "cvtsi2ss{l}",
1685 SSE_CVT_Scalar, 0>, XS, VEX_4V;
1686 defm Int_VCVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
1687 int_x86_sse_cvtsi642ss, i64mem, loadi64, "cvtsi2ss{q}",
1688 SSE_CVT_Scalar, 0>, XS, VEX_4V,
1690 defm Int_VCVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
1691 int_x86_sse2_cvtsi2sd, i32mem, loadi32, "cvtsi2sd{l}",
1692 SSE_CVT_Scalar, 0>, XD, VEX_4V;
1693 defm Int_VCVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
1694 int_x86_sse2_cvtsi642sd, i64mem, loadi64, "cvtsi2sd{q}",
1695 SSE_CVT_Scalar, 0>, XD,
1698 let Constraints = "$src1 = $dst" in {
1699 defm Int_CVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
1700 int_x86_sse_cvtsi2ss, i32mem, loadi32,
1701 "cvtsi2ss{l}", SSE_CVT_Scalar>, XS;
1702 defm Int_CVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
1703 int_x86_sse_cvtsi642ss, i64mem, loadi64,
1704 "cvtsi2ss{q}", SSE_CVT_Scalar>, XS, REX_W;
1705 defm Int_CVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
1706 int_x86_sse2_cvtsi2sd, i32mem, loadi32,
1707 "cvtsi2sd{l}", SSE_CVT_Scalar>, XD;
1708 defm Int_CVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
1709 int_x86_sse2_cvtsi642sd, i64mem, loadi64,
1710 "cvtsi2sd{q}", SSE_CVT_Scalar>, XD, REX_W;
1712 } // isCodeGenOnly = 1
1716 // Aliases for intrinsics
1717 let isCodeGenOnly = 1 in {
1718 let Predicates = [UseAVX] in {
1719 defm Int_VCVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
1720 ssmem, sse_load_f32, "cvttss2si",
1721 SSE_CVT_SS2SI_32>, XS, VEX;
1722 defm Int_VCVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
1723 int_x86_sse_cvttss2si64, ssmem, sse_load_f32,
1724 "cvttss2si", SSE_CVT_SS2SI_64>,
1726 defm Int_VCVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
1727 sdmem, sse_load_f64, "cvttsd2si",
1728 SSE_CVT_SD2SI>, XD, VEX;
1729 defm Int_VCVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
1730 int_x86_sse2_cvttsd2si64, sdmem, sse_load_f64,
1731 "cvttsd2si", SSE_CVT_SD2SI>,
1734 defm Int_CVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
1735 ssmem, sse_load_f32, "cvttss2si",
1736 SSE_CVT_SS2SI_32>, XS;
1737 defm Int_CVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
1738 int_x86_sse_cvttss2si64, ssmem, sse_load_f32,
1739 "cvttss2si", SSE_CVT_SS2SI_64>, XS, REX_W;
1740 defm Int_CVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
1741 sdmem, sse_load_f64, "cvttsd2si",
1743 defm Int_CVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
1744 int_x86_sse2_cvttsd2si64, sdmem, sse_load_f64,
1745 "cvttsd2si", SSE_CVT_SD2SI>, XD, REX_W;
1746 } // isCodeGenOnly = 1
1748 let Predicates = [UseAVX] in {
1749 defm VCVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si,
1750 ssmem, sse_load_f32, "cvtss2si",
1751 SSE_CVT_SS2SI_32>, XS, VEX, VEX_LIG;
1752 defm VCVTSS2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse_cvtss2si64,
1753 ssmem, sse_load_f32, "cvtss2si",
1754 SSE_CVT_SS2SI_64>, XS, VEX, VEX_W, VEX_LIG;
1756 defm CVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si,
1757 ssmem, sse_load_f32, "cvtss2si",
1758 SSE_CVT_SS2SI_32>, XS;
1759 defm CVTSS2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse_cvtss2si64,
1760 ssmem, sse_load_f32, "cvtss2si",
1761 SSE_CVT_SS2SI_64>, XS, REX_W;
1763 defm VCVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, i128mem,
1764 "vcvtdq2ps\t{$src, $dst|$dst, $src}",
1765 SSEPackedSingle, SSE_CVT_PS>,
1766 PS, VEX, Requires<[HasAVX]>;
1767 defm VCVTDQ2PSY : sse12_cvt_p<0x5B, VR256, VR256, i256mem,
1768 "vcvtdq2ps\t{$src, $dst|$dst, $src}",
1769 SSEPackedSingle, SSE_CVT_PS>,
1770 PS, VEX, VEX_L, Requires<[HasAVX]>;
1772 defm CVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, i128mem,
1773 "cvtdq2ps\t{$src, $dst|$dst, $src}",
1774 SSEPackedSingle, SSE_CVT_PS>,
1775 PS, Requires<[UseSSE2]>;
1777 let Predicates = [UseAVX] in {
1778 def : InstAlias<"vcvtss2si{l}\t{$src, $dst|$dst, $src}",
1779 (VCVTSS2SIrr GR32:$dst, VR128:$src), 0>;
1780 def : InstAlias<"vcvtss2si{l}\t{$src, $dst|$dst, $src}",
1781 (VCVTSS2SIrm GR32:$dst, ssmem:$src), 0>;
1782 def : InstAlias<"vcvtsd2si{l}\t{$src, $dst|$dst, $src}",
1783 (VCVTSD2SIrr GR32:$dst, VR128:$src), 0>;
1784 def : InstAlias<"vcvtsd2si{l}\t{$src, $dst|$dst, $src}",
1785 (VCVTSD2SIrm GR32:$dst, sdmem:$src), 0>;
1786 def : InstAlias<"vcvtss2si{q}\t{$src, $dst|$dst, $src}",
1787 (VCVTSS2SI64rr GR64:$dst, VR128:$src), 0>;
1788 def : InstAlias<"vcvtss2si{q}\t{$src, $dst|$dst, $src}",
1789 (VCVTSS2SI64rm GR64:$dst, ssmem:$src), 0>;
1790 def : InstAlias<"vcvtsd2si{q}\t{$src, $dst|$dst, $src}",
1791 (VCVTSD2SI64rr GR64:$dst, VR128:$src), 0>;
1792 def : InstAlias<"vcvtsd2si{q}\t{$src, $dst|$dst, $src}",
1793 (VCVTSD2SI64rm GR64:$dst, sdmem:$src), 0>;
1796 def : InstAlias<"cvtss2si{l}\t{$src, $dst|$dst, $src}",
1797 (CVTSS2SIrr GR32:$dst, VR128:$src), 0>;
1798 def : InstAlias<"cvtss2si{l}\t{$src, $dst|$dst, $src}",
1799 (CVTSS2SIrm GR32:$dst, ssmem:$src), 0>;
1800 def : InstAlias<"cvtsd2si{l}\t{$src, $dst|$dst, $src}",
1801 (CVTSD2SIrr GR32:$dst, VR128:$src), 0>;
1802 def : InstAlias<"cvtsd2si{l}\t{$src, $dst|$dst, $src}",
1803 (CVTSD2SIrm GR32:$dst, sdmem:$src), 0>;
1804 def : InstAlias<"cvtss2si{q}\t{$src, $dst|$dst, $src}",
1805 (CVTSS2SI64rr GR64:$dst, VR128:$src), 0>;
1806 def : InstAlias<"cvtss2si{q}\t{$src, $dst|$dst, $src}",
1807 (CVTSS2SI64rm GR64:$dst, ssmem:$src), 0>;
1808 def : InstAlias<"cvtsd2si{q}\t{$src, $dst|$dst, $src}",
1809 (CVTSD2SI64rr GR64:$dst, VR128:$src), 0>;
1810 def : InstAlias<"cvtsd2si{q}\t{$src, $dst|$dst, $src}",
                (CVTSD2SI64rm GR64:$dst, sdmem:$src), 0>;
1815 // Convert scalar double to scalar single
1816 let hasSideEffects = 0, Predicates = [UseAVX] in {
1817 def VCVTSD2SSrr : VSDI<0x5A, MRMSrcReg, (outs FR32:$dst),
1818 (ins FR64:$src1, FR64:$src2),
1819 "cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}", [],
1820 IIC_SSE_CVT_Scalar_RR>, VEX_4V, VEX_LIG,
1821 Sched<[WriteCvtF2F]>;
1823 def VCVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst),
1824 (ins FR64:$src1, f64mem:$src2),
1825 "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1826 [], IIC_SSE_CVT_Scalar_RM>,
1827 XD, Requires<[HasAVX, OptForSize]>, VEX_4V, VEX_LIG,
1828 Sched<[WriteCvtF2FLd, ReadAfterLd]>;
1831 def : Pat<(f32 (fround FR64:$src)), (VCVTSD2SSrr FR64:$src, FR64:$src)>,
1834 def CVTSD2SSrr : SDI<0x5A, MRMSrcReg, (outs FR32:$dst), (ins FR64:$src),
1835 "cvtsd2ss\t{$src, $dst|$dst, $src}",
1836 [(set FR32:$dst, (fround FR64:$src))],
1837 IIC_SSE_CVT_Scalar_RR>, Sched<[WriteCvtF2F]>;
1838 def CVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst), (ins f64mem:$src),
1839 "cvtsd2ss\t{$src, $dst|$dst, $src}",
1840 [(set FR32:$dst, (fround (loadf64 addr:$src)))],
1841 IIC_SSE_CVT_Scalar_RM>,
1843 Requires<[UseSSE2, OptForSize]>, Sched<[WriteCvtF2FLd]>;
1845 let isCodeGenOnly = 1 in {
1846 def Int_VCVTSD2SSrr: I<0x5A, MRMSrcReg,
1847 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1848 "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1850 (int_x86_sse2_cvtsd2ss VR128:$src1, VR128:$src2))],
1851 IIC_SSE_CVT_Scalar_RR>, XD, VEX_4V, Requires<[HasAVX]>,
1852 Sched<[WriteCvtF2F]>;
1853 def Int_VCVTSD2SSrm: I<0x5A, MRMSrcReg,
1854 (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2),
1855 "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1856 [(set VR128:$dst, (int_x86_sse2_cvtsd2ss
1857 VR128:$src1, sse_load_f64:$src2))],
1858 IIC_SSE_CVT_Scalar_RM>, XD, VEX_4V, Requires<[HasAVX]>,
1859 Sched<[WriteCvtF2FLd, ReadAfterLd]>;
1861 let Constraints = "$src1 = $dst" in {
1862 def Int_CVTSD2SSrr: I<0x5A, MRMSrcReg,
1863 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1864 "cvtsd2ss\t{$src2, $dst|$dst, $src2}",
1866 (int_x86_sse2_cvtsd2ss VR128:$src1, VR128:$src2))],
1867 IIC_SSE_CVT_Scalar_RR>, XD, Requires<[UseSSE2]>,
1868 Sched<[WriteCvtF2F]>;
1869 def Int_CVTSD2SSrm: I<0x5A, MRMSrcReg,
1870 (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2),
1871 "cvtsd2ss\t{$src2, $dst|$dst, $src2}",
1872 [(set VR128:$dst, (int_x86_sse2_cvtsd2ss
1873 VR128:$src1, sse_load_f64:$src2))],
1874 IIC_SSE_CVT_Scalar_RM>, XD, Requires<[UseSSE2]>,
1875 Sched<[WriteCvtF2FLd, ReadAfterLd]>;
1877 } // isCodeGenOnly = 1
1879 // Convert scalar single to scalar double
1880 // SSE2 instructions with XS prefix
1881 let hasSideEffects = 0, Predicates = [UseAVX] in {
1882 def VCVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst),
1883 (ins FR32:$src1, FR32:$src2),
1884 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1885 [], IIC_SSE_CVT_Scalar_RR>,
1886 XS, Requires<[HasAVX]>, VEX_4V, VEX_LIG,
1887 Sched<[WriteCvtF2F]>;
1889 def VCVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst),
1890 (ins FR32:$src1, f32mem:$src2),
1891 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1892 [], IIC_SSE_CVT_Scalar_RM>,
1893 XS, VEX_4V, VEX_LIG, Requires<[HasAVX, OptForSize]>,
1894 Sched<[WriteCvtF2FLd, ReadAfterLd]>;
1897 def : Pat<(f64 (fextend FR32:$src)),
1898 (VCVTSS2SDrr FR32:$src, FR32:$src)>, Requires<[UseAVX]>;
1899 def : Pat<(fextend (loadf32 addr:$src)),
1900 (VCVTSS2SDrm (f32 (IMPLICIT_DEF)), addr:$src)>, Requires<[UseAVX]>;
1902 def : Pat<(extloadf32 addr:$src),
1903 (VCVTSS2SDrm (f32 (IMPLICIT_DEF)), addr:$src)>,
1904 Requires<[UseAVX, OptForSize]>;
1905 def : Pat<(extloadf32 addr:$src),
1906 (VCVTSS2SDrr (f32 (IMPLICIT_DEF)), (VMOVSSrm addr:$src))>,
1907 Requires<[UseAVX, OptForSpeed]>;
1909 def CVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst), (ins FR32:$src),
1910 "cvtss2sd\t{$src, $dst|$dst, $src}",
1911 [(set FR64:$dst, (fextend FR32:$src))],
1912 IIC_SSE_CVT_Scalar_RR>, XS,
1913 Requires<[UseSSE2]>, Sched<[WriteCvtF2F]>;
1914 def CVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst), (ins f32mem:$src),
1915 "cvtss2sd\t{$src, $dst|$dst, $src}",
1916 [(set FR64:$dst, (extloadf32 addr:$src))],
1917 IIC_SSE_CVT_Scalar_RM>, XS,
1918 Requires<[UseSSE2, OptForSize]>, Sched<[WriteCvtF2FLd]>;
// extload f32 -> f64. This matches load+fextend because we have a hack in
// the isel (PreprocessForFPConvert) that can introduce loads after dag
// combine. Since these loads aren't folded into the fextend, we have to match
// them explicitly here.
1926 (CVTSS2SDrm addr:$src)>, Requires<[UseSSE2]>;
1927 def : Pat<(extloadf32 addr:$src),
1928 (CVTSS2SDrr (MOVSSrm addr:$src))>, Requires<[UseSSE2, OptForSpeed]>;
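// Illustrative contrast (a sketch of the two selections): with OptForSize
// the load is folded, "cvtss2sd (%rax), %xmm0"; with OptForSpeed it goes
// through a register first,
//   movss    (%rax), %xmm1
//   cvtss2sd %xmm1, %xmm0
// since movss from memory zeroes the upper bits and breaks the dependency.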
1930 let isCodeGenOnly = 1 in {
1931 def Int_VCVTSS2SDrr: I<0x5A, MRMSrcReg,
1932 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1933 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1935 (int_x86_sse2_cvtss2sd VR128:$src1, VR128:$src2))],
1936 IIC_SSE_CVT_Scalar_RR>, XS, VEX_4V, Requires<[HasAVX]>,
1937 Sched<[WriteCvtF2F]>;
1938 def Int_VCVTSS2SDrm: I<0x5A, MRMSrcMem,
1939 (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2),
1940 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1942 (int_x86_sse2_cvtss2sd VR128:$src1, sse_load_f32:$src2))],
1943 IIC_SSE_CVT_Scalar_RM>, XS, VEX_4V, Requires<[HasAVX]>,
1944 Sched<[WriteCvtF2FLd, ReadAfterLd]>;
1945 let Constraints = "$src1 = $dst" in { // SSE2 instructions with XS prefix
1946 def Int_CVTSS2SDrr: I<0x5A, MRMSrcReg,
1947 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1948 "cvtss2sd\t{$src2, $dst|$dst, $src2}",
1950 (int_x86_sse2_cvtss2sd VR128:$src1, VR128:$src2))],
1951 IIC_SSE_CVT_Scalar_RR>, XS, Requires<[UseSSE2]>,
1952 Sched<[WriteCvtF2F]>;
1953 def Int_CVTSS2SDrm: I<0x5A, MRMSrcMem,
1954 (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2),
1955 "cvtss2sd\t{$src2, $dst|$dst, $src2}",
1957 (int_x86_sse2_cvtss2sd VR128:$src1, sse_load_f32:$src2))],
1958 IIC_SSE_CVT_Scalar_RM>, XS, Requires<[UseSSE2]>,
1959 Sched<[WriteCvtF2FLd, ReadAfterLd]>;
1961 } // isCodeGenOnly = 1
1963 // Convert packed single/double fp to doubleword
1964 def VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1965 "cvtps2dq\t{$src, $dst|$dst, $src}",
1966 [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))],
1967 IIC_SSE_CVT_PS_RR>, VEX, Sched<[WriteCvtF2I]>;
1968 def VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1969 "cvtps2dq\t{$src, $dst|$dst, $src}",
1971 (int_x86_sse2_cvtps2dq (loadv4f32 addr:$src)))],
1972 IIC_SSE_CVT_PS_RM>, VEX, Sched<[WriteCvtF2ILd]>;
1973 def VCVTPS2DQYrr : VPDI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
1974 "cvtps2dq\t{$src, $dst|$dst, $src}",
1976 (int_x86_avx_cvt_ps2dq_256 VR256:$src))],
1977 IIC_SSE_CVT_PS_RR>, VEX, VEX_L, Sched<[WriteCvtF2I]>;
1978 def VCVTPS2DQYrm : VPDI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
1979 "cvtps2dq\t{$src, $dst|$dst, $src}",
1981 (int_x86_avx_cvt_ps2dq_256 (loadv8f32 addr:$src)))],
1982 IIC_SSE_CVT_PS_RM>, VEX, VEX_L, Sched<[WriteCvtF2ILd]>;
1983 def CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1984 "cvtps2dq\t{$src, $dst|$dst, $src}",
1985 [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))],
1986 IIC_SSE_CVT_PS_RR>, Sched<[WriteCvtF2I]>;
1987 def CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1988 "cvtps2dq\t{$src, $dst|$dst, $src}",
1990 (int_x86_sse2_cvtps2dq (memopv4f32 addr:$src)))],
1991 IIC_SSE_CVT_PS_RM>, Sched<[WriteCvtF2ILd]>;
1994 // Convert Packed Double FP to Packed DW Integers
1995 let Predicates = [HasAVX] in {
1996 // The assembler can recognize rr 256-bit instructions by seeing a ymm
1997 // register, but the same isn't true when using memory operands instead.
1998 // Provide other assembly rr and rm forms to address this explicitly.
1999 def VCVTPD2DQrr : SDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2000 "vcvtpd2dq\t{$src, $dst|$dst, $src}",
2001 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
2002 VEX, Sched<[WriteCvtF2I]>;
2005 def : InstAlias<"vcvtpd2dqx\t{$src, $dst|$dst, $src}",
2006 (VCVTPD2DQrr VR128:$dst, VR128:$src), 0>;
2007 def VCVTPD2DQXrm : SDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2008 "vcvtpd2dqx\t{$src, $dst|$dst, $src}",
2010 (int_x86_sse2_cvtpd2dq (loadv2f64 addr:$src)))]>, VEX,
2011 Sched<[WriteCvtF2ILd]>;
2014 def VCVTPD2DQYrr : SDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
2015 "vcvtpd2dq{y}\t{$src, $dst|$dst, $src}",
2017 (int_x86_avx_cvt_pd2dq_256 VR256:$src))]>, VEX, VEX_L,
2018 Sched<[WriteCvtF2I]>;
2019 def VCVTPD2DQYrm : SDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
2020 "vcvtpd2dq{y}\t{$src, $dst|$dst, $src}",
2022 (int_x86_avx_cvt_pd2dq_256 (loadv4f64 addr:$src)))]>,
2023 VEX, VEX_L, Sched<[WriteCvtF2ILd]>;
2024 def : InstAlias<"vcvtpd2dq\t{$src, $dst|$dst, $src}",
2025 (VCVTPD2DQYrr VR128:$dst, VR256:$src), 0>;
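// Example (illustrative, AT&T syntax): "vcvtpd2dq (%rax), %xmm0" leaves the
// source width unstated, so the suffixed spellings disambiguate:
//   vcvtpd2dqx (%rax), %xmm0   ; 128-bit memory source
//   vcvtpd2dqy (%rax), %xmm0   ; 256-bit memory source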
2028 def CVTPD2DQrm : SDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2029 "cvtpd2dq\t{$src, $dst|$dst, $src}",
2031 (int_x86_sse2_cvtpd2dq (memopv2f64 addr:$src)))],
2032 IIC_SSE_CVT_PD_RM>, Sched<[WriteCvtF2ILd]>;
2033 def CVTPD2DQrr : SDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2034 "cvtpd2dq\t{$src, $dst|$dst, $src}",
2035 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))],
2036 IIC_SSE_CVT_PD_RR>, Sched<[WriteCvtF2I]>;
2038 // Convert with truncation packed single/double fp to doubleword
2039 // SSE2 packed instructions with XS prefix
2040 def VCVTTPS2DQrr : VS2SI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2041 "cvttps2dq\t{$src, $dst|$dst, $src}",
2043 (int_x86_sse2_cvttps2dq VR128:$src))],
2044 IIC_SSE_CVT_PS_RR>, VEX, Sched<[WriteCvtF2I]>;
2045 def VCVTTPS2DQrm : VS2SI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2046 "cvttps2dq\t{$src, $dst|$dst, $src}",
2047 [(set VR128:$dst, (int_x86_sse2_cvttps2dq
2048 (loadv4f32 addr:$src)))],
2049 IIC_SSE_CVT_PS_RM>, VEX, Sched<[WriteCvtF2ILd]>;
2050 def VCVTTPS2DQYrr : VS2SI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
2051 "cvttps2dq\t{$src, $dst|$dst, $src}",
2053 (int_x86_avx_cvtt_ps2dq_256 VR256:$src))],
2054 IIC_SSE_CVT_PS_RR>, VEX, VEX_L, Sched<[WriteCvtF2I]>;
2055 def VCVTTPS2DQYrm : VS2SI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
2056 "cvttps2dq\t{$src, $dst|$dst, $src}",
2057 [(set VR256:$dst, (int_x86_avx_cvtt_ps2dq_256
2058 (loadv8f32 addr:$src)))],
2059 IIC_SSE_CVT_PS_RM>, VEX, VEX_L,
2060 Sched<[WriteCvtF2ILd]>;
2062 def CVTTPS2DQrr : S2SI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2063 "cvttps2dq\t{$src, $dst|$dst, $src}",
2064 [(set VR128:$dst, (int_x86_sse2_cvttps2dq VR128:$src))],
2065 IIC_SSE_CVT_PS_RR>, Sched<[WriteCvtF2I]>;
2066 def CVTTPS2DQrm : S2SI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2067 "cvttps2dq\t{$src, $dst|$dst, $src}",
2069 (int_x86_sse2_cvttps2dq (memopv4f32 addr:$src)))],
2070 IIC_SSE_CVT_PS_RM>, Sched<[WriteCvtF2ILd]>;
2072 let Predicates = [HasAVX] in {
2073 def : Pat<(int_x86_sse2_cvtdq2ps VR128:$src),
2074 (VCVTDQ2PSrr VR128:$src)>;
2075 def : Pat<(int_x86_sse2_cvtdq2ps (bc_v4i32 (loadv2i64 addr:$src))),
2076 (VCVTDQ2PSrm addr:$src)>;
2079 let Predicates = [HasAVX, NoVLX] in {
2080 def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
2081 (VCVTDQ2PSrr VR128:$src)>;
2082 def : Pat<(v4f32 (sint_to_fp (bc_v4i32 (loadv2i64 addr:$src)))),
2083 (VCVTDQ2PSrm addr:$src)>;
2085 def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
2086 (VCVTTPS2DQrr VR128:$src)>;
2087 def : Pat<(v4i32 (fp_to_sint (loadv4f32 addr:$src))),
2088 (VCVTTPS2DQrm addr:$src)>;
2090 def : Pat<(v8f32 (sint_to_fp (v8i32 VR256:$src))),
2091 (VCVTDQ2PSYrr VR256:$src)>;
2092 def : Pat<(v8f32 (sint_to_fp (bc_v8i32 (loadv4i64 addr:$src)))),
2093 (VCVTDQ2PSYrm addr:$src)>;
2095 def : Pat<(v8i32 (fp_to_sint (v8f32 VR256:$src))),
2096 (VCVTTPS2DQYrr VR256:$src)>;
2097 def : Pat<(v8i32 (fp_to_sint (loadv8f32 addr:$src))),
2098 (VCVTTPS2DQYrm addr:$src)>;
2101 let Predicates = [UseSSE2] in {
2102 def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
2103 (CVTDQ2PSrr VR128:$src)>;
2104 def : Pat<(v4f32 (sint_to_fp (bc_v4i32 (memopv2i64 addr:$src)))),
2105 (CVTDQ2PSrm addr:$src)>;
2107 def : Pat<(int_x86_sse2_cvtdq2ps VR128:$src),
2108 (CVTDQ2PSrr VR128:$src)>;
2109 def : Pat<(int_x86_sse2_cvtdq2ps (bc_v4i32 (memopv2i64 addr:$src))),
2110 (CVTDQ2PSrm addr:$src)>;
2112 def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
2113 (CVTTPS2DQrr VR128:$src)>;
2114 def : Pat<(v4i32 (fp_to_sint (memopv4f32 addr:$src))),
2115 (CVTTPS2DQrm addr:$src)>;
2118 def VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2119 "cvttpd2dq\t{$src, $dst|$dst, $src}",
2121 (int_x86_sse2_cvttpd2dq VR128:$src))],
2122 IIC_SSE_CVT_PD_RR>, VEX, Sched<[WriteCvtF2I]>;
2124 // The assembler can recognize rr 256-bit instructions by seeing a ymm
2125 // register, but the same isn't true when using memory operands instead.
2126 // Provide other assembly rr and rm forms to address this explicitly.
2129 def : InstAlias<"vcvttpd2dqx\t{$src, $dst|$dst, $src}",
2130 (VCVTTPD2DQrr VR128:$dst, VR128:$src), 0>;
2131 def VCVTTPD2DQXrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2132 "cvttpd2dqx\t{$src, $dst|$dst, $src}",
2133 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
2134 (loadv2f64 addr:$src)))],
2135 IIC_SSE_CVT_PD_RM>, VEX, Sched<[WriteCvtF2ILd]>;
2138 def VCVTTPD2DQYrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
2139 "cvttpd2dq{y}\t{$src, $dst|$dst, $src}",
2141 (int_x86_avx_cvtt_pd2dq_256 VR256:$src))],
2142 IIC_SSE_CVT_PD_RR>, VEX, VEX_L, Sched<[WriteCvtF2I]>;
2143 def VCVTTPD2DQYrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
2144 "cvttpd2dq{y}\t{$src, $dst|$dst, $src}",
2146 (int_x86_avx_cvtt_pd2dq_256 (loadv4f64 addr:$src)))],
2147 IIC_SSE_CVT_PD_RM>, VEX, VEX_L, Sched<[WriteCvtF2ILd]>;
2148 def : InstAlias<"vcvttpd2dq\t{$src, $dst|$dst, $src}",
2149 (VCVTTPD2DQYrr VR128:$dst, VR256:$src), 0>;
2151 let Predicates = [HasAVX, NoVLX] in {
2152 def : Pat<(v4i32 (fp_to_sint (v4f64 VR256:$src))),
2153 (VCVTTPD2DQYrr VR256:$src)>;
2154 def : Pat<(v4i32 (fp_to_sint (loadv4f64 addr:$src))),
2155 (VCVTTPD2DQYrm addr:$src)>;
} // Predicates = [HasAVX, NoVLX]
2158 def CVTTPD2DQrr : PDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2159 "cvttpd2dq\t{$src, $dst|$dst, $src}",
2160 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))],
2161 IIC_SSE_CVT_PD_RR>, Sched<[WriteCvtF2I]>;
2162 def CVTTPD2DQrm : PDI<0xE6, MRMSrcMem, (outs VR128:$dst),(ins f128mem:$src),
2163 "cvttpd2dq\t{$src, $dst|$dst, $src}",
2164 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
2165 (memopv2f64 addr:$src)))],
2167 Sched<[WriteCvtF2ILd]>;
2169 // Convert packed single to packed double
2170 let Predicates = [HasAVX] in {
2171 // SSE2 instructions without OpSize prefix
2172 def VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2173 "vcvtps2pd\t{$src, $dst|$dst, $src}",
2174 [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))],
2175 IIC_SSE_CVT_PD_RR>, PS, VEX, Sched<[WriteCvtF2F]>;
2176 def VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
2177 "vcvtps2pd\t{$src, $dst|$dst, $src}",
2178 [(set VR128:$dst, (v2f64 (extloadv2f32 addr:$src)))],
2179 IIC_SSE_CVT_PD_RM>, PS, VEX, Sched<[WriteCvtF2FLd]>;
2180 def VCVTPS2PDYrr : I<0x5A, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
2181 "vcvtps2pd\t{$src, $dst|$dst, $src}",
2183 (int_x86_avx_cvt_ps2_pd_256 VR128:$src))],
2184 IIC_SSE_CVT_PD_RR>, PS, VEX, VEX_L, Sched<[WriteCvtF2F]>;
2185 def VCVTPS2PDYrm : I<0x5A, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
2186 "vcvtps2pd\t{$src, $dst|$dst, $src}",
2188 (int_x86_avx_cvt_ps2_pd_256 (loadv4f32 addr:$src)))],
2189 IIC_SSE_CVT_PD_RM>, PS, VEX, VEX_L, Sched<[WriteCvtF2FLd]>;
2192 let Predicates = [UseSSE2] in {
2193 def CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2194 "cvtps2pd\t{$src, $dst|$dst, $src}",
2195 [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))],
2196 IIC_SSE_CVT_PD_RR>, PS, Sched<[WriteCvtF2F]>;
2197 def CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
2198 "cvtps2pd\t{$src, $dst|$dst, $src}",
2199 [(set VR128:$dst, (v2f64 (extloadv2f32 addr:$src)))],
2200 IIC_SSE_CVT_PD_RM>, PS, Sched<[WriteCvtF2FLd]>;
2203 // Convert Packed DW Integers to Packed Double FP
2204 let Predicates = [HasAVX] in {
2205 let hasSideEffects = 0, mayLoad = 1 in
2206 def VCVTDQ2PDrm : S2SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
2207 "vcvtdq2pd\t{$src, $dst|$dst, $src}",
2208 []>, VEX, Sched<[WriteCvtI2FLd]>;
2209 def VCVTDQ2PDrr : S2SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2210 "vcvtdq2pd\t{$src, $dst|$dst, $src}",
2212 (int_x86_sse2_cvtdq2pd VR128:$src))]>, VEX,
2213 Sched<[WriteCvtI2F]>;
2214 def VCVTDQ2PDYrm : S2SI<0xE6, MRMSrcMem, (outs VR256:$dst), (ins i128mem:$src),
2215 "vcvtdq2pd\t{$src, $dst|$dst, $src}",
2217 (int_x86_avx_cvtdq2_pd_256
2218 (bitconvert (loadv2i64 addr:$src))))]>, VEX, VEX_L,
2219 Sched<[WriteCvtI2FLd]>;
2220 def VCVTDQ2PDYrr : S2SI<0xE6, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
2221 "vcvtdq2pd\t{$src, $dst|$dst, $src}",
2223 (int_x86_avx_cvtdq2_pd_256 VR128:$src))]>, VEX, VEX_L,
2224 Sched<[WriteCvtI2F]>;
2227 let hasSideEffects = 0, mayLoad = 1 in
2228 def CVTDQ2PDrm : S2SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
2229 "cvtdq2pd\t{$src, $dst|$dst, $src}", [],
2230 IIC_SSE_CVT_PD_RR>, Sched<[WriteCvtI2FLd]>;
2231 def CVTDQ2PDrr : S2SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2232 "cvtdq2pd\t{$src, $dst|$dst, $src}",
2233 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))],
2234 IIC_SSE_CVT_PD_RM>, Sched<[WriteCvtI2F]>;
2236 // AVX register conversion intrinsics
2237 let Predicates = [HasAVX] in {
2238 def : Pat<(v2f64 (X86cvtdq2pd (v4i32 VR128:$src))),
2239 (VCVTDQ2PDrr VR128:$src)>;
2240 def : Pat<(v2f64 (X86cvtdq2pd (bc_v4i32 (loadv2i64 addr:$src)))),
2241 (VCVTDQ2PDrm addr:$src)>;
2243 def : Pat<(v4f64 (sint_to_fp (v4i32 VR128:$src))),
2244 (VCVTDQ2PDYrr VR128:$src)>;
2245 def : Pat<(v4f64 (sint_to_fp (bc_v4i32 (loadv2i64 addr:$src)))),
2246 (VCVTDQ2PDYrm addr:$src)>;
2247 } // Predicates = [HasAVX]
2249 // SSE2 register conversion intrinsics
2250 let Predicates = [HasSSE2] in {
2251 def : Pat<(v2f64 (X86cvtdq2pd (v4i32 VR128:$src))),
2252 (CVTDQ2PDrr VR128:$src)>;
2253 def : Pat<(v2f64 (X86cvtdq2pd (bc_v4i32 (loadv2i64 addr:$src)))),
2254 (CVTDQ2PDrm addr:$src)>;
2255 } // Predicates = [HasSSE2]
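// Illustrative note: cvtdq2pd widens each element, so only the low two i32
// lanes matter and the 128-bit memory form reads just 64 bits, e.g.
//   cvtdq2pd (%rax), %xmm0   ; loads 2 x i32, produces 2 x f64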
2257 // Convert packed double to packed single
2258 // The assembler can recognize rr 256-bit instructions by seeing a ymm
2259 // register, but the same isn't true when using memory operands instead.
2260 // Provide other assembly rr and rm forms to address this explicitly.
2261 def VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2262 "cvtpd2ps\t{$src, $dst|$dst, $src}",
2263 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))],
2264 IIC_SSE_CVT_PD_RR>, VEX, Sched<[WriteCvtF2F]>;
2267 def : InstAlias<"vcvtpd2psx\t{$src, $dst|$dst, $src}",
2268 (VCVTPD2PSrr VR128:$dst, VR128:$src), 0>;
2269 def VCVTPD2PSXrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2270 "cvtpd2psx\t{$src, $dst|$dst, $src}",
2272 (int_x86_sse2_cvtpd2ps (loadv2f64 addr:$src)))],
2273 IIC_SSE_CVT_PD_RM>, VEX, Sched<[WriteCvtF2FLd]>;
2276 def VCVTPD2PSYrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
2277 "cvtpd2ps{y}\t{$src, $dst|$dst, $src}",
2279 (int_x86_avx_cvt_pd2_ps_256 VR256:$src))],
2280 IIC_SSE_CVT_PD_RR>, VEX, VEX_L, Sched<[WriteCvtF2F]>;
2281 def VCVTPD2PSYrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
2282 "cvtpd2ps{y}\t{$src, $dst|$dst, $src}",
2284 (int_x86_avx_cvt_pd2_ps_256 (loadv4f64 addr:$src)))],
2285 IIC_SSE_CVT_PD_RM>, VEX, VEX_L, Sched<[WriteCvtF2FLd]>;
2286 def : InstAlias<"vcvtpd2ps\t{$src, $dst|$dst, $src}",
2287 (VCVTPD2PSYrr VR128:$dst, VR256:$src), 0>;
2289 def CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2290 "cvtpd2ps\t{$src, $dst|$dst, $src}",
2291 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))],
2292 IIC_SSE_CVT_PD_RR>, Sched<[WriteCvtF2F]>;
2293 def CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2294 "cvtpd2ps\t{$src, $dst|$dst, $src}",
2296 (int_x86_sse2_cvtpd2ps (memopv2f64 addr:$src)))],
2297 IIC_SSE_CVT_PD_RM>, Sched<[WriteCvtF2FLd]>;
2300 // AVX 256-bit register conversion intrinsics
2301 // FIXME: Migrate SSE conversion intrinsics matching to use patterns as below
2302 // whenever possible to avoid declaring two versions of each one.
2303 let Predicates = [HasAVX] in {
2304 def : Pat<(int_x86_avx_cvtdq2_ps_256 VR256:$src),
2305 (VCVTDQ2PSYrr VR256:$src)>;
2306 def : Pat<(int_x86_avx_cvtdq2_ps_256 (bitconvert (loadv4i64 addr:$src))),
2307 (VCVTDQ2PSYrm addr:$src)>;
2310 let Predicates = [HasAVX, NoVLX] in {
2311 // Match fround and fextend for 128/256-bit conversions
2312 def : Pat<(v4f32 (X86vfpround (v2f64 VR128:$src))),
2313 (VCVTPD2PSrr VR128:$src)>;
2314 def : Pat<(v4f32 (X86vfpround (loadv2f64 addr:$src))),
2315 (VCVTPD2PSXrm addr:$src)>;
2316 def : Pat<(v4f32 (fround (v4f64 VR256:$src))),
2317 (VCVTPD2PSYrr VR256:$src)>;
2318 def : Pat<(v4f32 (fround (loadv4f64 addr:$src))),
2319 (VCVTPD2PSYrm addr:$src)>;
2321 def : Pat<(v2f64 (X86vfpext (v4f32 VR128:$src))),
2322 (VCVTPS2PDrr VR128:$src)>;
2323 def : Pat<(v4f64 (fextend (v4f32 VR128:$src))),
2324 (VCVTPS2PDYrr VR128:$src)>;
2325 def : Pat<(v4f64 (extloadv4f32 addr:$src)),
2326 (VCVTPS2PDYrm addr:$src)>;
2329 let Predicates = [UseSSE2] in {
// Match fround and fextend for 128-bit conversions
2331 def : Pat<(v4f32 (X86vfpround (v2f64 VR128:$src))),
2332 (CVTPD2PSrr VR128:$src)>;
2333 def : Pat<(v4f32 (X86vfpround (memopv2f64 addr:$src))),
2334 (CVTPD2PSrm addr:$src)>;
2336 def : Pat<(v2f64 (X86vfpext (v4f32 VR128:$src))),
2337 (CVTPS2PDrr VR128:$src)>;
2340 //===----------------------------------------------------------------------===//
2341 // SSE 1 & 2 - Compare Instructions
2342 //===----------------------------------------------------------------------===//
2344 // sse12_cmp_scalar - sse 1 & 2 compare scalar instructions
2345 multiclass sse12_cmp_scalar<RegisterClass RC, X86MemOperand x86memop,
2346 Operand CC, SDNode OpNode, ValueType VT,
2347 PatFrag ld_frag, string asm, string asm_alt,
2348 OpndItins itins, ImmLeaf immLeaf> {
2349 def rr : SIi8<0xC2, MRMSrcReg,
2350 (outs RC:$dst), (ins RC:$src1, RC:$src2, CC:$cc), asm,
2351 [(set RC:$dst, (OpNode (VT RC:$src1), RC:$src2, immLeaf:$cc))],
2352 itins.rr>, Sched<[itins.Sched]>;
2353 def rm : SIi8<0xC2, MRMSrcMem,
2354 (outs RC:$dst), (ins RC:$src1, x86memop:$src2, CC:$cc), asm,
2355 [(set RC:$dst, (OpNode (VT RC:$src1),
2356 (ld_frag addr:$src2), immLeaf:$cc))],
2358 Sched<[itins.Sched.Folded, ReadAfterLd]>;
2360 // Accept explicit immediate argument form instead of comparison code.
2361 let isAsmParserOnly = 1, hasSideEffects = 0 in {
2362 def rr_alt : SIi8<0xC2, MRMSrcReg, (outs RC:$dst),
2363 (ins RC:$src1, RC:$src2, u8imm:$cc), asm_alt, [],
2364 IIC_SSE_ALU_F32S_RR>, Sched<[itins.Sched]>;
2366 def rm_alt : SIi8<0xC2, MRMSrcMem, (outs RC:$dst),
2367 (ins RC:$src1, x86memop:$src2, u8imm:$cc), asm_alt, [],
2368 IIC_SSE_ALU_F32S_RM>,
2369 Sched<[itins.Sched.Folded, ReadAfterLd]>;
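// Example (illustrative): the two spellings assemble identically; predicate
// 3 is "unordered", so
//   cmpunordss %xmm1, %xmm0      ; condition code carried in the mnemonic
//   cmpss $3, %xmm1, %xmm0       ; explicit-immediate alternate form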
2373 defm VCMPSS : sse12_cmp_scalar<FR32, f32mem, AVXCC, X86cmps, f32, loadf32,
2374 "cmp${cc}ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2375 "cmpss\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
2376 SSE_ALU_F32S, i8immZExt5>, XS, VEX_4V, VEX_LIG;
2377 defm VCMPSD : sse12_cmp_scalar<FR64, f64mem, AVXCC, X86cmps, f64, loadf64,
2378 "cmp${cc}sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2379 "cmpsd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
                 SSE_ALU_F32S, i8immZExt5>, // same latency as the 32-bit compare
2381 XD, VEX_4V, VEX_LIG;
2383 let Constraints = "$src1 = $dst" in {
2384 defm CMPSS : sse12_cmp_scalar<FR32, f32mem, SSECC, X86cmps, f32, loadf32,
2385 "cmp${cc}ss\t{$src2, $dst|$dst, $src2}",
2386 "cmpss\t{$cc, $src2, $dst|$dst, $src2, $cc}", SSE_ALU_F32S,
2388 defm CMPSD : sse12_cmp_scalar<FR64, f64mem, SSECC, X86cmps, f64, loadf64,
2389 "cmp${cc}sd\t{$src2, $dst|$dst, $src2}",
2390 "cmpsd\t{$cc, $src2, $dst|$dst, $src2, $cc}",
2391 SSE_ALU_F64S, i8immZExt3>, XD;
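// Background note (hedged): i8immZExt3 restricts the SSE encodings to the
// eight original comparison predicates (0-7), while the VEX-encoded forms
// above use i8immZExt5 and accept the extended AVX predicates 0-31.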
2394 multiclass sse12_cmp_scalar_int<X86MemOperand x86memop, Operand CC,
2395 Intrinsic Int, string asm, OpndItins itins,
2397 def rr : SIi8<0xC2, MRMSrcReg, (outs VR128:$dst),
2398 (ins VR128:$src1, VR128:$src, CC:$cc), asm,
2399 [(set VR128:$dst, (Int VR128:$src1,
2400 VR128:$src, immLeaf:$cc))],
2402 Sched<[itins.Sched]>;
2403 def rm : SIi8<0xC2, MRMSrcMem, (outs VR128:$dst),
2404 (ins VR128:$src1, x86memop:$src, CC:$cc), asm,
2405 [(set VR128:$dst, (Int VR128:$src1,
2406 (load addr:$src), immLeaf:$cc))],
2408 Sched<[itins.Sched.Folded, ReadAfterLd]>;
2411 let isCodeGenOnly = 1 in {
2412 // Aliases to match intrinsics which expect XMM operand(s).
2413 defm Int_VCMPSS : sse12_cmp_scalar_int<f32mem, AVXCC, int_x86_sse_cmp_ss,
2414 "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}",
2415 SSE_ALU_F32S, i8immZExt5>,
2417 defm Int_VCMPSD : sse12_cmp_scalar_int<f64mem, AVXCC, int_x86_sse2_cmp_sd,
2418 "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}",
2419 SSE_ALU_F32S, i8immZExt5>, // same latency as f32
2421 let Constraints = "$src1 = $dst" in {
2422 defm Int_CMPSS : sse12_cmp_scalar_int<f32mem, SSECC, int_x86_sse_cmp_ss,
2423 "cmp${cc}ss\t{$src, $dst|$dst, $src}",
2424 SSE_ALU_F32S, i8immZExt3>, XS;
2425 defm Int_CMPSD : sse12_cmp_scalar_int<f64mem, SSECC, int_x86_sse2_cmp_sd,
2426 "cmp${cc}sd\t{$src, $dst|$dst, $src}",
2427 SSE_ALU_F64S, i8immZExt3>,
2433 // sse12_ord_cmp - Unordered/Ordered scalar fp compare and set EFLAGS
2434 multiclass sse12_ord_cmp<bits<8> opc, RegisterClass RC, SDNode OpNode,
2435 ValueType vt, X86MemOperand x86memop,
2436 PatFrag ld_frag, string OpcodeStr> {
2437 def rr: SI<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
2438 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
2439 [(set EFLAGS, (OpNode (vt RC:$src1), RC:$src2))],
2442 def rm: SI<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
2443 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
2444 [(set EFLAGS, (OpNode (vt RC:$src1),
2445 (ld_frag addr:$src2)))],
2447 Sched<[WriteFAddLd, ReadAfterLd]>;
2450 let Defs = [EFLAGS] in {
2451 defm VUCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
2452 "ucomiss">, PS, VEX, VEX_LIG;
2453 defm VUCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
2454 "ucomisd">, PD, VEX, VEX_LIG;
2455 let Pattern = []<dag> in {
2456 defm VCOMISS : sse12_ord_cmp<0x2F, FR32, undef, f32, f32mem, loadf32,
2457 "comiss">, PS, VEX, VEX_LIG;
2458 defm VCOMISD : sse12_ord_cmp<0x2F, FR64, undef, f64, f64mem, loadf64,
2459 "comisd">, PD, VEX, VEX_LIG;
2462 let isCodeGenOnly = 1 in {
2463 defm Int_VUCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
2464 load, "ucomiss">, PS, VEX;
2465 defm Int_VUCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
2466 load, "ucomisd">, PD, VEX;
2468 defm Int_VCOMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem,
2469 load, "comiss">, PS, VEX;
2470 defm Int_VCOMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem,
2471 load, "comisd">, PD, VEX;
2473 defm UCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
2475 defm UCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
2478 let Pattern = []<dag> in {
2479 defm COMISS : sse12_ord_cmp<0x2F, FR32, undef, f32, f32mem, loadf32,
2481 defm COMISD : sse12_ord_cmp<0x2F, FR64, undef, f64, f64mem, loadf64,
2485 let isCodeGenOnly = 1 in {
2486 defm Int_UCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
2487 load, "ucomiss">, PS;
2488 defm Int_UCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
2489 load, "ucomisd">, PD;
2491 defm Int_COMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem, load,
2493 defm Int_COMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem, load,
2496 } // Defs = [EFLAGS]
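// Illustrative usage (a sketch): these compares only define EFLAGS, so a
// floating-point "a > b" typically lowers to
//   ucomiss %xmm1, %xmm0
//   seta    %al
// where the flag choice also handles the unordered (NaN) case.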
2498 // sse12_cmp_packed - sse 1 & 2 compare packed instructions
2499 multiclass sse12_cmp_packed<RegisterClass RC, X86MemOperand x86memop,
2500 Operand CC, Intrinsic Int, string asm,
2501 string asm_alt, Domain d, ImmLeaf immLeaf,
2502 PatFrag ld_frag, OpndItins itins = SSE_ALU_F32P> {
2503 let isCommutable = 1 in
2504 def rri : PIi8<0xC2, MRMSrcReg,
2505 (outs RC:$dst), (ins RC:$src1, RC:$src2, CC:$cc), asm,
2506 [(set RC:$dst, (Int RC:$src1, RC:$src2, immLeaf:$cc))],
2509 def rmi : PIi8<0xC2, MRMSrcMem,
2510 (outs RC:$dst), (ins RC:$src1, x86memop:$src2, CC:$cc), asm,
2511 [(set RC:$dst, (Int RC:$src1, (ld_frag addr:$src2), immLeaf:$cc))],
2513 Sched<[WriteFAddLd, ReadAfterLd]>;
2515 // Accept explicit immediate argument form instead of comparison code.
2516 let isAsmParserOnly = 1, hasSideEffects = 0 in {
2517 def rri_alt : PIi8<0xC2, MRMSrcReg,
2518 (outs RC:$dst), (ins RC:$src1, RC:$src2, u8imm:$cc),
2519 asm_alt, [], itins.rr, d>, Sched<[WriteFAdd]>;
2521 def rmi_alt : PIi8<0xC2, MRMSrcMem,
2522 (outs RC:$dst), (ins RC:$src1, x86memop:$src2, u8imm:$cc),
2523 asm_alt, [], itins.rm, d>,
2524 Sched<[WriteFAddLd, ReadAfterLd]>;
2528 defm VCMPPS : sse12_cmp_packed<VR128, f128mem, AVXCC, int_x86_sse_cmp_ps,
2529 "cmp${cc}ps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2530 "cmpps\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
2531 SSEPackedSingle, i8immZExt5, loadv4f32>, PS, VEX_4V;
2532 defm VCMPPD : sse12_cmp_packed<VR128, f128mem, AVXCC, int_x86_sse2_cmp_pd,
2533 "cmp${cc}pd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2534 "cmppd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
2535 SSEPackedDouble, i8immZExt5, loadv2f64>, PD, VEX_4V;
2536 defm VCMPPSY : sse12_cmp_packed<VR256, f256mem, AVXCC, int_x86_avx_cmp_ps_256,
2537 "cmp${cc}ps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2538 "cmpps\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
2539 SSEPackedSingle, i8immZExt5, loadv8f32>, PS, VEX_4V, VEX_L;
2540 defm VCMPPDY : sse12_cmp_packed<VR256, f256mem, AVXCC, int_x86_avx_cmp_pd_256,
2541 "cmp${cc}pd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2542 "cmppd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
2543 SSEPackedDouble, i8immZExt5, loadv4f64>, PD, VEX_4V, VEX_L;
2544 let Constraints = "$src1 = $dst" in {
2545 defm CMPPS : sse12_cmp_packed<VR128, f128mem, SSECC, int_x86_sse_cmp_ps,
2546 "cmp${cc}ps\t{$src2, $dst|$dst, $src2}",
2547 "cmpps\t{$cc, $src2, $dst|$dst, $src2, $cc}",
2548 SSEPackedSingle, i8immZExt5, memopv4f32, SSE_ALU_F32P>, PS;
2549 defm CMPPD : sse12_cmp_packed<VR128, f128mem, SSECC, int_x86_sse2_cmp_pd,
2550 "cmp${cc}pd\t{$src2, $dst|$dst, $src2}",
2551 "cmppd\t{$cc, $src2, $dst|$dst, $src2, $cc}",
2552 SSEPackedDouble, i8immZExt5, memopv2f64, SSE_ALU_F64P>, PD;
2555 let Predicates = [HasAVX] in {
2556 def : Pat<(v4i32 (X86cmpp (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
2557 (VCMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>;
2558 def : Pat<(v4i32 (X86cmpp (v4f32 VR128:$src1), (loadv4f32 addr:$src2), imm:$cc)),
2559 (VCMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>;
2560 def : Pat<(v2i64 (X86cmpp (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
2561 (VCMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
2562 def : Pat<(v2i64 (X86cmpp (v2f64 VR128:$src1), (loadv2f64 addr:$src2), imm:$cc)),
2563 (VCMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;
2565 def : Pat<(v8i32 (X86cmpp (v8f32 VR256:$src1), VR256:$src2, imm:$cc)),
2566 (VCMPPSYrri (v8f32 VR256:$src1), (v8f32 VR256:$src2), imm:$cc)>;
2567 def : Pat<(v8i32 (X86cmpp (v8f32 VR256:$src1), (loadv8f32 addr:$src2), imm:$cc)),
2568 (VCMPPSYrmi (v8f32 VR256:$src1), addr:$src2, imm:$cc)>;
2569 def : Pat<(v4i64 (X86cmpp (v4f64 VR256:$src1), VR256:$src2, imm:$cc)),
2570 (VCMPPDYrri VR256:$src1, VR256:$src2, imm:$cc)>;
2571 def : Pat<(v4i64 (X86cmpp (v4f64 VR256:$src1), (loadv4f64 addr:$src2), imm:$cc)),
2572 (VCMPPDYrmi VR256:$src1, addr:$src2, imm:$cc)>;
2575 let Predicates = [UseSSE1] in {
2576 def : Pat<(v4i32 (X86cmpp (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
2577 (CMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>;
2578 def : Pat<(v4i32 (X86cmpp (v4f32 VR128:$src1), (memopv4f32 addr:$src2), imm:$cc)),
2579 (CMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>;
2582 let Predicates = [UseSSE2] in {
2583 def : Pat<(v2i64 (X86cmpp (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
2584 (CMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
2585 def : Pat<(v2i64 (X86cmpp (v2f64 VR128:$src1), (memopv2f64 addr:$src2), imm:$cc)),
2586 (CMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;
2589 //===----------------------------------------------------------------------===//
2590 // SSE 1 & 2 - Shuffle Instructions
2591 //===----------------------------------------------------------------------===//
2593 /// sse12_shuffle - sse 1 & 2 fp shuffle instructions
2594 multiclass sse12_shuffle<RegisterClass RC, X86MemOperand x86memop,
2595 ValueType vt, string asm, PatFrag mem_frag,
2597 def rmi : PIi8<0xC6, MRMSrcMem, (outs RC:$dst),
2598 (ins RC:$src1, x86memop:$src2, u8imm:$src3), asm,
2599 [(set RC:$dst, (vt (X86Shufp RC:$src1, (mem_frag addr:$src2),
2600 (i8 imm:$src3))))], IIC_SSE_SHUFP, d>,
2601 Sched<[WriteFShuffleLd, ReadAfterLd]>;
2602 def rri : PIi8<0xC6, MRMSrcReg, (outs RC:$dst),
2603 (ins RC:$src1, RC:$src2, u8imm:$src3), asm,
2604 [(set RC:$dst, (vt (X86Shufp RC:$src1, RC:$src2,
2605 (i8 imm:$src3))))], IIC_SSE_SHUFP, d>,
2606 Sched<[WriteFShuffle]>;
2609 let Predicates = [HasAVX, NoVLX] in {
2610 defm VSHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
2611 "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
2612 loadv4f32, SSEPackedSingle>, PS, VEX_4V;
2613 defm VSHUFPSY : sse12_shuffle<VR256, f256mem, v8f32,
2614 "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
2615 loadv8f32, SSEPackedSingle>, PS, VEX_4V, VEX_L;
2616 defm VSHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
2617 "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
2618 loadv2f64, SSEPackedDouble>, PD, VEX_4V;
2619 defm VSHUFPDY : sse12_shuffle<VR256, f256mem, v4f64,
2620 "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
2621 loadv4f64, SSEPackedDouble>, PD, VEX_4V, VEX_L;
2623 let Constraints = "$src1 = $dst" in {
2624 defm SHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
2625 "shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}",
2626 memopv4f32, SSEPackedSingle>, PS;
2627 defm SHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
2628 "shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}",
2629 memopv2f64, SSEPackedDouble>, PD;
2632 let Predicates = [HasAVX, NoVLX] in {
2633 def : Pat<(v4i32 (X86Shufp VR128:$src1,
2634 (bc_v4i32 (loadv2i64 addr:$src2)), (i8 imm:$imm))),
2635 (VSHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
2636 def : Pat<(v4i32 (X86Shufp VR128:$src1, VR128:$src2, (i8 imm:$imm))),
2637 (VSHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
2639 def : Pat<(v2i64 (X86Shufp VR128:$src1,
2640 (loadv2i64 addr:$src2), (i8 imm:$imm))),
2641 (VSHUFPDrmi VR128:$src1, addr:$src2, imm:$imm)>;
2642 def : Pat<(v2i64 (X86Shufp VR128:$src1, VR128:$src2, (i8 imm:$imm))),
2643 (VSHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;
2646 def : Pat<(v8i32 (X86Shufp VR256:$src1, VR256:$src2, (i8 imm:$imm))),
2647 (VSHUFPSYrri VR256:$src1, VR256:$src2, imm:$imm)>;
2648 def : Pat<(v8i32 (X86Shufp VR256:$src1,
2649 (bc_v8i32 (loadv4i64 addr:$src2)), (i8 imm:$imm))),
2650 (VSHUFPSYrmi VR256:$src1, addr:$src2, imm:$imm)>;
2652 def : Pat<(v4i64 (X86Shufp VR256:$src1, VR256:$src2, (i8 imm:$imm))),
2653 (VSHUFPDYrri VR256:$src1, VR256:$src2, imm:$imm)>;
2654 def : Pat<(v4i64 (X86Shufp VR256:$src1,
2655 (loadv4i64 addr:$src2), (i8 imm:$imm))),
2656 (VSHUFPDYrmi VR256:$src1, addr:$src2, imm:$imm)>;
2659 let Predicates = [UseSSE1] in {
2660 def : Pat<(v4i32 (X86Shufp VR128:$src1,
2661 (bc_v4i32 (memopv2i64 addr:$src2)), (i8 imm:$imm))),
2662 (SHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
2663 def : Pat<(v4i32 (X86Shufp VR128:$src1, VR128:$src2, (i8 imm:$imm))),
2664 (SHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
2667 let Predicates = [UseSSE2] in {
2668 // Generic SHUFPD patterns
2669 def : Pat<(v2i64 (X86Shufp VR128:$src1,
2670 (memopv2i64 addr:$src2), (i8 imm:$imm))),
2671 (SHUFPDrmi VR128:$src1, addr:$src2, imm:$imm)>;
2672 def : Pat<(v2i64 (X86Shufp VR128:$src1, VR128:$src2, (i8 imm:$imm))),
2673 (SHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;
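// Example (illustrative): the shufps immediate encodes four 2-bit lane
// selectors, the low two picking from $src1 and the high two from $src2;
// with identical operands,
//   shufps $0x1b, %xmm0, %xmm0   ; 0b00011011 reverses the four lanes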
2676 //===----------------------------------------------------------------------===//
2677 // SSE 1 & 2 - Unpack FP Instructions
2678 //===----------------------------------------------------------------------===//
2680 /// sse12_unpack_interleave - sse 1 & 2 fp unpack and interleave
2681 multiclass sse12_unpack_interleave<bits<8> opc, SDNode OpNode, ValueType vt,
2682 PatFrag mem_frag, RegisterClass RC,
2683 X86MemOperand x86memop, string asm,
2685 def rr : PI<opc, MRMSrcReg,
2686 (outs RC:$dst), (ins RC:$src1, RC:$src2),
2688 (vt (OpNode RC:$src1, RC:$src2)))],
2689 IIC_SSE_UNPCK, d>, Sched<[WriteFShuffle]>;
2690 def rm : PI<opc, MRMSrcMem,
2691 (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
2693 (vt (OpNode RC:$src1,
2694 (mem_frag addr:$src2))))],
2696 Sched<[WriteFShuffleLd, ReadAfterLd]>;
2699 let Predicates = [HasAVX, NoVLX] in {
2700 defm VUNPCKHPS: sse12_unpack_interleave<0x15, X86Unpckh, v4f32, loadv4f32,
2701 VR128, f128mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2702 SSEPackedSingle>, PS, VEX_4V;
2703 defm VUNPCKHPD: sse12_unpack_interleave<0x15, X86Unpckh, v2f64, loadv2f64,
2704 VR128, f128mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2705 SSEPackedDouble>, PD, VEX_4V;
2706 defm VUNPCKLPS: sse12_unpack_interleave<0x14, X86Unpckl, v4f32, loadv4f32,
2707 VR128, f128mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2708 SSEPackedSingle>, PS, VEX_4V;
2709 defm VUNPCKLPD: sse12_unpack_interleave<0x14, X86Unpckl, v2f64, loadv2f64,
2710 VR128, f128mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2711 SSEPackedDouble>, PD, VEX_4V;
2713 defm VUNPCKHPSY: sse12_unpack_interleave<0x15, X86Unpckh, v8f32, loadv8f32,
2714 VR256, f256mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2715 SSEPackedSingle>, PS, VEX_4V, VEX_L;
2716 defm VUNPCKHPDY: sse12_unpack_interleave<0x15, X86Unpckh, v4f64, loadv4f64,
2717 VR256, f256mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2718 SSEPackedDouble>, PD, VEX_4V, VEX_L;
2719 defm VUNPCKLPSY: sse12_unpack_interleave<0x14, X86Unpckl, v8f32, loadv8f32,
2720 VR256, f256mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2721 SSEPackedSingle>, PS, VEX_4V, VEX_L;
2722 defm VUNPCKLPDY: sse12_unpack_interleave<0x14, X86Unpckl, v4f64, loadv4f64,
2723 VR256, f256mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2724 SSEPackedDouble>, PD, VEX_4V, VEX_L;
} // Predicates = [HasAVX, NoVLX]
2726 let Constraints = "$src1 = $dst" in {
2727 defm UNPCKHPS: sse12_unpack_interleave<0x15, X86Unpckh, v4f32, memopv4f32,
2728 VR128, f128mem, "unpckhps\t{$src2, $dst|$dst, $src2}",
2729 SSEPackedSingle>, PS;
2730 defm UNPCKHPD: sse12_unpack_interleave<0x15, X86Unpckh, v2f64, memopv2f64,
2731 VR128, f128mem, "unpckhpd\t{$src2, $dst|$dst, $src2}",
2732 SSEPackedDouble>, PD;
2733 defm UNPCKLPS: sse12_unpack_interleave<0x14, X86Unpckl, v4f32, memopv4f32,
2734 VR128, f128mem, "unpcklps\t{$src2, $dst|$dst, $src2}",
2735 SSEPackedSingle>, PS;
2736 defm UNPCKLPD: sse12_unpack_interleave<0x14, X86Unpckl, v2f64, memopv2f64,
2737 VR128, f128mem, "unpcklpd\t{$src2, $dst|$dst, $src2}",
2738 SSEPackedDouble>, PD;
2739 } // Constraints = "$src1 = $dst"
let Predicates = [HasAVX1Only] in {
  def : Pat<(v8i32 (X86Unpckl VR256:$src1, (bc_v8i32 (loadv4i64 addr:$src2)))),
            (VUNPCKLPSYrm VR256:$src1, addr:$src2)>;
  def : Pat<(v8i32 (X86Unpckl VR256:$src1, VR256:$src2)),
            (VUNPCKLPSYrr VR256:$src1, VR256:$src2)>;
  def : Pat<(v8i32 (X86Unpckh VR256:$src1, (bc_v8i32 (loadv4i64 addr:$src2)))),
            (VUNPCKHPSYrm VR256:$src1, addr:$src2)>;
  def : Pat<(v8i32 (X86Unpckh VR256:$src1, VR256:$src2)),
            (VUNPCKHPSYrr VR256:$src1, VR256:$src2)>;

  def : Pat<(v4i64 (X86Unpckl VR256:$src1, (loadv4i64 addr:$src2))),
            (VUNPCKLPDYrm VR256:$src1, addr:$src2)>;
  def : Pat<(v4i64 (X86Unpckl VR256:$src1, VR256:$src2)),
            (VUNPCKLPDYrr VR256:$src1, VR256:$src2)>;
  def : Pat<(v4i64 (X86Unpckh VR256:$src1, (loadv4i64 addr:$src2))),
            (VUNPCKHPDYrm VR256:$src1, addr:$src2)>;
  def : Pat<(v4i64 (X86Unpckh VR256:$src1, VR256:$src2)),
            (VUNPCKHPDYrr VR256:$src1, VR256:$src2)>;
} // Predicates = [HasAVX1Only]

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Extract Floating-Point Sign Mask
//===----------------------------------------------------------------------===//
/// sse12_extr_sign_mask - sse 1 & 2 packed FP sign-mask extraction
multiclass sse12_extr_sign_mask<RegisterClass RC, Intrinsic Int, string asm,
                                Domain d> {
  def rr : PI<0x50, MRMSrcReg, (outs GR32orGR64:$dst), (ins RC:$src),
              !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
              [(set GR32orGR64:$dst, (Int RC:$src))], IIC_SSE_MOVMSK, d>,
              Sched<[WriteVecLogic]>;
}
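
// For reference, movmskps/movmskpd pack the sign bit of each source lane into
// the low bits of a GPR; roughly, in C:
//   int m = _mm_movemask_ps(v); // bit i = sign bit of lane i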
let Predicates = [HasAVX] in {
  defm VMOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps,
                                        "movmskps", SSEPackedSingle>, PS, VEX;
  defm VMOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd,
                                        "movmskpd", SSEPackedDouble>, PD, VEX;
  defm VMOVMSKPSY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_ps_256,
                                         "movmskps", SSEPackedSingle>, PS,
                                         VEX, VEX_L;
  defm VMOVMSKPDY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_pd_256,
                                         "movmskpd", SSEPackedDouble>, PD,
                                         VEX, VEX_L;

  def : Pat<(i32 (X86fgetsign FR32:$src)),
            (VMOVMSKPSrr (COPY_TO_REGCLASS FR32:$src, VR128))>;
  def : Pat<(i64 (X86fgetsign FR32:$src)),
            (SUBREG_TO_REG (i64 0),
             (VMOVMSKPSrr (COPY_TO_REGCLASS FR32:$src, VR128)), sub_32bit)>;
  def : Pat<(i32 (X86fgetsign FR64:$src)),
            (VMOVMSKPDrr (COPY_TO_REGCLASS FR64:$src, VR128))>;
  def : Pat<(i64 (X86fgetsign FR64:$src)),
            (SUBREG_TO_REG (i64 0),
             (VMOVMSKPDrr (COPY_TO_REGCLASS FR64:$src, VR128)), sub_32bit)>;
}
defm MOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps, "movmskps",
                                     SSEPackedSingle>, PS;
defm MOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd, "movmskpd",
                                     SSEPackedDouble>, PD;

def : Pat<(i32 (X86fgetsign FR32:$src)),
          (MOVMSKPSrr (COPY_TO_REGCLASS FR32:$src, VR128))>,
      Requires<[UseSSE1]>;
def : Pat<(i64 (X86fgetsign FR32:$src)),
          (SUBREG_TO_REG (i64 0),
           (MOVMSKPSrr (COPY_TO_REGCLASS FR32:$src, VR128)), sub_32bit)>,
      Requires<[UseSSE1]>;
def : Pat<(i32 (X86fgetsign FR64:$src)),
          (MOVMSKPDrr (COPY_TO_REGCLASS FR64:$src, VR128))>,
      Requires<[UseSSE2]>;
def : Pat<(i64 (X86fgetsign FR64:$src)),
          (SUBREG_TO_REG (i64 0),
           (MOVMSKPDrr (COPY_TO_REGCLASS FR64:$src, VR128)), sub_32bit)>,
      Requires<[UseSSE2]>;

//===---------------------------------------------------------------------===//
// SSE2 - Packed Integer Logical Instructions
//===---------------------------------------------------------------------===//

let ExeDomain = SSEPackedInt in { // SSE integer instructions
/// PDI_binop_rm - Simple SSE2 binary operator.
multiclass PDI_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
                        ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
                        X86MemOperand x86memop, OpndItins itins,
                        bit IsCommutable, bit Is2Addr> {
  let isCommutable = IsCommutable in
  def rr : PDI<opc, MRMSrcReg, (outs RC:$dst),
       (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2)))], itins.rr>,
       Sched<[itins.Sched]>;
  def rm : PDI<opc, MRMSrcMem, (outs RC:$dst),
       (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (OpVT (OpNode RC:$src1,
                             (bitconvert (memop_frag addr:$src2)))))],
       itins.rm>,
       Sched<[itins.Sched.Folded, ReadAfterLd]>;
}
} // ExeDomain = SSEPackedInt

multiclass PDI_binop_all<bits<8> opc, string OpcodeStr, SDNode Opcode,
                         ValueType OpVT128, ValueType OpVT256,
                         OpndItins itins, bit IsCommutable = 0, Predicate prd> {
let Predicates = [HasAVX, prd] in
  defm V#NAME : PDI_binop_rm<opc, !strconcat("v", OpcodeStr), Opcode, OpVT128,
                             VR128, loadv2i64, i128mem, itins,
                             IsCommutable, 0>, VEX_4V;

let Constraints = "$src1 = $dst" in
  defm NAME : PDI_binop_rm<opc, OpcodeStr, Opcode, OpVT128, VR128,
                           memopv2i64, i128mem, itins, IsCommutable, 1>;

let Predicates = [HasAVX2, prd] in
  defm V#NAME#Y : PDI_binop_rm<opc, !strconcat("v", OpcodeStr), Opcode,
                               OpVT256, VR256, loadv4i64, i256mem, itins,
                               IsCommutable, 0>, VEX_4V, VEX_L;
}
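
// Each PDI_binop_all instantiation below expands to the two-address SSE form
// plus the three-address VEX and VEX.256 forms; e.g. "defm PAND" yields
// PANDrr/PANDrm, VPANDrr/VPANDrm and VPANDYrr/VPANDYrm.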

// These are ordered here for pattern ordering requirements with the fp
// versions.

defm PAND  : PDI_binop_all<0xDB, "pand", and, v2i64, v4i64,
                           SSE_VEC_BIT_ITINS_P, 1, NoVLX>;
defm POR   : PDI_binop_all<0xEB, "por", or, v2i64, v4i64,
                           SSE_VEC_BIT_ITINS_P, 1, NoVLX>;
defm PXOR  : PDI_binop_all<0xEF, "pxor", xor, v2i64, v4i64,
                           SSE_VEC_BIT_ITINS_P, 1, NoVLX>;
defm PANDN : PDI_binop_all<0xDF, "pandn", X86andnp, v2i64, v4i64,
                           SSE_VEC_BIT_ITINS_P, 0, NoVLX>;
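
// Note that pandn computes (NOT $src1) AND $src2; this is why it is not
// commutable and is matched by the dedicated X86andnp node rather than a
// plain 'and'.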

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Logical Instructions
//===----------------------------------------------------------------------===//

// Multiclass for scalars using the X86 logical operation aliases for FP.
multiclass sse12_fp_packed_scalar_logical_alias<
    bits<8> opc, string OpcodeStr, SDNode OpNode, OpndItins itins> {
  defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
                FR32, f32, f128mem, loadf32_128, SSEPackedSingle, itins, 0>,
                PS, VEX_4V;

  defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
                FR64, f64, f128mem, loadf64_128, SSEPackedDouble, itins, 0>,
                PD, VEX_4V;

  let Constraints = "$src1 = $dst" in {
    defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, FR32,
                f32, f128mem, memopfsf32_128, SSEPackedSingle, itins>, PS;

    defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, FR64,
                f64, f128mem, memopfsf64_128, SSEPackedDouble, itins>, PD;
  }
}

let isCodeGenOnly = 1 in {
  defm FsAND  : sse12_fp_packed_scalar_logical_alias<0x54, "and", X86fand,
                SSE_BIT_ITINS_P>;
  defm FsOR   : sse12_fp_packed_scalar_logical_alias<0x56, "or", X86for,
                SSE_BIT_ITINS_P>;
  defm FsXOR  : sse12_fp_packed_scalar_logical_alias<0x57, "xor", X86fxor,
                SSE_BIT_ITINS_P>;

  let isCommutable = 0 in
    defm FsANDN : sse12_fp_packed_scalar_logical_alias<0x55, "andn", X86fandn,
                  SSE_BIT_ITINS_P>;
}

// Multiclass for vectors using the X86 logical operation aliases for FP.
multiclass sse12_fp_packed_vector_logical_alias<
    bits<8> opc, string OpcodeStr, SDNode OpNode, OpndItins itins> {
  let Predicates = [HasAVX, NoVLX_Or_NoDQI] in {
  defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
              VR128, v4f32, f128mem, loadv4f32, SSEPackedSingle, itins, 0>,
              PS, VEX_4V;

  defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
              VR128, v2f64, f128mem, loadv2f64, SSEPackedDouble, itins, 0>,
              PD, VEX_4V;

  defm V#NAME#PSY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
              VR256, v8f32, f256mem, loadv8f32, SSEPackedSingle, itins, 0>,
              PS, VEX_4V, VEX_L;

  defm V#NAME#PDY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
              VR256, v4f64, f256mem, loadv4f64, SSEPackedDouble, itins, 0>,
              PD, VEX_4V, VEX_L;
  }

  let Constraints = "$src1 = $dst" in {
    defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR128,
                v4f32, f128mem, memopv4f32, SSEPackedSingle, itins>,
                PS;

    defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR128,
                v2f64, f128mem, memopv2f64, SSEPackedDouble, itins>,
                PD;
  }
}

let isCodeGenOnly = 1 in {
  defm FvAND  : sse12_fp_packed_vector_logical_alias<0x54, "and", X86fand,
                SSE_BIT_ITINS_P>;
  defm FvOR   : sse12_fp_packed_vector_logical_alias<0x56, "or", X86for,
                SSE_BIT_ITINS_P>;
  defm FvXOR  : sse12_fp_packed_vector_logical_alias<0x57, "xor", X86fxor,
                SSE_BIT_ITINS_P>;

  let isCommutable = 0 in
    defm FvANDN : sse12_fp_packed_vector_logical_alias<0x55, "andn", X86fandn,
                  SSE_BIT_ITINS_P>;
}

/// sse12_fp_packed_logical - SSE 1 & 2 packed FP logical ops
///
multiclass sse12_fp_packed_logical<bits<8> opc, string OpcodeStr,
                                   SDNode OpNode> {
  let Predicates = [HasAVX, NoVLX] in {
  defm V#NAME#PSY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedSingle,
        !strconcat(OpcodeStr, "ps"), f256mem,
        [(set VR256:$dst, (v4i64 (OpNode VR256:$src1, VR256:$src2)))],
        [(set VR256:$dst, (OpNode (bc_v4i64 (v8f32 VR256:$src1)),
                           (loadv4i64 addr:$src2)))], 0>, PS, VEX_4V, VEX_L;

  defm V#NAME#PDY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedDouble,
        !strconcat(OpcodeStr, "pd"), f256mem,
        [(set VR256:$dst, (OpNode (bc_v4i64 (v4f64 VR256:$src1)),
                                  (bc_v4i64 (v4f64 VR256:$src2))))],
        [(set VR256:$dst, (OpNode (bc_v4i64 (v4f64 VR256:$src1)),
                                  (loadv4i64 addr:$src2)))], 0>,
                                  PD, VEX_4V, VEX_L;

  // In AVX there is no need to add a pattern for the 128-bit logical rr ps
  // form, because it is promoted to v2i64 and covered by the integer version.
  // The pattern is needed only in SSE, because v2i64 isn't supported on SSE1,
  // only on SSE2.
  defm V#NAME#PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
       !strconcat(OpcodeStr, "ps"), f128mem, [],
       [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
                                 (loadv2i64 addr:$src2)))], 0>, PS, VEX_4V;

  defm V#NAME#PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
       !strconcat(OpcodeStr, "pd"), f128mem,
       [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
                                 (bc_v2i64 (v2f64 VR128:$src2))))],
       [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
                                 (loadv2i64 addr:$src2)))], 0>,
                                 PD, VEX_4V;
  }

  let Constraints = "$src1 = $dst" in {
    defm PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
         !strconcat(OpcodeStr, "ps"), f128mem,
         [(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))],
         [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
                                   (memopv2i64 addr:$src2)))]>, PS;

    defm PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
         !strconcat(OpcodeStr, "pd"), f128mem,
         [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
                                   (bc_v2i64 (v2f64 VR128:$src2))))],
         [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
                                   (memopv2i64 addr:$src2)))]>, PD;
  }
}

defm AND  : sse12_fp_packed_logical<0x54, "and", and>;
defm OR   : sse12_fp_packed_logical<0x56, "or", or>;
defm XOR  : sse12_fp_packed_logical<0x57, "xor", xor>;
let isCommutable = 0 in
  defm ANDN : sse12_fp_packed_logical<0x55, "andn", X86andnp>;

// AVX1 requires type coercions in order to fold loads directly into logical
// instructions.
let Predicates = [HasAVX1Only] in {
  def : Pat<(bc_v8f32 (and VR256:$src1, (loadv4i64 addr:$src2))),
            (VANDPSYrm VR256:$src1, addr:$src2)>;
  def : Pat<(bc_v8f32 (or VR256:$src1, (loadv4i64 addr:$src2))),
            (VORPSYrm VR256:$src1, addr:$src2)>;
  def : Pat<(bc_v8f32 (xor VR256:$src1, (loadv4i64 addr:$src2))),
            (VXORPSYrm VR256:$src1, addr:$src2)>;
  def : Pat<(bc_v8f32 (X86andnp VR256:$src1, (loadv4i64 addr:$src2))),
            (VANDNPSYrm VR256:$src1, addr:$src2)>;
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Arithmetic Instructions
//===----------------------------------------------------------------------===//

/// basic_sse12_fp_binop_xxx - SSE 1 & 2 binops come in both scalar and
/// vector forms.
///
/// In addition, we also have a special variant of the scalar form here to
/// represent the associated intrinsic operation. This form is unlike the
/// plain scalar form, in that it takes an entire vector (instead of a scalar)
/// and leaves the top elements unmodified (therefore these cannot be commuted).
///
/// These three forms can each be reg+reg or reg+mem.
///

/// FIXME: once all 256-bit intrinsics are matched, clean up and refactor those
/// classes below.
multiclass basic_sse12_fp_binop_p<bits<8> opc, string OpcodeStr,
                                  SDNode OpNode, SizeItins itins> {
  let Predicates = [HasAVX, NoVLX] in {
  defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
                               VR128, v4f32, f128mem, loadv4f32,
                               SSEPackedSingle, itins.s, 0>, PS, VEX_4V;
  defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
                               VR128, v2f64, f128mem, loadv2f64,
                               SSEPackedDouble, itins.d, 0>, PD, VEX_4V;

  defm V#NAME#PSY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"),
                        OpNode, VR256, v8f32, f256mem, loadv8f32,
                        SSEPackedSingle, itins.s, 0>, PS, VEX_4V, VEX_L;
  defm V#NAME#PDY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"),
                        OpNode, VR256, v4f64, f256mem, loadv4f64,
                        SSEPackedDouble, itins.d, 0>, PD, VEX_4V, VEX_L;
  }

  let Constraints = "$src1 = $dst" in {
    defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR128,
                              v4f32, f128mem, memopv4f32, SSEPackedSingle,
                              itins.s>, PS;
    defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR128,
                              v2f64, f128mem, memopv2f64, SSEPackedDouble,
                              itins.d>, PD;
  }
}

multiclass basic_sse12_fp_binop_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
                                  SizeItins itins> {
  defm V#NAME#SS : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "ss"),
                         OpNode, FR32, f32mem, SSEPackedSingle, itins.s, 0>,
                         XS, VEX_4V, VEX_LIG;
  defm V#NAME#SD : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "sd"),
                         OpNode, FR64, f64mem, SSEPackedDouble, itins.d, 0>,
                         XD, VEX_4V, VEX_LIG;

  let Constraints = "$src1 = $dst" in {
    defm SS : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "ss"),
                              OpNode, FR32, f32mem, SSEPackedSingle,
                              itins.s>, XS;
    defm SD : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "sd"),
                              OpNode, FR64, f64mem, SSEPackedDouble,
                              itins.d>, XD;
  }
}

multiclass basic_sse12_fp_binop_s_int<bits<8> opc, string OpcodeStr,
                                      SizeItins itins> {
  defm V#NAME#SS : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
                   !strconcat(OpcodeStr, "ss"), "", "_ss", ssmem, sse_load_f32,
                   SSEPackedSingle, itins.s, 0>, XS, VEX_4V, VEX_LIG;
  defm V#NAME#SD : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
                   !strconcat(OpcodeStr, "sd"), "2", "_sd", sdmem, sse_load_f64,
                   SSEPackedDouble, itins.d, 0>, XD, VEX_4V, VEX_LIG;

  let Constraints = "$src1 = $dst" in {
    defm SS : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
              !strconcat(OpcodeStr, "ss"), "", "_ss", ssmem, sse_load_f32,
              SSEPackedSingle, itins.s>, XS;
    defm SD : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
              !strconcat(OpcodeStr, "sd"), "2", "_sd", sdmem, sse_load_f64,
              SSEPackedDouble, itins.d>, XD;
  }
}

// Binary Arithmetic instructions
defm ADD : basic_sse12_fp_binop_p<0x58, "add", fadd, SSE_ALU_ITINS_P>,
           basic_sse12_fp_binop_s<0x58, "add", fadd, SSE_ALU_ITINS_S>,
           basic_sse12_fp_binop_s_int<0x58, "add", SSE_ALU_ITINS_S>;
defm MUL : basic_sse12_fp_binop_p<0x59, "mul", fmul, SSE_MUL_ITINS_P>,
           basic_sse12_fp_binop_s<0x59, "mul", fmul, SSE_MUL_ITINS_S>,
           basic_sse12_fp_binop_s_int<0x59, "mul", SSE_MUL_ITINS_S>;
let isCommutable = 0 in {
  defm SUB : basic_sse12_fp_binop_p<0x5C, "sub", fsub, SSE_ALU_ITINS_P>,
             basic_sse12_fp_binop_s<0x5C, "sub", fsub, SSE_ALU_ITINS_S>,
             basic_sse12_fp_binop_s_int<0x5C, "sub", SSE_ALU_ITINS_S>;
  defm DIV : basic_sse12_fp_binop_p<0x5E, "div", fdiv, SSE_DIV_ITINS_P>,
             basic_sse12_fp_binop_s<0x5E, "div", fdiv, SSE_DIV_ITINS_S>,
             basic_sse12_fp_binop_s_int<0x5E, "div", SSE_DIV_ITINS_S>;
  defm MAX : basic_sse12_fp_binop_p<0x5F, "max", X86fmax, SSE_ALU_ITINS_P>,
             basic_sse12_fp_binop_s<0x5F, "max", X86fmax, SSE_ALU_ITINS_S>,
             basic_sse12_fp_binop_s_int<0x5F, "max", SSE_ALU_ITINS_S>;
  defm MIN : basic_sse12_fp_binop_p<0x5D, "min", X86fmin, SSE_ALU_ITINS_P>,
             basic_sse12_fp_binop_s<0x5D, "min", X86fmin, SSE_ALU_ITINS_S>,
             basic_sse12_fp_binop_s_int<0x5D, "min", SSE_ALU_ITINS_S>;
}

let isCodeGenOnly = 1 in {
  defm MAXC: basic_sse12_fp_binop_p<0x5F, "max", X86fmaxc, SSE_ALU_ITINS_P>,
             basic_sse12_fp_binop_s<0x5F, "max", X86fmaxc, SSE_ALU_ITINS_S>;
  defm MINC: basic_sse12_fp_binop_p<0x5D, "min", X86fminc, SSE_ALU_ITINS_P>,
             basic_sse12_fp_binop_s<0x5D, "min", X86fminc, SSE_ALU_ITINS_S>;
}

// Patterns used to select SSE scalar fp arithmetic instructions from
// either:
//
// (1) a scalar fp operation followed by a blend
//
// The effect is that the backend no longer emits unnecessary vector
// insert instructions immediately after SSE scalar fp instructions
// like addss or mulss.
//
// For example, given the following code:
//   __m128 foo(__m128 A, __m128 B) {
//     A[0] += B[0];
//     return A;
//   }
//
// Previously we generated:
//   addss %xmm0, %xmm1
//   movss %xmm1, %xmm0
//
// We now generate:
//   addss %xmm1, %xmm0
//
// (2) a vector packed single/double fp operation followed by a vector insert
//
// The effect is that the backend converts the packed fp instruction
// followed by a vector insert into a single SSE scalar fp instruction.
//
// For example, given the following code:
//   __m128 foo(__m128 A, __m128 B) {
//     __m128 C = A + B;
//     return (__m128) {C[0], A[1], A[2], A[3]};
//   }
//
// Previously we generated:
//   addps %xmm0, %xmm1
//   movss %xmm1, %xmm0
//
// We now generate:
//   addss %xmm1, %xmm0
//
// TODO: Some canonicalization in lowering would simplify the number of
// patterns we have to try to match.
multiclass scalar_math_f32_patterns<SDNode Op, string OpcPrefix> {
  let Predicates = [UseSSE1] in {
    // extracted scalar math op with insert via movss
    def : Pat<(v4f32 (X86Movss (v4f32 VR128:$dst), (v4f32 (scalar_to_vector
                       (Op (f32 (extractelt (v4f32 VR128:$dst), (iPTR 0))),
                       FR32:$src))))),
              (!cast<I>(OpcPrefix#SSrr_Int) v4f32:$dst,
               (COPY_TO_REGCLASS FR32:$src, VR128))>;

    // vector math op with insert via movss
    def : Pat<(v4f32 (X86Movss (v4f32 VR128:$dst),
                       (Op (v4f32 VR128:$dst), (v4f32 VR128:$src)))),
              (!cast<I>(OpcPrefix#SSrr_Int) v4f32:$dst, v4f32:$src)>;
  }

  // With SSE 4.1, blendi is preferred to movss, so match that too.
  let Predicates = [UseSSE41] in {
    // extracted scalar math op with insert via blend
    def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$dst), (v4f32 (scalar_to_vector
                       (Op (f32 (extractelt (v4f32 VR128:$dst), (iPTR 0))),
                       FR32:$src))), (i8 1))),
              (!cast<I>(OpcPrefix#SSrr_Int) v4f32:$dst,
               (COPY_TO_REGCLASS FR32:$src, VR128))>;

    // vector math op with insert via blend
    def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$dst),
                       (Op (v4f32 VR128:$dst), (v4f32 VR128:$src)), (i8 1))),
              (!cast<I>(OpcPrefix#SSrr_Int) v4f32:$dst, v4f32:$src)>;
  }

  // Repeat everything for AVX, except for the movss + scalar combo...
  // because that one shouldn't occur with AVX codegen?
  let Predicates = [HasAVX] in {
    // extracted scalar math op with insert via blend
    def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$dst), (v4f32 (scalar_to_vector
                       (Op (f32 (extractelt (v4f32 VR128:$dst), (iPTR 0))),
                       FR32:$src))), (i8 1))),
              (!cast<I>("V"#OpcPrefix#SSrr_Int) v4f32:$dst,
               (COPY_TO_REGCLASS FR32:$src, VR128))>;

    // vector math op with insert via movss
    def : Pat<(v4f32 (X86Movss (v4f32 VR128:$dst),
                       (Op (v4f32 VR128:$dst), (v4f32 VR128:$src)))),
              (!cast<I>("V"#OpcPrefix#SSrr_Int) v4f32:$dst, v4f32:$src)>;

    // vector math op with insert via blend
    def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$dst),
                       (Op (v4f32 VR128:$dst), (v4f32 VR128:$src)), (i8 1))),
              (!cast<I>("V"#OpcPrefix#SSrr_Int) v4f32:$dst, v4f32:$src)>;
  }
}

defm : scalar_math_f32_patterns<fadd, "ADD">;
defm : scalar_math_f32_patterns<fsub, "SUB">;
defm : scalar_math_f32_patterns<fmul, "MUL">;
defm : scalar_math_f32_patterns<fdiv, "DIV">;

multiclass scalar_math_f64_patterns<SDNode Op, string OpcPrefix> {
  let Predicates = [UseSSE2] in {
    // extracted scalar math op with insert via movsd
    def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst), (v2f64 (scalar_to_vector
                       (Op (f64 (extractelt (v2f64 VR128:$dst), (iPTR 0))),
                       FR64:$src))))),
              (!cast<I>(OpcPrefix#SDrr_Int) v2f64:$dst,
               (COPY_TO_REGCLASS FR64:$src, VR128))>;

    // vector math op with insert via movsd
    def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst),
                       (Op (v2f64 VR128:$dst), (v2f64 VR128:$src)))),
              (!cast<I>(OpcPrefix#SDrr_Int) v2f64:$dst, v2f64:$src)>;
  }

  // With SSE 4.1, blendi is preferred to movsd, so match those too.
  let Predicates = [UseSSE41] in {
    // extracted scalar math op with insert via blend
    def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$dst), (v2f64 (scalar_to_vector
                       (Op (f64 (extractelt (v2f64 VR128:$dst), (iPTR 0))),
                       FR64:$src))), (i8 1))),
              (!cast<I>(OpcPrefix#SDrr_Int) v2f64:$dst,
               (COPY_TO_REGCLASS FR64:$src, VR128))>;

    // vector math op with insert via blend
    def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$dst),
                       (Op (v2f64 VR128:$dst), (v2f64 VR128:$src)), (i8 1))),
              (!cast<I>(OpcPrefix#SDrr_Int) v2f64:$dst, v2f64:$src)>;
  }

  // Repeat everything for AVX.
  let Predicates = [HasAVX] in {
    // extracted scalar math op with insert via movsd
    def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst), (v2f64 (scalar_to_vector
                       (Op (f64 (extractelt (v2f64 VR128:$dst), (iPTR 0))),
                       FR64:$src))))),
              (!cast<I>("V"#OpcPrefix#SDrr_Int) v2f64:$dst,
               (COPY_TO_REGCLASS FR64:$src, VR128))>;

    // extracted scalar math op with insert via blend
    def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$dst), (v2f64 (scalar_to_vector
                       (Op (f64 (extractelt (v2f64 VR128:$dst), (iPTR 0))),
                       FR64:$src))), (i8 1))),
              (!cast<I>("V"#OpcPrefix#SDrr_Int) v2f64:$dst,
               (COPY_TO_REGCLASS FR64:$src, VR128))>;

    // vector math op with insert via movsd
    def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst),
                       (Op (v2f64 VR128:$dst), (v2f64 VR128:$src)))),
              (!cast<I>("V"#OpcPrefix#SDrr_Int) v2f64:$dst, v2f64:$src)>;

    // vector math op with insert via blend
    def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$dst),
                       (Op (v2f64 VR128:$dst), (v2f64 VR128:$src)), (i8 1))),
              (!cast<I>("V"#OpcPrefix#SDrr_Int) v2f64:$dst, v2f64:$src)>;
  }
}

defm : scalar_math_f64_patterns<fadd, "ADD">;
defm : scalar_math_f64_patterns<fsub, "SUB">;
defm : scalar_math_f64_patterns<fmul, "MUL">;
defm : scalar_math_f64_patterns<fdiv, "DIV">;

/// Unop Arithmetic
/// In addition, we also have a special variant of the scalar form here to
/// represent the associated intrinsic operation. This form is unlike the
/// plain scalar form, in that it takes an entire vector (instead of a
/// scalar) and leaves the top elements undefined.
///
/// And, we have a special variant form for a full-vector intrinsic form.

let Sched = WriteFSqrt in {
def SSE_SQRTPS : OpndItins<
  IIC_SSE_SQRTPS_RR, IIC_SSE_SQRTPS_RM
>;

def SSE_SQRTSS : OpndItins<
  IIC_SSE_SQRTSS_RR, IIC_SSE_SQRTSS_RM
>;

def SSE_SQRTPD : OpndItins<
  IIC_SSE_SQRTPD_RR, IIC_SSE_SQRTPD_RM
>;

def SSE_SQRTSD : OpndItins<
  IIC_SSE_SQRTSD_RR, IIC_SSE_SQRTSD_RM
>;
}

let Sched = WriteFRsqrt in {
def SSE_RSQRTPS : OpndItins<
  IIC_SSE_RSQRTPS_RR, IIC_SSE_RSQRTPS_RM
>;

def SSE_RSQRTSS : OpndItins<
  IIC_SSE_RSQRTSS_RR, IIC_SSE_RSQRTSS_RM
>;
}

let Sched = WriteFRcp in {
def SSE_RCPP : OpndItins<
  IIC_SSE_RCPP_RR, IIC_SSE_RCPP_RM
>;

def SSE_RCPS : OpndItins<
  IIC_SSE_RCPS_RR, IIC_SSE_RCPS_RM
>;
}

/// sse_fp_unop_s - SSE1 unops in scalar form.
/// For the non-AVX defs, we need $src1 to be tied to $dst because
/// the HW instructions are 2 operand / destructive.
multiclass sse_fp_unop_s<bits<8> opc, string OpcodeStr, RegisterClass RC,
                         ValueType vt, ValueType ScalarVT,
                         X86MemOperand x86memop, Operand vec_memop,
                         ComplexPattern mem_cpat, Intrinsic Intr,
                         SDNode OpNode, Domain d, OpndItins itins,
                         Predicate target, string Suffix> {
  let hasSideEffects = 0 in {
  def r : I<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1),
            !strconcat(OpcodeStr, "\t{$src1, $dst|$dst, $src1}"),
            [(set RC:$dst, (OpNode RC:$src1))], itins.rr, d>, Sched<[itins.Sched]>,
            Requires<[target]>;

  def m : I<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src1),
            !strconcat(OpcodeStr, "\t{$src1, $dst|$dst, $src1}"),
            [(set RC:$dst, (OpNode (load addr:$src1)))], itins.rm, d>,
            Sched<[itins.Sched.Folded, ReadAfterLd]>,
            Requires<[target, OptForSize]>;

  let isCodeGenOnly = 1, Constraints = "$src1 = $dst" in {
  def r_Int : I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                []>, Sched<[itins.Sched.Folded, ReadAfterLd]>;

  def m_Int : I<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, vec_memop:$src2),
                !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                []>, Sched<[itins.Sched.Folded, ReadAfterLd]>;
  }
  }

  let Predicates = [target] in {
  def : Pat<(vt (OpNode mem_cpat:$src)),
            (vt (COPY_TO_REGCLASS (vt (!cast<Instruction>(NAME#Suffix##m_Int)
                 (vt (IMPLICIT_DEF)), mem_cpat:$src)), RC))>;
  // These are unary operations, but they are modeled as having 2 source operands
  // because the high elements of the destination are unchanged in SSE.
  def : Pat<(Intr VR128:$src),
            (!cast<Instruction>(NAME#Suffix##r_Int) VR128:$src, VR128:$src)>;
  def : Pat<(Intr (load addr:$src)),
            (vt (COPY_TO_REGCLASS (!cast<Instruction>(NAME#Suffix##m)
                                   addr:$src), VR128))>;
  }

  // We don't want to fold scalar loads into these instructions unless
  // optimizing for size. This is because the folded instruction will have a
  // partial register update, while the unfolded sequence will not, e.g.
  // movss mem, %xmm0
  // rcpss %xmm0, %xmm0
  // which has a clobber before the rcp, vs.
  // rcpss mem, %xmm0
  let Predicates = [target, OptForSize] in {
    def : Pat<(Intr mem_cpat:$src),
              (!cast<Instruction>(NAME#Suffix##m_Int)
               (vt (IMPLICIT_DEF)), mem_cpat:$src)>;
  }
}

multiclass avx_fp_unop_s<bits<8> opc, string OpcodeStr, RegisterClass RC,
                         ValueType vt, ValueType ScalarVT,
                         X86MemOperand x86memop, Operand vec_memop,
                         ComplexPattern mem_cpat,
                         Intrinsic Intr, SDNode OpNode, Domain d,
                         OpndItins itins, string Suffix> {
  let hasSideEffects = 0 in {
  def r : I<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
            !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
            [], itins.rr, d>, Sched<[itins.Sched]>;

  def m : I<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
            !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
            [], itins.rm, d>, Sched<[itins.Sched.Folded, ReadAfterLd]>;
  let isCodeGenOnly = 1 in {
  def r_Int : I<opc, MRMSrcReg, (outs VR128:$dst),
                (ins VR128:$src1, VR128:$src2),
                !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                []>, Sched<[itins.Sched.Folded]>;

  def m_Int : I<opc, MRMSrcMem, (outs VR128:$dst),
                (ins VR128:$src1, vec_memop:$src2),
                !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                []>, Sched<[itins.Sched.Folded, ReadAfterLd]>;
  }
  }

  // We don't want to fold scalar loads into these instructions unless
  // optimizing for size. This is because the folded instruction will have a
  // partial register update, while the unfolded sequence will not, e.g.
  // vmovss mem, %xmm0
  // vrcpss %xmm0, %xmm0, %xmm0
  // which has a clobber before the rcp, vs.
  // vrcpss mem, %xmm0, %xmm0
  // TODO: In theory, we could fold the load, and avoid the stall caused by
  // the partial register store, either in ExeDepFix or with smarter RA.
  let Predicates = [UseAVX] in {
   def : Pat<(OpNode RC:$src), (!cast<Instruction>("V"#NAME#Suffix##r)
                                (ScalarVT (IMPLICIT_DEF)), RC:$src)>;
  }
  let Predicates = [HasAVX] in {
   def : Pat<(Intr VR128:$src),
             (!cast<Instruction>("V"#NAME#Suffix##r_Int) (vt (IMPLICIT_DEF)),
              VR128:$src)>;
  }
  let Predicates = [HasAVX, OptForSize] in {
    def : Pat<(Intr mem_cpat:$src),
              (!cast<Instruction>("V"#NAME#Suffix##m_Int)
               (vt (IMPLICIT_DEF)), mem_cpat:$src)>;
  }
  let Predicates = [UseAVX, OptForSize] in {
    def : Pat<(ScalarVT (OpNode (load addr:$src))),
              (!cast<Instruction>("V"#NAME#Suffix##m) (ScalarVT (IMPLICIT_DEF)),
               addr:$src)>;
    def : Pat<(vt (OpNode mem_cpat:$src)),
              (!cast<Instruction>("V"#NAME#Suffix##m_Int) (vt (IMPLICIT_DEF)),
               mem_cpat:$src)>;
  }
}

/// sse1_fp_unop_p - SSE1 unops in packed form.
multiclass sse1_fp_unop_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
                          OpndItins itins, list<Predicate> prds> {
let Predicates = prds in {
  def V#NAME#PSr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       !strconcat("v", OpcodeStr,
                                  "ps\t{$src, $dst|$dst, $src}"),
                       [(set VR128:$dst, (v4f32 (OpNode VR128:$src)))],
                       itins.rr>, VEX, Sched<[itins.Sched]>;
  def V#NAME#PSm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                       !strconcat("v", OpcodeStr,
                                  "ps\t{$src, $dst|$dst, $src}"),
                       [(set VR128:$dst, (OpNode (loadv4f32 addr:$src)))],
                       itins.rm>, VEX, Sched<[itins.Sched.Folded]>;
  def V#NAME#PSYr : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
                        !strconcat("v", OpcodeStr,
                                   "ps\t{$src, $dst|$dst, $src}"),
                        [(set VR256:$dst, (v8f32 (OpNode VR256:$src)))],
                        itins.rr>, VEX, VEX_L, Sched<[itins.Sched]>;
  def V#NAME#PSYm : PSI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
                        !strconcat("v", OpcodeStr,
                                   "ps\t{$src, $dst|$dst, $src}"),
                        [(set VR256:$dst, (OpNode (loadv8f32 addr:$src)))],
                        itins.rm>, VEX, VEX_L, Sched<[itins.Sched.Folded]>;
}

  def PSr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
                [(set VR128:$dst, (v4f32 (OpNode VR128:$src)))], itins.rr>,
                Sched<[itins.Sched]>;
  def PSm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
                [(set VR128:$dst, (OpNode (memopv4f32 addr:$src)))], itins.rm>,
                Sched<[itins.Sched.Folded]>;
}

/// sse2_fp_unop_p - SSE2 unops in vector forms.
multiclass sse2_fp_unop_p<bits<8> opc, string OpcodeStr,
                          SDNode OpNode, OpndItins itins> {
let Predicates = [HasAVX] in {
  def V#NAME#PDr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       !strconcat("v", OpcodeStr,
                                  "pd\t{$src, $dst|$dst, $src}"),
                       [(set VR128:$dst, (v2f64 (OpNode VR128:$src)))],
                       itins.rr>, VEX, Sched<[itins.Sched]>;
  def V#NAME#PDm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                       !strconcat("v", OpcodeStr,
                                  "pd\t{$src, $dst|$dst, $src}"),
                       [(set VR128:$dst, (OpNode (loadv2f64 addr:$src)))],
                       itins.rm>, VEX, Sched<[itins.Sched.Folded]>;
  def V#NAME#PDYr : PDI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
                        !strconcat("v", OpcodeStr,
                                   "pd\t{$src, $dst|$dst, $src}"),
                        [(set VR256:$dst, (v4f64 (OpNode VR256:$src)))],
                        itins.rr>, VEX, VEX_L, Sched<[itins.Sched]>;
  def V#NAME#PDYm : PDI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
                        !strconcat("v", OpcodeStr,
                                   "pd\t{$src, $dst|$dst, $src}"),
                        [(set VR256:$dst, (OpNode (loadv4f64 addr:$src)))],
                        itins.rm>, VEX, VEX_L, Sched<[itins.Sched.Folded]>;
}

  def PDr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
                [(set VR128:$dst, (v2f64 (OpNode VR128:$src)))], itins.rr>,
                Sched<[itins.Sched]>;
  def PDm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
                [(set VR128:$dst, (OpNode (memopv2f64 addr:$src)))], itins.rm>,
                Sched<[itins.Sched.Folded]>;
}

multiclass sse1_fp_unop_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
                          OpndItins itins> {
  defm SS : sse_fp_unop_s<opc, OpcodeStr##ss, FR32, v4f32, f32, f32mem,
                          ssmem, sse_load_f32,
                          !cast<Intrinsic>("int_x86_sse_"##OpcodeStr##_ss), OpNode,
                          SSEPackedSingle, itins, UseSSE1, "SS">, XS;
  defm V#NAME#SS : avx_fp_unop_s<opc, "v"#OpcodeStr##ss, FR32, v4f32, f32,
                                 f32mem, ssmem, sse_load_f32,
                                 !cast<Intrinsic>("int_x86_sse_"##OpcodeStr##_ss), OpNode,
                                 SSEPackedSingle, itins, "SS">, XS, VEX_4V, VEX_LIG;
}

multiclass sse2_fp_unop_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
                          OpndItins itins> {
  defm SD : sse_fp_unop_s<opc, OpcodeStr##sd, FR64, v2f64, f64, f64mem,
                          sdmem, sse_load_f64,
                          !cast<Intrinsic>("int_x86_sse2_"##OpcodeStr##_sd),
                          OpNode, SSEPackedDouble, itins, UseSSE2, "SD">, XD;
  defm V#NAME#SD : avx_fp_unop_s<opc, "v"#OpcodeStr##sd, FR64, v2f64, f64,
                                 f64mem, sdmem, sse_load_f64,
                                 !cast<Intrinsic>("int_x86_sse2_"##OpcodeStr##_sd),
                                 OpNode, SSEPackedDouble, itins, "SD">,
                                 XD, VEX_4V, VEX_LIG;
}

// Square root.
defm SQRT  : sse1_fp_unop_s<0x51, "sqrt", fsqrt, SSE_SQRTSS>,
             sse1_fp_unop_p<0x51, "sqrt", fsqrt, SSE_SQRTPS, [HasAVX]>,
             sse2_fp_unop_s<0x51, "sqrt", fsqrt, SSE_SQRTSD>,
             sse2_fp_unop_p<0x51, "sqrt", fsqrt, SSE_SQRTPD>;

// Reciprocal approximations. Note that these typically require refinement
// in order to obtain suitable precision.
defm RSQRT : sse1_fp_unop_s<0x52, "rsqrt", X86frsqrt, SSE_RSQRTSS>,
             sse1_fp_unop_p<0x52, "rsqrt", X86frsqrt, SSE_RSQRTPS, [HasAVX, NoVLX]>;
defm RCP   : sse1_fp_unop_s<0x53, "rcp", X86frcp, SSE_RCPS>,
             sse1_fp_unop_p<0x53, "rcp", X86frcp, SSE_RCPP, [HasAVX, NoVLX]>;
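
// For reference, a single Newton-Raphson step refines the rcpps estimate x0
// of 1/a as x1 = x0*(2 - a*x0), and the rsqrtps estimate of 1/sqrt(a) as
// x1 = 0.5*x0*(3 - a*x0*x0), roughly doubling the number of correct bits.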

// There is no f64 version of the reciprocal approximation instructions.

// TODO: We should add *scalar* op patterns for these just like we have for
// the binops above. If the binop and unop patterns could all be unified
// that would be even better.

multiclass scalar_unary_math_patterns<Intrinsic Intr, string OpcPrefix,
                                      SDNode Move, ValueType VT,
                                      Predicate BasePredicate> {
  let Predicates = [BasePredicate] in {
    def : Pat<(VT (Move VT:$dst, (Intr VT:$src))),
              (!cast<I>(OpcPrefix#r_Int) VT:$dst, VT:$src)>;
  }

  // With SSE 4.1, blendi is preferred to movs*, so match that too.
  let Predicates = [UseSSE41] in {
    def : Pat<(VT (X86Blendi VT:$dst, (Intr VT:$src), (i8 1))),
              (!cast<I>(OpcPrefix#r_Int) VT:$dst, VT:$src)>;
  }

  // Repeat for AVX versions of the instructions.
  let Predicates = [HasAVX] in {
    def : Pat<(VT (Move VT:$dst, (Intr VT:$src))),
              (!cast<I>("V"#OpcPrefix#r_Int) VT:$dst, VT:$src)>;

    def : Pat<(VT (X86Blendi VT:$dst, (Intr VT:$src), (i8 1))),
              (!cast<I>("V"#OpcPrefix#r_Int) VT:$dst, VT:$src)>;
  }
}

defm : scalar_unary_math_patterns<int_x86_sse_rcp_ss, "RCPSS", X86Movss,
                                  v4f32, UseSSE1>;
defm : scalar_unary_math_patterns<int_x86_sse_rsqrt_ss, "RSQRTSS", X86Movss,
                                  v4f32, UseSSE1>;
defm : scalar_unary_math_patterns<int_x86_sse_sqrt_ss, "SQRTSS", X86Movss,
                                  v4f32, UseSSE1>;
defm : scalar_unary_math_patterns<int_x86_sse2_sqrt_sd, "SQRTSD", X86Movsd,
                                  v2f64, UseSSE2>;

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Non-temporal stores
//===----------------------------------------------------------------------===//
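
// Non-temporal stores bypass the cache hierarchy and write-combine directly
// to memory; in C they correspond to the streaming-store intrinsics, e.g.:
//   _mm_stream_ps(p, v);   // movntps: p must be 16-byte aligned
//   _mm_stream_si32(p, x); // movnti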

let AddedComplexity = 400 in { // Prefer non-temporal versions
let SchedRW = [WriteStore] in {
let Predicates = [HasAVX, NoVLX] in {
def VMOVNTPSmr : VPSI<0x2B, MRMDestMem, (outs),
                      (ins f128mem:$dst, VR128:$src),
                      "movntps\t{$src, $dst|$dst, $src}",
                      [(alignednontemporalstore (v4f32 VR128:$src),
                                                addr:$dst)],
                      IIC_SSE_MOVNT>, VEX;
def VMOVNTPDmr : VPDI<0x2B, MRMDestMem, (outs),
                      (ins f128mem:$dst, VR128:$src),
                      "movntpd\t{$src, $dst|$dst, $src}",
                      [(alignednontemporalstore (v2f64 VR128:$src),
                                                addr:$dst)],
                      IIC_SSE_MOVNT>, VEX;

let ExeDomain = SSEPackedInt in
def VMOVNTDQmr : VPDI<0xE7, MRMDestMem, (outs),
                      (ins f128mem:$dst, VR128:$src),
                      "movntdq\t{$src, $dst|$dst, $src}",
                      [(alignednontemporalstore (v2i64 VR128:$src),
                                                addr:$dst)],
                      IIC_SSE_MOVNT>, VEX;

def VMOVNTPSYmr : VPSI<0x2B, MRMDestMem, (outs),
                       (ins f256mem:$dst, VR256:$src),
                       "movntps\t{$src, $dst|$dst, $src}",
                       [(alignednontemporalstore (v8f32 VR256:$src),
                                                 addr:$dst)],
                       IIC_SSE_MOVNT>, VEX, VEX_L;
def VMOVNTPDYmr : VPDI<0x2B, MRMDestMem, (outs),
                       (ins f256mem:$dst, VR256:$src),
                       "movntpd\t{$src, $dst|$dst, $src}",
                       [(alignednontemporalstore (v4f64 VR256:$src),
                                                 addr:$dst)],
                       IIC_SSE_MOVNT>, VEX, VEX_L;
let ExeDomain = SSEPackedInt in
def VMOVNTDQYmr : VPDI<0xE7, MRMDestMem, (outs),
                       (ins f256mem:$dst, VR256:$src),
                       "movntdq\t{$src, $dst|$dst, $src}",
                       [(alignednontemporalstore (v4i64 VR256:$src),
                                                 addr:$dst)],
                       IIC_SSE_MOVNT>, VEX, VEX_L;
}

def MOVNTPSmr : PSI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                    "movntps\t{$src, $dst|$dst, $src}",
                    [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)],
                    IIC_SSE_MOVNT>;
def MOVNTPDmr : PDI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                    "movntpd\t{$src, $dst|$dst, $src}",
                    [(alignednontemporalstore (v2f64 VR128:$src), addr:$dst)],
                    IIC_SSE_MOVNT>;

let ExeDomain = SSEPackedInt in
def MOVNTDQmr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                    "movntdq\t{$src, $dst|$dst, $src}",
                    [(alignednontemporalstore (v2i64 VR128:$src), addr:$dst)],
                    IIC_SSE_MOVNT>;

// There is no AVX form for instructions below this point.
def MOVNTImr : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
                 "movnti{l}\t{$src, $dst|$dst, $src}",
                 [(nontemporalstore (i32 GR32:$src), addr:$dst)],
                 IIC_SSE_MOVNT>,
               PS, Requires<[HasSSE2]>;
def MOVNTI_64mr : RI<0xC3, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
                     "movnti{q}\t{$src, $dst|$dst, $src}",
                     [(nontemporalstore (i64 GR64:$src), addr:$dst)],
                     IIC_SSE_MOVNT>,
                  PS, Requires<[HasSSE2]>;
} // SchedRW = [WriteStore]

let Predicates = [HasAVX2, NoVLX] in {
  def : Pat<(alignednontemporalstore (v8i32 VR256:$src), addr:$dst),
            (VMOVNTDQYmr addr:$dst, VR256:$src)>;
  def : Pat<(alignednontemporalstore (v16i16 VR256:$src), addr:$dst),
            (VMOVNTDQYmr addr:$dst, VR256:$src)>;
  def : Pat<(alignednontemporalstore (v32i8 VR256:$src), addr:$dst),
            (VMOVNTDQYmr addr:$dst, VR256:$src)>;
}

let Predicates = [HasAVX, NoVLX] in {
  def : Pat<(alignednontemporalstore (v4i32 VR128:$src), addr:$dst),
            (VMOVNTDQmr addr:$dst, VR128:$src)>;
  def : Pat<(alignednontemporalstore (v8i16 VR128:$src), addr:$dst),
            (VMOVNTDQmr addr:$dst, VR128:$src)>;
  def : Pat<(alignednontemporalstore (v16i8 VR128:$src), addr:$dst),
            (VMOVNTDQmr addr:$dst, VR128:$src)>;
}

def : Pat<(alignednontemporalstore (v4i32 VR128:$src), addr:$dst),
          (MOVNTDQmr addr:$dst, VR128:$src)>;
def : Pat<(alignednontemporalstore (v8i16 VR128:$src), addr:$dst),
          (MOVNTDQmr addr:$dst, VR128:$src)>;
def : Pat<(alignednontemporalstore (v16i8 VR128:$src), addr:$dst),
          (MOVNTDQmr addr:$dst, VR128:$src)>;

} // AddedComplexity

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Prefetch and memory fence
//===----------------------------------------------------------------------===//

// Prefetch intrinsic.
let Predicates = [HasSSE1], SchedRW = [WriteLoad] in {
def PREFETCHT0  : I<0x18, MRM1m, (outs), (ins i8mem:$src),
    "prefetcht0\t$src", [(prefetch addr:$src, imm, (i32 3), (i32 1))],
    IIC_SSE_PREFETCH>, TB;
def PREFETCHT1  : I<0x18, MRM2m, (outs), (ins i8mem:$src),
    "prefetcht1\t$src", [(prefetch addr:$src, imm, (i32 2), (i32 1))],
    IIC_SSE_PREFETCH>, TB;
def PREFETCHT2  : I<0x18, MRM3m, (outs), (ins i8mem:$src),
    "prefetcht2\t$src", [(prefetch addr:$src, imm, (i32 1), (i32 1))],
    IIC_SSE_PREFETCH>, TB;
def PREFETCHNTA : I<0x18, MRM0m, (outs), (ins i8mem:$src),
    "prefetchnta\t$src", [(prefetch addr:$src, imm, (i32 0), (i32 1))],
    IIC_SSE_PREFETCH>, TB;
}
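
// The third operand of the prefetch node above is the locality hint: 3 maps
// to prefetcht0 (fetch into all cache levels) down through 0 for prefetchnta
// (non-temporal, minimal cache pollution).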

// FIXME: How should the flush instruction be modeled?
let SchedRW = [WriteLoad] in {
def CLFLUSH : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
               "clflush\t$src", [(int_x86_sse2_clflush addr:$src)],
               IIC_SSE_PREFETCH>, PS, Requires<[HasSSE2]>;
}

let SchedRW = [WriteNop] in {
// Pause. This "instruction" is encoded as "rep; nop", so even though it
// was introduced with SSE2, it's backward compatible.
def PAUSE : I<0x90, RawFrm, (outs), (ins),
              "pause", [(int_x86_sse2_pause)], IIC_SSE_PAUSE>,
              OBXS, Requires<[HasSSE2]>;
}

let SchedRW = [WriteFence] in {
// Load, store, and memory fence
def SFENCE : I<0xAE, MRM_F8, (outs), (ins),
               "sfence", [(int_x86_sse_sfence)], IIC_SSE_SFENCE>,
               PS, Requires<[HasSSE1]>;
def LFENCE : I<0xAE, MRM_E8, (outs), (ins),
               "lfence", [(int_x86_sse2_lfence)], IIC_SSE_LFENCE>,
               TB, Requires<[HasSSE2]>;
def MFENCE : I<0xAE, MRM_F0, (outs), (ins),
               "mfence", [(int_x86_sse2_mfence)], IIC_SSE_MFENCE>,
               TB, Requires<[HasSSE2]>;
} // SchedRW

def : Pat<(X86SFence), (SFENCE)>;
def : Pat<(X86LFence), (LFENCE)>;
def : Pat<(X86MFence), (MFENCE)>;
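
// For reference: sfence orders stores (including the non-temporal stores
// above), lfence orders loads, and mfence orders both loads and stores.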

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Load/Store MXCSR register
//===----------------------------------------------------------------------===//

def VLDMXCSR : VPSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
                    "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)],
                    IIC_SSE_LDMXCSR>, VEX, Sched<[WriteLoad]>;
def VSTMXCSR : VPSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
                    "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)],
                    IIC_SSE_STMXCSR>, VEX, Sched<[WriteStore]>;

let Predicates = [UseSSE1] in {
def LDMXCSR : I<0xAE, MRM2m, (outs), (ins i32mem:$src),
                "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)],
                IIC_SSE_LDMXCSR>, TB, Sched<[WriteLoad]>;
def STMXCSR : I<0xAE, MRM3m, (outs), (ins i32mem:$dst),
                "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)],
                IIC_SSE_STMXCSR>, TB, Sched<[WriteStore]>;
}

//===---------------------------------------------------------------------===//
// SSE2 - Move Aligned/Unaligned Packed Integer Instructions
//===---------------------------------------------------------------------===//

let ExeDomain = SSEPackedInt in { // SSE integer instructions

let hasSideEffects = 0, SchedRW = [WriteMove] in {
def VMOVDQArr  : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                      "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_RR>,
                      VEX;
def VMOVDQAYrr : VPDI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
                      "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_RR>,
                      VEX, VEX_L;
def VMOVDQUrr  : VSSI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                      "movdqu\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVU_P_RR>,
                      VEX;
def VMOVDQUYrr : VSSI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
                      "movdqu\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVU_P_RR>,
                      VEX, VEX_L;
}

let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0,
    SchedRW = [WriteMove] in {
def VMOVDQArr_REV  : VPDI<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
                          "movdqa\t{$src, $dst|$dst, $src}", [],
                          IIC_SSE_MOVA_P_RR>, VEX;
def VMOVDQAYrr_REV : VPDI<0x7F, MRMDestReg, (outs VR256:$dst), (ins VR256:$src),
                          "movdqa\t{$src, $dst|$dst, $src}", [],
                          IIC_SSE_MOVA_P_RR>, VEX, VEX_L;
def VMOVDQUrr_REV  : VSSI<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
                          "movdqu\t{$src, $dst|$dst, $src}", [],
                          IIC_SSE_MOVU_P_RR>, VEX;
def VMOVDQUYrr_REV : VSSI<0x7F, MRMDestReg, (outs VR256:$dst), (ins VR256:$src),
                          "movdqu\t{$src, $dst|$dst, $src}", [],
                          IIC_SSE_MOVU_P_RR>, VEX, VEX_L;
}

let canFoldAsLoad = 1, mayLoad = 1, isReMaterializable = 1,
    hasSideEffects = 0, SchedRW = [WriteLoad] in {
def VMOVDQArm  : VPDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                      "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_RM>,
                      VEX;
def VMOVDQAYrm : VPDI<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
                      "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_RM>,
                      VEX, VEX_L;
let Predicates = [HasAVX] in {
  def VMOVDQUrm  : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                     "vmovdqu\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVU_P_RM>,
                     XS, VEX;
  def VMOVDQUYrm : I<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
                     "vmovdqu\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVU_P_RM>,
                     XS, VEX, VEX_L;
}
}

let mayStore = 1, hasSideEffects = 0, SchedRW = [WriteStore] in {
def VMOVDQAmr  : VPDI<0x7F, MRMDestMem, (outs),
                      (ins i128mem:$dst, VR128:$src),
                      "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_MR>,
                      VEX;
def VMOVDQAYmr : VPDI<0x7F, MRMDestMem, (outs),
                      (ins i256mem:$dst, VR256:$src),
                      "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_MR>,
                      VEX, VEX_L;
let Predicates = [HasAVX] in {
  def VMOVDQUmr  : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
                     "vmovdqu\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVU_P_MR>,
                     XS, VEX;
  def VMOVDQUYmr : I<0x7F, MRMDestMem, (outs), (ins i256mem:$dst, VR256:$src),
                     "vmovdqu\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVU_P_MR>,
                     XS, VEX, VEX_L;
}
}

let SchedRW = [WriteMove] in {
let hasSideEffects = 0 in
def MOVDQArr : PDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                   "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_RR>;

def MOVDQUrr :   I<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                   "movdqu\t{$src, $dst|$dst, $src}",
                   [], IIC_SSE_MOVU_P_RR>, XS, Requires<[UseSSE2]>;

let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in {
def MOVDQArr_REV : PDI<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
                       "movdqa\t{$src, $dst|$dst, $src}", [],
                       IIC_SSE_MOVA_P_RR>;

def MOVDQUrr_REV :   I<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
                       "movdqu\t{$src, $dst|$dst, $src}",
                       [], IIC_SSE_MOVU_P_RR>, XS, Requires<[UseSSE2]>;
}
} // SchedRW

let canFoldAsLoad = 1, mayLoad = 1, isReMaterializable = 1,
    hasSideEffects = 0, SchedRW = [WriteLoad] in {
def MOVDQArm : PDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                   "movdqa\t{$src, $dst|$dst, $src}",
                   [/*(set VR128:$dst, (alignedloadv2i64 addr:$src))*/],
                   IIC_SSE_MOVA_P_RM>;
def MOVDQUrm :   I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                   "movdqu\t{$src, $dst|$dst, $src}",
                   [/*(set VR128:$dst, (loadv2i64 addr:$src))*/],
                   IIC_SSE_MOVU_P_RM>,
                   XS, Requires<[UseSSE2]>;
}

let mayStore = 1, hasSideEffects = 0, SchedRW = [WriteStore] in {
def MOVDQAmr : PDI<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
                   "movdqa\t{$src, $dst|$dst, $src}",
                   [/*(alignedstore (v2i64 VR128:$src), addr:$dst)*/],
                   IIC_SSE_MOVA_P_MR>;
def MOVDQUmr :   I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
                   "movdqu\t{$src, $dst|$dst, $src}",
                   [/*(store (v2i64 VR128:$src), addr:$dst)*/],
                   IIC_SSE_MOVU_P_MR>,
                   XS, Requires<[UseSSE2]>;
}

} // ExeDomain = SSEPackedInt

let Predicates = [HasAVX] in {
  def : Pat<(int_x86_sse2_storeu_dq addr:$dst, VR128:$src),
            (VMOVDQUmr addr:$dst, VR128:$src)>;
  def : Pat<(int_x86_avx_storeu_dq_256 addr:$dst, VR256:$src),
            (VMOVDQUYmr addr:$dst, VR256:$src)>;
}
let Predicates = [UseSSE2] in
def : Pat<(int_x86_sse2_storeu_dq addr:$dst, VR128:$src),
          (MOVDQUmr addr:$dst, VR128:$src)>;

//===---------------------------------------------------------------------===//
// SSE2 - Packed Integer Arithmetic Instructions
//===---------------------------------------------------------------------===//

let Sched = WriteVecIMul in
def SSE_PMADD : OpndItins<
  IIC_SSE_PMADD, IIC_SSE_PMADD
>;

let ExeDomain = SSEPackedInt in { // SSE integer instructions

multiclass PDI_binop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId,
                            RegisterClass RC, PatFrag memop_frag,
                            X86MemOperand x86memop,
                            OpndItins itins,
                            bit IsCommutable = 0,
                            bit Is2Addr = 1> {
  let isCommutable = IsCommutable in
  def rr : PDI<opc, MRMSrcReg, (outs RC:$dst),
       (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (IntId RC:$src1, RC:$src2))], itins.rr>,
       Sched<[itins.Sched]>;
  def rm : PDI<opc, MRMSrcMem, (outs RC:$dst),
       (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (IntId RC:$src1, (bitconvert (memop_frag addr:$src2))))],
       itins.rm>, Sched<[itins.Sched.Folded, ReadAfterLd]>;
}

multiclass PDI_binop_all_int<bits<8> opc, string OpcodeStr, Intrinsic IntId128,
                             Intrinsic IntId256, OpndItins itins,
                             bit IsCommutable = 0> {
let Predicates = [HasAVX] in
  defm V#NAME : PDI_binop_rm_int<opc, !strconcat("v", OpcodeStr), IntId128,
                                 VR128, loadv2i64, i128mem, itins,
                                 IsCommutable, 0>, VEX_4V;

let Constraints = "$src1 = $dst" in
  defm NAME : PDI_binop_rm_int<opc, OpcodeStr, IntId128, VR128, memopv2i64,
                               i128mem, itins, IsCommutable, 1>;

let Predicates = [HasAVX2] in
  defm V#NAME#Y : PDI_binop_rm_int<opc, !strconcat("v", OpcodeStr), IntId256,
                                   VR256, loadv4i64, i256mem, itins,
                                   IsCommutable, 0>, VEX_4V, VEX_L;
}

multiclass PDI_binop_rmi<bits<8> opc, bits<8> opc2, Format ImmForm,
                         string OpcodeStr, SDNode OpNode,
                         SDNode OpNode2, RegisterClass RC,
                         ValueType DstVT, ValueType SrcVT, PatFrag bc_frag,
                         PatFrag ld_frag, ShiftOpndItins itins,
                         bit Is2Addr = 1> {
  // src2 is always 128-bit
  def rr : PDI<opc, MRMSrcReg, (outs RC:$dst),
       (ins RC:$src1, VR128:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (DstVT (OpNode RC:$src1, (SrcVT VR128:$src2))))],
       itins.rr>, Sched<[WriteVecShift]>;
  def rm : PDI<opc, MRMSrcMem, (outs RC:$dst),
       (ins RC:$src1, i128mem:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (DstVT (OpNode RC:$src1,
                       (bc_frag (ld_frag addr:$src2)))))], itins.rm>,
       Sched<[WriteVecShiftLd, ReadAfterLd]>;
  def ri : PDIi8<opc2, ImmForm, (outs RC:$dst),
       (ins RC:$src1, u8imm:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (DstVT (OpNode2 RC:$src1, (i8 imm:$src2))))], itins.ri>,
       Sched<[WriteVecShift]>;
}

/// PDI_binop_rm2 - Simple SSE2 binary operator with different src and dst types
multiclass PDI_binop_rm2<bits<8> opc, string OpcodeStr, SDNode OpNode,
                         ValueType DstVT, ValueType SrcVT, RegisterClass RC,
                         PatFrag memop_frag, X86MemOperand x86memop,
                         OpndItins itins,
                         bit IsCommutable = 0, bit Is2Addr = 1> {
  let isCommutable = IsCommutable in
  def rr : PDI<opc, MRMSrcReg, (outs RC:$dst),
       (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (DstVT (OpNode (SrcVT RC:$src1), RC:$src2)))]>,
       Sched<[itins.Sched]>;
  def rm : PDI<opc, MRMSrcMem, (outs RC:$dst),
       (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (DstVT (OpNode (SrcVT RC:$src1),
                       (bitconvert (memop_frag addr:$src2)))))]>,
       Sched<[itins.Sched.Folded, ReadAfterLd]>;
}

} // ExeDomain = SSEPackedInt

defm PADDB   : PDI_binop_all<0xFC, "paddb", add, v16i8, v32i8,
                             SSE_INTALU_ITINS_P, 1, NoVLX_Or_NoBWI>;
defm PADDW   : PDI_binop_all<0xFD, "paddw", add, v8i16, v16i16,
                             SSE_INTALU_ITINS_P, 1, NoVLX_Or_NoBWI>;
defm PADDD   : PDI_binop_all<0xFE, "paddd", add, v4i32, v8i32,
                             SSE_INTALU_ITINS_P, 1, NoVLX>;
defm PADDQ   : PDI_binop_all<0xD4, "paddq", add, v2i64, v4i64,
                             SSE_INTALUQ_ITINS_P, 1, NoVLX>;
defm PMULLW  : PDI_binop_all<0xD5, "pmullw", mul, v8i16, v16i16,
                             SSE_INTMUL_ITINS_P, 1, NoVLX_Or_NoBWI>;
defm PMULHUW : PDI_binop_all<0xE4, "pmulhuw", mulhu, v8i16, v16i16,
                             SSE_INTMUL_ITINS_P, 1, NoVLX_Or_NoBWI>;
defm PMULHW  : PDI_binop_all<0xE5, "pmulhw", mulhs, v8i16, v16i16,
                             SSE_INTMUL_ITINS_P, 1, NoVLX_Or_NoBWI>;
defm PSUBB   : PDI_binop_all<0xF8, "psubb", sub, v16i8, v32i8,
                             SSE_INTALU_ITINS_P, 0, NoVLX_Or_NoBWI>;
defm PSUBW   : PDI_binop_all<0xF9, "psubw", sub, v8i16, v16i16,
                             SSE_INTALU_ITINS_P, 0, NoVLX_Or_NoBWI>;
defm PSUBD   : PDI_binop_all<0xFA, "psubd", sub, v4i32, v8i32,
                             SSE_INTALU_ITINS_P, 0, NoVLX>;
defm PSUBQ   : PDI_binop_all<0xFB, "psubq", sub, v2i64, v4i64,
                             SSE_INTALUQ_ITINS_P, 0, NoVLX>;
defm PSUBUSB : PDI_binop_all<0xD8, "psubusb", X86subus, v16i8, v32i8,
                             SSE_INTALU_ITINS_P, 0, NoVLX_Or_NoBWI>;
defm PSUBUSW : PDI_binop_all<0xD9, "psubusw", X86subus, v8i16, v16i16,
                             SSE_INTALU_ITINS_P, 0, NoVLX_Or_NoBWI>;
defm PMINUB  : PDI_binop_all<0xDA, "pminub", umin, v16i8, v32i8,
                             SSE_INTALU_ITINS_P, 1, NoVLX_Or_NoBWI>;
defm PMINSW  : PDI_binop_all<0xEA, "pminsw", smin, v8i16, v16i16,
                             SSE_INTALU_ITINS_P, 1, NoVLX_Or_NoBWI>;
defm PMAXUB  : PDI_binop_all<0xDE, "pmaxub", umax, v16i8, v32i8,
                             SSE_INTALU_ITINS_P, 1, NoVLX_Or_NoBWI>;
defm PMAXSW  : PDI_binop_all<0xEE, "pmaxsw", smax, v8i16, v16i16,
                             SSE_INTALU_ITINS_P, 1, NoVLX_Or_NoBWI>;
defm PAVGB   : PDI_binop_all<0xE0, "pavgb", X86avg, v16i8, v32i8,
                             SSE_INTALU_ITINS_P, 1, NoVLX_Or_NoBWI>;
defm PAVGW   : PDI_binop_all<0xE3, "pavgw", X86avg, v8i16, v16i16,
                             SSE_INTALU_ITINS_P, 1, NoVLX_Or_NoBWI>;
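
// For reference, pavgb/pavgw compute the rounded unsigned average of each
// lane pair: (a + b + 1) >> 1.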

defm PSUBSB  : PDI_binop_all_int<0xE8, "psubsb", int_x86_sse2_psubs_b,
                                 int_x86_avx2_psubs_b, SSE_INTALU_ITINS_P, 0>;
defm PSUBSW  : PDI_binop_all_int<0xE9, "psubsw", int_x86_sse2_psubs_w,
                                 int_x86_avx2_psubs_w, SSE_INTALU_ITINS_P, 0>;
defm PADDSB  : PDI_binop_all_int<0xEC, "paddsb", int_x86_sse2_padds_b,
                                 int_x86_avx2_padds_b, SSE_INTALU_ITINS_P, 1>;
defm PADDSW  : PDI_binop_all_int<0xED, "paddsw", int_x86_sse2_padds_w,
                                 int_x86_avx2_padds_w, SSE_INTALU_ITINS_P, 1>;
defm PADDUSB : PDI_binop_all_int<0xDC, "paddusb", int_x86_sse2_paddus_b,
                                 int_x86_avx2_paddus_b, SSE_INTALU_ITINS_P, 1>;
defm PADDUSW : PDI_binop_all_int<0xDD, "paddusw", int_x86_sse2_paddus_w,
                                 int_x86_avx2_paddus_w, SSE_INTALU_ITINS_P, 1>;
defm PMADDWD : PDI_binop_all_int<0xF5, "pmaddwd", int_x86_sse2_pmadd_wd,
                                 int_x86_avx2_pmadd_wd, SSE_PMADD, 1>;
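
// For reference, pmaddwd multiplies corresponding signed 16-bit lanes and
// sums adjacent pairs of the 32-bit products, roughly:
//   dst.i32[i] = a.i16[2i]*b.i16[2i] + a.i16[2i+1]*b.i16[2i+1]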

let Predicates = [HasAVX] in
defm VPSADBW : PDI_binop_rm2<0xF6, "vpsadbw", X86psadbw, v2i64, v16i8, VR128,
                             loadv2i64, i128mem, SSE_INTMUL_ITINS_P, 1, 0>,
                             VEX_4V;
let Predicates = [HasAVX2] in
defm VPSADBWY : PDI_binop_rm2<0xF6, "vpsadbw", X86psadbw, v4i64, v32i8, VR256,
                              loadv4i64, i256mem, SSE_INTMUL_ITINS_P, 1, 0>,
                              VEX_4V, VEX_L;
let Constraints = "$src1 = $dst" in
defm PSADBW : PDI_binop_rm2<0xF6, "psadbw", X86psadbw, v2i64, v16i8, VR128,
                            memopv2i64, i128mem, SSE_INTALU_ITINS_P, 1>;

let Predicates = [HasAVX] in
defm VPMULUDQ : PDI_binop_rm2<0xF4, "vpmuludq", X86pmuludq, v2i64, v4i32, VR128,
                              loadv2i64, i128mem, SSE_INTMUL_ITINS_P, 1, 0>,
                              VEX_4V;
let Predicates = [HasAVX2] in
defm VPMULUDQY : PDI_binop_rm2<0xF4, "vpmuludq", X86pmuludq, v4i64, v8i32,
                               VR256, loadv4i64, i256mem,
                               SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V, VEX_L;
let Constraints = "$src1 = $dst" in
defm PMULUDQ : PDI_binop_rm2<0xF4, "pmuludq", X86pmuludq, v2i64, v4i32, VR128,
                             memopv2i64, i128mem, SSE_INTMUL_ITINS_P, 1>;
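
// For reference, psadbw sums the absolute differences of the unsigned bytes
// in each 64-bit half into a 16-bit sum zero-extended to 64 bits, and
// pmuludq multiplies the even unsigned 32-bit lanes into 64-bit products.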

//===---------------------------------------------------------------------===//
// SSE2 - Packed Integer Logical Instructions
//===---------------------------------------------------------------------===//
4124 let Predicates = [HasAVX, NoVLX] in {
4125 defm VPSLLD : PDI_binop_rmi<0xF2, 0x72, MRM6r, "vpslld", X86vshl, X86vshli,
4126 VR128, v4i32, v4i32, bc_v4i32, loadv2i64,
4127 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
4128 defm VPSLLQ : PDI_binop_rmi<0xF3, 0x73, MRM6r, "vpsllq", X86vshl, X86vshli,
4129 VR128, v2i64, v2i64, bc_v2i64, loadv2i64,
4130 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
4132 defm VPSRLD : PDI_binop_rmi<0xD2, 0x72, MRM2r, "vpsrld", X86vsrl, X86vsrli,
4133 VR128, v4i32, v4i32, bc_v4i32, loadv2i64,
4134 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
4135 defm VPSRLQ : PDI_binop_rmi<0xD3, 0x73, MRM2r, "vpsrlq", X86vsrl, X86vsrli,
4136 VR128, v2i64, v2i64, bc_v2i64, loadv2i64,
4137 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
4139 defm VPSRAD : PDI_binop_rmi<0xE2, 0x72, MRM4r, "vpsrad", X86vsra, X86vsrai,
4140 VR128, v4i32, v4i32, bc_v4i32, loadv2i64,
4141 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
4142 } // Predicates = [HasAVX, NoVLX]

let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
defm VPSLLW : PDI_binop_rmi<0xF1, 0x71, MRM6r, "vpsllw", X86vshl, X86vshli,
                            VR128, v8i16, v8i16, bc_v8i16, loadv2i64,
                            SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
defm VPSRLW : PDI_binop_rmi<0xD1, 0x71, MRM2r, "vpsrlw", X86vsrl, X86vsrli,
                            VR128, v8i16, v8i16, bc_v8i16, loadv2i64,
                            SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
defm VPSRAW : PDI_binop_rmi<0xE1, 0x71, MRM4r, "vpsraw", X86vsra, X86vsrai,
                            VR128, v8i16, v8i16, bc_v8i16, loadv2i64,
                            SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
} // Predicates = [HasAVX, NoVLX_Or_NoBWI]

let ExeDomain = SSEPackedInt, SchedRW = [WriteVecShift],
    Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
  // 128-bit logical shifts.
  def VPSLLDQri : PDIi8<0x73, MRM7r,
                        (outs VR128:$dst), (ins VR128:$src1, u8imm:$src2),
                        "vpslldq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                        [(set VR128:$dst,
                          (v2i64 (X86vshldq VR128:$src1, (i8 imm:$src2))))]>,
                        VEX_4V;
  def VPSRLDQri : PDIi8<0x73, MRM3r,
                        (outs VR128:$dst), (ins VR128:$src1, u8imm:$src2),
                        "vpsrldq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                        [(set VR128:$dst,
                          (v2i64 (X86vshrdq VR128:$src1, (i8 imm:$src2))))]>,
                        VEX_4V;
  // PSRADQri doesn't exist in SSE[1-3].
} // Predicates = [HasAVX, NoVLX_Or_NoBWI]
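
// Example: "vpslldq $3, %xmm1, %xmm0" shifts the whole 128-bit value left by
// 3 *bytes* (shifting in zeros), unlike VPSLLQ and friends, which shift in
// units of bits within each element; that is why these use distinct
// X86vshldq/X86vshrdq nodes.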

let Predicates = [HasAVX2, NoVLX] in {
defm VPSLLDY : PDI_binop_rmi<0xF2, 0x72, MRM6r, "vpslld", X86vshl, X86vshli,
                             VR256, v8i32, v4i32, bc_v4i32, loadv2i64,
                             SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
defm VPSLLQY : PDI_binop_rmi<0xF3, 0x73, MRM6r, "vpsllq", X86vshl, X86vshli,
                             VR256, v4i64, v2i64, bc_v2i64, loadv2i64,
                             SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;

defm VPSRLDY : PDI_binop_rmi<0xD2, 0x72, MRM2r, "vpsrld", X86vsrl, X86vsrli,
                             VR256, v8i32, v4i32, bc_v4i32, loadv2i64,
                             SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
defm VPSRLQY : PDI_binop_rmi<0xD3, 0x73, MRM2r, "vpsrlq", X86vsrl, X86vsrli,
                             VR256, v4i64, v2i64, bc_v2i64, loadv2i64,
                             SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;

defm VPSRADY : PDI_binop_rmi<0xE2, 0x72, MRM4r, "vpsrad", X86vsra, X86vsrai,
                             VR256, v8i32, v4i32, bc_v4i32, loadv2i64,
                             SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
} // Predicates = [HasAVX2, NoVLX]

let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in {
defm VPSLLWY : PDI_binop_rmi<0xF1, 0x71, MRM6r, "vpsllw", X86vshl, X86vshli,
                             VR256, v16i16, v8i16, bc_v8i16, loadv2i64,
                             SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
defm VPSRLWY : PDI_binop_rmi<0xD1, 0x71, MRM2r, "vpsrlw", X86vsrl, X86vsrli,
                             VR256, v16i16, v8i16, bc_v8i16, loadv2i64,
                             SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
defm VPSRAWY : PDI_binop_rmi<0xE1, 0x71, MRM4r, "vpsraw", X86vsra, X86vsrai,
                             VR256, v16i16, v8i16, bc_v8i16, loadv2i64,
                             SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
} // Predicates = [HasAVX2, NoVLX_Or_NoBWI]

let ExeDomain = SSEPackedInt, SchedRW = [WriteVecShift], hasSideEffects = 0,
    Predicates = [HasAVX2, NoVLX_Or_NoBWI] in {
  // 256-bit logical shifts.
  def VPSLLDQYri : PDIi8<0x73, MRM7r,
                         (outs VR256:$dst), (ins VR256:$src1, u8imm:$src2),
                         "vpslldq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                         [(set VR256:$dst,
                           (v4i64 (X86vshldq VR256:$src1, (i8 imm:$src2))))]>,
                         VEX_4V, VEX_L;
  def VPSRLDQYri : PDIi8<0x73, MRM3r,
                         (outs VR256:$dst), (ins VR256:$src1, u8imm:$src2),
                         "vpsrldq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                         [(set VR256:$dst,
                           (v4i64 (X86vshrdq VR256:$src1, (i8 imm:$src2))))]>,
                         VEX_4V, VEX_L;
  // PSRADQYri doesn't exist in SSE[1-3].
} // Predicates = [HasAVX2, NoVLX_Or_NoBWI]
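
// Note that the 256-bit byte-shift forms shift each 128-bit lane
// independently; bytes never cross the boundary between the low and high
// halves of the YMM register.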

let Constraints = "$src1 = $dst" in {
defm PSLLW : PDI_binop_rmi<0xF1, 0x71, MRM6r, "psllw", X86vshl, X86vshli,
                           VR128, v8i16, v8i16, bc_v8i16, memopv2i64,
                           SSE_INTSHIFT_ITINS_P>;
defm PSLLD : PDI_binop_rmi<0xF2, 0x72, MRM6r, "pslld", X86vshl, X86vshli,
                           VR128, v4i32, v4i32, bc_v4i32, memopv2i64,
                           SSE_INTSHIFT_ITINS_P>;
defm PSLLQ : PDI_binop_rmi<0xF3, 0x73, MRM6r, "psllq", X86vshl, X86vshli,
                           VR128, v2i64, v2i64, bc_v2i64, memopv2i64,
                           SSE_INTSHIFT_ITINS_P>;

defm PSRLW : PDI_binop_rmi<0xD1, 0x71, MRM2r, "psrlw", X86vsrl, X86vsrli,
                           VR128, v8i16, v8i16, bc_v8i16, memopv2i64,
                           SSE_INTSHIFT_ITINS_P>;
defm PSRLD : PDI_binop_rmi<0xD2, 0x72, MRM2r, "psrld", X86vsrl, X86vsrli,
                           VR128, v4i32, v4i32, bc_v4i32, memopv2i64,
                           SSE_INTSHIFT_ITINS_P>;
defm PSRLQ : PDI_binop_rmi<0xD3, 0x73, MRM2r, "psrlq", X86vsrl, X86vsrli,
                           VR128, v2i64, v2i64, bc_v2i64, memopv2i64,
                           SSE_INTSHIFT_ITINS_P>;

defm PSRAW : PDI_binop_rmi<0xE1, 0x71, MRM4r, "psraw", X86vsra, X86vsrai,
                           VR128, v8i16, v8i16, bc_v8i16, memopv2i64,
                           SSE_INTSHIFT_ITINS_P>;
defm PSRAD : PDI_binop_rmi<0xE2, 0x72, MRM4r, "psrad", X86vsra, X86vsrai,
                           VR128, v4i32, v4i32, bc_v4i32, memopv2i64,
                           SSE_INTSHIFT_ITINS_P>;

let ExeDomain = SSEPackedInt, SchedRW = [WriteVecShift], hasSideEffects = 0 in {
  // 128-bit logical shifts.
  def PSLLDQri : PDIi8<0x73, MRM7r,
                       (outs VR128:$dst), (ins VR128:$src1, u8imm:$src2),
                       "pslldq\t{$src2, $dst|$dst, $src2}",
                       [(set VR128:$dst,
                         (v2i64 (X86vshldq VR128:$src1, (i8 imm:$src2))))],
                       IIC_SSE_INTSHDQ_P_RI>;
  def PSRLDQri : PDIi8<0x73, MRM3r,
                       (outs VR128:$dst), (ins VR128:$src1, u8imm:$src2),
                       "psrldq\t{$src2, $dst|$dst, $src2}",
                       [(set VR128:$dst,
                         (v2i64 (X86vshrdq VR128:$src1, (i8 imm:$src2))))],
                       IIC_SSE_INTSHDQ_P_RI>;
  // PSRADQri doesn't exist in SSE[1-3].
}
} // Constraints = "$src1 = $dst"

//===---------------------------------------------------------------------===//
// SSE2 - Packed Integer Comparison Instructions
//===---------------------------------------------------------------------===//

defm PCMPEQB : PDI_binop_all<0x74, "pcmpeqb", X86pcmpeq, v16i8, v32i8,
                             SSE_INTALU_ITINS_P, 1, NoVLX_Or_NoBWI>;
defm PCMPEQW : PDI_binop_all<0x75, "pcmpeqw", X86pcmpeq, v8i16, v16i16,
                             SSE_INTALU_ITINS_P, 1, NoVLX_Or_NoBWI>;
defm PCMPEQD : PDI_binop_all<0x76, "pcmpeqd", X86pcmpeq, v4i32, v8i32,
                             SSE_INTALU_ITINS_P, 1, NoVLX>;
defm PCMPGTB : PDI_binop_all<0x64, "pcmpgtb", X86pcmpgt, v16i8, v32i8,
                             SSE_INTALU_ITINS_P, 0, NoVLX_Or_NoBWI>;
defm PCMPGTW : PDI_binop_all<0x65, "pcmpgtw", X86pcmpgt, v8i16, v16i16,
                             SSE_INTALU_ITINS_P, 0, NoVLX_Or_NoBWI>;
defm PCMPGTD : PDI_binop_all<0x66, "pcmpgtd", X86pcmpgt, v4i32, v8i32,
                             SSE_INTALU_ITINS_P, 0, NoVLX>;
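
// These comparisons produce all-ones or all-zeros elements rather than a
// single flag: e.g. "pcmpgtd %xmm1, %xmm0" leaves 0xFFFFFFFF in every dword
// of %xmm0 whose signed value was greater than the corresponding dword of
// %xmm1, and 0 elsewhere. The EQ forms are commutable (third argument 1
// above); the signed GT forms are not.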

//===---------------------------------------------------------------------===//
// SSE2 - Packed Integer Shuffle Instructions
//===---------------------------------------------------------------------===//

let ExeDomain = SSEPackedInt in {
multiclass sse2_pshuffle<string OpcodeStr, ValueType vt128, ValueType vt256,
                         SDNode OpNode> {
let Predicates = [HasAVX] in {
  def V#NAME#ri : Ii8<0x70, MRMSrcReg, (outs VR128:$dst),
                      (ins VR128:$src1, u8imm:$src2),
                      !strconcat("v", OpcodeStr,
                                 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                      [(set VR128:$dst,
                        (vt128 (OpNode VR128:$src1, (i8 imm:$src2))))],
                      IIC_SSE_PSHUF_RI>, VEX, Sched<[WriteShuffle]>;
  def V#NAME#mi : Ii8<0x70, MRMSrcMem, (outs VR128:$dst),
                      (ins i128mem:$src1, u8imm:$src2),
                      !strconcat("v", OpcodeStr,
                                 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                      [(set VR128:$dst,
                        (vt128 (OpNode (bitconvert (loadv2i64 addr:$src1)),
                                       (i8 imm:$src2))))], IIC_SSE_PSHUF_MI>, VEX,
                      Sched<[WriteShuffleLd]>;
}

let Predicates = [HasAVX2] in {
  def V#NAME#Yri : Ii8<0x70, MRMSrcReg, (outs VR256:$dst),
                       (ins VR256:$src1, u8imm:$src2),
                       !strconcat("v", OpcodeStr,
                                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                       [(set VR256:$dst,
                         (vt256 (OpNode VR256:$src1, (i8 imm:$src2))))],
                       IIC_SSE_PSHUF_RI>, VEX, VEX_L, Sched<[WriteShuffle]>;
  def V#NAME#Ymi : Ii8<0x70, MRMSrcMem, (outs VR256:$dst),
                       (ins i256mem:$src1, u8imm:$src2),
                       !strconcat("v", OpcodeStr,
                                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                       [(set VR256:$dst,
                         (vt256 (OpNode (bitconvert (loadv4i64 addr:$src1)),
                                        (i8 imm:$src2))))], IIC_SSE_PSHUF_MI>, VEX, VEX_L,
                       Sched<[WriteShuffleLd]>;
}

let Predicates = [UseSSE2] in {
  def ri : Ii8<0x70, MRMSrcReg,
               (outs VR128:$dst), (ins VR128:$src1, u8imm:$src2),
               !strconcat(OpcodeStr,
                          "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
               [(set VR128:$dst,
                 (vt128 (OpNode VR128:$src1, (i8 imm:$src2))))],
               IIC_SSE_PSHUF_RI>, Sched<[WriteShuffle]>;
  def mi : Ii8<0x70, MRMSrcMem,
               (outs VR128:$dst), (ins i128mem:$src1, u8imm:$src2),
               !strconcat(OpcodeStr,
                          "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
               [(set VR128:$dst,
                 (vt128 (OpNode (bitconvert (memopv2i64 addr:$src1)),
                                (i8 imm:$src2))))], IIC_SSE_PSHUF_MI>,
               Sched<[WriteShuffleLd, ReadAfterLd]>;
}
}
} // ExeDomain = SSEPackedInt

defm PSHUFD  : sse2_pshuffle<"pshufd", v4i32, v8i32, X86PShufd>, PD;
defm PSHUFHW : sse2_pshuffle<"pshufhw", v8i16, v16i16, X86PShufhw>, XS;
defm PSHUFLW : sse2_pshuffle<"pshuflw", v8i16, v16i16, X86PShuflw>, XD;

let Predicates = [HasAVX] in {
  def : Pat<(v4f32 (X86PShufd (loadv4f32 addr:$src1), (i8 imm:$imm))),
            (VPSHUFDmi addr:$src1, imm:$imm)>;
  def : Pat<(v4f32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
            (VPSHUFDri VR128:$src1, imm:$imm)>;
}

let Predicates = [UseSSE2] in {
  def : Pat<(v4f32 (X86PShufd (memopv4f32 addr:$src1), (i8 imm:$imm))),
            (PSHUFDmi addr:$src1, imm:$imm)>;
  def : Pat<(v4f32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
            (PSHUFDri VR128:$src1, imm:$imm)>;
}
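
// Immediate-encoding example: each 2-bit field of the PSHUFD immediate
// selects a source dword, so "pshufd $0x1B, %xmm1, %xmm0" (0x1B = 0b00011011)
// reverses the four dwords. The v4f32 patterns above additionally let PSHUFD
// match dword shuffles of float-typed data.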

//===---------------------------------------------------------------------===//
// Packed Integer Pack Instructions (SSE & AVX)
//===---------------------------------------------------------------------===//

let ExeDomain = SSEPackedInt in {
multiclass sse2_pack<bits<8> opc, string OpcodeStr, ValueType OutVT,
                     ValueType ArgVT, SDNode OpNode, PatFrag bc_frag,
                     PatFrag ld_frag, bit Is2Addr = 1> {
  def rr : PDI<opc, MRMSrcReg,
               (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
               !if(Is2Addr,
                   !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                   !strconcat(OpcodeStr,
                              "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
               [(set VR128:$dst,
                     (OutVT (OpNode (ArgVT VR128:$src1), VR128:$src2)))]>,
               Sched<[WriteShuffle]>;
  def rm : PDI<opc, MRMSrcMem,
               (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
               !if(Is2Addr,
                   !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                   !strconcat(OpcodeStr,
                              "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
               [(set VR128:$dst,
                     (OutVT (OpNode VR128:$src1,
                                    (bc_frag (ld_frag addr:$src2)))))]>,
               Sched<[WriteShuffleLd, ReadAfterLd]>;
}

multiclass sse2_pack_y<bits<8> opc, string OpcodeStr, ValueType OutVT,
                       ValueType ArgVT, SDNode OpNode, PatFrag bc_frag> {
  def Yrr : PDI<opc, MRMSrcReg,
                (outs VR256:$dst), (ins VR256:$src1, VR256:$src2),
                !strconcat(OpcodeStr,
                           "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                [(set VR256:$dst,
                      (OutVT (OpNode (ArgVT VR256:$src1), VR256:$src2)))]>,
                Sched<[WriteShuffle]>;
  def Yrm : PDI<opc, MRMSrcMem,
                (outs VR256:$dst), (ins VR256:$src1, i256mem:$src2),
                !strconcat(OpcodeStr,
                           "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                [(set VR256:$dst,
                      (OutVT (OpNode VR256:$src1,
                                     (bc_frag (loadv4i64 addr:$src2)))))]>,
                Sched<[WriteShuffleLd, ReadAfterLd]>;
}

multiclass sse4_pack<bits<8> opc, string OpcodeStr, ValueType OutVT,
                     ValueType ArgVT, SDNode OpNode, PatFrag bc_frag,
                     PatFrag ld_frag, bit Is2Addr = 1> {
  def rr : SS48I<opc, MRMSrcReg,
                 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                 !if(Is2Addr,
                     !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                     !strconcat(OpcodeStr,
                                "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
                 [(set VR128:$dst,
                       (OutVT (OpNode (ArgVT VR128:$src1), VR128:$src2)))]>,
                 Sched<[WriteShuffle]>;
  def rm : SS48I<opc, MRMSrcMem,
                 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                 !if(Is2Addr,
                     !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                     !strconcat(OpcodeStr,
                                "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
                 [(set VR128:$dst,
                       (OutVT (OpNode VR128:$src1,
                                      (bc_frag (ld_frag addr:$src2)))))]>,
                 Sched<[WriteShuffleLd, ReadAfterLd]>;
}

multiclass sse4_pack_y<bits<8> opc, string OpcodeStr, ValueType OutVT,
                       ValueType ArgVT, SDNode OpNode, PatFrag bc_frag> {
  def Yrr : SS48I<opc, MRMSrcReg,
                  (outs VR256:$dst), (ins VR256:$src1, VR256:$src2),
                  !strconcat(OpcodeStr,
                             "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                  [(set VR256:$dst,
                        (OutVT (OpNode (ArgVT VR256:$src1), VR256:$src2)))]>,
                  Sched<[WriteShuffle]>;
  def Yrm : SS48I<opc, MRMSrcMem,
                  (outs VR256:$dst), (ins VR256:$src1, i256mem:$src2),
                  !strconcat(OpcodeStr,
                             "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                  [(set VR256:$dst,
                        (OutVT (OpNode VR256:$src1,
                                       (bc_frag (loadv4i64 addr:$src2)))))]>,
                  Sched<[WriteShuffleLd, ReadAfterLd]>;
}

let Predicates = [HasAVX] in {
  defm VPACKSSWB : sse2_pack<0x63, "vpacksswb", v16i8, v8i16, X86Packss,
                             bc_v8i16, loadv2i64, 0>, VEX_4V;
  defm VPACKSSDW : sse2_pack<0x6B, "vpackssdw", v8i16, v4i32, X86Packss,
                             bc_v4i32, loadv2i64, 0>, VEX_4V;

  defm VPACKUSWB : sse2_pack<0x67, "vpackuswb", v16i8, v8i16, X86Packus,
                             bc_v8i16, loadv2i64, 0>, VEX_4V;
  defm VPACKUSDW : sse4_pack<0x2B, "vpackusdw", v8i16, v4i32, X86Packus,
                             bc_v4i32, loadv2i64, 0>, VEX_4V;
}

let Predicates = [HasAVX2] in {
  defm VPACKSSWB : sse2_pack_y<0x63, "vpacksswb", v32i8, v16i16, X86Packss,
                               bc_v16i16>, VEX_4V, VEX_L;
  defm VPACKSSDW : sse2_pack_y<0x6B, "vpackssdw", v16i16, v8i32, X86Packss,
                               bc_v8i32>, VEX_4V, VEX_L;

  defm VPACKUSWB : sse2_pack_y<0x67, "vpackuswb", v32i8, v16i16, X86Packus,
                               bc_v16i16>, VEX_4V, VEX_L;
  defm VPACKUSDW : sse4_pack_y<0x2B, "vpackusdw", v16i16, v8i32, X86Packus,
                               bc_v8i32>, VEX_4V, VEX_L;
}

let Constraints = "$src1 = $dst" in {
  defm PACKSSWB : sse2_pack<0x63, "packsswb", v16i8, v8i16, X86Packss,
                            bc_v8i16, memopv2i64>;
  defm PACKSSDW : sse2_pack<0x6B, "packssdw", v8i16, v4i32, X86Packss,
                            bc_v4i32, memopv2i64>;

  defm PACKUSWB : sse2_pack<0x67, "packuswb", v16i8, v8i16, X86Packus,
                            bc_v8i16, memopv2i64>;

  let Predicates = [HasSSE41] in
  defm PACKUSDW : sse4_pack<0x2B, "packusdw", v8i16, v4i32, X86Packus,
                            bc_v4i32, memopv2i64>;
}
} // ExeDomain = SSEPackedInt
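
// Saturation example: "packsswb" narrows signed words to signed bytes, so an
// input word of 300 becomes 127 and -300 becomes -128; "packuswb" clamps to
// the unsigned byte range [0, 255] instead. The sse4_pack variants model the
// SSE4.1 PACKUSDW, which narrows dwords to unsigned words the same way.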

//===---------------------------------------------------------------------===//
// SSE2 - Packed Integer Unpack Instructions
//===---------------------------------------------------------------------===//

let ExeDomain = SSEPackedInt in {
multiclass sse2_unpack<bits<8> opc, string OpcodeStr, ValueType vt,
                       SDNode OpNode, PatFrag bc_frag, PatFrag ld_frag,
                       bit Is2Addr = 1> {
  def rr : PDI<opc, MRMSrcReg,
               (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
               !if(Is2Addr,
                   !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
                   !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
               [(set VR128:$dst, (vt (OpNode VR128:$src1, VR128:$src2)))],
               IIC_SSE_UNPCK>, Sched<[WriteShuffle]>;
  def rm : PDI<opc, MRMSrcMem,
               (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
               !if(Is2Addr,
                   !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
                   !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
               [(set VR128:$dst, (OpNode VR128:$src1,
                                         (bc_frag (ld_frag addr:$src2))))],
               IIC_SSE_UNPCK>,
               Sched<[WriteShuffleLd, ReadAfterLd]>;
}

multiclass sse2_unpack_y<bits<8> opc, string OpcodeStr, ValueType vt,
                         SDNode OpNode, PatFrag bc_frag> {
  def Yrr : PDI<opc, MRMSrcReg,
                (outs VR256:$dst), (ins VR256:$src1, VR256:$src2),
                !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                [(set VR256:$dst, (vt (OpNode VR256:$src1, VR256:$src2)))]>,
                Sched<[WriteShuffle]>;
  def Yrm : PDI<opc, MRMSrcMem,
                (outs VR256:$dst), (ins VR256:$src1, i256mem:$src2),
                !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                [(set VR256:$dst, (OpNode VR256:$src1,
                                          (bc_frag (loadv4i64 addr:$src2))))]>,
                Sched<[WriteShuffleLd, ReadAfterLd]>;
}

let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
  defm VPUNPCKLBW  : sse2_unpack<0x60, "vpunpcklbw", v16i8, X86Unpckl,
                                 bc_v16i8, loadv2i64, 0>, VEX_4V;
  defm VPUNPCKLWD  : sse2_unpack<0x61, "vpunpcklwd", v8i16, X86Unpckl,
                                 bc_v8i16, loadv2i64, 0>, VEX_4V;
  defm VPUNPCKHBW  : sse2_unpack<0x68, "vpunpckhbw", v16i8, X86Unpckh,
                                 bc_v16i8, loadv2i64, 0>, VEX_4V;
  defm VPUNPCKHWD  : sse2_unpack<0x69, "vpunpckhwd", v8i16, X86Unpckh,
                                 bc_v8i16, loadv2i64, 0>, VEX_4V;
}
let Predicates = [HasAVX, NoVLX] in {
  defm VPUNPCKLDQ  : sse2_unpack<0x62, "vpunpckldq", v4i32, X86Unpckl,
                                 bc_v4i32, loadv2i64, 0>, VEX_4V;
  defm VPUNPCKLQDQ : sse2_unpack<0x6C, "vpunpcklqdq", v2i64, X86Unpckl,
                                 bc_v2i64, loadv2i64, 0>, VEX_4V;
  defm VPUNPCKHDQ  : sse2_unpack<0x6A, "vpunpckhdq", v4i32, X86Unpckh,
                                 bc_v4i32, loadv2i64, 0>, VEX_4V;
  defm VPUNPCKHQDQ : sse2_unpack<0x6D, "vpunpckhqdq", v2i64, X86Unpckh,
                                 bc_v2i64, loadv2i64, 0>, VEX_4V;
}

let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in {
  defm VPUNPCKLBW  : sse2_unpack_y<0x60, "vpunpcklbw", v32i8, X86Unpckl,
                                   bc_v32i8>, VEX_4V, VEX_L;
  defm VPUNPCKLWD  : sse2_unpack_y<0x61, "vpunpcklwd", v16i16, X86Unpckl,
                                   bc_v16i16>, VEX_4V, VEX_L;
  defm VPUNPCKHBW  : sse2_unpack_y<0x68, "vpunpckhbw", v32i8, X86Unpckh,
                                   bc_v32i8>, VEX_4V, VEX_L;
  defm VPUNPCKHWD  : sse2_unpack_y<0x69, "vpunpckhwd", v16i16, X86Unpckh,
                                   bc_v16i16>, VEX_4V, VEX_L;
}
let Predicates = [HasAVX2, NoVLX] in {
  defm VPUNPCKLDQ  : sse2_unpack_y<0x62, "vpunpckldq", v8i32, X86Unpckl,
                                   bc_v8i32>, VEX_4V, VEX_L;
  defm VPUNPCKLQDQ : sse2_unpack_y<0x6C, "vpunpcklqdq", v4i64, X86Unpckl,
                                   bc_v4i64>, VEX_4V, VEX_L;
  defm VPUNPCKHDQ  : sse2_unpack_y<0x6A, "vpunpckhdq", v8i32, X86Unpckh,
                                   bc_v8i32>, VEX_4V, VEX_L;
  defm VPUNPCKHQDQ : sse2_unpack_y<0x6D, "vpunpckhqdq", v4i64, X86Unpckh,
                                   bc_v4i64>, VEX_4V, VEX_L;
}

let Constraints = "$src1 = $dst" in {
  defm PUNPCKLBW  : sse2_unpack<0x60, "punpcklbw", v16i8, X86Unpckl,
                                bc_v16i8, memopv2i64>;
  defm PUNPCKLWD  : sse2_unpack<0x61, "punpcklwd", v8i16, X86Unpckl,
                                bc_v8i16, memopv2i64>;
  defm PUNPCKLDQ  : sse2_unpack<0x62, "punpckldq", v4i32, X86Unpckl,
                                bc_v4i32, memopv2i64>;
  defm PUNPCKLQDQ : sse2_unpack<0x6C, "punpcklqdq", v2i64, X86Unpckl,
                                bc_v2i64, memopv2i64>;

  defm PUNPCKHBW  : sse2_unpack<0x68, "punpckhbw", v16i8, X86Unpckh,
                                bc_v16i8, memopv2i64>;
  defm PUNPCKHWD  : sse2_unpack<0x69, "punpckhwd", v8i16, X86Unpckh,
                                bc_v8i16, memopv2i64>;
  defm PUNPCKHDQ  : sse2_unpack<0x6A, "punpckhdq", v4i32, X86Unpckh,
                                bc_v4i32, memopv2i64>;
  defm PUNPCKHQDQ : sse2_unpack<0x6D, "punpckhqdq", v2i64, X86Unpckh,
                                bc_v2i64, memopv2i64>;
}
} // ExeDomain = SSEPackedInt
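
// Interleave example: "punpcklbw %xmm1, %xmm0" produces
// (x0, y0, x1, y1, ..., x7, y7) from the low eight bytes of %xmm0 (x) and
// %xmm1 (y); the "h" forms do the same with the high halves. As with the
// shifts above, the 256-bit forms interleave each 128-bit lane separately.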

//===---------------------------------------------------------------------===//
// SSE2 - Packed Integer Extract and Insert
//===---------------------------------------------------------------------===//

let ExeDomain = SSEPackedInt in {
multiclass sse2_pinsrw<bit Is2Addr = 1> {
  def rri : Ii8<0xC4, MRMSrcReg,
                (outs VR128:$dst), (ins VR128:$src1,
                 GR32orGR64:$src2, u8imm:$src3),
                !if(Is2Addr,
                    "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                    "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
                [(set VR128:$dst,
                  (X86pinsrw VR128:$src1, GR32orGR64:$src2, imm:$src3))],
                IIC_SSE_PINSRW>, Sched<[WriteShuffle]>;
  def rmi : Ii8<0xC4, MRMSrcMem,
                (outs VR128:$dst), (ins VR128:$src1,
                 i16mem:$src2, u8imm:$src3),
                !if(Is2Addr,
                    "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                    "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
                [(set VR128:$dst,
                  (X86pinsrw VR128:$src1, (extloadi16 addr:$src2),
                             imm:$src3))], IIC_SSE_PINSRW>,
                Sched<[WriteShuffleLd, ReadAfterLd]>;
}

// Extract
let Predicates = [HasAVX, NoBWI] in
def VPEXTRWri : Ii8<0xC5, MRMSrcReg,
                    (outs GR32orGR64:$dst), (ins VR128:$src1, u8imm:$src2),
                    "vpextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    [(set GR32orGR64:$dst, (X86pextrw (v8i16 VR128:$src1),
                                                      imm:$src2))]>, PD, VEX,
                    Sched<[WriteShuffle]>;
def PEXTRWri : PDIi8<0xC5, MRMSrcReg,
                     (outs GR32orGR64:$dst), (ins VR128:$src1, u8imm:$src2),
                     "pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                     [(set GR32orGR64:$dst, (X86pextrw (v8i16 VR128:$src1),
                                                       imm:$src2))], IIC_SSE_PEXTRW>,
                     Sched<[WriteShuffleLd, ReadAfterLd]>;

// Insert
let Predicates = [HasAVX, NoBWI] in
defm VPINSRW : sse2_pinsrw<0>, PD, VEX_4V;

let Predicates = [UseSSE2], Constraints = "$src1 = $dst" in
defm PINSRW : sse2_pinsrw, PD;

} // ExeDomain = SSEPackedInt
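
// Example: "pinsrw $3, %eax, %xmm0" replaces word element 3 of %xmm0 with the
// low 16 bits of %eax, while "pextrw $3, %xmm0, %eax" zero-extends word
// element 3 into %eax; the GPR operand is modeled as GR32orGR64 because the
// upper GPR bits are ignored on insert and zeroed on extract.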

//===---------------------------------------------------------------------===//
// SSE2 - Packed Mask Creation
//===---------------------------------------------------------------------===//

let ExeDomain = SSEPackedInt, SchedRW = [WriteVecLogic] in {

def VPMOVMSKBrr : VPDI<0xD7, MRMSrcReg, (outs GR32orGR64:$dst),
                       (ins VR128:$src),
                       "pmovmskb\t{$src, $dst|$dst, $src}",
                       [(set GR32orGR64:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))],
                       IIC_SSE_MOVMSK>, VEX;

let Predicates = [HasAVX2] in {
def VPMOVMSKBYrr : VPDI<0xD7, MRMSrcReg, (outs GR32orGR64:$dst),
                        (ins VR256:$src),
                        "pmovmskb\t{$src, $dst|$dst, $src}",
                        [(set GR32orGR64:$dst, (int_x86_avx2_pmovmskb VR256:$src))]>,
                        VEX, VEX_L;
}

def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (outs GR32orGR64:$dst), (ins VR128:$src),
                     "pmovmskb\t{$src, $dst|$dst, $src}",
                     [(set GR32orGR64:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))],
                     IIC_SSE_MOVMSK>;

} // ExeDomain = SSEPackedInt
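
// Example: "pmovmskb %xmm0, %eax" gathers the sign bit of each of the 16
// bytes into the low 16 bits of %eax (32 bits for the YMM form); it is
// commonly fed into TEST/BSF to scan the result of a vector comparison.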

//===---------------------------------------------------------------------===//
// SSE2 - Conditional Store
//===---------------------------------------------------------------------===//

let ExeDomain = SSEPackedInt, SchedRW = [WriteStore] in {

let Uses = [EDI], Predicates = [HasAVX,Not64BitMode] in
def VMASKMOVDQU : VPDI<0xF7, MRMSrcReg, (outs),
                       (ins VR128:$src, VR128:$mask),
                       "maskmovdqu\t{$mask, $src|$src, $mask}",
                       [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)],
                       IIC_SSE_MASKMOV>, VEX;
let Uses = [RDI], Predicates = [HasAVX,In64BitMode] in
def VMASKMOVDQU64 : VPDI<0xF7, MRMSrcReg, (outs),
                         (ins VR128:$src, VR128:$mask),
                         "maskmovdqu\t{$mask, $src|$src, $mask}",
                         [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)],
                         IIC_SSE_MASKMOV>, VEX;

let Uses = [EDI], Predicates = [UseSSE2,Not64BitMode] in
def MASKMOVDQU : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
                     "maskmovdqu\t{$mask, $src|$src, $mask}",
                     [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)],
                     IIC_SSE_MASKMOV>;
let Uses = [RDI], Predicates = [UseSSE2,In64BitMode] in
def MASKMOVDQU64 : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
                       "maskmovdqu\t{$mask, $src|$src, $mask}",
                       [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)],
                       IIC_SSE_MASKMOV>;

} // ExeDomain = SSEPackedInt
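
// MASKMOVDQU stores only the bytes of $src whose corresponding byte in $mask
// has its high bit set, to the address implicitly held in EDI/RDI; that
// implicit register is why the address is modeled with Uses rather than as an
// explicit memory operand, and why separate 32/64-bit-mode defs exist.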

//===---------------------------------------------------------------------===//
// SSE2 - Move Doubleword/Quadword
//===---------------------------------------------------------------------===//

//===---------------------------------------------------------------------===//
// Move Int Doubleword to Packed Double Int
//
def VMOVDI2PDIrr : VS2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
                        "movd\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                          (v4i32 (scalar_to_vector GR32:$src)))], IIC_SSE_MOVDQ>,
                        VEX, Sched<[WriteMove]>;
def VMOVDI2PDIrm : VS2I<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
                        "movd\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                          (v4i32 (scalar_to_vector (loadi32 addr:$src))))],
                        IIC_SSE_MOVDQ>,
                        VEX, Sched<[WriteLoad]>;
def VMOV64toPQIrr : VRS2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
                          "movq\t{$src, $dst|$dst, $src}",
                          [(set VR128:$dst,
                            (v2i64 (scalar_to_vector GR64:$src)))],
                          IIC_SSE_MOVDQ>, VEX, Sched<[WriteMove]>;
let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0, mayLoad = 1 in
def VMOV64toPQIrm : VRS2I<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                          "movq\t{$src, $dst|$dst, $src}",
                          [], IIC_SSE_MOVDQ>, VEX, Sched<[WriteLoad]>;
let isCodeGenOnly = 1 in
def VMOV64toSDrr : VRS2I<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
                         "movq\t{$src, $dst|$dst, $src}",
                         [(set FR64:$dst, (bitconvert GR64:$src))],
                         IIC_SSE_MOVDQ>, VEX, Sched<[WriteMove]>;

def MOVDI2PDIrr : S2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                        (v4i32 (scalar_to_vector GR32:$src)))], IIC_SSE_MOVDQ>,
                      Sched<[WriteMove]>;
def MOVDI2PDIrm : S2I<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                        (v4i32 (scalar_to_vector (loadi32 addr:$src))))],
                      IIC_SSE_MOVDQ>, Sched<[WriteLoad]>;
def MOV64toPQIrr : RS2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
                        "mov{d|q}\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                          (v2i64 (scalar_to_vector GR64:$src)))],
                        IIC_SSE_MOVDQ>, Sched<[WriteMove]>;
let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0, mayLoad = 1 in
def MOV64toPQIrm : RS2I<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                        "mov{d|q}\t{$src, $dst|$dst, $src}",
                        [], IIC_SSE_MOVDQ>, Sched<[WriteLoad]>;
let isCodeGenOnly = 1 in
def MOV64toSDrr : RS2I<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
                       "mov{d|q}\t{$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (bitconvert GR64:$src))],
                       IIC_SSE_MOVDQ>, Sched<[WriteMove]>;

//===---------------------------------------------------------------------===//
// Move Int Doubleword to Single Scalar
//
let isCodeGenOnly = 1 in {
  def VMOVDI2SSrr : VS2I<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
                         "movd\t{$src, $dst|$dst, $src}",
                         [(set FR32:$dst, (bitconvert GR32:$src))],
                         IIC_SSE_MOVDQ>, VEX, Sched<[WriteMove]>;

  def VMOVDI2SSrm : VS2I<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
                         "movd\t{$src, $dst|$dst, $src}",
                         [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))],
                         IIC_SSE_MOVDQ>,
                         VEX, Sched<[WriteLoad]>;
  def MOVDI2SSrr : S2I<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
                       "movd\t{$src, $dst|$dst, $src}",
                       [(set FR32:$dst, (bitconvert GR32:$src))],
                       IIC_SSE_MOVDQ>, Sched<[WriteMove]>;

  def MOVDI2SSrm : S2I<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
                       "movd\t{$src, $dst|$dst, $src}",
                       [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))],
                       IIC_SSE_MOVDQ>, Sched<[WriteLoad]>;
}

//===---------------------------------------------------------------------===//
// Move Packed Doubleword Int to Packed Double Int
//
def VMOVPDI2DIrr : VS2I<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
                        "movd\t{$src, $dst|$dst, $src}",
                        [(set GR32:$dst, (extractelt (v4i32 VR128:$src),
                                                     (iPTR 0)))], IIC_SSE_MOVD_ToGP>, VEX,
                        Sched<[WriteMove]>;
def VMOVPDI2DImr : VS2I<0x7E, MRMDestMem, (outs),
                        (ins i32mem:$dst, VR128:$src),
                        "movd\t{$src, $dst|$dst, $src}",
                        [(store (i32 (extractelt (v4i32 VR128:$src),
                                                 (iPTR 0))), addr:$dst)], IIC_SSE_MOVDQ>,
                        VEX, Sched<[WriteStore]>;
def MOVPDI2DIrr : S2I<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set GR32:$dst, (extractelt (v4i32 VR128:$src),
                                                   (iPTR 0)))], IIC_SSE_MOVD_ToGP>,
                      Sched<[WriteMove]>;
def MOVPDI2DImr : S2I<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR128:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(store (i32 (extractelt (v4i32 VR128:$src),
                                               (iPTR 0))), addr:$dst)],
                      IIC_SSE_MOVDQ>, Sched<[WriteStore]>;

def : Pat<(v8i32 (X86Vinsert (v8i32 immAllZerosV), GR32:$src2, (iPTR 0))),
          (SUBREG_TO_REG (i32 0), (VMOVDI2PDIrr GR32:$src2), sub_xmm)>;

def : Pat<(v4i64 (X86Vinsert (bc_v4i64 (v8i32 immAllZerosV)), GR64:$src2, (iPTR 0))),
          (SUBREG_TO_REG (i32 0), (VMOV64toPQIrr GR64:$src2), sub_xmm)>;

def : Pat<(v8i32 (X86Vinsert undef, GR32:$src2, (iPTR 0))),
          (SUBREG_TO_REG (i32 0), (VMOVDI2PDIrr GR32:$src2), sub_xmm)>;

def : Pat<(v4i64 (X86Vinsert undef, GR64:$src2, (iPTR 0))),
          (SUBREG_TO_REG (i32 0), (VMOV64toPQIrr GR64:$src2), sub_xmm)>;

//===---------------------------------------------------------------------===//
// Move Packed Doubleword Int first element to Doubleword Int
//
let SchedRW = [WriteMove] in {
def VMOVPQIto64rr : VRS2I<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
                          "movq\t{$src, $dst|$dst, $src}",
                          [(set GR64:$dst, (extractelt (v2i64 VR128:$src),
                                                       (iPTR 0)))],
                          IIC_SSE_MOVD_ToGP>,
                          VEX;

def MOVPQIto64rr : RS2I<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
                        "mov{d|q}\t{$src, $dst|$dst, $src}",
                        [(set GR64:$dst, (extractelt (v2i64 VR128:$src),
                                                     (iPTR 0)))],
                        IIC_SSE_MOVD_ToGP>;
} // SchedRW

let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0, mayStore = 1 in
def VMOVPQIto64rm : VRS2I<0x7E, MRMDestMem, (outs),
                          (ins i64mem:$dst, VR128:$src),
                          "movq\t{$src, $dst|$dst, $src}",
                          [], IIC_SSE_MOVDQ>, VEX, Sched<[WriteStore]>;
let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0, mayStore = 1 in
def MOVPQIto64rm : RS2I<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
                        "mov{d|q}\t{$src, $dst|$dst, $src}",
                        [], IIC_SSE_MOVDQ>, Sched<[WriteStore]>;

//===---------------------------------------------------------------------===//
// Bitcast FR64 <-> GR64
//
let isCodeGenOnly = 1 in {
  let Predicates = [UseAVX] in
  def VMOV64toSDrm : VS2SI<0x7E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
                           "movq\t{$src, $dst|$dst, $src}",
                           [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))]>,
                           VEX, Sched<[WriteLoad]>;
  def VMOVSDto64rr : VRS2I<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
                           "movq\t{$src, $dst|$dst, $src}",
                           [(set GR64:$dst, (bitconvert FR64:$src))],
                           IIC_SSE_MOVDQ>, VEX, Sched<[WriteMove]>;
  def VMOVSDto64mr : VRS2I<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
                           "movq\t{$src, $dst|$dst, $src}",
                           [(store (i64 (bitconvert FR64:$src)), addr:$dst)],
                           IIC_SSE_MOVDQ>, VEX, Sched<[WriteStore]>;

  def MOV64toSDrm : S2SI<0x7E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
                         "movq\t{$src, $dst|$dst, $src}",
                         [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))],
                         IIC_SSE_MOVDQ>, Sched<[WriteLoad]>;
  def MOVSDto64rr : RS2I<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
                         "mov{d|q}\t{$src, $dst|$dst, $src}",
                         [(set GR64:$dst, (bitconvert FR64:$src))],
                         IIC_SSE_MOVD_ToGP>, Sched<[WriteMove]>;
  def MOVSDto64mr : RS2I<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
                         "movq\t{$src, $dst|$dst, $src}",
                         [(store (i64 (bitconvert FR64:$src)), addr:$dst)],
                         IIC_SSE_MOVDQ>, Sched<[WriteStore]>;
}

//===---------------------------------------------------------------------===//
// Move Scalar Single to Double Int
//
let isCodeGenOnly = 1 in {
  def VMOVSS2DIrr : VS2I<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
                         "movd\t{$src, $dst|$dst, $src}",
                         [(set GR32:$dst, (bitconvert FR32:$src))],
                         IIC_SSE_MOVD_ToGP>, VEX, Sched<[WriteMove]>;
  def VMOVSS2DImr : VS2I<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
                         "movd\t{$src, $dst|$dst, $src}",
                         [(store (i32 (bitconvert FR32:$src)), addr:$dst)],
                         IIC_SSE_MOVDQ>, VEX, Sched<[WriteStore]>;
  def MOVSS2DIrr : S2I<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
                       "movd\t{$src, $dst|$dst, $src}",
                       [(set GR32:$dst, (bitconvert FR32:$src))],
                       IIC_SSE_MOVD_ToGP>, Sched<[WriteMove]>;
  def MOVSS2DImr : S2I<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
                       "movd\t{$src, $dst|$dst, $src}",
                       [(store (i32 (bitconvert FR32:$src)), addr:$dst)],
                       IIC_SSE_MOVDQ>, Sched<[WriteStore]>;
}

let Predicates = [UseAVX] in {
  let AddedComplexity = 15 in {
    def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector GR32:$src)))),
              (VMOVDI2PDIrr GR32:$src)>;

    def : Pat<(v2i64 (X86vzmovl (v2i64 (scalar_to_vector GR64:$src)))),
              (VMOV64toPQIrr GR64:$src)>;

    def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
                       (v2i64 (scalar_to_vector GR64:$src)),(iPTR 0)))),
              (SUBREG_TO_REG (i64 0), (VMOV64toPQIrr GR64:$src), sub_xmm)>;
  }
  // AVX 128-bit movd/movq instructions write zeros in the high 128-bit part.
  // These instructions also write zeros in the high part of a 256-bit register.
  let AddedComplexity = 20 in {
    def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector (loadi32 addr:$src))))),
              (VMOVDI2PDIrm addr:$src)>;
    def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
              (VMOVDI2PDIrm addr:$src)>;
    def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
              (VMOVDI2PDIrm addr:$src)>;
    def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
                       (v4i32 (scalar_to_vector (loadi32 addr:$src))), (iPTR 0)))),
              (SUBREG_TO_REG (i32 0), (VMOVDI2PDIrm addr:$src), sub_xmm)>;

    // Use regular 128-bit instructions to match 256-bit scalar_to_vec+zext.
    def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
                       (v4i32 (scalar_to_vector GR32:$src)),(iPTR 0)))),
              (SUBREG_TO_REG (i32 0), (VMOVDI2PDIrr GR32:$src), sub_xmm)>;
  }
}

let Predicates = [UseSSE2] in {
  let AddedComplexity = 15 in {
    def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector GR32:$src)))),
              (MOVDI2PDIrr GR32:$src)>;

    def : Pat<(v2i64 (X86vzmovl (v2i64 (scalar_to_vector GR64:$src)))),
              (MOV64toPQIrr GR64:$src)>;
  }
  let AddedComplexity = 20 in {
    def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector (loadi32 addr:$src))))),
              (MOVDI2PDIrm addr:$src)>;
    def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
              (MOVDI2PDIrm addr:$src)>;
    def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
              (MOVDI2PDIrm addr:$src)>;
  }
}

// These are the correct encodings of the instructions so that we know how to
// read correct assembly, even though we continue to emit the wrong ones for
// compatibility with Darwin's buggy assembler.
def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
                (MOV64toPQIrr VR128:$dst, GR64:$src), 0>;
def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
                (MOVPQIto64rr GR64:$dst, VR128:$src), 0>;
// Allow "vmovd" but print "vmovq" since we don't need compatibility for AVX.
def : InstAlias<"vmovd\t{$src, $dst|$dst, $src}",
                (VMOV64toPQIrr VR128:$dst, GR64:$src), 0>;
def : InstAlias<"vmovd\t{$src, $dst|$dst, $src}",
                (VMOVPQIto64rr GR64:$dst, VR128:$src), 0>;

//===---------------------------------------------------------------------===//
// SSE2 - Move Quadword
//===---------------------------------------------------------------------===//

//===---------------------------------------------------------------------===//
// Move Quadword Int to Packed Quadword Int
//

let ExeDomain = SSEPackedInt, SchedRW = [WriteLoad] in {
def VMOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                     "vmovq\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst,
                       (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
                     VEX, Requires<[UseAVX]>;
def MOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                    "movq\t{$src, $dst|$dst, $src}",
                    [(set VR128:$dst,
                      (v2i64 (scalar_to_vector (loadi64 addr:$src))))],
                    IIC_SSE_MOVDQ>, XS,
                    Requires<[UseSSE2]>; // SSE2 instruction with XS Prefix
} // ExeDomain, SchedRW

//===---------------------------------------------------------------------===//
// Move Packed Quadword Int to Quadword Int
//
let ExeDomain = SSEPackedInt, SchedRW = [WriteStore] in {
def VMOVPQI2QImr : VS2I<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
                        "movq\t{$src, $dst|$dst, $src}",
                        [(store (i64 (extractelt (v2i64 VR128:$src),
                                                 (iPTR 0))), addr:$dst)],
                        IIC_SSE_MOVDQ>, VEX;
def MOVPQI2QImr : S2I<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
                      "movq\t{$src, $dst|$dst, $src}",
                      [(store (i64 (extractelt (v2i64 VR128:$src),
                                               (iPTR 0))), addr:$dst)],
                      IIC_SSE_MOVDQ>;
} // ExeDomain, SchedRW

// For disassembler only
let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0,
    SchedRW = [WriteVecLogic] in {
def VMOVPQI2QIrr : VS2I<0xD6, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
                        "movq\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVQ_RR>, VEX;
def MOVPQI2QIrr : S2I<0xD6, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
                      "movq\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVQ_RR>;
}

//===---------------------------------------------------------------------===//
// Store / copy lower 64-bits of a XMM register.
//
let Predicates = [HasAVX] in
def : Pat<(int_x86_sse2_storel_dq addr:$dst, VR128:$src),
          (VMOVPQI2QImr addr:$dst, VR128:$src)>;
let Predicates = [UseSSE2] in
def : Pat<(int_x86_sse2_storel_dq addr:$dst, VR128:$src),
          (MOVPQI2QImr addr:$dst, VR128:$src)>;

let ExeDomain = SSEPackedInt, isCodeGenOnly = 1, AddedComplexity = 20 in {
def VMOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                      "vmovq\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                        (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
                                                  (loadi64 addr:$src))))))],
                      IIC_SSE_MOVDQ>,
                      XS, VEX, Requires<[UseAVX]>, Sched<[WriteLoad]>;

def MOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                     "movq\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst,
                       (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
                                                 (loadi64 addr:$src))))))],
                     IIC_SSE_MOVDQ>,
                     XS, Requires<[UseSSE2]>, Sched<[WriteLoad]>;
} // ExeDomain, isCodeGenOnly, AddedComplexity

let Predicates = [UseAVX], AddedComplexity = 20 in {
  def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4f32 addr:$src)))),
            (VMOVZQI2PQIrm addr:$src)>;
  def : Pat<(v2i64 (X86vzload addr:$src)),
            (VMOVZQI2PQIrm addr:$src)>;
  def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
              (v2i64 (scalar_to_vector (loadi64 addr:$src))), (iPTR 0)))),
            (SUBREG_TO_REG (i64 0), (VMOVZQI2PQIrm addr:$src), sub_xmm)>;
}

let Predicates = [UseSSE2], AddedComplexity = 20 in {
  def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4f32 addr:$src)))),
            (MOVZQI2PQIrm addr:$src)>;
  def : Pat<(v2i64 (X86vzload addr:$src)), (MOVZQI2PQIrm addr:$src)>;
}

let Predicates = [HasAVX] in {
  def : Pat<(v4i64 (alignedX86vzload addr:$src)),
            (SUBREG_TO_REG (i32 0), (VMOVAPSrm addr:$src), sub_xmm)>;
  def : Pat<(v4i64 (X86vzload addr:$src)),
            (SUBREG_TO_REG (i32 0), (VMOVUPSrm addr:$src), sub_xmm)>;
}

//===---------------------------------------------------------------------===//
// Moving from XMM to XMM and clearing the upper 64 bits. Note: there is a
// bug in the IA-32 documentation; movq xmm1, xmm2 does clear the high bits.
//
let ExeDomain = SSEPackedInt, SchedRW = [WriteVecLogic] in {
let AddedComplexity = 15 in
def VMOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                         "vmovq\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))],
                         IIC_SSE_MOVQ_RR>,
                         XS, VEX, Requires<[UseAVX]>;
let AddedComplexity = 15 in
def MOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "movq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))],
                        IIC_SSE_MOVQ_RR>,
                        XS, Requires<[UseSSE2]>;
} // ExeDomain, SchedRW

let ExeDomain = SSEPackedInt, isCodeGenOnly = 1, SchedRW = [WriteVecLogicLd] in {
let AddedComplexity = 20 in
def VMOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                         "vmovq\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst, (v2i64 (X86vzmovl
                                                   (loadv2i64 addr:$src))))],
                         IIC_SSE_MOVDQ>,
                         XS, VEX, Requires<[UseAVX]>;
let AddedComplexity = 20 in {
def MOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                        "movq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (v2i64 (X86vzmovl
                                                  (loadv2i64 addr:$src))))],
                        IIC_SSE_MOVDQ>,
                        XS, Requires<[UseSSE2]>;
}
} // ExeDomain, isCodeGenOnly, SchedRW

let AddedComplexity = 20 in {
  let Predicates = [UseAVX] in {
    def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
              (VMOVZPQILo2PQIrr VR128:$src)>;
  }
  let Predicates = [UseSSE2] in {
    def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
              (MOVZPQILo2PQIrr VR128:$src)>;
  }
}

//===---------------------------------------------------------------------===//
// SSE3 - Replicate Single FP - MOVSHDUP and MOVSLDUP
//===---------------------------------------------------------------------===//
multiclass sse3_replicate_sfp<bits<8> op, SDNode OpNode, string OpcodeStr,
                              ValueType vt, RegisterClass RC, PatFrag mem_frag,
                              X86MemOperand x86memop> {
def rr : S3SI<op, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
              !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
              [(set RC:$dst, (vt (OpNode RC:$src)))],
              IIC_SSE_MOV_LH>, Sched<[WriteFShuffle]>;
def rm : S3SI<op, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
              !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
              [(set RC:$dst, (OpNode (mem_frag addr:$src)))],
              IIC_SSE_MOV_LH>, Sched<[WriteLoad]>;
}

let Predicates = [HasAVX, NoVLX] in {
  defm VMOVSHDUP  : sse3_replicate_sfp<0x16, X86Movshdup, "vmovshdup",
                                       v4f32, VR128, loadv4f32, f128mem>, VEX;
  defm VMOVSLDUP  : sse3_replicate_sfp<0x12, X86Movsldup, "vmovsldup",
                                       v4f32, VR128, loadv4f32, f128mem>, VEX;
  defm VMOVSHDUPY : sse3_replicate_sfp<0x16, X86Movshdup, "vmovshdup",
                                       v8f32, VR256, loadv8f32, f256mem>, VEX, VEX_L;
  defm VMOVSLDUPY : sse3_replicate_sfp<0x12, X86Movsldup, "vmovsldup",
                                       v8f32, VR256, loadv8f32, f256mem>, VEX, VEX_L;
}
defm MOVSHDUP : sse3_replicate_sfp<0x16, X86Movshdup, "movshdup", v4f32, VR128,
                                   memopv4f32, f128mem>;
defm MOVSLDUP : sse3_replicate_sfp<0x12, X86Movsldup, "movsldup", v4f32, VR128,
                                   memopv4f32, f128mem>;

let Predicates = [HasAVX, NoVLX] in {
  def : Pat<(v4i32 (X86Movshdup VR128:$src)),
            (VMOVSHDUPrr VR128:$src)>;
  def : Pat<(v4i32 (X86Movshdup (bc_v4i32 (loadv2i64 addr:$src)))),
            (VMOVSHDUPrm addr:$src)>;
  def : Pat<(v4i32 (X86Movsldup VR128:$src)),
            (VMOVSLDUPrr VR128:$src)>;
  def : Pat<(v4i32 (X86Movsldup (bc_v4i32 (loadv2i64 addr:$src)))),
            (VMOVSLDUPrm addr:$src)>;
  def : Pat<(v8i32 (X86Movshdup VR256:$src)),
            (VMOVSHDUPYrr VR256:$src)>;
  def : Pat<(v8i32 (X86Movshdup (bc_v8i32 (loadv4i64 addr:$src)))),
            (VMOVSHDUPYrm addr:$src)>;
  def : Pat<(v8i32 (X86Movsldup VR256:$src)),
            (VMOVSLDUPYrr VR256:$src)>;
  def : Pat<(v8i32 (X86Movsldup (bc_v8i32 (loadv4i64 addr:$src)))),
            (VMOVSLDUPYrm addr:$src)>;
}

let Predicates = [UseSSE3] in {
  def : Pat<(v4i32 (X86Movshdup VR128:$src)),
            (MOVSHDUPrr VR128:$src)>;
  def : Pat<(v4i32 (X86Movshdup (bc_v4i32 (memopv2i64 addr:$src)))),
            (MOVSHDUPrm addr:$src)>;
  def : Pat<(v4i32 (X86Movsldup VR128:$src)),
            (MOVSLDUPrr VR128:$src)>;
  def : Pat<(v4i32 (X86Movsldup (bc_v4i32 (memopv2i64 addr:$src)))),
            (MOVSLDUPrm addr:$src)>;
}
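
// Example: "movshdup %xmm1, %xmm0" yields (s1, s1, s3, s3) from
// (s0, s1, s2, s3), and "movsldup" yields (s0, s0, s2, s2); the integer-typed
// patterns above reuse these float shuffles for v4i32/v8i32 data with the
// same lane arrangement.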

//===---------------------------------------------------------------------===//
// SSE3 - Replicate Double FP - MOVDDUP
//===---------------------------------------------------------------------===//

multiclass sse3_replicate_dfp<string OpcodeStr> {
def rr : S3DI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
              !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
              [(set VR128:$dst, (v2f64 (X86Movddup VR128:$src)))],
              IIC_SSE_MOV_LH>, Sched<[WriteFShuffle]>;
def rm : S3DI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
              !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
              [(set VR128:$dst,
                (v2f64 (X86Movddup
                        (scalar_to_vector (loadf64 addr:$src)))))],
              IIC_SSE_MOV_LH>, Sched<[WriteLoad]>;
}

// FIXME: Merge with the above class once there are patterns for the ymm version
multiclass sse3_replicate_dfp_y<string OpcodeStr> {
def rr : S3DI<0x12, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
              !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
              [(set VR256:$dst, (v4f64 (X86Movddup VR256:$src)))]>,
              Sched<[WriteFShuffle]>;
def rm : S3DI<0x12, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
              !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
              [(set VR256:$dst,
                (v4f64 (X86Movddup (loadv4f64 addr:$src))))]>,
              Sched<[WriteLoad]>;
}

let Predicates = [HasAVX, NoVLX] in {
  defm VMOVDDUP  : sse3_replicate_dfp<"vmovddup">, VEX;
  defm VMOVDDUPY : sse3_replicate_dfp_y<"vmovddup">, VEX, VEX_L;
}

defm MOVDDUP : sse3_replicate_dfp<"movddup">;

let Predicates = [HasAVX, NoVLX] in {
  def : Pat<(X86Movddup (loadv2f64 addr:$src)),
            (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;

  // 256-bit version
  def : Pat<(X86Movddup (loadv4i64 addr:$src)),
            (VMOVDDUPYrm addr:$src)>;
  def : Pat<(X86Movddup (v4i64 VR256:$src)),
            (VMOVDDUPYrr VR256:$src)>;
}

let Predicates = [HasAVX] in {
  def : Pat<(X86Movddup (bc_v2f64 (loadv4f32 addr:$src))),
            (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
  def : Pat<(X86Movddup (bc_v2f64 (loadv2i64 addr:$src))),
            (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
  def : Pat<(X86Movddup (bc_v2f64
                         (v2i64 (scalar_to_vector (loadi64 addr:$src))))),
            (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
}

let Predicates = [UseAVX, OptForSize] in {
  def : Pat<(v2f64 (X86VBroadcast (loadf64 addr:$src))),
            (VMOVDDUPrm addr:$src)>;
  def : Pat<(v2i64 (X86VBroadcast (loadi64 addr:$src))),
            (VMOVDDUPrm addr:$src)>;
}

let Predicates = [UseSSE3] in {
  def : Pat<(X86Movddup (memopv2f64 addr:$src)),
            (MOVDDUPrm addr:$src)>;
  def : Pat<(X86Movddup (bc_v2f64 (memopv4f32 addr:$src))),
            (MOVDDUPrm addr:$src)>;
  def : Pat<(X86Movddup (bc_v2f64 (memopv2i64 addr:$src))),
            (MOVDDUPrm addr:$src)>;
  def : Pat<(X86Movddup (bc_v2f64
                         (v2i64 (scalar_to_vector (loadi64 addr:$src))))),
            (MOVDDUPrm addr:$src)>;
}
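
// Example: "movddup %xmm1, %xmm0" broadcasts the low double of %xmm1 into
// both halves of %xmm0, which is why a 64-bit load (f64mem) suffices for the
// memory form and why the OptForSize patterns above use it to implement a
// v2f64/v2i64 broadcast.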

//===---------------------------------------------------------------------===//
// SSE3 - Move Unaligned Integer
//===---------------------------------------------------------------------===//

let SchedRW = [WriteLoad] in {
let Predicates = [HasAVX] in {
  def VLDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                      "vlddqu\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>, VEX;
  def VLDDQUYrm : S3DI<0xF0, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
                       "vlddqu\t{$src, $dst|$dst, $src}",
                       [(set VR256:$dst, (int_x86_avx_ldu_dq_256 addr:$src))]>,
                       VEX, VEX_L;
}
def LDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                   "lddqu\t{$src, $dst|$dst, $src}",
                   [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))],
                   IIC_SSE_LDDQU>;
}

//===---------------------------------------------------------------------===//
// SSE3 - Arithmetic
//===---------------------------------------------------------------------===//

multiclass sse3_addsub<Intrinsic Int, string OpcodeStr, RegisterClass RC,
                       X86MemOperand x86memop, OpndItins itins,
                       PatFrag ld_frag, bit Is2Addr = 1> {
  def rr : I<0xD0, MRMSrcReg,
             (outs RC:$dst), (ins RC:$src1, RC:$src2),
             !if(Is2Addr,
                 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
             [(set RC:$dst, (Int RC:$src1, RC:$src2))], itins.rr>,
             Sched<[itins.Sched]>;
  def rm : I<0xD0, MRMSrcMem,
             (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
             !if(Is2Addr,
                 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
             [(set RC:$dst, (Int RC:$src1, (ld_frag addr:$src2)))], itins.rm>,
             Sched<[itins.Sched.Folded, ReadAfterLd]>;
}

let Predicates = [HasAVX] in {
  let ExeDomain = SSEPackedSingle in {
    defm VADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "vaddsubps", VR128,
                                 f128mem, SSE_ALU_F32P, loadv4f32, 0>, XD, VEX_4V;
    defm VADDSUBPSY : sse3_addsub<int_x86_avx_addsub_ps_256, "vaddsubps", VR256,
                                  f256mem, SSE_ALU_F32P, loadv8f32, 0>, XD, VEX_4V, VEX_L;
  }
  let ExeDomain = SSEPackedDouble in {
    defm VADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "vaddsubpd", VR128,
                                 f128mem, SSE_ALU_F64P, loadv2f64, 0>, PD, VEX_4V;
    defm VADDSUBPDY : sse3_addsub<int_x86_avx_addsub_pd_256, "vaddsubpd", VR256,
                                  f256mem, SSE_ALU_F64P, loadv4f64, 0>, PD, VEX_4V, VEX_L;
  }
}
let Constraints = "$src1 = $dst", Predicates = [UseSSE3] in {
  let ExeDomain = SSEPackedSingle in
  defm ADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "addsubps", VR128,
                              f128mem, SSE_ALU_F32P, memopv4f32>, XD;
  let ExeDomain = SSEPackedDouble in
  defm ADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "addsubpd", VR128,
                              f128mem, SSE_ALU_F64P, memopv2f64>, PD;
}

// Patterns used to select 'addsub' instructions.
let Predicates = [HasAVX] in {
  def : Pat<(v4f32 (X86Addsub (v4f32 VR128:$lhs), (v4f32 VR128:$rhs))),
            (VADDSUBPSrr VR128:$lhs, VR128:$rhs)>;
  def : Pat<(v4f32 (X86Addsub (v4f32 VR128:$lhs), (loadv4f32 addr:$rhs))),
            (VADDSUBPSrm VR128:$lhs, f128mem:$rhs)>;
  def : Pat<(v2f64 (X86Addsub (v2f64 VR128:$lhs), (v2f64 VR128:$rhs))),
            (VADDSUBPDrr VR128:$lhs, VR128:$rhs)>;
  def : Pat<(v2f64 (X86Addsub (v2f64 VR128:$lhs), (loadv2f64 addr:$rhs))),
            (VADDSUBPDrm VR128:$lhs, f128mem:$rhs)>;

  def : Pat<(v8f32 (X86Addsub (v8f32 VR256:$lhs), (v8f32 VR256:$rhs))),
            (VADDSUBPSYrr VR256:$lhs, VR256:$rhs)>;
  def : Pat<(v8f32 (X86Addsub (v8f32 VR256:$lhs), (loadv8f32 addr:$rhs))),
            (VADDSUBPSYrm VR256:$lhs, f256mem:$rhs)>;
  def : Pat<(v4f64 (X86Addsub (v4f64 VR256:$lhs), (v4f64 VR256:$rhs))),
            (VADDSUBPDYrr VR256:$lhs, VR256:$rhs)>;
  def : Pat<(v4f64 (X86Addsub (v4f64 VR256:$lhs), (loadv4f64 addr:$rhs))),
            (VADDSUBPDYrm VR256:$lhs, f256mem:$rhs)>;
}

let Predicates = [UseSSE3] in {
  def : Pat<(v4f32 (X86Addsub (v4f32 VR128:$lhs), (v4f32 VR128:$rhs))),
            (ADDSUBPSrr VR128:$lhs, VR128:$rhs)>;
  def : Pat<(v4f32 (X86Addsub (v4f32 VR128:$lhs), (memopv4f32 addr:$rhs))),
            (ADDSUBPSrm VR128:$lhs, f128mem:$rhs)>;
  def : Pat<(v2f64 (X86Addsub (v2f64 VR128:$lhs), (v2f64 VR128:$rhs))),
            (ADDSUBPDrr VR128:$lhs, VR128:$rhs)>;
  def : Pat<(v2f64 (X86Addsub (v2f64 VR128:$lhs), (memopv2f64 addr:$rhs))),
            (ADDSUBPDrm VR128:$lhs, f128mem:$rhs)>;
}
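
// Semantics recap: "addsubps" subtracts in the even lanes and adds in the odd
// lanes, i.e. for inputs a and b the result is (a0-b0, a1+b1, a2-b2, a3+b3).
// The X86Addsub node matched above is typically produced when the DAG
// combiner recognizes a blend of an fadd and an fsub of the same operands.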

//===---------------------------------------------------------------------===//
// SSE3 Instructions
//===---------------------------------------------------------------------===//

// Horizontal ops
multiclass S3D_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
                   X86MemOperand x86memop, SDNode OpNode, PatFrag ld_frag,
                   bit Is2Addr = 1> {
  def rr : S3DI<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
                !if(Is2Addr,
                    !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                    !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
                [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], IIC_SSE_HADDSUB_RR>,
                Sched<[WriteFAdd]>;

  def rm : S3DI<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
                !if(Is2Addr,
                    !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                    !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
                [(set RC:$dst, (vt (OpNode RC:$src1, (ld_frag addr:$src2))))],
                IIC_SSE_HADDSUB_RM>, Sched<[WriteFAddLd, ReadAfterLd]>;
}
multiclass S3_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
                  X86MemOperand x86memop, SDNode OpNode, PatFrag ld_frag,
                  bit Is2Addr = 1> {
  def rr : S3I<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
               !if(Is2Addr,
                   !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                   !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
               [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], IIC_SSE_HADDSUB_RR>,
               Sched<[WriteFAdd]>;

  def rm : S3I<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
               !if(Is2Addr,
                   !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                   !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
               [(set RC:$dst, (vt (OpNode RC:$src1, (ld_frag addr:$src2))))],
               IIC_SSE_HADDSUB_RM>, Sched<[WriteFAddLd, ReadAfterLd]>;
}

let Predicates = [HasAVX] in {
  let ExeDomain = SSEPackedSingle in {
    defm VHADDPS  : S3D_Int<0x7C, "vhaddps", v4f32, VR128, f128mem,
                            X86fhadd, loadv4f32, 0>, VEX_4V;
    defm VHSUBPS  : S3D_Int<0x7D, "vhsubps", v4f32, VR128, f128mem,
                            X86fhsub, loadv4f32, 0>, VEX_4V;
    defm VHADDPSY : S3D_Int<0x7C, "vhaddps", v8f32, VR256, f256mem,
                            X86fhadd, loadv8f32, 0>, VEX_4V, VEX_L;
    defm VHSUBPSY : S3D_Int<0x7D, "vhsubps", v8f32, VR256, f256mem,
                            X86fhsub, loadv8f32, 0>, VEX_4V, VEX_L;
  }
  let ExeDomain = SSEPackedDouble in {
    defm VHADDPD  : S3_Int<0x7C, "vhaddpd", v2f64, VR128, f128mem,
                           X86fhadd, loadv2f64, 0>, VEX_4V;
    defm VHSUBPD  : S3_Int<0x7D, "vhsubpd", v2f64, VR128, f128mem,
                           X86fhsub, loadv2f64, 0>, VEX_4V;
    defm VHADDPDY : S3_Int<0x7C, "vhaddpd", v4f64, VR256, f256mem,
                           X86fhadd, loadv4f64, 0>, VEX_4V, VEX_L;
    defm VHSUBPDY : S3_Int<0x7D, "vhsubpd", v4f64, VR256, f256mem,
                           X86fhsub, loadv4f64, 0>, VEX_4V, VEX_L;
  }
}

let Constraints = "$src1 = $dst" in {
  let ExeDomain = SSEPackedSingle in {
    defm HADDPS : S3D_Int<0x7C, "haddps", v4f32, VR128, f128mem, X86fhadd,
                          memopv4f32>;
    defm HSUBPS : S3D_Int<0x7D, "hsubps", v4f32, VR128, f128mem, X86fhsub,
                          memopv4f32>;
  }
  let ExeDomain = SSEPackedDouble in {
    defm HADDPD : S3_Int<0x7C, "haddpd", v2f64, VR128, f128mem, X86fhadd,
                         memopv2f64>;
    defm HSUBPD : S3_Int<0x7D, "hsubpd", v2f64, VR128, f128mem, X86fhsub,
                         memopv2f64>;
  }
}
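
// Horizontal-op example: "haddps %xmm1, %xmm0" computes
// (x0+x1, x2+x3, y0+y1, y2+y3) for inputs x = %xmm0 and y = %xmm1; the "pd"
// forms pair the two doubles of each source the same way.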
5439 //===---------------------------------------------------------------------===//
5440 // SSSE3 - Packed Absolute Instructions
5441 //===---------------------------------------------------------------------===//
5444 /// SS3I_unop_rm_int - Simple SSSE3 unary op whose type can be v*{i8,i16,i32}.
5445 multiclass SS3I_unop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId128,
5447 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
5449 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5450 [(set VR128:$dst, (IntId128 VR128:$src))], IIC_SSE_PABS_RR>,
5451 Sched<[WriteVecALU]>;
5453 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
5455 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5458 (bitconvert (ld_frag addr:$src))))], IIC_SSE_PABS_RM>,
5459 Sched<[WriteVecALULd]>;
5462 /// SS3I_unop_rm_int_y - Simple SSSE3 unary op whose type can be v*{i8,i16,i32}.
5463 multiclass SS3I_unop_rm_int_y<bits<8> opc, string OpcodeStr,
5464 Intrinsic IntId256> {
5465 def rr256 : SS38I<opc, MRMSrcReg, (outs VR256:$dst),
5467 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5468 [(set VR256:$dst, (IntId256 VR256:$src))]>,
5469 Sched<[WriteVecALU]>;
5471 def rm256 : SS38I<opc, MRMSrcMem, (outs VR256:$dst),
5473 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5476 (bitconvert (loadv4i64 addr:$src))))]>,
5477 Sched<[WriteVecALULd]>;
5480 // Helper fragments to match sext vXi1 to vXiY.
5481 def v16i1sextv16i8 : PatLeaf<(v16i8 (X86pcmpgt (bc_v16i8 (v4i32 immAllZerosV)),
5483 def v8i1sextv8i16 : PatLeaf<(v8i16 (X86vsrai VR128:$src, (i8 15)))>;
5484 def v4i1sextv4i32 : PatLeaf<(v4i32 (X86vsrai VR128:$src, (i8 31)))>;
5485 def v32i1sextv32i8 : PatLeaf<(v32i8 (X86pcmpgt (bc_v32i8 (v8i32 immAllZerosV)),
5487 def v16i1sextv16i16: PatLeaf<(v16i16 (X86vsrai VR256:$src, (i8 15)))>;
5488 def v8i1sextv8i32 : PatLeaf<(v8i32 (X86vsrai VR256:$src, (i8 31)))>;
5490 let Predicates = [HasAVX] in {
5491 defm VPABSB : SS3I_unop_rm_int<0x1C, "vpabsb", int_x86_ssse3_pabs_b_128,
5493 defm VPABSW : SS3I_unop_rm_int<0x1D, "vpabsw", int_x86_ssse3_pabs_w_128,
5495 defm VPABSD : SS3I_unop_rm_int<0x1E, "vpabsd", int_x86_ssse3_pabs_d_128,
5499 (bc_v2i64 (v16i1sextv16i8)),
5500 (bc_v2i64 (add (v16i8 VR128:$src), (v16i1sextv16i8)))),
5501 (VPABSBrr128 VR128:$src)>;
5503 (bc_v2i64 (v8i1sextv8i16)),
5504 (bc_v2i64 (add (v8i16 VR128:$src), (v8i1sextv8i16)))),
5505 (VPABSWrr128 VR128:$src)>;
5507 (bc_v2i64 (v4i1sextv4i32)),
5508 (bc_v2i64 (add (v4i32 VR128:$src), (v4i1sextv4i32)))),
5509 (VPABSDrr128 VR128:$src)>;
5512 let Predicates = [HasAVX2] in {
5513 defm VPABSB : SS3I_unop_rm_int_y<0x1C, "vpabsb",
5514 int_x86_avx2_pabs_b>, VEX, VEX_L;
5515 defm VPABSW : SS3I_unop_rm_int_y<0x1D, "vpabsw",
5516 int_x86_avx2_pabs_w>, VEX, VEX_L;
5517 defm VPABSD : SS3I_unop_rm_int_y<0x1E, "vpabsd",
5518 int_x86_avx2_pabs_d>, VEX, VEX_L;
5521 (bc_v4i64 (v32i1sextv32i8)),
5522 (bc_v4i64 (add (v32i8 VR256:$src), (v32i1sextv32i8)))),
5523 (VPABSBrr256 VR256:$src)>;
5525 (bc_v4i64 (v16i1sextv16i16)),
5526 (bc_v4i64 (add (v16i16 VR256:$src), (v16i1sextv16i16)))),
5527 (VPABSWrr256 VR256:$src)>;
5529 (bc_v4i64 (v8i1sextv8i32)),
5530 (bc_v4i64 (add (v8i32 VR256:$src), (v8i1sextv8i32)))),
5531 (VPABSDrr256 VR256:$src)>;
5534 defm PABSB : SS3I_unop_rm_int<0x1C, "pabsb", int_x86_ssse3_pabs_b_128,
5536 defm PABSW : SS3I_unop_rm_int<0x1D, "pabsw", int_x86_ssse3_pabs_w_128,
5538 defm PABSD : SS3I_unop_rm_int<0x1E, "pabsd", int_x86_ssse3_pabs_d_128,
let Predicates = [HasSSSE3] in {
  def : Pat<(xor
            (bc_v2i64 (v16i1sextv16i8)),
            (bc_v2i64 (add (v16i8 VR128:$src), (v16i1sextv16i8)))),
            (PABSBrr128 VR128:$src)>;
  def : Pat<(xor
            (bc_v2i64 (v8i1sextv8i16)),
            (bc_v2i64 (add (v8i16 VR128:$src), (v8i1sextv8i16)))),
            (PABSWrr128 VR128:$src)>;
  def : Pat<(xor
            (bc_v2i64 (v4i1sextv4i32)),
            (bc_v2i64 (add (v4i32 VR128:$src), (v4i1sextv4i32)))),
            (PABSDrr128 VR128:$src)>;
}
5556 //===---------------------------------------------------------------------===//
5557 // SSSE3 - Packed Binary Operator Instructions
5558 //===---------------------------------------------------------------------===//
let Sched = WriteVecALU in {
def SSE_PHADDSUBD : OpndItins<
  IIC_SSE_PHADDSUBD_RR, IIC_SSE_PHADDSUBD_RM
>;
def SSE_PHADDSUBSW : OpndItins<
  IIC_SSE_PHADDSUBSW_RR, IIC_SSE_PHADDSUBSW_RM
>;
def SSE_PHADDSUBW : OpndItins<
  IIC_SSE_PHADDSUBW_RR, IIC_SSE_PHADDSUBW_RM
>;
}

let Sched = WriteShuffle in
def SSE_PSHUFB : OpndItins<
  IIC_SSE_PSHUFB_RR, IIC_SSE_PSHUFB_RM
>;

let Sched = WriteVecALU in
def SSE_PSIGN : OpndItins<
  IIC_SSE_PSIGN_RR, IIC_SSE_PSIGN_RM
>;

let Sched = WriteVecIMul in
def SSE_PMULHRSW : OpndItins<
  IIC_SSE_PMULHRSW, IIC_SSE_PMULHRSW
>;
/// SS3I_binop_rm - Simple SSSE3 bin op
multiclass SS3I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
                         ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
                         X86MemOperand x86memop, OpndItins itins,
                         bit Is2Addr = 1> {
  let isCommutable = 1 in
  def rr : SS38I<opc, MRMSrcReg, (outs RC:$dst),
       (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
         !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
         !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2)))], itins.rr>,
       Sched<[itins.Sched]>;
  def rm : SS38I<opc, MRMSrcMem, (outs RC:$dst),
       (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
         !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
         !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst,
         (OpVT (OpNode RC:$src1,
                (bitconvert (memop_frag addr:$src2)))))], itins.rm>,
       Sched<[itins.Sched.Folded, ReadAfterLd]>;
}
5608 /// SS3I_binop_rm_int - Simple SSSE3 bin op whose type can be v*{i8,i16,i32}.
5609 multiclass SS3I_binop_rm_int<bits<8> opc, string OpcodeStr,
5610 Intrinsic IntId128, OpndItins itins,
5611 PatFrag ld_frag, bit Is2Addr = 1> {
  let isCommutable = 1 in
  def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
       (ins VR128:$src1, VR128:$src2),
       !if(Is2Addr,
         !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
         !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
       Sched<[itins.Sched]>;
  def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
       (ins VR128:$src1, i128mem:$src2),
       !if(Is2Addr,
         !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
         !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst,
         (IntId128 VR128:$src1,
          (bitconvert (ld_frag addr:$src2))))]>,
       Sched<[itins.Sched.Folded, ReadAfterLd]>;
}
5631 multiclass SS3I_binop_rm_int_y<bits<8> opc, string OpcodeStr,
Intrinsic IntId256,
                               X86FoldableSchedWrite Sched> {
5634 let isCommutable = 1 in
5635 def rr256 : SS38I<opc, MRMSrcReg, (outs VR256:$dst),
5636 (ins VR256:$src1, VR256:$src2),
5637 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR256:$dst, (IntId256 VR256:$src1, VR256:$src2))]>,
       Sched<[Sched]>;
5640 def rm256 : SS38I<opc, MRMSrcMem, (outs VR256:$dst),
5641 (ins VR256:$src1, i256mem:$src2),
5642 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR256:$dst,
        (IntId256 VR256:$src1, (bitconvert (loadv4i64 addr:$src2))))]>,
       Sched<[Sched.Folded, ReadAfterLd]>;
}
5648 let ImmT = NoImm, Predicates = [HasAVX] in {
let isCommutable = 0 in {
  defm VPHADDW  : SS3I_binop_rm<0x01, "vphaddw", X86hadd, v8i16, VR128,
                                loadv2i64, i128mem,
                                SSE_PHADDSUBW, 0>, VEX_4V;
  defm VPHADDD  : SS3I_binop_rm<0x02, "vphaddd", X86hadd, v4i32, VR128,
                                loadv2i64, i128mem,
                                SSE_PHADDSUBD, 0>, VEX_4V;
  defm VPHSUBW  : SS3I_binop_rm<0x05, "vphsubw", X86hsub, v8i16, VR128,
                                loadv2i64, i128mem,
                                SSE_PHADDSUBW, 0>, VEX_4V;
  defm VPHSUBD  : SS3I_binop_rm<0x06, "vphsubd", X86hsub, v4i32, VR128,
                                loadv2i64, i128mem,
                                SSE_PHADDSUBD, 0>, VEX_4V;
  defm VPSIGNB  : SS3I_binop_rm<0x08, "vpsignb", X86psign, v16i8, VR128,
                                loadv2i64, i128mem,
                                SSE_PSIGN, 0>, VEX_4V;
  defm VPSIGNW  : SS3I_binop_rm<0x09, "vpsignw", X86psign, v8i16, VR128,
                                loadv2i64, i128mem,
                                SSE_PSIGN, 0>, VEX_4V;
  defm VPSIGND  : SS3I_binop_rm<0x0A, "vpsignd", X86psign, v4i32, VR128,
                                loadv2i64, i128mem,
                                SSE_PSIGN, 0>, VEX_4V;
  defm VPSHUFB  : SS3I_binop_rm<0x00, "vpshufb", X86pshufb, v16i8, VR128,
                                loadv2i64, i128mem,
                                SSE_PSHUFB, 0>, VEX_4V;
5674 defm VPHADDSW : SS3I_binop_rm_int<0x03, "vphaddsw",
5675 int_x86_ssse3_phadd_sw_128,
5676 SSE_PHADDSUBSW, loadv2i64, 0>, VEX_4V;
5677 defm VPHSUBSW : SS3I_binop_rm_int<0x07, "vphsubsw",
5678 int_x86_ssse3_phsub_sw_128,
5679 SSE_PHADDSUBSW, loadv2i64, 0>, VEX_4V;
5680 defm VPMADDUBSW : SS3I_binop_rm_int<0x04, "vpmaddubsw",
5681 int_x86_ssse3_pmadd_ub_sw_128,
SSE_PMADD, loadv2i64, 0>, VEX_4V;
}
5684 defm VPMULHRSW : SS3I_binop_rm_int<0x0B, "vpmulhrsw",
5685 int_x86_ssse3_pmul_hr_sw_128,
SSE_PMULHRSW, loadv2i64, 0>, VEX_4V;
}
5689 let ImmT = NoImm, Predicates = [HasAVX2] in {
let isCommutable = 0 in {
  defm VPHADDWY  : SS3I_binop_rm<0x01, "vphaddw", X86hadd, v16i16, VR256,
                                 loadv4i64, i256mem,
                                 SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
  defm VPHADDDY  : SS3I_binop_rm<0x02, "vphaddd", X86hadd, v8i32, VR256,
                                 loadv4i64, i256mem,
                                 SSE_PHADDSUBD, 0>, VEX_4V, VEX_L;
  defm VPHSUBWY  : SS3I_binop_rm<0x05, "vphsubw", X86hsub, v16i16, VR256,
                                 loadv4i64, i256mem,
                                 SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
  defm VPHSUBDY  : SS3I_binop_rm<0x06, "vphsubd", X86hsub, v8i32, VR256,
                                 loadv4i64, i256mem,
                                 SSE_PHADDSUBD, 0>, VEX_4V, VEX_L;
  defm VPSIGNBY  : SS3I_binop_rm<0x08, "vpsignb", X86psign, v32i8, VR256,
                                 loadv4i64, i256mem,
                                 SSE_PSIGN, 0>, VEX_4V, VEX_L;
  defm VPSIGNWY  : SS3I_binop_rm<0x09, "vpsignw", X86psign, v16i16, VR256,
                                 loadv4i64, i256mem,
                                 SSE_PSIGN, 0>, VEX_4V, VEX_L;
  defm VPSIGNDY  : SS3I_binop_rm<0x0A, "vpsignd", X86psign, v8i32, VR256,
                                 loadv4i64, i256mem,
                                 SSE_PSIGN, 0>, VEX_4V, VEX_L;
  defm VPSHUFBY  : SS3I_binop_rm<0x00, "vpshufb", X86pshufb, v32i8, VR256,
                                 loadv4i64, i256mem,
                                 SSE_PSHUFB, 0>, VEX_4V, VEX_L;
5715 defm VPHADDSW : SS3I_binop_rm_int_y<0x03, "vphaddsw",
5716 int_x86_avx2_phadd_sw,
5717 WriteVecALU>, VEX_4V, VEX_L;
5718 defm VPHSUBSW : SS3I_binop_rm_int_y<0x07, "vphsubsw",
5719 int_x86_avx2_phsub_sw,
5720 WriteVecALU>, VEX_4V, VEX_L;
5721 defm VPMADDUBSW : SS3I_binop_rm_int_y<0x04, "vpmaddubsw",
5722 int_x86_avx2_pmadd_ub_sw,
WriteVecIMul>, VEX_4V, VEX_L;
}
5725 defm VPMULHRSW : SS3I_binop_rm_int_y<0x0B, "vpmulhrsw",
5726 int_x86_avx2_pmul_hr_sw,
WriteVecIMul>, VEX_4V, VEX_L;
}
5730 // None of these have i8 immediate fields.
5731 let ImmT = NoImm, Constraints = "$src1 = $dst" in {
5732 let isCommutable = 0 in {
5733 defm PHADDW : SS3I_binop_rm<0x01, "phaddw", X86hadd, v8i16, VR128,
5734 memopv2i64, i128mem, SSE_PHADDSUBW>;
5735 defm PHADDD : SS3I_binop_rm<0x02, "phaddd", X86hadd, v4i32, VR128,
5736 memopv2i64, i128mem, SSE_PHADDSUBD>;
5737 defm PHSUBW : SS3I_binop_rm<0x05, "phsubw", X86hsub, v8i16, VR128,
5738 memopv2i64, i128mem, SSE_PHADDSUBW>;
5739 defm PHSUBD : SS3I_binop_rm<0x06, "phsubd", X86hsub, v4i32, VR128,
5740 memopv2i64, i128mem, SSE_PHADDSUBD>;
5741 defm PSIGNB : SS3I_binop_rm<0x08, "psignb", X86psign, v16i8, VR128,
5742 memopv2i64, i128mem, SSE_PSIGN>;
5743 defm PSIGNW : SS3I_binop_rm<0x09, "psignw", X86psign, v8i16, VR128,
5744 memopv2i64, i128mem, SSE_PSIGN>;
5745 defm PSIGND : SS3I_binop_rm<0x0A, "psignd", X86psign, v4i32, VR128,
5746 memopv2i64, i128mem, SSE_PSIGN>;
5747 defm PSHUFB : SS3I_binop_rm<0x00, "pshufb", X86pshufb, v16i8, VR128,
5748 memopv2i64, i128mem, SSE_PSHUFB>;
5749 defm PHADDSW : SS3I_binop_rm_int<0x03, "phaddsw",
5750 int_x86_ssse3_phadd_sw_128,
5751 SSE_PHADDSUBSW, memopv2i64>;
5752 defm PHSUBSW : SS3I_binop_rm_int<0x07, "phsubsw",
5753 int_x86_ssse3_phsub_sw_128,
5754 SSE_PHADDSUBSW, memopv2i64>;
5755 defm PMADDUBSW : SS3I_binop_rm_int<0x04, "pmaddubsw",
5756 int_x86_ssse3_pmadd_ub_sw_128,
SSE_PMADD, memopv2i64>;
}
5759 defm PMULHRSW : SS3I_binop_rm_int<0x0B, "pmulhrsw",
5760 int_x86_ssse3_pmul_hr_sw_128,
SSE_PMULHRSW, memopv2i64>;
}
5764 //===---------------------------------------------------------------------===//
5765 // SSSE3 - Packed Align Instruction Patterns
5766 //===---------------------------------------------------------------------===//
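// PALIGNR concatenates $src1:$src2 (with $src1 in the upper half), shifts
// the pair right by $src3 bytes and keeps the low 128 bits; the 256-bit
// AVX2 form does this independently within each 128-bit lane.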
5768 multiclass ssse3_palignr<string asm, bit Is2Addr = 1> {
5769 let hasSideEffects = 0 in {
5770 def R128rr : SS3AI<0x0F, MRMSrcReg, (outs VR128:$dst),
5771 (ins VR128:$src1, VR128:$src2, u8imm:$src3),
!if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                  "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
5776 [], IIC_SSE_PALIGNRR>, Sched<[WriteShuffle]>;
5778 def R128rm : SS3AI<0x0F, MRMSrcMem, (outs VR128:$dst),
5779 (ins VR128:$src1, i128mem:$src2, u8imm:$src3),
!if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                  "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
5784 [], IIC_SSE_PALIGNRM>, Sched<[WriteShuffleLd, ReadAfterLd]>;
5788 multiclass ssse3_palignr_y<string asm, bit Is2Addr = 1> {
5789 let hasSideEffects = 0 in {
5790 def R256rr : SS3AI<0x0F, MRMSrcReg, (outs VR256:$dst),
5791 (ins VR256:$src1, VR256:$src2, u8imm:$src3),
5793 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
5794 []>, Sched<[WriteShuffle]>;
5796 def R256rm : SS3AI<0x0F, MRMSrcMem, (outs VR256:$dst),
5797 (ins VR256:$src1, i256mem:$src2, u8imm:$src3),
5799 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
5800 []>, Sched<[WriteShuffleLd, ReadAfterLd]>;
5804 let Predicates = [HasAVX] in
5805 defm VPALIGN : ssse3_palignr<"vpalignr", 0>, VEX_4V;
5806 let Predicates = [HasAVX2] in
5807 defm VPALIGN : ssse3_palignr_y<"vpalignr", 0>, VEX_4V, VEX_L;
5808 let Constraints = "$src1 = $dst", Predicates = [UseSSSE3] in
5809 defm PALIGN : ssse3_palignr<"palignr">;
5811 let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in {
5812 def : Pat<(v8i32 (X86PAlignr VR256:$src1, VR256:$src2, (i8 imm:$imm))),
5813 (VPALIGNR256rr VR256:$src1, VR256:$src2, imm:$imm)>;
5814 def : Pat<(v8f32 (X86PAlignr VR256:$src1, VR256:$src2, (i8 imm:$imm))),
5815 (VPALIGNR256rr VR256:$src1, VR256:$src2, imm:$imm)>;
5816 def : Pat<(v16i16 (X86PAlignr VR256:$src1, VR256:$src2, (i8 imm:$imm))),
5817 (VPALIGNR256rr VR256:$src1, VR256:$src2, imm:$imm)>;
5818 def : Pat<(v32i8 (X86PAlignr VR256:$src1, VR256:$src2, (i8 imm:$imm))),
5819 (VPALIGNR256rr VR256:$src1, VR256:$src2, imm:$imm)>;
5822 let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
5823 def : Pat<(v4i32 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5824 (VPALIGNR128rr VR128:$src1, VR128:$src2, imm:$imm)>;
5825 def : Pat<(v4f32 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5826 (VPALIGNR128rr VR128:$src1, VR128:$src2, imm:$imm)>;
5827 def : Pat<(v8i16 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5828 (VPALIGNR128rr VR128:$src1, VR128:$src2, imm:$imm)>;
5829 def : Pat<(v16i8 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5830 (VPALIGNR128rr VR128:$src1, VR128:$src2, imm:$imm)>;
5833 let Predicates = [UseSSSE3] in {
5834 def : Pat<(v4i32 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5835 (PALIGNR128rr VR128:$src1, VR128:$src2, imm:$imm)>;
5836 def : Pat<(v4f32 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5837 (PALIGNR128rr VR128:$src1, VR128:$src2, imm:$imm)>;
5838 def : Pat<(v8i16 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5839 (PALIGNR128rr VR128:$src1, VR128:$src2, imm:$imm)>;
5840 def : Pat<(v16i8 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5841 (PALIGNR128rr VR128:$src1, VR128:$src2, imm:$imm)>;
5844 //===---------------------------------------------------------------------===//
// SSE3 - Thread synchronization
5846 //===---------------------------------------------------------------------===//
5848 let SchedRW = [WriteSystem] in {
5849 let usesCustomInserter = 1 in {
5850 def MONITOR : PseudoI<(outs), (ins i32mem:$src1, GR32:$src2, GR32:$src3),
5851 [(int_x86_sse3_monitor addr:$src1, GR32:$src2, GR32:$src3)]>,
5852 Requires<[HasSSE3]>;
5855 let Uses = [EAX, ECX, EDX] in
5856 def MONITORrrr : I<0x01, MRM_C8, (outs), (ins), "monitor", [], IIC_SSE_MONITOR>,
5857 TB, Requires<[HasSSE3]>;
5858 let Uses = [ECX, EAX] in
5859 def MWAITrr : I<0x01, MRM_C9, (outs), (ins), "mwait",
5860 [(int_x86_sse3_mwait ECX, EAX)], IIC_SSE_MWAIT>,
5861 TB, Requires<[HasSSE3]>;
5864 def : InstAlias<"mwait\t{%eax, %ecx|ecx, eax}", (MWAITrr)>, Requires<[Not64BitMode]>;
5865 def : InstAlias<"mwait\t{%rax, %rcx|rcx, rax}", (MWAITrr)>, Requires<[In64BitMode]>;
5867 def : InstAlias<"monitor\t{%eax, %ecx, %edx|edx, ecx, eax}", (MONITORrrr)>,
5868 Requires<[Not64BitMode]>;
5869 def : InstAlias<"monitor\t{%rax, %rcx, %rdx|rdx, rcx, rax}", (MONITORrrr)>,
5870 Requires<[In64BitMode]>;
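// MONITOR and MWAIT read their operands implicitly from EAX/ECX (plus EDX
// for MONITOR), or RAX/RCX/RDX in 64-bit mode; the aliases above accept the
// conventional assembly spellings that name those registers explicitly.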
5872 //===----------------------------------------------------------------------===//
5873 // SSE4.1 - Packed Move with Sign/Zero Extend
5874 //===----------------------------------------------------------------------===//
5876 multiclass SS41I_pmovx_rrrm<bits<8> opc, string OpcodeStr, X86MemOperand MemOp,
RegisterClass OutRC, RegisterClass InRC,
                             OpndItins itins> {
5879 def rr : SS48I<opc, MRMSrcReg, (outs OutRC:$dst), (ins InRC:$src),
5880 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[], itins.rr>,
               Sched<[itins.Sched]>;
5884 def rm : SS48I<opc, MRMSrcMem, (outs OutRC:$dst), (ins MemOp:$src),
5885 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[],
               itins.rm>, Sched<[itins.Sched.Folded]>;
}
5890 multiclass SS41I_pmovx_rm_all<bits<8> opc, string OpcodeStr,
5891 X86MemOperand MemOp, X86MemOperand MemYOp,
5892 OpndItins SSEItins, OpndItins AVXItins,
5893 OpndItins AVX2Itins> {
5894 defm NAME : SS41I_pmovx_rrrm<opc, OpcodeStr, MemOp, VR128, VR128, SSEItins>;
5895 let Predicates = [HasAVX, NoVLX] in
5896 defm V#NAME : SS41I_pmovx_rrrm<opc, !strconcat("v", OpcodeStr), MemOp,
5897 VR128, VR128, AVXItins>, VEX;
5898 let Predicates = [HasAVX2, NoVLX] in
5899 defm V#NAME#Y : SS41I_pmovx_rrrm<opc, !strconcat("v", OpcodeStr), MemYOp,
VR256, VR128, AVX2Itins>, VEX, VEX_L;
}
5903 multiclass SS41I_pmovx_rm<bits<8> opc, string OpcodeStr,
5904 X86MemOperand MemOp, X86MemOperand MemYOp> {
5905 defm PMOVSX#NAME : SS41I_pmovx_rm_all<opc, !strconcat("pmovsx", OpcodeStr),
MemOp, MemYOp,
                                       SSE_INTALU_ITINS_SHUFF_P,
5908 DEFAULT_ITINS_SHUFFLESCHED,
5909 DEFAULT_ITINS_SHUFFLESCHED>;
5910 defm PMOVZX#NAME : SS41I_pmovx_rm_all<!add(opc, 0x10),
5911 !strconcat("pmovzx", OpcodeStr),
MemOp, MemYOp,
                                       SSE_INTALU_ITINS_SHUFF_P,
5914 DEFAULT_ITINS_SHUFFLESCHED,
DEFAULT_ITINS_SHUFFLESCHED>;
}
5918 defm BW : SS41I_pmovx_rm<0x20, "bw", i64mem, i128mem>;
5919 defm WD : SS41I_pmovx_rm<0x23, "wd", i64mem, i128mem>;
5920 defm DQ : SS41I_pmovx_rm<0x25, "dq", i64mem, i128mem>;
5922 defm BD : SS41I_pmovx_rm<0x21, "bd", i32mem, i64mem>;
5923 defm WQ : SS41I_pmovx_rm<0x24, "wq", i32mem, i64mem>;
5925 defm BQ : SS41I_pmovx_rm<0x22, "bq", i16mem, i32mem>;
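// Each PMOVZX* opcode is the matching PMOVSX* opcode plus 0x10 (hence the
// !add(opc, 0x10) above), and the memory forms only load as many bytes as
// the source elements occupy: pmovsxbw reads 64 bits (i64mem) to produce
// eight words, while pmovsxbq reads just 16 bits (i16mem) for two quadwords.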
5928 multiclass SS41I_pmovx_avx2_patterns<string OpcPrefix, string ExtTy, SDNode ExtOp> {
5929 // Register-Register patterns
5930 def : Pat<(v16i16 (ExtOp (v16i8 VR128:$src))),
5931 (!cast<I>(OpcPrefix#BWYrr) VR128:$src)>;
5932 def : Pat<(v8i32 (ExtOp (v16i8 VR128:$src))),
5933 (!cast<I>(OpcPrefix#BDYrr) VR128:$src)>;
5934 def : Pat<(v4i64 (ExtOp (v16i8 VR128:$src))),
5935 (!cast<I>(OpcPrefix#BQYrr) VR128:$src)>;
5937 def : Pat<(v8i32 (ExtOp (v8i16 VR128:$src))),
5938 (!cast<I>(OpcPrefix#WDYrr) VR128:$src)>;
5939 def : Pat<(v4i64 (ExtOp (v8i16 VR128:$src))),
5940 (!cast<I>(OpcPrefix#WQYrr) VR128:$src)>;
5942 def : Pat<(v4i64 (ExtOp (v4i32 VR128:$src))),
5943 (!cast<I>(OpcPrefix#DQYrr) VR128:$src)>;
// On AVX2, we also support 256-bit inputs.
5946 def : Pat<(v16i16 (ExtOp (v32i8 VR256:$src))),
5947 (!cast<I>(OpcPrefix#BWYrr) (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
5948 def : Pat<(v8i32 (ExtOp (v32i8 VR256:$src))),
5949 (!cast<I>(OpcPrefix#BDYrr) (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
5950 def : Pat<(v4i64 (ExtOp (v32i8 VR256:$src))),
5951 (!cast<I>(OpcPrefix#BQYrr) (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
5953 def : Pat<(v8i32 (ExtOp (v16i16 VR256:$src))),
5954 (!cast<I>(OpcPrefix#WDYrr) (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
5955 def : Pat<(v4i64 (ExtOp (v16i16 VR256:$src))),
5956 (!cast<I>(OpcPrefix#WQYrr) (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
5958 def : Pat<(v4i64 (ExtOp (v8i32 VR256:$src))),
5959 (!cast<I>(OpcPrefix#DQYrr) (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
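// A 256-bit input only contributes its low 128 bits to the extension, so we
// first grab the low xmm half with EXTRACT_SUBREG(..., sub_xmm).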
5961 // Simple Register-Memory patterns
5962 def : Pat<(v16i16 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
5963 (!cast<I>(OpcPrefix#BWYrm) addr:$src)>;
5964 def : Pat<(v8i32 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
5965 (!cast<I>(OpcPrefix#BDYrm) addr:$src)>;
5966 def : Pat<(v4i64 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
5967 (!cast<I>(OpcPrefix#BQYrm) addr:$src)>;
5969 def : Pat<(v8i32 (!cast<PatFrag>(ExtTy#"extloadvi16") addr:$src)),
5970 (!cast<I>(OpcPrefix#WDYrm) addr:$src)>;
5971 def : Pat<(v4i64 (!cast<PatFrag>(ExtTy#"extloadvi16") addr:$src)),
5972 (!cast<I>(OpcPrefix#WQYrm) addr:$src)>;
5974 def : Pat<(v4i64 (!cast<PatFrag>(ExtTy#"extloadvi32") addr:$src)),
5975 (!cast<I>(OpcPrefix#DQYrm) addr:$src)>;
5977 // AVX2 Register-Memory patterns
5978 def : Pat<(v16i16 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
5979 (!cast<I>(OpcPrefix#BWYrm) addr:$src)>;
5980 def : Pat<(v16i16 (ExtOp (v16i8 (vzmovl_v2i64 addr:$src)))),
5981 (!cast<I>(OpcPrefix#BWYrm) addr:$src)>;
5982 def : Pat<(v16i16 (ExtOp (v16i8 (vzload_v2i64 addr:$src)))),
5983 (!cast<I>(OpcPrefix#BWYrm) addr:$src)>;
5987 def : Pat<(v8i32 (ExtOp (bc_v16i8 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
5988 (!cast<I>(OpcPrefix#BDYrm) addr:$src)>;
5989 def : Pat<(v8i32 (ExtOp (v16i8 (vzmovl_v2i64 addr:$src)))),
5990 (!cast<I>(OpcPrefix#BDYrm) addr:$src)>;
5991 def : Pat<(v8i32 (ExtOp (v16i8 (vzload_v2i64 addr:$src)))),
5992 (!cast<I>(OpcPrefix#BDYrm) addr:$src)>;
5993 def : Pat<(v8i32 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
5994 (!cast<I>(OpcPrefix#BDYrm) addr:$src)>;
5996 def : Pat<(v4i64 (ExtOp (bc_v16i8 (v4i32 (scalar_to_vector (loadi32 addr:$src)))))),
5997 (!cast<I>(OpcPrefix#BQYrm) addr:$src)>;
5998 def : Pat<(v4i64 (ExtOp (v16i8 (vzmovl_v4i32 addr:$src)))),
5999 (!cast<I>(OpcPrefix#BQYrm) addr:$src)>;
6000 def : Pat<(v4i64 (ExtOp (v16i8 (vzload_v2i64 addr:$src)))),
6001 (!cast<I>(OpcPrefix#BQYrm) addr:$src)>;
6002 def : Pat<(v4i64 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
6003 (!cast<I>(OpcPrefix#BQYrm) addr:$src)>;
6005 def : Pat<(v8i32 (ExtOp (bc_v8i16 (loadv2i64 addr:$src)))),
6006 (!cast<I>(OpcPrefix#WDYrm) addr:$src)>;
6007 def : Pat<(v8i32 (ExtOp (v8i16 (vzmovl_v2i64 addr:$src)))),
6008 (!cast<I>(OpcPrefix#WDYrm) addr:$src)>;
6009 def : Pat<(v8i32 (ExtOp (v8i16 (vzload_v2i64 addr:$src)))),
6010 (!cast<I>(OpcPrefix#WDYrm) addr:$src)>;
6014 def : Pat<(v4i64 (ExtOp (bc_v8i16 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
6015 (!cast<I>(OpcPrefix#WQYrm) addr:$src)>;
6016 def : Pat<(v4i64 (ExtOp (v8i16 (vzmovl_v2i64 addr:$src)))),
6017 (!cast<I>(OpcPrefix#WQYrm) addr:$src)>;
6018 def : Pat<(v4i64 (ExtOp (v8i16 (vzload_v2i64 addr:$src)))),
6019 (!cast<I>(OpcPrefix#WQYrm) addr:$src)>;
6020 def : Pat<(v4i64 (ExtOp (bc_v8i16 (loadv2i64 addr:$src)))),
6021 (!cast<I>(OpcPrefix#WQYrm) addr:$src)>;
6023 def : Pat<(v4i64 (ExtOp (bc_v4i32 (loadv2i64 addr:$src)))),
6024 (!cast<I>(OpcPrefix#DQYrm) addr:$src)>;
6025 def : Pat<(v4i64 (ExtOp (v4i32 (vzmovl_v2i64 addr:$src)))),
6026 (!cast<I>(OpcPrefix#DQYrm) addr:$src)>;
6027 def : Pat<(v4i64 (ExtOp (v4i32 (vzload_v2i64 addr:$src)))),
6028 (!cast<I>(OpcPrefix#DQYrm) addr:$src)>;
}
6033 let Predicates = [HasAVX2, NoVLX] in {
6034 defm : SS41I_pmovx_avx2_patterns<"VPMOVSX", "s", X86vsext>;
6035 defm : SS41I_pmovx_avx2_patterns<"VPMOVZX", "z", X86vzext>;
6038 // SSE4.1/AVX patterns.
6039 multiclass SS41I_pmovx_patterns<string OpcPrefix, string ExtTy,
6040 SDNode ExtOp, PatFrag ExtLoad16> {
6041 def : Pat<(v8i16 (ExtOp (v16i8 VR128:$src))),
6042 (!cast<I>(OpcPrefix#BWrr) VR128:$src)>;
6043 def : Pat<(v4i32 (ExtOp (v16i8 VR128:$src))),
6044 (!cast<I>(OpcPrefix#BDrr) VR128:$src)>;
6045 def : Pat<(v2i64 (ExtOp (v16i8 VR128:$src))),
6046 (!cast<I>(OpcPrefix#BQrr) VR128:$src)>;
6048 def : Pat<(v4i32 (ExtOp (v8i16 VR128:$src))),
6049 (!cast<I>(OpcPrefix#WDrr) VR128:$src)>;
6050 def : Pat<(v2i64 (ExtOp (v8i16 VR128:$src))),
6051 (!cast<I>(OpcPrefix#WQrr) VR128:$src)>;
6053 def : Pat<(v2i64 (ExtOp (v4i32 VR128:$src))),
6054 (!cast<I>(OpcPrefix#DQrr) VR128:$src)>;
6056 def : Pat<(v8i16 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
6057 (!cast<I>(OpcPrefix#BWrm) addr:$src)>;
6058 def : Pat<(v4i32 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
6059 (!cast<I>(OpcPrefix#BDrm) addr:$src)>;
6060 def : Pat<(v2i64 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
6061 (!cast<I>(OpcPrefix#BQrm) addr:$src)>;
6063 def : Pat<(v4i32 (!cast<PatFrag>(ExtTy#"extloadvi16") addr:$src)),
6064 (!cast<I>(OpcPrefix#WDrm) addr:$src)>;
6065 def : Pat<(v2i64 (!cast<PatFrag>(ExtTy#"extloadvi16") addr:$src)),
6066 (!cast<I>(OpcPrefix#WQrm) addr:$src)>;
6068 def : Pat<(v2i64 (!cast<PatFrag>(ExtTy#"extloadvi32") addr:$src)),
6069 (!cast<I>(OpcPrefix#DQrm) addr:$src)>;
6071 def : Pat<(v8i16 (ExtOp (bc_v16i8 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
6072 (!cast<I>(OpcPrefix#BWrm) addr:$src)>;
6073 def : Pat<(v8i16 (ExtOp (bc_v16i8 (v2f64 (scalar_to_vector (loadf64 addr:$src)))))),
6074 (!cast<I>(OpcPrefix#BWrm) addr:$src)>;
6075 def : Pat<(v8i16 (ExtOp (v16i8 (vzmovl_v2i64 addr:$src)))),
6076 (!cast<I>(OpcPrefix#BWrm) addr:$src)>;
6077 def : Pat<(v8i16 (ExtOp (v16i8 (vzload_v2i64 addr:$src)))),
6078 (!cast<I>(OpcPrefix#BWrm) addr:$src)>;
6079 def : Pat<(v8i16 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
6080 (!cast<I>(OpcPrefix#BWrm) addr:$src)>;
6082 def : Pat<(v4i32 (ExtOp (bc_v16i8 (v4i32 (scalar_to_vector (loadi32 addr:$src)))))),
6083 (!cast<I>(OpcPrefix#BDrm) addr:$src)>;
6084 def : Pat<(v4i32 (ExtOp (v16i8 (vzmovl_v4i32 addr:$src)))),
6085 (!cast<I>(OpcPrefix#BDrm) addr:$src)>;
6086 def : Pat<(v4i32 (ExtOp (v16i8 (vzload_v2i64 addr:$src)))),
6087 (!cast<I>(OpcPrefix#BDrm) addr:$src)>;
6088 def : Pat<(v4i32 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
6089 (!cast<I>(OpcPrefix#BDrm) addr:$src)>;
6091 def : Pat<(v2i64 (ExtOp (bc_v16i8 (v4i32 (scalar_to_vector (ExtLoad16 addr:$src)))))),
6092 (!cast<I>(OpcPrefix#BQrm) addr:$src)>;
6093 def : Pat<(v2i64 (ExtOp (v16i8 (vzmovl_v4i32 addr:$src)))),
6094 (!cast<I>(OpcPrefix#BQrm) addr:$src)>;
6095 def : Pat<(v2i64 (ExtOp (v16i8 (vzload_v2i64 addr:$src)))),
6096 (!cast<I>(OpcPrefix#BQrm) addr:$src)>;
6097 def : Pat<(v2i64 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
6098 (!cast<I>(OpcPrefix#BQrm) addr:$src)>;
6100 def : Pat<(v4i32 (ExtOp (bc_v8i16 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
6101 (!cast<I>(OpcPrefix#WDrm) addr:$src)>;
6102 def : Pat<(v4i32 (ExtOp (bc_v8i16 (v2f64 (scalar_to_vector (loadf64 addr:$src)))))),
6103 (!cast<I>(OpcPrefix#WDrm) addr:$src)>;
6104 def : Pat<(v4i32 (ExtOp (v8i16 (vzmovl_v2i64 addr:$src)))),
6105 (!cast<I>(OpcPrefix#WDrm) addr:$src)>;
6106 def : Pat<(v4i32 (ExtOp (v8i16 (vzload_v2i64 addr:$src)))),
6107 (!cast<I>(OpcPrefix#WDrm) addr:$src)>;
6108 def : Pat<(v4i32 (ExtOp (bc_v8i16 (loadv2i64 addr:$src)))),
6109 (!cast<I>(OpcPrefix#WDrm) addr:$src)>;
6111 def : Pat<(v2i64 (ExtOp (bc_v8i16 (v4i32 (scalar_to_vector (loadi32 addr:$src)))))),
6112 (!cast<I>(OpcPrefix#WQrm) addr:$src)>;
6113 def : Pat<(v2i64 (ExtOp (v8i16 (vzmovl_v4i32 addr:$src)))),
6114 (!cast<I>(OpcPrefix#WQrm) addr:$src)>;
6115 def : Pat<(v2i64 (ExtOp (v8i16 (vzload_v2i64 addr:$src)))),
6116 (!cast<I>(OpcPrefix#WQrm) addr:$src)>;
6117 def : Pat<(v2i64 (ExtOp (bc_v8i16 (loadv2i64 addr:$src)))),
6118 (!cast<I>(OpcPrefix#WQrm) addr:$src)>;
6120 def : Pat<(v2i64 (ExtOp (bc_v4i32 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
6121 (!cast<I>(OpcPrefix#DQrm) addr:$src)>;
6122 def : Pat<(v2i64 (ExtOp (bc_v4i32 (v2f64 (scalar_to_vector (loadf64 addr:$src)))))),
6123 (!cast<I>(OpcPrefix#DQrm) addr:$src)>;
6124 def : Pat<(v2i64 (ExtOp (v4i32 (vzmovl_v2i64 addr:$src)))),
6125 (!cast<I>(OpcPrefix#DQrm) addr:$src)>;
6126 def : Pat<(v2i64 (ExtOp (v4i32 (vzload_v2i64 addr:$src)))),
6127 (!cast<I>(OpcPrefix#DQrm) addr:$src)>;
6128 def : Pat<(v2i64 (ExtOp (bc_v4i32 (loadv2i64 addr:$src)))),
(!cast<I>(OpcPrefix#DQrm) addr:$src)>;
}
6132 let Predicates = [HasAVX, NoVLX] in {
6133 defm : SS41I_pmovx_patterns<"VPMOVSX", "s", X86vsext, extloadi32i16>;
6134 defm : SS41I_pmovx_patterns<"VPMOVZX", "z", X86vzext, loadi16_anyext>;
6137 let Predicates = [UseSSE41] in {
6138 defm : SS41I_pmovx_patterns<"PMOVSX", "s", X86vsext, extloadi32i16>;
6139 defm : SS41I_pmovx_patterns<"PMOVZX", "z", X86vzext, loadi16_anyext>;
6142 //===----------------------------------------------------------------------===//
6143 // SSE4.1 - Extract Instructions
6144 //===----------------------------------------------------------------------===//
/// SS41I_extract8 - SSE 4.1 extract 8 bits to a 32-bit reg or 8-bit mem
6147 multiclass SS41I_extract8<bits<8> opc, string OpcodeStr> {
6148 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32orGR64:$dst),
6149 (ins VR128:$src1, u8imm:$src2),
6150 !strconcat(OpcodeStr,
6151 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6152 [(set GR32orGR64:$dst, (X86pextrb (v16i8 VR128:$src1),
imm:$src2))]>,
                 Sched<[WriteShuffle]>;
6155 let hasSideEffects = 0, mayStore = 1,
6156 SchedRW = [WriteShuffleLd, WriteRMW] in
6157 def mr : SS4AIi8<opc, MRMDestMem, (outs),
6158 (ins i8mem:$dst, VR128:$src1, u8imm:$src2),
6159 !strconcat(OpcodeStr,
6160 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6161 [(store (i8 (trunc (assertzext (X86pextrb (v16i8 VR128:$src1),
imm:$src2)))), addr:$dst)]>;
}
6165 let Predicates = [HasAVX, NoBWI] in
6166 defm VPEXTRB : SS41I_extract8<0x14, "vpextrb">, VEX;
6168 defm PEXTRB : SS41I_extract8<0x14, "pextrb">;
6171 /// SS41I_extract16 - SSE 4.1 extract 16 bits to memory destination
6172 multiclass SS41I_extract16<bits<8> opc, string OpcodeStr> {
6173 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in
6174 def rr_REV : SS4AIi8<opc, MRMDestReg, (outs GR32orGR64:$dst),
6175 (ins VR128:$src1, u8imm:$src2),
6176 !strconcat(OpcodeStr,
6177 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6178 []>, Sched<[WriteShuffle]>;
6180 let hasSideEffects = 0, mayStore = 1,
6181 SchedRW = [WriteShuffleLd, WriteRMW] in
6182 def mr : SS4AIi8<opc, MRMDestMem, (outs),
6183 (ins i16mem:$dst, VR128:$src1, u8imm:$src2),
6184 !strconcat(OpcodeStr,
6185 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6186 [(store (i16 (trunc (assertzext (X86pextrw (v8i16 VR128:$src1),
imm:$src2)))), addr:$dst)]>;
}
6190 let Predicates = [HasAVX, NoBWI] in
6191 defm VPEXTRW : SS41I_extract16<0x15, "vpextrw">, VEX;
6193 defm PEXTRW : SS41I_extract16<0x15, "pextrw">;
6196 /// SS41I_extract32 - SSE 4.1 extract 32 bits to int reg or memory destination
6197 multiclass SS41I_extract32<bits<8> opc, string OpcodeStr> {
6198 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
6199 (ins VR128:$src1, u8imm:$src2),
6200 !strconcat(OpcodeStr,
6201 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set GR32:$dst,
                   (extractelt (v4i32 VR128:$src1), imm:$src2))]>,
6204 Sched<[WriteShuffle]>;
6205 let SchedRW = [WriteShuffleLd, WriteRMW] in
6206 def mr : SS4AIi8<opc, MRMDestMem, (outs),
6207 (ins i32mem:$dst, VR128:$src1, u8imm:$src2),
6208 !strconcat(OpcodeStr,
6209 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(store (extractelt (v4i32 VR128:$src1), imm:$src2),
                          addr:$dst)]>;
}
6214 let Predicates = [HasAVX, NoDQI] in
6215 defm VPEXTRD : SS41I_extract32<0x16, "vpextrd">, VEX;
6217 defm PEXTRD : SS41I_extract32<0x16, "pextrd">;
/// SS41I_extract64 - SSE 4.1 extract 64 bits to int reg or memory destination
6220 multiclass SS41I_extract64<bits<8> opc, string OpcodeStr> {
6221 def rr : SS4AIi8<opc, MRMDestReg, (outs GR64:$dst),
6222 (ins VR128:$src1, u8imm:$src2),
6223 !strconcat(OpcodeStr,
6224 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set GR64:$dst,
                   (extractelt (v2i64 VR128:$src1), imm:$src2))]>,
6227 Sched<[WriteShuffle]>, REX_W;
6228 let SchedRW = [WriteShuffleLd, WriteRMW] in
6229 def mr : SS4AIi8<opc, MRMDestMem, (outs),
6230 (ins i64mem:$dst, VR128:$src1, u8imm:$src2),
6231 !strconcat(OpcodeStr,
6232 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6233 [(store (extractelt (v2i64 VR128:$src1), imm:$src2),
addr:$dst)]>, REX_W;
}
6237 let Predicates = [HasAVX, NoDQI] in
6238 defm VPEXTRQ : SS41I_extract64<0x16, "vpextrq">, VEX, VEX_W;
6240 defm PEXTRQ : SS41I_extract64<0x16, "pextrq">;
/// SS41I_extractf32 - SSE 4.1 extract a 32-bit FP value to an int reg or
/// memory destination
6244 multiclass SS41I_extractf32<bits<8> opc, string OpcodeStr,
6245 OpndItins itins = DEFAULT_ITINS> {
6246 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32orGR64:$dst),
6247 (ins VR128:$src1, u8imm:$src2),
6248 !strconcat(OpcodeStr,
6249 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6250 [(set GR32orGR64:$dst,
6251 (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2))],
6252 itins.rr>, Sched<[WriteFBlend]>;
6253 let SchedRW = [WriteFBlendLd, WriteRMW] in
6254 def mr : SS4AIi8<opc, MRMDestMem, (outs),
6255 (ins f32mem:$dst, VR128:$src1, u8imm:$src2),
6256 !strconcat(OpcodeStr,
6257 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6258 [(store (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2),
6259 addr:$dst)], itins.rm>;
6262 let ExeDomain = SSEPackedSingle in {
6263 let Predicates = [UseAVX] in
6264 defm VEXTRACTPS : SS41I_extractf32<0x17, "vextractps">, VEX;
6265 defm EXTRACTPS : SS41I_extractf32<0x17, "extractps", SSE_EXTRACT_ITINS>;
6268 // Also match an EXTRACTPS store when the store is done as f32 instead of i32.
6269 def : Pat<(store (f32 (bitconvert (extractelt (bc_v4i32 (v4f32 VR128:$src1)),
6272 (VEXTRACTPSmr addr:$dst, VR128:$src1, imm:$src2)>,
6274 def : Pat<(store (f32 (bitconvert (extractelt (bc_v4i32 (v4f32 VR128:$src1)),
6277 (EXTRACTPSmr addr:$dst, VR128:$src1, imm:$src2)>,
6278 Requires<[UseSSE41]>;
6280 //===----------------------------------------------------------------------===//
6281 // SSE4.1 - Insert Instructions
6282 //===----------------------------------------------------------------------===//
6284 multiclass SS41I_insert8<bits<8> opc, string asm, bit Is2Addr = 1> {
6285 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
6286 (ins VR128:$src1, GR32orGR64:$src2, u8imm:$src3),
!if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                  "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
[(set VR128:$dst,
        (X86pinsrb VR128:$src1, GR32orGR64:$src2, imm:$src3))]>,
6293 Sched<[WriteShuffle]>;
6294 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
6295 (ins VR128:$src1, i8mem:$src2, u8imm:$src3),
!if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                  "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
[(set VR128:$dst,
        (X86pinsrb VR128:$src1, (extloadi8 addr:$src2),
imm:$src3))]>, Sched<[WriteShuffleLd, ReadAfterLd]>;
}
6305 let Predicates = [HasAVX, NoBWI] in
6306 defm VPINSRB : SS41I_insert8<0x20, "vpinsrb", 0>, VEX_4V;
6307 let Constraints = "$src1 = $dst" in
6308 defm PINSRB : SS41I_insert8<0x20, "pinsrb">;
6310 multiclass SS41I_insert32<bits<8> opc, string asm, bit Is2Addr = 1> {
6311 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
6312 (ins VR128:$src1, GR32:$src2, u8imm:$src3),
!if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                  "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
[(set VR128:$dst,
        (v4i32 (insertelt VR128:$src1, GR32:$src2, imm:$src3)))]>,
6319 Sched<[WriteShuffle]>;
6320 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
6321 (ins VR128:$src1, i32mem:$src2, u8imm:$src3),
!if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                  "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
[(set VR128:$dst,
        (v4i32 (insertelt VR128:$src1, (loadi32 addr:$src2),
imm:$src3)))]>, Sched<[WriteShuffleLd, ReadAfterLd]>;
}
6331 let Predicates = [HasAVX, NoDQI] in
6332 defm VPINSRD : SS41I_insert32<0x22, "vpinsrd", 0>, VEX_4V;
6333 let Constraints = "$src1 = $dst" in
6334 defm PINSRD : SS41I_insert32<0x22, "pinsrd">;
6336 multiclass SS41I_insert64<bits<8> opc, string asm, bit Is2Addr = 1> {
6337 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
6338 (ins VR128:$src1, GR64:$src2, u8imm:$src3),
!if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                  "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
[(set VR128:$dst,
        (v2i64 (insertelt VR128:$src1, GR64:$src2, imm:$src3)))]>,
6345 Sched<[WriteShuffle]>;
6346 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
6347 (ins VR128:$src1, i64mem:$src2, u8imm:$src3),
!if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                  "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
[(set VR128:$dst,
        (v2i64 (insertelt VR128:$src1, (loadi64 addr:$src2),
imm:$src3)))]>, Sched<[WriteShuffleLd, ReadAfterLd]>;
}
6357 let Predicates = [HasAVX, NoDQI] in
6358 defm VPINSRQ : SS41I_insert64<0x22, "vpinsrq", 0>, VEX_4V, VEX_W;
6359 let Constraints = "$src1 = $dst" in
6360 defm PINSRQ : SS41I_insert64<0x22, "pinsrq">, REX_W;
// insertps has a few different modes. The first two below are optimized
// inserts that won't zero arbitrary elements in the destination vector; the
// next one matches the intrinsic and can zero arbitrary elements in the
// target vector.
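// insertps imm8 layout: bits [7:6] (COUNT_S) pick the source element, bits
// [5:4] (COUNT_D) pick the destination slot, and bits [3:0] (ZMASK) zero any
// destination elements whose mask bit is set. E.g. 0x1D copies source
// element 0 into destination element 1 and zeroes elements 0, 2 and 3.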
6366 multiclass SS41I_insertf32<bits<8> opc, string asm, bit Is2Addr = 1,
6367 OpndItins itins = DEFAULT_ITINS> {
6368 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
6369 (ins VR128:$src1, VR128:$src2, u8imm:$src3),
!if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                  "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
[(set VR128:$dst,
        (X86insertps VR128:$src1, VR128:$src2, imm:$src3))], itins.rr>,
6376 Sched<[WriteFShuffle]>;
6377 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
6378 (ins VR128:$src1, f32mem:$src2, u8imm:$src3),
!if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                  "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
[(set VR128:$dst,
        (X86insertps VR128:$src1,
6385 (v4f32 (scalar_to_vector (loadf32 addr:$src2))),
6386 imm:$src3))], itins.rm>,
Sched<[WriteFShuffleLd, ReadAfterLd]>;
}
6390 let ExeDomain = SSEPackedSingle in {
6391 let Predicates = [UseAVX] in
6392 defm VINSERTPS : SS41I_insertf32<0x21, "vinsertps", 0>, VEX_4V;
6393 let Constraints = "$src1 = $dst" in
6394 defm INSERTPS : SS41I_insertf32<0x21, "insertps", 1, SSE_INSERT_ITINS>;
6397 let Predicates = [UseSSE41] in {
6398 // If we're inserting an element from a load or a null pshuf of a load,
6399 // fold the load into the insertps instruction.
6400 def : Pat<(v4f32 (X86insertps (v4f32 VR128:$src1), (X86PShufd (v4f32
6401 (scalar_to_vector (loadf32 addr:$src2))), (i8 0)),
6403 (INSERTPSrm VR128:$src1, addr:$src2, imm:$src3)>;
6404 def : Pat<(v4f32 (X86insertps (v4f32 VR128:$src1), (X86PShufd
6405 (loadv4f32 addr:$src2), (i8 0)), imm:$src3)),
6406 (INSERTPSrm VR128:$src1, addr:$src2, imm:$src3)>;
6409 let Predicates = [UseAVX] in {
6410 // If we're inserting an element from a vbroadcast of a load, fold the
6411 // load into the X86insertps instruction.
6412 def : Pat<(v4f32 (X86insertps (v4f32 VR128:$src1),
6413 (X86VBroadcast (loadf32 addr:$src2)), imm:$src3)),
6414 (VINSERTPSrm VR128:$src1, addr:$src2, imm:$src3)>;
6415 def : Pat<(v4f32 (X86insertps (v4f32 VR128:$src1),
6416 (X86VBroadcast (loadv4f32 addr:$src2)), imm:$src3)),
6417 (VINSERTPSrm VR128:$src1, addr:$src2, imm:$src3)>;
6420 //===----------------------------------------------------------------------===//
6421 // SSE4.1 - Round Instructions
6422 //===----------------------------------------------------------------------===//
6424 multiclass sse41_fp_unop_rm<bits<8> opcps, bits<8> opcpd, string OpcodeStr,
6425 X86MemOperand x86memop, RegisterClass RC,
6426 PatFrag mem_frag32, PatFrag mem_frag64,
6427 Intrinsic V4F32Int, Intrinsic V2F64Int> {
6428 let ExeDomain = SSEPackedSingle in {
// Vector intrinsic operation, reg
6431 def PSr : SS4AIi8<opcps, MRMSrcReg,
6432 (outs RC:$dst), (ins RC:$src1, i32u8imm:$src2),
6433 !strconcat(OpcodeStr,
6434 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6435 [(set RC:$dst, (V4F32Int RC:$src1, imm:$src2))],
6436 IIC_SSE_ROUNDPS_REG>, Sched<[WriteFAdd]>;
6438 // Vector intrinsic operation, mem
6439 def PSm : SS4AIi8<opcps, MRMSrcMem,
6440 (outs RC:$dst), (ins x86memop:$src1, i32u8imm:$src2),
6441 !strconcat(OpcodeStr,
6442 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set RC:$dst,
                      (V4F32Int (mem_frag32 addr:$src1),imm:$src2))],
6445 IIC_SSE_ROUNDPS_MEM>, Sched<[WriteFAddLd]>;
6446 } // ExeDomain = SSEPackedSingle
6448 let ExeDomain = SSEPackedDouble in {
6449 // Vector intrinsic operation, reg
6450 def PDr : SS4AIi8<opcpd, MRMSrcReg,
6451 (outs RC:$dst), (ins RC:$src1, i32u8imm:$src2),
6452 !strconcat(OpcodeStr,
6453 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6454 [(set RC:$dst, (V2F64Int RC:$src1, imm:$src2))],
6455 IIC_SSE_ROUNDPS_REG>, Sched<[WriteFAdd]>;
6457 // Vector intrinsic operation, mem
6458 def PDm : SS4AIi8<opcpd, MRMSrcMem,
6459 (outs RC:$dst), (ins x86memop:$src1, i32u8imm:$src2),
6460 !strconcat(OpcodeStr,
6461 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set RC:$dst,
                      (V2F64Int (mem_frag64 addr:$src1),imm:$src2))],
IIC_SSE_ROUNDPS_MEM>, Sched<[WriteFAddLd]>;
6465 } // ExeDomain = SSEPackedDouble
multiclass sse41_fp_binop_rm<bits<8> opcss, bits<8> opcsd, string OpcodeStr,
                             Intrinsic F32Int, Intrinsic F64Int,
                             bit Is2Addr = 1> {
6472 let ExeDomain = GenericDomain in {
6474 let hasSideEffects = 0 in
6475 def SSr : SS4AIi8<opcss, MRMSrcReg,
6476 (outs FR32:$dst), (ins FR32:$src1, FR32:$src2, i32u8imm:$src3),
!if(Is2Addr,
            !strconcat(OpcodeStr,
6479 "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6480 !strconcat(OpcodeStr,
6481 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6482 []>, Sched<[WriteFAdd]>;
6484 // Intrinsic operation, reg.
6485 let isCodeGenOnly = 1 in
6486 def SSr_Int : SS4AIi8<opcss, MRMSrcReg,
6487 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32u8imm:$src3),
!if(Is2Addr,
            !strconcat(OpcodeStr,
6490 "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6491 !strconcat(OpcodeStr,
6492 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
[(set VR128:$dst, (F32Int VR128:$src1, VR128:$src2, imm:$src3))]>,
        Sched<[WriteFAdd]>;
6496 // Intrinsic operation, mem.
6497 def SSm : SS4AIi8<opcss, MRMSrcMem,
6498 (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2, i32u8imm:$src3),
!if(Is2Addr,
            !strconcat(OpcodeStr,
6501 "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6502 !strconcat(OpcodeStr,
6503 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
[(set VR128:$dst,
             (F32Int VR128:$src1, sse_load_f32:$src2, imm:$src3))]>,
6506 Sched<[WriteFAddLd, ReadAfterLd]>;
6509 let hasSideEffects = 0 in
6510 def SDr : SS4AIi8<opcsd, MRMSrcReg,
6511 (outs FR64:$dst), (ins FR64:$src1, FR64:$src2, i32u8imm:$src3),
!if(Is2Addr,
            !strconcat(OpcodeStr,
6514 "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6515 !strconcat(OpcodeStr,
6516 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6517 []>, Sched<[WriteFAdd]>;
6519 // Intrinsic operation, reg.
6520 let isCodeGenOnly = 1 in
6521 def SDr_Int : SS4AIi8<opcsd, MRMSrcReg,
6522 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32u8imm:$src3),
!if(Is2Addr,
            !strconcat(OpcodeStr,
6525 "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6526 !strconcat(OpcodeStr,
6527 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
[(set VR128:$dst, (F64Int VR128:$src1, VR128:$src2, imm:$src3))]>,
        Sched<[WriteFAdd]>;
6531 // Intrinsic operation, mem.
6532 def SDm : SS4AIi8<opcsd, MRMSrcMem,
6533 (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2, i32u8imm:$src3),
!if(Is2Addr,
            !strconcat(OpcodeStr,
6536 "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6537 !strconcat(OpcodeStr,
6538 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
[(set VR128:$dst,
             (F64Int VR128:$src1, sse_load_f64:$src2, imm:$src3))]>,
6541 Sched<[WriteFAddLd, ReadAfterLd]>;
6542 } // ExeDomain = GenericDomain
6545 // FP round - roundss, roundps, roundsd, roundpd
6546 let Predicates = [HasAVX] in {
6548 defm VROUND : sse41_fp_unop_rm<0x08, 0x09, "vround", f128mem, VR128,
6549 loadv4f32, loadv2f64,
6550 int_x86_sse41_round_ps,
6551 int_x86_sse41_round_pd>, VEX;
6552 defm VROUNDY : sse41_fp_unop_rm<0x08, 0x09, "vround", f256mem, VR256,
6553 loadv8f32, loadv4f64,
6554 int_x86_avx_round_ps_256,
6555 int_x86_avx_round_pd_256>, VEX, VEX_L;
6556 defm VROUND : sse41_fp_binop_rm<0x0A, 0x0B, "vround",
6557 int_x86_sse41_round_ss,
int_x86_sse41_round_sd, 0>, VEX_4V, VEX_LIG;
}
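// Rounding-control immediate: imm[1:0] selects the mode (00 = nearest,
// 01 = down, 10 = up, 11 = truncate), imm[2] = 1 defers to MXCSR.RC, and
// imm[3] = 1 suppresses the precision (inexact) exception. That yields the
// constants used below: ffloor -> 0x9, fceil -> 0xA, ftrunc -> 0xB,
// fnearbyint -> 0xC and frint -> 0x4.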
6561 let Predicates = [UseAVX] in {
6562 def : Pat<(ffloor FR32:$src),
6563 (VROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x9))>;
6564 def : Pat<(f64 (ffloor FR64:$src)),
6565 (VROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x9))>;
6566 def : Pat<(f32 (fnearbyint FR32:$src)),
6567 (VROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0xC))>;
6568 def : Pat<(f64 (fnearbyint FR64:$src)),
6569 (VROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0xC))>;
6570 def : Pat<(f32 (fceil FR32:$src)),
6571 (VROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0xA))>;
6572 def : Pat<(f64 (fceil FR64:$src)),
6573 (VROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0xA))>;
6574 def : Pat<(f32 (frint FR32:$src)),
6575 (VROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x4))>;
6576 def : Pat<(f64 (frint FR64:$src)),
6577 (VROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x4))>;
6578 def : Pat<(f32 (ftrunc FR32:$src)),
6579 (VROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0xB))>;
6580 def : Pat<(f64 (ftrunc FR64:$src)),
6581 (VROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0xB))>;
6584 let Predicates = [HasAVX] in {
6585 def : Pat<(v4f32 (ffloor VR128:$src)),
6586 (VROUNDPSr VR128:$src, (i32 0x9))>;
6587 def : Pat<(v4f32 (fnearbyint VR128:$src)),
6588 (VROUNDPSr VR128:$src, (i32 0xC))>;
6589 def : Pat<(v4f32 (fceil VR128:$src)),
6590 (VROUNDPSr VR128:$src, (i32 0xA))>;
6591 def : Pat<(v4f32 (frint VR128:$src)),
6592 (VROUNDPSr VR128:$src, (i32 0x4))>;
6593 def : Pat<(v4f32 (ftrunc VR128:$src)),
6594 (VROUNDPSr VR128:$src, (i32 0xB))>;
6596 def : Pat<(v2f64 (ffloor VR128:$src)),
6597 (VROUNDPDr VR128:$src, (i32 0x9))>;
6598 def : Pat<(v2f64 (fnearbyint VR128:$src)),
6599 (VROUNDPDr VR128:$src, (i32 0xC))>;
6600 def : Pat<(v2f64 (fceil VR128:$src)),
6601 (VROUNDPDr VR128:$src, (i32 0xA))>;
6602 def : Pat<(v2f64 (frint VR128:$src)),
6603 (VROUNDPDr VR128:$src, (i32 0x4))>;
6604 def : Pat<(v2f64 (ftrunc VR128:$src)),
6605 (VROUNDPDr VR128:$src, (i32 0xB))>;
6607 def : Pat<(v8f32 (ffloor VR256:$src)),
6608 (VROUNDYPSr VR256:$src, (i32 0x9))>;
6609 def : Pat<(v8f32 (fnearbyint VR256:$src)),
6610 (VROUNDYPSr VR256:$src, (i32 0xC))>;
6611 def : Pat<(v8f32 (fceil VR256:$src)),
6612 (VROUNDYPSr VR256:$src, (i32 0xA))>;
6613 def : Pat<(v8f32 (frint VR256:$src)),
6614 (VROUNDYPSr VR256:$src, (i32 0x4))>;
6615 def : Pat<(v8f32 (ftrunc VR256:$src)),
6616 (VROUNDYPSr VR256:$src, (i32 0xB))>;
6618 def : Pat<(v4f64 (ffloor VR256:$src)),
6619 (VROUNDYPDr VR256:$src, (i32 0x9))>;
6620 def : Pat<(v4f64 (fnearbyint VR256:$src)),
6621 (VROUNDYPDr VR256:$src, (i32 0xC))>;
6622 def : Pat<(v4f64 (fceil VR256:$src)),
6623 (VROUNDYPDr VR256:$src, (i32 0xA))>;
6624 def : Pat<(v4f64 (frint VR256:$src)),
6625 (VROUNDYPDr VR256:$src, (i32 0x4))>;
6626 def : Pat<(v4f64 (ftrunc VR256:$src)),
6627 (VROUNDYPDr VR256:$src, (i32 0xB))>;
6630 defm ROUND : sse41_fp_unop_rm<0x08, 0x09, "round", f128mem, VR128,
6631 memopv4f32, memopv2f64,
6632 int_x86_sse41_round_ps, int_x86_sse41_round_pd>;
6633 let Constraints = "$src1 = $dst" in
6634 defm ROUND : sse41_fp_binop_rm<0x0A, 0x0B, "round",
6635 int_x86_sse41_round_ss, int_x86_sse41_round_sd>;
6637 let Predicates = [UseSSE41] in {
6638 def : Pat<(ffloor FR32:$src),
6639 (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x9))>;
6640 def : Pat<(f64 (ffloor FR64:$src)),
6641 (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x9))>;
6642 def : Pat<(f32 (fnearbyint FR32:$src)),
6643 (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0xC))>;
6644 def : Pat<(f64 (fnearbyint FR64:$src)),
6645 (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0xC))>;
6646 def : Pat<(f32 (fceil FR32:$src)),
6647 (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0xA))>;
6648 def : Pat<(f64 (fceil FR64:$src)),
6649 (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0xA))>;
6650 def : Pat<(f32 (frint FR32:$src)),
6651 (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x4))>;
6652 def : Pat<(f64 (frint FR64:$src)),
6653 (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x4))>;
6654 def : Pat<(f32 (ftrunc FR32:$src)),
6655 (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0xB))>;
6656 def : Pat<(f64 (ftrunc FR64:$src)),
6657 (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0xB))>;
6659 def : Pat<(v4f32 (ffloor VR128:$src)),
6660 (ROUNDPSr VR128:$src, (i32 0x9))>;
6661 def : Pat<(v4f32 (fnearbyint VR128:$src)),
6662 (ROUNDPSr VR128:$src, (i32 0xC))>;
6663 def : Pat<(v4f32 (fceil VR128:$src)),
6664 (ROUNDPSr VR128:$src, (i32 0xA))>;
6665 def : Pat<(v4f32 (frint VR128:$src)),
6666 (ROUNDPSr VR128:$src, (i32 0x4))>;
6667 def : Pat<(v4f32 (ftrunc VR128:$src)),
6668 (ROUNDPSr VR128:$src, (i32 0xB))>;
6670 def : Pat<(v2f64 (ffloor VR128:$src)),
6671 (ROUNDPDr VR128:$src, (i32 0x9))>;
6672 def : Pat<(v2f64 (fnearbyint VR128:$src)),
6673 (ROUNDPDr VR128:$src, (i32 0xC))>;
6674 def : Pat<(v2f64 (fceil VR128:$src)),
6675 (ROUNDPDr VR128:$src, (i32 0xA))>;
6676 def : Pat<(v2f64 (frint VR128:$src)),
6677 (ROUNDPDr VR128:$src, (i32 0x4))>;
6678 def : Pat<(v2f64 (ftrunc VR128:$src)),
6679 (ROUNDPDr VR128:$src, (i32 0xB))>;
6682 //===----------------------------------------------------------------------===//
6683 // SSE4.1 - Packed Bit Test
6684 //===----------------------------------------------------------------------===//
// PTEST: we lower to this in X86ISelLowering, primarily from the Intel
// intrinsic that corresponds to it.
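// PTEST sets ZF when (src2 AND src1) == 0 and CF when (src2 AND NOT src1)
// == 0; the testz/testc/testnzc intrinsic variants simply read different
// flags produced by the one instruction.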
6688 let Defs = [EFLAGS], Predicates = [HasAVX] in {
6689 def VPTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
6690 "vptest\t{$src2, $src1|$src1, $src2}",
6691 [(set EFLAGS, (X86ptest VR128:$src1, (v2i64 VR128:$src2)))]>,
6692 Sched<[WriteVecLogic]>, VEX;
6693 def VPTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
6694 "vptest\t{$src2, $src1|$src1, $src2}",
6695 [(set EFLAGS,(X86ptest VR128:$src1, (loadv2i64 addr:$src2)))]>,
6696 Sched<[WriteVecLogicLd, ReadAfterLd]>, VEX;
6698 def VPTESTYrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR256:$src1, VR256:$src2),
6699 "vptest\t{$src2, $src1|$src1, $src2}",
6700 [(set EFLAGS, (X86ptest VR256:$src1, (v4i64 VR256:$src2)))]>,
6701 Sched<[WriteVecLogic]>, VEX, VEX_L;
6702 def VPTESTYrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR256:$src1, i256mem:$src2),
6703 "vptest\t{$src2, $src1|$src1, $src2}",
6704 [(set EFLAGS,(X86ptest VR256:$src1, (loadv4i64 addr:$src2)))]>,
6705 Sched<[WriteVecLogicLd, ReadAfterLd]>, VEX, VEX_L;
6708 let Defs = [EFLAGS] in {
6709 def PTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
6710 "ptest\t{$src2, $src1|$src1, $src2}",
6711 [(set EFLAGS, (X86ptest VR128:$src1, (v2i64 VR128:$src2)))]>,
6712 Sched<[WriteVecLogic]>;
6713 def PTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
6714 "ptest\t{$src2, $src1|$src1, $src2}",
6715 [(set EFLAGS, (X86ptest VR128:$src1, (memopv2i64 addr:$src2)))]>,
6716 Sched<[WriteVecLogicLd, ReadAfterLd]>;
// The bit test instructions below are AVX-only.
6720 multiclass avx_bittest<bits<8> opc, string OpcodeStr, RegisterClass RC,
6721 X86MemOperand x86memop, PatFrag mem_frag, ValueType vt> {
6722 def rr : SS48I<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
6723 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
6724 [(set EFLAGS, (X86testp RC:$src1, (vt RC:$src2)))]>,
6725 Sched<[WriteVecLogic]>, VEX;
6726 def rm : SS48I<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
6727 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
6728 [(set EFLAGS, (X86testp RC:$src1, (mem_frag addr:$src2)))]>,
6729 Sched<[WriteVecLogicLd, ReadAfterLd]>, VEX;
6732 let Defs = [EFLAGS], Predicates = [HasAVX] in {
6733 let ExeDomain = SSEPackedSingle in {
6734 defm VTESTPS : avx_bittest<0x0E, "vtestps", VR128, f128mem, loadv4f32, v4f32>;
defm VTESTPSY : avx_bittest<0x0E, "vtestps", VR256, f256mem, loadv8f32, v8f32>,
                            VEX_L;
}
6738 let ExeDomain = SSEPackedDouble in {
6739 defm VTESTPD : avx_bittest<0x0F, "vtestpd", VR128, f128mem, loadv2f64, v2f64>;
defm VTESTPDY : avx_bittest<0x0F, "vtestpd", VR256, f256mem, loadv4f64, v4f64>,
                            VEX_L;
}
}
6745 //===----------------------------------------------------------------------===//
6746 // SSE4.1 - Misc Instructions
6747 //===----------------------------------------------------------------------===//
6749 let Defs = [EFLAGS], Predicates = [HasPOPCNT] in {
6750 def POPCNT16rr : I<0xB8, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
6751 "popcnt{w}\t{$src, $dst|$dst, $src}",
6752 [(set GR16:$dst, (ctpop GR16:$src)), (implicit EFLAGS)],
IIC_SSE_POPCNT_RR>, Sched<[WriteFAdd]>,
                    OpSize16, XS;
6755 def POPCNT16rm : I<0xB8, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
6756 "popcnt{w}\t{$src, $dst|$dst, $src}",
6757 [(set GR16:$dst, (ctpop (loadi16 addr:$src))),
6758 (implicit EFLAGS)], IIC_SSE_POPCNT_RM>,
6759 Sched<[WriteFAddLd]>, OpSize16, XS;
6761 def POPCNT32rr : I<0xB8, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
6762 "popcnt{l}\t{$src, $dst|$dst, $src}",
6763 [(set GR32:$dst, (ctpop GR32:$src)), (implicit EFLAGS)],
IIC_SSE_POPCNT_RR>, Sched<[WriteFAdd]>,
                    OpSize32, XS;
6767 def POPCNT32rm : I<0xB8, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
6768 "popcnt{l}\t{$src, $dst|$dst, $src}",
6769 [(set GR32:$dst, (ctpop (loadi32 addr:$src))),
6770 (implicit EFLAGS)], IIC_SSE_POPCNT_RM>,
6771 Sched<[WriteFAddLd]>, OpSize32, XS;
6773 def POPCNT64rr : RI<0xB8, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
6774 "popcnt{q}\t{$src, $dst|$dst, $src}",
6775 [(set GR64:$dst, (ctpop GR64:$src)), (implicit EFLAGS)],
6776 IIC_SSE_POPCNT_RR>, Sched<[WriteFAdd]>, XS;
6777 def POPCNT64rm : RI<0xB8, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
6778 "popcnt{q}\t{$src, $dst|$dst, $src}",
6779 [(set GR64:$dst, (ctpop (loadi64 addr:$src))),
6780 (implicit EFLAGS)], IIC_SSE_POPCNT_RM>,
Sched<[WriteFAddLd]>, XS;
}
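// POPCNT clears OF/SF/AF/CF/PF and sets ZF iff the source is zero; the
// (implicit EFLAGS) operands above model that flag clobber.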
6786 // SS41I_unop_rm_int_v16 - SSE 4.1 unary operator whose type is v8i16.
6787 multiclass SS41I_unop_rm_int_v16<bits<8> opc, string OpcodeStr,
6788 Intrinsic IntId128, PatFrag ld_frag,
6789 X86FoldableSchedWrite Sched> {
def rr128 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
                  (ins VR128:$src),
                  !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                  [(set VR128:$dst, (IntId128 VR128:$src))]>,
                  Sched<[Sched]>;
def rm128 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
                  (ins i128mem:$src),
                  !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                  [(set VR128:$dst,
                    (IntId128 (bitconvert (ld_frag addr:$src))))]>,
                  Sched<[Sched.Folded]>;
}
6803 // PHMIN has the same profile as PSAD, thus we use the same scheduling
6804 // model, although the naming is misleading.
6805 let Predicates = [HasAVX] in
6806 defm VPHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "vphminposuw",
int_x86_sse41_phminposuw, loadv2i64,
                                          WriteVecIMul>, VEX;
6809 defm PHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "phminposuw",
int_x86_sse41_phminposuw, memopv2i64,
                                         WriteVecIMul>;
6813 /// SS48I_binop_rm - Simple SSE41 binary operator.
6814 multiclass SS48I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
6815 ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
6816 X86MemOperand x86memop, bit Is2Addr = 1,
6817 OpndItins itins = SSE_INTALU_ITINS_P> {
6818 let isCommutable = 1 in
6819 def rr : SS48I<opc, MRMSrcReg, (outs RC:$dst),
6820 (ins RC:$src1, RC:$src2),
!if(Is2Addr,
        !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
6823 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
6824 [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2)))]>,
6825 Sched<[itins.Sched]>;
6826 def rm : SS48I<opc, MRMSrcMem, (outs RC:$dst),
6827 (ins RC:$src1, x86memop:$src2),
!if(Is2Addr,
        !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
6830 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
[(set RC:$dst,
         (OpVT (OpNode RC:$src1, (bitconvert (memop_frag addr:$src2)))))]>,
Sched<[itins.Sched.Folded, ReadAfterLd]>;
}
/// SS48I_binop_rm2 - Simple SSE 4.1 binary operator with different src and
/// dst types
6838 multiclass SS48I_binop_rm2<bits<8> opc, string OpcodeStr, SDNode OpNode,
6839 ValueType DstVT, ValueType SrcVT, RegisterClass RC,
6840 PatFrag memop_frag, X86MemOperand x86memop,
6842 bit IsCommutable = 0, bit Is2Addr = 1> {
6843 let isCommutable = IsCommutable in
6844 def rr : SS48I<opc, MRMSrcReg, (outs RC:$dst),
6845 (ins RC:$src1, RC:$src2),
!if(Is2Addr,
        !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
6848 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
6849 [(set RC:$dst, (DstVT (OpNode (SrcVT RC:$src1), RC:$src2)))]>,
6850 Sched<[itins.Sched]>;
6851 def rm : SS48I<opc, MRMSrcMem, (outs RC:$dst),
6852 (ins RC:$src1, x86memop:$src2),
!if(Is2Addr,
        !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
6855 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
6856 [(set RC:$dst, (DstVT (OpNode (SrcVT RC:$src1),
6857 (bitconvert (memop_frag addr:$src2)))))]>,
Sched<[itins.Sched.Folded, ReadAfterLd]>;
}
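// Used for pmuldq and friends: the node reads v4i32 sources but produces
// v2i64 results (sign-extended products of the even elements), so DstVT and
// SrcVT necessarily differ.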
6861 let Predicates = [HasAVX, NoVLX] in {
6862 defm VPMINSB : SS48I_binop_rm<0x38, "vpminsb", smin, v16i8, VR128,
6863 loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
6865 defm VPMINSD : SS48I_binop_rm<0x39, "vpminsd", smin, v4i32, VR128,
6866 loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
6868 defm VPMINUD : SS48I_binop_rm<0x3B, "vpminud", umin, v4i32, VR128,
6869 loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
6871 defm VPMINUW : SS48I_binop_rm<0x3A, "vpminuw", umin, v8i16, VR128,
6872 loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
6874 defm VPMAXSB : SS48I_binop_rm<0x3C, "vpmaxsb", smax, v16i8, VR128,
6875 loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
6877 defm VPMAXSD : SS48I_binop_rm<0x3D, "vpmaxsd", smax, v4i32, VR128,
6878 loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
6880 defm VPMAXUD : SS48I_binop_rm<0x3F, "vpmaxud", umax, v4i32, VR128,
6881 loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
6883 defm VPMAXUW : SS48I_binop_rm<0x3E, "vpmaxuw", umax, v8i16, VR128,
6884 loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
6886 defm VPMULDQ : SS48I_binop_rm2<0x28, "vpmuldq", X86pmuldq, v2i64, v4i32,
6887 VR128, loadv2i64, i128mem,
6888 SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V;
6891 let Predicates = [HasAVX2, NoVLX] in {
6892 defm VPMINSBY : SS48I_binop_rm<0x38, "vpminsb", smin, v32i8, VR256,
6893 loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
6895 defm VPMINSDY : SS48I_binop_rm<0x39, "vpminsd", smin, v8i32, VR256,
6896 loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
6898 defm VPMINUDY : SS48I_binop_rm<0x3B, "vpminud", umin, v8i32, VR256,
6899 loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
6901 defm VPMINUWY : SS48I_binop_rm<0x3A, "vpminuw", umin, v16i16, VR256,
6902 loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
6904 defm VPMAXSBY : SS48I_binop_rm<0x3C, "vpmaxsb", smax, v32i8, VR256,
6905 loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
6907 defm VPMAXSDY : SS48I_binop_rm<0x3D, "vpmaxsd", smax, v8i32, VR256,
6908 loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
6910 defm VPMAXUDY : SS48I_binop_rm<0x3F, "vpmaxud", umax, v8i32, VR256,
6911 loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
6913 defm VPMAXUWY : SS48I_binop_rm<0x3E, "vpmaxuw", umax, v16i16, VR256,
6914 loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
6916 defm VPMULDQY : SS48I_binop_rm2<0x28, "vpmuldq", X86pmuldq, v4i64, v8i32,
6917 VR256, loadv4i64, i256mem,
6918 SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V, VEX_L;
let Constraints = "$src1 = $dst" in {
  defm PMINSB : SS48I_binop_rm<0x38, "pminsb", smin, v16i8, VR128,
                               memopv2i64, i128mem, 1, SSE_INTALU_ITINS_P>;
  defm PMINSD : SS48I_binop_rm<0x39, "pminsd", smin, v4i32, VR128,
                               memopv2i64, i128mem, 1, SSE_INTALU_ITINS_P>;
  defm PMINUD : SS48I_binop_rm<0x3B, "pminud", umin, v4i32, VR128,
                               memopv2i64, i128mem, 1, SSE_INTALU_ITINS_P>;
  defm PMINUW : SS48I_binop_rm<0x3A, "pminuw", umin, v8i16, VR128,
                               memopv2i64, i128mem, 1, SSE_INTALU_ITINS_P>;
  defm PMAXSB : SS48I_binop_rm<0x3C, "pmaxsb", smax, v16i8, VR128,
                               memopv2i64, i128mem, 1, SSE_INTALU_ITINS_P>;
  defm PMAXSD : SS48I_binop_rm<0x3D, "pmaxsd", smax, v4i32, VR128,
                               memopv2i64, i128mem, 1, SSE_INTALU_ITINS_P>;
  defm PMAXUD : SS48I_binop_rm<0x3F, "pmaxud", umax, v4i32, VR128,
                               memopv2i64, i128mem, 1, SSE_INTALU_ITINS_P>;
  defm PMAXUW : SS48I_binop_rm<0x3E, "pmaxuw", umax, v8i16, VR128,
                               memopv2i64, i128mem, 1, SSE_INTALU_ITINS_P>;
  defm PMULDQ : SS48I_binop_rm2<0x28, "pmuldq", X86pmuldq, v2i64, v4i32,
                                VR128, memopv2i64, i128mem,
                                SSE_INTMUL_ITINS_P, 1>;
}

let Predicates = [HasAVX, NoVLX] in {
  defm VPMULLD  : SS48I_binop_rm<0x40, "vpmulld", mul, v4i32, VR128,
                                 memopv2i64, i128mem, 0, SSE_PMULLD_ITINS>,
                                 VEX_4V;
  defm VPCMPEQQ : SS48I_binop_rm<0x29, "vpcmpeqq", X86pcmpeq, v2i64, VR128,
                                 memopv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
                                 VEX_4V;
}
let Predicates = [HasAVX2] in {
  defm VPMULLDY  : SS48I_binop_rm<0x40, "vpmulld", mul, v8i32, VR256,
                                  loadv4i64, i256mem, 0, SSE_PMULLD_ITINS>,
                                  VEX_4V, VEX_L;
  defm VPCMPEQQY : SS48I_binop_rm<0x29, "vpcmpeqq", X86pcmpeq, v4i64, VR256,
                                  loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
                                  VEX_4V, VEX_L;
}
let Constraints = "$src1 = $dst" in {
  defm PMULLD  : SS48I_binop_rm<0x40, "pmulld", mul, v4i32, VR128,
                                memopv2i64, i128mem, 1, SSE_PMULLD_ITINS>;
  defm PCMPEQQ : SS48I_binop_rm<0x29, "pcmpeqq", X86pcmpeq, v2i64, VR128,
                                memopv2i64, i128mem, 1, SSE_INTALUQ_ITINS_P>;
}
/// SS41I_binop_rmi_int - SSE 4.1 binary operator with 8-bit immediate
multiclass SS41I_binop_rmi_int<bits<8> opc, string OpcodeStr,
                 Intrinsic IntId, RegisterClass RC, PatFrag memop_frag,
                 X86MemOperand x86memop, bit Is2Addr = 1,
                 OpndItins itins = DEFAULT_ITINS> {
  let isCommutable = 1 in
  def rri : SS4AIi8<opc, MRMSrcReg, (outs RC:$dst),
        (ins RC:$src1, RC:$src2, u8imm:$src3),
        !if(Is2Addr,
            !strconcat(OpcodeStr,
                "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
            !strconcat(OpcodeStr,
                "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
        [(set RC:$dst, (IntId RC:$src1, RC:$src2, imm:$src3))], itins.rr>,
        Sched<[itins.Sched]>;
  def rmi : SS4AIi8<opc, MRMSrcMem, (outs RC:$dst),
        (ins RC:$src1, x86memop:$src2, u8imm:$src3),
        !if(Is2Addr,
            !strconcat(OpcodeStr,
                "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
            !strconcat(OpcodeStr,
                "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
        [(set RC:$dst,
          (IntId RC:$src1,
           (bitconvert (memop_frag addr:$src2)), imm:$src3))], itins.rm>,
        Sched<[itins.Sched.Folded, ReadAfterLd]>;
}
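// Rough sketch of the generated records (hedged; exact fields elided): a defm
// such as "defm MPSADBW : SS41I_binop_rmi_int<0x42, ...>" yields MPSADBWrri
// (reg, reg, imm8) and MPSADBWrmi (reg, mem, imm8); the memory form is
// scheduled as itins.Sched.Folded plus ReadAfterLd so the register operand is
// read only after the folded load completes.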
/// SS41I_binop_rmi - SSE 4.1 binary operator with 8-bit immediate
multiclass SS41I_binop_rmi<bits<8> opc, string OpcodeStr, SDNode OpNode,
                           ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
                           X86MemOperand x86memop, bit Is2Addr = 1,
                           OpndItins itins = DEFAULT_ITINS> {
  let isCommutable = 1 in
  def rri : SS4AIi8<opc, MRMSrcReg, (outs RC:$dst),
        (ins RC:$src1, RC:$src2, u8imm:$src3),
        !if(Is2Addr,
            !strconcat(OpcodeStr,
                "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
            !strconcat(OpcodeStr,
                "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
        [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2, imm:$src3)))],
        itins.rr>, Sched<[itins.Sched]>;
  def rmi : SS4AIi8<opc, MRMSrcMem, (outs RC:$dst),
        (ins RC:$src1, x86memop:$src2, u8imm:$src3),
        !if(Is2Addr,
            !strconcat(OpcodeStr,
                "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
            !strconcat(OpcodeStr,
                "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
        [(set RC:$dst,
          (OpVT (OpNode RC:$src1,
                 (bitconvert (memop_frag addr:$src2)), imm:$src3)))], itins.rm>,
        Sched<[itins.Sched.Folded, ReadAfterLd]>;
}
let Predicates = [HasAVX] in {
  let isCommutable = 0 in {
  defm VMPSADBW : SS41I_binop_rmi_int<0x42, "vmpsadbw", int_x86_sse41_mpsadbw,
                                      VR128, loadv2i64, i128mem, 0,
                                      DEFAULT_ITINS_MPSADSCHED>, VEX_4V;
  }

  let ExeDomain = SSEPackedSingle in {
  defm VBLENDPS : SS41I_binop_rmi<0x0C, "vblendps", X86Blendi, v4f32,
                                  VR128, loadv4f32, f128mem, 0,
                                  DEFAULT_ITINS_FBLENDSCHED>, VEX_4V;
  defm VBLENDPSY : SS41I_binop_rmi<0x0C, "vblendps", X86Blendi, v8f32,
                                   VR256, loadv8f32, f256mem, 0,
                                   DEFAULT_ITINS_FBLENDSCHED>, VEX_4V, VEX_L;
  }
  let ExeDomain = SSEPackedDouble in {
  defm VBLENDPD : SS41I_binop_rmi<0x0D, "vblendpd", X86Blendi, v2f64,
                                  VR128, loadv2f64, f128mem, 0,
                                  DEFAULT_ITINS_FBLENDSCHED>, VEX_4V;
  defm VBLENDPDY : SS41I_binop_rmi<0x0D, "vblendpd", X86Blendi, v4f64,
                                   VR256, loadv4f64, f256mem, 0,
                                   DEFAULT_ITINS_FBLENDSCHED>, VEX_4V, VEX_L;
  }
  defm VPBLENDW : SS41I_binop_rmi<0x0E, "vpblendw", X86Blendi, v8i16,
                                  VR128, loadv2i64, i128mem, 0,
                                  DEFAULT_ITINS_BLENDSCHED>, VEX_4V;

  let ExeDomain = SSEPackedSingle in
  defm VDPPS : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_sse41_dpps,
                                   VR128, loadv4f32, f128mem, 0,
                                   SSE_DPPS_ITINS>, VEX_4V;
  let ExeDomain = SSEPackedDouble in
  defm VDPPD : SS41I_binop_rmi_int<0x41, "vdppd", int_x86_sse41_dppd,
                                   VR128, loadv2f64, f128mem, 0,
                                   SSE_DPPS_ITINS>, VEX_4V;
  let ExeDomain = SSEPackedSingle in
  defm VDPPSY : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_avx_dp_ps_256,
                                    VR256, loadv8f32, i256mem, 0,
                                    SSE_DPPS_ITINS>, VEX_4V, VEX_L;
}
let Predicates = [HasAVX2] in {
  let isCommutable = 0 in {
  defm VMPSADBWY : SS41I_binop_rmi_int<0x42, "vmpsadbw", int_x86_avx2_mpsadbw,
                                       VR256, loadv4i64, i256mem, 0,
                                       DEFAULT_ITINS_MPSADSCHED>, VEX_4V, VEX_L;
  }
  defm VPBLENDWY : SS41I_binop_rmi<0x0E, "vpblendw", X86Blendi, v16i16,
                                   VR256, loadv4i64, i256mem, 0,
                                   DEFAULT_ITINS_BLENDSCHED>, VEX_4V, VEX_L;
}
let Constraints = "$src1 = $dst" in {
  let isCommutable = 0 in {
  defm MPSADBW : SS41I_binop_rmi_int<0x42, "mpsadbw", int_x86_sse41_mpsadbw,
                                     VR128, memopv2i64, i128mem,
                                     1, SSE_MPSADBW_ITINS>;
  }
  let ExeDomain = SSEPackedSingle in
  defm BLENDPS : SS41I_binop_rmi<0x0C, "blendps", X86Blendi, v4f32,
                                 VR128, memopv4f32, f128mem,
                                 1, SSE_INTALU_ITINS_FBLEND_P>;
  let ExeDomain = SSEPackedDouble in
  defm BLENDPD : SS41I_binop_rmi<0x0D, "blendpd", X86Blendi, v2f64,
                                 VR128, memopv2f64, f128mem,
                                 1, SSE_INTALU_ITINS_FBLEND_P>;
  defm PBLENDW : SS41I_binop_rmi<0x0E, "pblendw", X86Blendi, v8i16,
                                 VR128, memopv2i64, i128mem,
                                 1, SSE_INTALU_ITINS_BLEND_P>;
  let ExeDomain = SSEPackedSingle in
  defm DPPS : SS41I_binop_rmi_int<0x40, "dpps", int_x86_sse41_dpps,
                                  VR128, memopv4f32, f128mem, 1,
                                  SSE_DPPS_ITINS>;
  let ExeDomain = SSEPackedDouble in
  defm DPPD : SS41I_binop_rmi_int<0x41, "dppd", int_x86_sse41_dppd,
                                  VR128, memopv2f64, f128mem, 1,
                                  SSE_DPPD_ITINS>;
}
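// Reference note on the dot-product immediate (Intel SDM semantics): for
// DPPS/DPPD the high nibble of imm8 selects which input element pairs are
// multiplied and summed, and the low nibble selects which destination elements
// receive the sum; e.g. "dpps $0xf1, %xmm1, %xmm0" uses all four products but
// writes the result only to element 0.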
/// SS41I_quaternary_int_avx - AVX SSE 4.1 instructions with four operands
multiclass SS41I_quaternary_int_avx<bits<8> opc, string OpcodeStr,
                                    RegisterClass RC, X86MemOperand x86memop,
                                    PatFrag mem_frag, Intrinsic IntId,
                                    X86FoldableSchedWrite Sched> {
  def rr : Ii8<opc, MRMSrcReg, (outs RC:$dst),
                  (ins RC:$src1, RC:$src2, RC:$src3),
                  !strconcat(OpcodeStr,
                    "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
                  [(set RC:$dst, (IntId RC:$src1, RC:$src2, RC:$src3))],
                  NoItinerary, SSEPackedInt>, TAPD, VEX_4V, VEX_I8IMM,
                Sched<[Sched]>;

  def rm : Ii8<opc, MRMSrcMem, (outs RC:$dst),
                  (ins RC:$src1, x86memop:$src2, RC:$src3),
                  !strconcat(OpcodeStr,
                    "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
                  [(set RC:$dst,
                        (IntId RC:$src1, (bitconvert (mem_frag addr:$src2)),
                               RC:$src3))],
                  NoItinerary, SSEPackedInt>, TAPD, VEX_4V, VEX_I8IMM,
                Sched<[Sched.Folded, ReadAfterLd]>;
}
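// Encoding note: the third register source ($src3) of these quaternary forms
// is not a ModRM operand; with VEX_I8IMM it is carried in bits 7:4 of a
// trailing immediate byte (the /is4 encoding used by the variable blends).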
let Predicates = [HasAVX] in {
let ExeDomain = SSEPackedDouble in {
defm VBLENDVPD  : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR128, f128mem,
                                           loadv2f64, int_x86_sse41_blendvpd,
                                           WriteFVarBlend>;
defm VBLENDVPDY : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR256, f256mem,
                                           loadv4f64, int_x86_avx_blendv_pd_256,
                                           WriteFVarBlend>, VEX_L;
} // ExeDomain = SSEPackedDouble
let ExeDomain = SSEPackedSingle in {
defm VBLENDVPS  : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR128, f128mem,
                                           loadv4f32, int_x86_sse41_blendvps,
                                           WriteFVarBlend>;
defm VBLENDVPSY : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR256, f256mem,
                                           loadv8f32, int_x86_avx_blendv_ps_256,
                                           WriteFVarBlend>, VEX_L;
} // ExeDomain = SSEPackedSingle
defm VPBLENDVB  : SS41I_quaternary_int_avx<0x4C, "vpblendvb", VR128, i128mem,
                                           loadv2i64, int_x86_sse41_pblendvb,
                                           WriteVarBlend>;
}
let Predicates = [HasAVX2] in {
defm VPBLENDVBY : SS41I_quaternary_int_avx<0x4C, "vpblendvb", VR256, i256mem,
                                           loadv4i64, int_x86_avx2_pblendvb,
                                           WriteVarBlend>, VEX_L;
}
let Predicates = [HasAVX] in {
  def : Pat<(v16i8 (vselect (v16i8 VR128:$mask), (v16i8 VR128:$src1),
                            (v16i8 VR128:$src2))),
            (VPBLENDVBrr VR128:$src2, VR128:$src1, VR128:$mask)>;
  def : Pat<(v4i32 (vselect (v4i32 VR128:$mask), (v4i32 VR128:$src1),
                            (v4i32 VR128:$src2))),
            (VBLENDVPSrr VR128:$src2, VR128:$src1, VR128:$mask)>;
  def : Pat<(v4f32 (vselect (v4i32 VR128:$mask), (v4f32 VR128:$src1),
                            (v4f32 VR128:$src2))),
            (VBLENDVPSrr VR128:$src2, VR128:$src1, VR128:$mask)>;
  def : Pat<(v2i64 (vselect (v2i64 VR128:$mask), (v2i64 VR128:$src1),
                            (v2i64 VR128:$src2))),
            (VBLENDVPDrr VR128:$src2, VR128:$src1, VR128:$mask)>;
  def : Pat<(v2f64 (vselect (v2i64 VR128:$mask), (v2f64 VR128:$src1),
                            (v2f64 VR128:$src2))),
            (VBLENDVPDrr VR128:$src2, VR128:$src1, VR128:$mask)>;
  def : Pat<(v8i32 (vselect (v8i32 VR256:$mask), (v8i32 VR256:$src1),
                            (v8i32 VR256:$src2))),
            (VBLENDVPSYrr VR256:$src2, VR256:$src1, VR256:$mask)>;
  def : Pat<(v8f32 (vselect (v8i32 VR256:$mask), (v8f32 VR256:$src1),
                            (v8f32 VR256:$src2))),
            (VBLENDVPSYrr VR256:$src2, VR256:$src1, VR256:$mask)>;
  def : Pat<(v4i64 (vselect (v4i64 VR256:$mask), (v4i64 VR256:$src1),
                            (v4i64 VR256:$src2))),
            (VBLENDVPDYrr VR256:$src2, VR256:$src1, VR256:$mask)>;
  def : Pat<(v4f64 (vselect (v4i64 VR256:$mask), (v4f64 VR256:$src1),
                            (v4f64 VR256:$src2))),
            (VBLENDVPDYrr VR256:$src2, VR256:$src1, VR256:$mask)>;
}
let Predicates = [HasAVX2] in {
  def : Pat<(v32i8 (vselect (v32i8 VR256:$mask), (v32i8 VR256:$src1),
                            (v32i8 VR256:$src2))),
            (VPBLENDVBYrr VR256:$src2, VR256:$src1, VR256:$mask)>;
}
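// Worked example of the operand order above: BLENDV picks from its second
// vector source where the mask bit is set and from its first where it is
// clear, while (vselect $mask, $src1, $src2) must yield $src1 where the mask
// is set; hence the output patterns pass ($src2, $src1, $mask) rather than
// ($src1, $src2, $mask).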
// FIXME: Prefer a movss or movsd over a blendps when optimizing for size or
// on targets where they have equal performance. These were changed to use
// blends because blends have better throughput on SandyBridge and Haswell, but
// movs[s/d] are 1-2 byte shorter instructions.
let Predicates = [UseAVX] in {
  let AddedComplexity = 15 in {
  // Move scalar to XMM zero-extended, zeroing a VR128 then do a
  // MOVS{S,D} to the lower bits.
  def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32:$src)))),
            (VMOVSSrr (v4f32 (V_SET0)), FR32:$src)>;
  def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
            (VBLENDPSrri (v4f32 (V_SET0)), VR128:$src, (i8 1))>;
  def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
            (VPBLENDWrri (v4i32 (V_SET0)), VR128:$src, (i8 3))>;
  def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64:$src)))),
            (VMOVSDrr (v2f64 (V_SET0)), FR64:$src)>;

  // Move low f32 and clear high bits.
  def : Pat<(v8f32 (X86vzmovl (v8f32 VR256:$src))),
            (VBLENDPSYrri (v8f32 (AVX_SET0)), VR256:$src, (i8 1))>;

  // Move low f64 and clear high bits.
  def : Pat<(v4f64 (X86vzmovl (v4f64 VR256:$src))),
            (VBLENDPDYrri (v4f64 (AVX_SET0)), VR256:$src, (i8 1))>;
  }

  def : Pat<(v8f32 (X86vzmovl (insert_subvector undef,
                               (v4f32 (scalar_to_vector FR32:$src)), (iPTR 0)))),
            (SUBREG_TO_REG (i32 0),
                           (v4f32 (VMOVSSrr (v4f32 (V_SET0)), FR32:$src)),
                           sub_xmm)>;
  def : Pat<(v4f64 (X86vzmovl (insert_subvector undef,
                               (v2f64 (scalar_to_vector FR64:$src)), (iPTR 0)))),
            (SUBREG_TO_REG (i64 0),
                           (v2f64 (VMOVSDrr (v2f64 (V_SET0)), FR64:$src)),
                           sub_xmm)>;

  // These will incur an FP/int domain crossing penalty, but it may be the only
  // way without AVX2. Do not add any complexity because we may be able to match
  // more optimal patterns defined earlier in this file.
  def : Pat<(v8i32 (X86vzmovl (v8i32 VR256:$src))),
            (VBLENDPSYrri (v8i32 (AVX_SET0)), VR256:$src, (i8 1))>;
  def : Pat<(v4i64 (X86vzmovl (v4i64 VR256:$src))),
            (VBLENDPDYrri (v4i64 (AVX_SET0)), VR256:$src, (i8 1))>;
}
// FIXME: Prefer a movss or movsd over a blendps when optimizing for size or
// on targets where they have equal performance. These were changed to use
// blends because blends have better throughput on SandyBridge and Haswell, but
// movs[s/d] are 1-2 byte shorter instructions.
let Predicates = [UseSSE41] in {
  // With SSE41 we can use blends for these patterns.
  def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
            (BLENDPSrri (v4f32 (V_SET0)), VR128:$src, (i8 1))>;
  def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
            (PBLENDWrri (v4i32 (V_SET0)), VR128:$src, (i8 3))>;
  def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
            (BLENDPDrri (v2f64 (V_SET0)), VR128:$src, (i8 1))>;
}
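// Worked example: with the tied first operand zeroed by V_SET0, "blendps $1"
// keeps only element 0 of $src (imm bit i takes element i from the second
// source), which is exactly the X86vzmovl "keep the low element, zero the
// rest" semantics; PBLENDW needs imm 3 because its mask has 16-bit granularity
// and one i32 element spans two words.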
/// SS41I_ternary_int - SSE 4.1 ternary operator
let Uses = [XMM0], Constraints = "$src1 = $dst" in {
  multiclass SS41I_ternary_int<bits<8> opc, string OpcodeStr, PatFrag mem_frag,
                               X86MemOperand x86memop, Intrinsic IntId,
                               OpndItins itins = DEFAULT_ITINS> {
    def rr0 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
                    (ins VR128:$src1, VR128:$src2),
                    !strconcat(OpcodeStr,
                     "\t{$src2, $dst|$dst, $src2}"),
                    [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2, XMM0))],
                    itins.rr>, Sched<[itins.Sched]>;

    def rm0 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
                    (ins VR128:$src1, x86memop:$src2),
                    !strconcat(OpcodeStr,
                     "\t{$src2, $dst|$dst, $src2}"),
                    [(set VR128:$dst,
                      (IntId VR128:$src1,
                       (bitconvert (mem_frag addr:$src2)), XMM0))],
                    itins.rm>, Sched<[itins.Sched.Folded, ReadAfterLd]>;
  }
}
let ExeDomain = SSEPackedDouble in
defm BLENDVPD : SS41I_ternary_int<0x15, "blendvpd", memopv2f64, f128mem,
                                  int_x86_sse41_blendvpd,
                                  DEFAULT_ITINS_FBLENDSCHED>;
let ExeDomain = SSEPackedSingle in
defm BLENDVPS : SS41I_ternary_int<0x14, "blendvps", memopv4f32, f128mem,
                                  int_x86_sse41_blendvps,
                                  DEFAULT_ITINS_FBLENDSCHED>;
defm PBLENDVB : SS41I_ternary_int<0x10, "pblendvb", memopv2i64, i128mem,
                                  int_x86_sse41_pblendvb,
                                  DEFAULT_ITINS_VARBLENDSCHED>;
// Aliases with the implicit xmm0 argument
def : InstAlias<"blendvpd\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}",
                (BLENDVPDrr0 VR128:$dst, VR128:$src2)>;
def : InstAlias<"blendvpd\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}",
                (BLENDVPDrm0 VR128:$dst, f128mem:$src2)>;
def : InstAlias<"blendvps\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}",
                (BLENDVPSrr0 VR128:$dst, VR128:$src2)>;
def : InstAlias<"blendvps\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}",
                (BLENDVPSrm0 VR128:$dst, f128mem:$src2)>;
def : InstAlias<"pblendvb\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}",
                (PBLENDVBrr0 VR128:$dst, VR128:$src2)>;
def : InstAlias<"pblendvb\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}",
                (PBLENDVBrm0 VR128:$dst, i128mem:$src2)>;
let Predicates = [UseSSE41] in {
  def : Pat<(v16i8 (vselect (v16i8 XMM0), (v16i8 VR128:$src1),
                            (v16i8 VR128:$src2))),
            (PBLENDVBrr0 VR128:$src2, VR128:$src1)>;
  def : Pat<(v4i32 (vselect (v4i32 XMM0), (v4i32 VR128:$src1),
                            (v4i32 VR128:$src2))),
            (BLENDVPSrr0 VR128:$src2, VR128:$src1)>;
  def : Pat<(v4f32 (vselect (v4i32 XMM0), (v4f32 VR128:$src1),
                            (v4f32 VR128:$src2))),
            (BLENDVPSrr0 VR128:$src2, VR128:$src1)>;
  def : Pat<(v2i64 (vselect (v2i64 XMM0), (v2i64 VR128:$src1),
                            (v2i64 VR128:$src2))),
            (BLENDVPDrr0 VR128:$src2, VR128:$src1)>;
  def : Pat<(v2f64 (vselect (v2i64 XMM0), (v2f64 VR128:$src1),
                            (v2f64 VR128:$src2))),
            (BLENDVPDrr0 VR128:$src2, VR128:$src1)>;
}
let SchedRW = [WriteLoad] in {
let Predicates = [HasAVX] in
def VMOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                        "vmovntdqa\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
                        VEX;
let Predicates = [HasAVX2] in
def VMOVNTDQAYrm : SS48I<0x2A, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
                         "vmovntdqa\t{$src, $dst|$dst, $src}",
                         [(set VR256:$dst, (int_x86_avx2_movntdqa addr:$src))]>,
                         VEX, VEX_L;
def MOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                       "movntdqa\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>;
} // SchedRW
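// Usage note (illustrative assembly, not part of this file): MOVNTDQA is a
// streaming load with a non-temporal hint, e.g.
//   vmovntdqa (%rdi), %xmm0
// The source must be 16-byte aligned, and the hint is primarily useful when
// reading from write-combining memory.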
//===----------------------------------------------------------------------===//
// SSE4.2 - Compare Instructions
//===----------------------------------------------------------------------===//

/// SS42I_binop_rm - Simple SSE 4.2 binary operator
multiclass SS42I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
                          ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
                          X86MemOperand x86memop, bit Is2Addr = 1> {
  def rr : SS428I<opc, MRMSrcReg, (outs RC:$dst),
       (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2)))]>;
  def rm : SS428I<opc, MRMSrcMem, (outs RC:$dst),
       (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst,
         (OpVT (OpNode RC:$src1, (memop_frag addr:$src2))))]>;
}

let Predicates = [HasAVX] in
defm VPCMPGTQ : SS42I_binop_rm<0x37, "vpcmpgtq", X86pcmpgt, v2i64, VR128,
                               loadv2i64, i128mem, 0>, VEX_4V;

let Predicates = [HasAVX2] in
defm VPCMPGTQY : SS42I_binop_rm<0x37, "vpcmpgtq", X86pcmpgt, v4i64, VR256,
                                loadv4i64, i256mem, 0>, VEX_4V, VEX_L;

let Constraints = "$src1 = $dst" in
defm PCMPGTQ : SS42I_binop_rm<0x37, "pcmpgtq", X86pcmpgt, v2i64, VR128,
                              memopv2i64, i128mem>;
//===----------------------------------------------------------------------===//
// SSE4.2 - String/text Processing Instructions
//===----------------------------------------------------------------------===//

// Packed Compare Implicit Length Strings, Return Mask
multiclass pseudo_pcmpistrm<string asm, PatFrag ld_frag> {
  def REG : PseudoI<(outs VR128:$dst),
                    (ins VR128:$src1, VR128:$src2, u8imm:$src3),
    [(set VR128:$dst, (int_x86_sse42_pcmpistrm128 VR128:$src1, VR128:$src2,
                                                  imm:$src3))]>;
  def MEM : PseudoI<(outs VR128:$dst),
                    (ins VR128:$src1, i128mem:$src2, u8imm:$src3),
    [(set VR128:$dst, (int_x86_sse42_pcmpistrm128 VR128:$src1,
                       (bc_v16i8 (ld_frag addr:$src2)), imm:$src3))]>;
}

let Defs = [EFLAGS], usesCustomInserter = 1 in {
  defm VPCMPISTRM128 : pseudo_pcmpistrm<"#VPCMPISTRM128", loadv2i64>,
                       Requires<[HasAVX]>;
  defm PCMPISTRM128 : pseudo_pcmpistrm<"#PCMPISTRM128", memopv2i64>,
                      Requires<[UseSSE42]>;
}
multiclass pcmpistrm_SS42AI<string asm> {
  def rr : SS42AI<0x62, MRMSrcReg, (outs),
            (ins VR128:$src1, VR128:$src2, u8imm:$src3),
            !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
            []>, Sched<[WritePCmpIStrM]>;

  def rm : SS42AI<0x62, MRMSrcMem, (outs),
            (ins VR128:$src1, i128mem:$src2, u8imm:$src3),
            !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
            []>, Sched<[WritePCmpIStrMLd, ReadAfterLd]>;
}

let Defs = [XMM0, EFLAGS], hasSideEffects = 0 in {
  let Predicates = [HasAVX] in
  defm VPCMPISTRM128 : pcmpistrm_SS42AI<"vpcmpistrm">, VEX;
  defm PCMPISTRM128 : pcmpistrm_SS42AI<"pcmpistrm">;
}
// Packed Compare Explicit Length Strings, Return Mask
multiclass pseudo_pcmpestrm<string asm, PatFrag ld_frag> {
  def REG : PseudoI<(outs VR128:$dst),
                    (ins VR128:$src1, VR128:$src3, u8imm:$src5),
    [(set VR128:$dst, (int_x86_sse42_pcmpestrm128
                       VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5))]>;
  def MEM : PseudoI<(outs VR128:$dst),
                    (ins VR128:$src1, i128mem:$src3, u8imm:$src5),
    [(set VR128:$dst, (int_x86_sse42_pcmpestrm128 VR128:$src1, EAX,
                       (bc_v16i8 (ld_frag addr:$src3)), EDX, imm:$src5))]>;
}

let Defs = [EFLAGS], Uses = [EAX, EDX], usesCustomInserter = 1 in {
  defm VPCMPESTRM128 : pseudo_pcmpestrm<"#VPCMPESTRM128", loadv2i64>,
                       Requires<[HasAVX]>;
  defm PCMPESTRM128 : pseudo_pcmpestrm<"#PCMPESTRM128", memopv2i64>,
                      Requires<[UseSSE42]>;
}
multiclass SS42AI_pcmpestrm<string asm> {
  def rr : SS42AI<0x60, MRMSrcReg, (outs),
            (ins VR128:$src1, VR128:$src3, u8imm:$src5),
            !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
            []>, Sched<[WritePCmpEStrM]>;

  def rm : SS42AI<0x60, MRMSrcMem, (outs),
            (ins VR128:$src1, i128mem:$src3, u8imm:$src5),
            !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
            []>, Sched<[WritePCmpEStrMLd, ReadAfterLd]>;
}

let Defs = [XMM0, EFLAGS], Uses = [EAX, EDX], hasSideEffects = 0 in {
  let Predicates = [HasAVX] in
  defm VPCMPESTRM128 : SS42AI_pcmpestrm<"vpcmpestrm">, VEX;
  defm PCMPESTRM128 : SS42AI_pcmpestrm<"pcmpestrm">;
}
// Packed Compare Implicit Length Strings, Return Index
multiclass pseudo_pcmpistri<string asm, PatFrag ld_frag> {
  def REG : PseudoI<(outs GR32:$dst),
                    (ins VR128:$src1, VR128:$src2, u8imm:$src3),
    [(set GR32:$dst, EFLAGS,
      (X86pcmpistri VR128:$src1, VR128:$src2, imm:$src3))]>;
  def MEM : PseudoI<(outs GR32:$dst),
                    (ins VR128:$src1, i128mem:$src2, u8imm:$src3),
    [(set GR32:$dst, EFLAGS, (X86pcmpistri VR128:$src1,
      (bc_v16i8 (ld_frag addr:$src2)), imm:$src3))]>;
}

let Defs = [EFLAGS], usesCustomInserter = 1 in {
  defm VPCMPISTRI : pseudo_pcmpistri<"#VPCMPISTRI", loadv2i64>,
                    Requires<[HasAVX]>;
  defm PCMPISTRI : pseudo_pcmpistri<"#PCMPISTRI", memopv2i64>,
                   Requires<[UseSSE42]>;
}
multiclass SS42AI_pcmpistri<string asm> {
  def rr : SS42AI<0x63, MRMSrcReg, (outs),
            (ins VR128:$src1, VR128:$src2, u8imm:$src3),
            !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
            []>, Sched<[WritePCmpIStrI]>;

  def rm : SS42AI<0x63, MRMSrcMem, (outs),
            (ins VR128:$src1, i128mem:$src2, u8imm:$src3),
            !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
            []>, Sched<[WritePCmpIStrILd, ReadAfterLd]>;
}

let Defs = [ECX, EFLAGS], hasSideEffects = 0 in {
  let Predicates = [HasAVX] in
  defm VPCMPISTRI : SS42AI_pcmpistri<"vpcmpistri">, VEX;
  defm PCMPISTRI : SS42AI_pcmpistri<"pcmpistri">;
}
// Packed Compare Explicit Length Strings, Return Index
multiclass pseudo_pcmpestri<string asm, PatFrag ld_frag> {
  def REG : PseudoI<(outs GR32:$dst),
                    (ins VR128:$src1, VR128:$src3, u8imm:$src5),
    [(set GR32:$dst, EFLAGS,
      (X86pcmpestri VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5))]>;
  def MEM : PseudoI<(outs GR32:$dst),
                    (ins VR128:$src1, i128mem:$src3, u8imm:$src5),
    [(set GR32:$dst, EFLAGS,
      (X86pcmpestri VR128:$src1, EAX, (bc_v16i8 (ld_frag addr:$src3)), EDX,
       imm:$src5))]>;
}

let Defs = [EFLAGS], Uses = [EAX, EDX], usesCustomInserter = 1 in {
  defm VPCMPESTRI : pseudo_pcmpestri<"#VPCMPESTRI", loadv2i64>,
                    Requires<[HasAVX]>;
  defm PCMPESTRI : pseudo_pcmpestri<"#PCMPESTRI", memopv2i64>,
                   Requires<[UseSSE42]>;
}
multiclass SS42AI_pcmpestri<string asm> {
  def rr : SS42AI<0x61, MRMSrcReg, (outs),
            (ins VR128:$src1, VR128:$src3, u8imm:$src5),
            !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
            []>, Sched<[WritePCmpEStrI]>;

  def rm : SS42AI<0x61, MRMSrcMem, (outs),
            (ins VR128:$src1, i128mem:$src3, u8imm:$src5),
            !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
            []>, Sched<[WritePCmpEStrILd, ReadAfterLd]>;
}

let Defs = [ECX, EFLAGS], Uses = [EAX, EDX], hasSideEffects = 0 in {
  let Predicates = [HasAVX] in
  defm VPCMPESTRI : SS42AI_pcmpestri<"vpcmpestri">, VEX;
  defm PCMPESTRI : SS42AI_pcmpestri<"pcmpestri">;
}
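// Register conventions for the string instructions above, mirrored by the
// Uses/Defs lists: the explicit-length forms take the two operand lengths in
// EAX and EDX, the index variants return their result in ECX, the mask
// variants in XMM0, and all of them set EFLAGS. Illustrative assembly:
//   pcmpistri $0x0c, %xmm1, %xmm0   # index of first match -> %ecx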
//===----------------------------------------------------------------------===//
// SSE4.2 - CRC Instructions
//===----------------------------------------------------------------------===//

// No CRC instructions have AVX equivalents.

// CRC intrinsic instructions.
// This set of instructions is register/memory only; the only difference is
// the size of r and m.
class SS42I_crc32r<bits<8> opc, string asm, RegisterClass RCOut,
                   RegisterClass RCIn, SDPatternOperator Int> :
  SS42FI<opc, MRMSrcReg, (outs RCOut:$dst), (ins RCOut:$src1, RCIn:$src2),
         !strconcat(asm, "\t{$src2, $src1|$src1, $src2}"),
         [(set RCOut:$dst, (Int RCOut:$src1, RCIn:$src2))], IIC_CRC32_REG>,
         Sched<[WriteFAdd]>;

class SS42I_crc32m<bits<8> opc, string asm, RegisterClass RCOut,
                   X86MemOperand x86memop, SDPatternOperator Int> :
  SS42FI<opc, MRMSrcMem, (outs RCOut:$dst), (ins RCOut:$src1, x86memop:$src2),
         !strconcat(asm, "\t{$src2, $src1|$src1, $src2}"),
         [(set RCOut:$dst, (Int RCOut:$src1, (load addr:$src2)))],
         IIC_CRC32_MEM>, Sched<[WriteFAddLd, ReadAfterLd]>;

let Constraints = "$src1 = $dst" in {
  def CRC32r32m8  : SS42I_crc32m<0xF0, "crc32{b}", GR32, i8mem,
                                 int_x86_sse42_crc32_32_8>;
  def CRC32r32r8  : SS42I_crc32r<0xF0, "crc32{b}", GR32, GR8,
                                 int_x86_sse42_crc32_32_8>;
  def CRC32r32m16 : SS42I_crc32m<0xF1, "crc32{w}", GR32, i16mem,
                                 int_x86_sse42_crc32_32_16>, OpSize16;
  def CRC32r32r16 : SS42I_crc32r<0xF1, "crc32{w}", GR32, GR16,
                                 int_x86_sse42_crc32_32_16>, OpSize16;
  def CRC32r32m32 : SS42I_crc32m<0xF1, "crc32{l}", GR32, i32mem,
                                 int_x86_sse42_crc32_32_32>, OpSize32;
  def CRC32r32r32 : SS42I_crc32r<0xF1, "crc32{l}", GR32, GR32,
                                 int_x86_sse42_crc32_32_32>, OpSize32;
  def CRC32r64m64 : SS42I_crc32m<0xF1, "crc32{q}", GR64, i64mem,
                                 int_x86_sse42_crc32_64_64>, REX_W;
  def CRC32r64r64 : SS42I_crc32r<0xF1, "crc32{q}", GR64, GR64,
                                 int_x86_sse42_crc32_64_64>, REX_W;
  let hasSideEffects = 0 in {
    let mayLoad = 1 in
    def CRC32r64m8 : SS42I_crc32m<0xF0, "crc32{b}", GR64, i8mem,
                                  null_frag>, REX_W;
    def CRC32r64r8 : SS42I_crc32r<0xF0, "crc32{b}", GR64, GR8,
                                  null_frag>, REX_W;
  }
}
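// Usage note: these compute CRC-32C (the Castagnoli polynomial), not the
// zlib/PKZIP CRC-32, folding one operand into the running checksum per
// instruction, e.g. (illustrative assembly):
//   crc32b (%rsi), %eax   # fold one byte
//   crc32q (%rsi), %rax   # fold eight bytes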
//===----------------------------------------------------------------------===//
// SHA-NI Instructions
//===----------------------------------------------------------------------===//

multiclass SHAI_binop<bits<8> Opc, string OpcodeStr, Intrinsic IntId,
                      bit UsesXMM0 = 0> {
  def rr : I<Opc, MRMSrcReg, (outs VR128:$dst),
             (ins VR128:$src1, VR128:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
             [!if(UsesXMM0,
                  (set VR128:$dst, (IntId VR128:$src1, VR128:$src2, XMM0)),
                  (set VR128:$dst, (IntId VR128:$src1, VR128:$src2)))]>, T8;

  def rm : I<Opc, MRMSrcMem, (outs VR128:$dst),
             (ins VR128:$src1, i128mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
             [!if(UsesXMM0,
                  (set VR128:$dst, (IntId VR128:$src1,
                    (bc_v4i32 (memopv2i64 addr:$src2)), XMM0)),
                  (set VR128:$dst, (IntId VR128:$src1,
                    (bc_v4i32 (memopv2i64 addr:$src2)))))]>, T8;
}

let Constraints = "$src1 = $dst", Predicates = [HasSHA] in {
  def SHA1RNDS4rri : Ii8<0xCC, MRMSrcReg, (outs VR128:$dst),
                         (ins VR128:$src1, VR128:$src2, u8imm:$src3),
                         "sha1rnds4\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                         [(set VR128:$dst,
                           (int_x86_sha1rnds4 VR128:$src1, VR128:$src2,
                            (i8 imm:$src3)))]>, TA;
  def SHA1RNDS4rmi : Ii8<0xCC, MRMSrcMem, (outs VR128:$dst),
                         (ins VR128:$src1, i128mem:$src2, u8imm:$src3),
                         "sha1rnds4\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                         [(set VR128:$dst,
                           (int_x86_sha1rnds4 VR128:$src1,
                            (bc_v4i32 (memopv2i64 addr:$src2)),
                            (i8 imm:$src3)))]>, TA;

  defm SHA1NEXTE : SHAI_binop<0xC8, "sha1nexte", int_x86_sha1nexte>;
  defm SHA1MSG1  : SHAI_binop<0xC9, "sha1msg1", int_x86_sha1msg1>;
  defm SHA1MSG2  : SHAI_binop<0xCA, "sha1msg2", int_x86_sha1msg2>;

  let Uses = [XMM0] in
  defm SHA256RNDS2 : SHAI_binop<0xCB, "sha256rnds2", int_x86_sha256rnds2, 1>;

  defm SHA256MSG1 : SHAI_binop<0xCC, "sha256msg1", int_x86_sha256msg1>;
  defm SHA256MSG2 : SHAI_binop<0xCD, "sha256msg2", int_x86_sha256msg2>;
}
// Aliases with explicit %xmm0
def : InstAlias<"sha256rnds2\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}",
                (SHA256RNDS2rr VR128:$dst, VR128:$src2)>;
def : InstAlias<"sha256rnds2\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}",
                (SHA256RNDS2rm VR128:$dst, i128mem:$src2)>;
//===----------------------------------------------------------------------===//
// AES-NI Instructions
//===----------------------------------------------------------------------===//

multiclass AESI_binop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId128,
                             PatFrag ld_frag, bit Is2Addr = 1> {
  def rr : AES8I<opc, MRMSrcReg, (outs VR128:$dst),
       (ins VR128:$src1, VR128:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
       Sched<[WriteAESDecEnc]>;
  def rm : AES8I<opc, MRMSrcMem, (outs VR128:$dst),
       (ins VR128:$src1, i128mem:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst,
         (IntId128 VR128:$src1, (ld_frag addr:$src2)))]>,
       Sched<[WriteAESDecEncLd, ReadAfterLd]>;
}
// Perform One Round of an AES Encryption/Decryption Flow
let Predicates = [HasAVX, HasAES] in {
  defm VAESENC     : AESI_binop_rm_int<0xDC, "vaesenc",
                                       int_x86_aesni_aesenc, loadv2i64, 0>, VEX_4V;
  defm VAESENCLAST : AESI_binop_rm_int<0xDD, "vaesenclast",
                                       int_x86_aesni_aesenclast, loadv2i64, 0>, VEX_4V;
  defm VAESDEC     : AESI_binop_rm_int<0xDE, "vaesdec",
                                       int_x86_aesni_aesdec, loadv2i64, 0>, VEX_4V;
  defm VAESDECLAST : AESI_binop_rm_int<0xDF, "vaesdeclast",
                                       int_x86_aesni_aesdeclast, loadv2i64, 0>, VEX_4V;
}

let Constraints = "$src1 = $dst" in {
  defm AESENC     : AESI_binop_rm_int<0xDC, "aesenc",
                                      int_x86_aesni_aesenc, memopv2i64>;
  defm AESENCLAST : AESI_binop_rm_int<0xDD, "aesenclast",
                                      int_x86_aesni_aesenclast, memopv2i64>;
  defm AESDEC     : AESI_binop_rm_int<0xDE, "aesdec",
                                      int_x86_aesni_aesdec, memopv2i64>;
  defm AESDECLAST : AESI_binop_rm_int<0xDF, "aesdeclast",
                                      int_x86_aesni_aesdeclast, memopv2i64>;
}
// Perform the AES InvMixColumn Transformation
let Predicates = [HasAVX, HasAES] in {
def VAESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
                  (ins VR128:$src1),
                  "vaesimc\t{$src1, $dst|$dst, $src1}",
                  [(set VR128:$dst,
                    (int_x86_aesni_aesimc VR128:$src1))]>, Sched<[WriteAESIMC]>,
                  VEX;
def VAESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
                  (ins i128mem:$src1),
                  "vaesimc\t{$src1, $dst|$dst, $src1}",
                  [(set VR128:$dst, (int_x86_aesni_aesimc (loadv2i64 addr:$src1)))]>,
                  Sched<[WriteAESIMCLd]>, VEX;
}
def AESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
                 (ins VR128:$src1),
                 "aesimc\t{$src1, $dst|$dst, $src1}",
                 [(set VR128:$dst,
                   (int_x86_aesni_aesimc VR128:$src1))]>, Sched<[WriteAESIMC]>;
def AESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
                 (ins i128mem:$src1),
                 "aesimc\t{$src1, $dst|$dst, $src1}",
                 [(set VR128:$dst, (int_x86_aesni_aesimc (memopv2i64 addr:$src1)))]>,
                 Sched<[WriteAESIMCLd]>;
// AES Round Key Generation Assist
let Predicates = [HasAVX, HasAES] in {
def VAESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
                                  (ins VR128:$src1, u8imm:$src2),
    "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
    [(set VR128:$dst,
      (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
    Sched<[WriteAESKeyGen]>, VEX;
def VAESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
                                  (ins i128mem:$src1, u8imm:$src2),
    "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
    [(set VR128:$dst,
      (int_x86_aesni_aeskeygenassist (loadv2i64 addr:$src1), imm:$src2))]>,
    Sched<[WriteAESKeyGenLd]>, VEX;
}
def AESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
                                 (ins VR128:$src1, u8imm:$src2),
    "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
    [(set VR128:$dst,
      (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
    Sched<[WriteAESKeyGen]>;
def AESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
                                 (ins i128mem:$src1, u8imm:$src2),
    "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
    [(set VR128:$dst,
      (int_x86_aesni_aeskeygenassist (memopv2i64 addr:$src1), imm:$src2))]>,
    Sched<[WriteAESKeyGenLd]>;
//===----------------------------------------------------------------------===//
// PCLMUL Instructions
//===----------------------------------------------------------------------===//

// AVX carry-less Multiplication instructions
let isCommutable = 1 in
def VPCLMULQDQrr : AVXPCLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
           (ins VR128:$src1, VR128:$src2, u8imm:$src3),
           "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
           [(set VR128:$dst,
             (int_x86_pclmulqdq VR128:$src1, VR128:$src2, imm:$src3))]>,
           Sched<[WriteCLMul]>;

def VPCLMULQDQrm : AVXPCLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
           (ins VR128:$src1, i128mem:$src2, u8imm:$src3),
           "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
           [(set VR128:$dst, (int_x86_pclmulqdq VR128:$src1,
                              (loadv2i64 addr:$src2), imm:$src3))]>,
           Sched<[WriteCLMulLd, ReadAfterLd]>;

// Carry-less Multiplication instructions
let Constraints = "$src1 = $dst" in {
let isCommutable = 1 in
def PCLMULQDQrr : PCLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
           (ins VR128:$src1, VR128:$src2, u8imm:$src3),
           "pclmulqdq\t{$src3, $src2, $dst|$dst, $src2, $src3}",
           [(set VR128:$dst,
             (int_x86_pclmulqdq VR128:$src1, VR128:$src2, imm:$src3))],
           IIC_SSE_PCLMULQDQ_RR>, Sched<[WriteCLMul]>;

def PCLMULQDQrm : PCLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
           (ins VR128:$src1, i128mem:$src2, u8imm:$src3),
           "pclmulqdq\t{$src3, $src2, $dst|$dst, $src2, $src3}",
           [(set VR128:$dst, (int_x86_pclmulqdq VR128:$src1,
                              (memopv2i64 addr:$src2), imm:$src3))],
           IIC_SSE_PCLMULQDQ_RM>,
           Sched<[WriteCLMulLd, ReadAfterLd]>;
} // Constraints = "$src1 = $dst"
multiclass pclmul_alias<string asm, int immop> {
  def : InstAlias<!strconcat("pclmul", asm, "dq {$src, $dst|$dst, $src}"),
                  (PCLMULQDQrr VR128:$dst, VR128:$src, immop), 0>;

  def : InstAlias<!strconcat("pclmul", asm, "dq {$src, $dst|$dst, $src}"),
                  (PCLMULQDQrm VR128:$dst, i128mem:$src, immop), 0>;

  def : InstAlias<!strconcat("vpclmul", asm,
                             "dq {$src2, $src1, $dst|$dst, $src1, $src2}"),
                  (VPCLMULQDQrr VR128:$dst, VR128:$src1, VR128:$src2, immop),
                  0>;

  def : InstAlias<!strconcat("vpclmul", asm,
                             "dq {$src2, $src1, $dst|$dst, $src1, $src2}"),
                  (VPCLMULQDQrm VR128:$dst, VR128:$src1, i128mem:$src2, immop),
                  0>;
}
defm : pclmul_alias<"hqhq", 0x11>;
defm : pclmul_alias<"hqlq", 0x01>;
defm : pclmul_alias<"lqhq", 0x10>;
defm : pclmul_alias<"lqlq", 0x00>;
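// The PCLMULQDQ immediate picks one quadword from each source: imm8 bit 0
// selects the low or high qword of one operand and bit 4 that of the other,
// so the four aliases above cover 0x00/0x01/0x10/0x11; e.g. "pclmullqlqdq"
// (0x00) multiplies the two low quadwords.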
//===----------------------------------------------------------------------===//
// SSE4A Instructions
//===----------------------------------------------------------------------===//

let Predicates = [HasSSE4A] in {

let Constraints = "$src = $dst" in {
def EXTRQI : Ii8<0x78, MRMXr, (outs VR128:$dst),
                 (ins VR128:$src, u8imm:$len, u8imm:$idx),
                 "extrq\t{$idx, $len, $src|$src, $len, $idx}",
                 [(set VR128:$dst, (X86extrqi VR128:$src, imm:$len,
                                    imm:$idx))]>, PD;
def EXTRQ  : I<0x79, MRMSrcReg, (outs VR128:$dst),
               (ins VR128:$src, VR128:$mask),
               "extrq\t{$mask, $src|$src, $mask}",
               [(set VR128:$dst, (int_x86_sse4a_extrq VR128:$src,
                                  VR128:$mask))]>, PD;

def INSERTQI : Ii8<0x78, MRMSrcReg, (outs VR128:$dst),
                   (ins VR128:$src, VR128:$src2, u8imm:$len, u8imm:$idx),
                   "insertq\t{$idx, $len, $src2, $src|$src, $src2, $len, $idx}",
                   [(set VR128:$dst, (X86insertqi VR128:$src, VR128:$src2,
                                      imm:$len, imm:$idx))]>, XD;
def INSERTQ  : I<0x79, MRMSrcReg, (outs VR128:$dst),
                 (ins VR128:$src, VR128:$mask),
                 "insertq\t{$mask, $src|$src, $mask}",
                 [(set VR128:$dst, (int_x86_sse4a_insertq VR128:$src,
                                    VR128:$mask))]>, XD;
}

def MOVNTSS : I<0x2B, MRMDestMem, (outs), (ins f32mem:$dst, VR128:$src),
                "movntss\t{$src, $dst|$dst, $src}",
                [(int_x86_sse4a_movnt_ss addr:$dst, VR128:$src)]>, XS;

def MOVNTSD : I<0x2B, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                "movntsd\t{$src, $dst|$dst, $src}",
                [(int_x86_sse4a_movnt_sd addr:$dst, VR128:$src)]>, XD;
}
//===----------------------------------------------------------------------===//
// AVX Instructions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// VBROADCAST - Load from memory and broadcast to all elements of the
// destination operand
//
class avx_broadcast_rm<bits<8> opc, string OpcodeStr, RegisterClass RC,
                       X86MemOperand x86memop, ValueType VT,
                       PatFrag ld_frag, SchedWrite Sched> :
  AVX8I<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
        !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
        [(set RC:$dst, (VT (X86VBroadcast (ld_frag addr:$src))))]>,
        Sched<[Sched]>, VEX;

// AVX2 adds register forms
class avx2_broadcast_rr<bits<8> opc, string OpcodeStr, RegisterClass RC,
                        ValueType ResVT, ValueType OpVT, SchedWrite Sched> :
  AVX28I<opc, MRMSrcReg, (outs RC:$dst), (ins VR128:$src),
         !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
         [(set RC:$dst, (ResVT (X86VBroadcast (OpVT VR128:$src))))]>,
         Sched<[Sched]>, VEX;

let ExeDomain = SSEPackedSingle in {
  def VBROADCASTSSrm  : avx_broadcast_rm<0x18, "vbroadcastss", VR128,
                                         f32mem, v4f32, loadf32, WriteLoad>;
  def VBROADCASTSSYrm : avx_broadcast_rm<0x18, "vbroadcastss", VR256,
                                         f32mem, v8f32, loadf32,
                                         WriteFShuffleLd>, VEX_L;
}
let ExeDomain = SSEPackedDouble in
def VBROADCASTSDYrm : avx_broadcast_rm<0x19, "vbroadcastsd", VR256, f64mem,
                                       v4f64, loadf64, WriteFShuffleLd>, VEX_L;

let ExeDomain = SSEPackedSingle in {
  def VBROADCASTSSrr  : avx2_broadcast_rr<0x18, "vbroadcastss", VR128,
                                          v4f32, v4f32, WriteFShuffle>;
  def VBROADCASTSSYrr : avx2_broadcast_rr<0x18, "vbroadcastss", VR256,
                                          v8f32, v4f32, WriteFShuffle256>, VEX_L;
}
let ExeDomain = SSEPackedDouble in
def VBROADCASTSDYrr : avx2_broadcast_rr<0x19, "vbroadcastsd", VR256,
                                        v4f64, v2f64, WriteFShuffle256>, VEX_L;

let mayLoad = 1, hasSideEffects = 0, Predicates = [HasAVX2] in
def VBROADCASTI128 : AVX8I<0x5A, MRMSrcMem, (outs VR256:$dst),
                           (ins i128mem:$src),
                           "vbroadcasti128\t{$src, $dst|$dst, $src}", []>,
                           Sched<[WriteLoad]>, VEX, VEX_L;

def VBROADCASTF128 : AVX8I<0x1A, MRMSrcMem, (outs VR256:$dst),
                           (ins f128mem:$src),
                           "vbroadcastf128\t{$src, $dst|$dst, $src}",
                           [(set VR256:$dst,
                              (int_x86_avx_vbroadcastf128_pd_256 addr:$src))]>,
                           Sched<[WriteFShuffleLd]>, VEX, VEX_L;

let Predicates = [HasAVX] in
def : Pat<(int_x86_avx_vbroadcastf128_ps_256 addr:$src),
          (VBROADCASTF128 addr:$src)>;
//===----------------------------------------------------------------------===//
// VINSERTF128 - Insert packed floating-point values
//
let hasSideEffects = 0, ExeDomain = SSEPackedSingle in {
def VINSERTF128rr : AVXAIi8<0x18, MRMSrcReg, (outs VR256:$dst),
          (ins VR256:$src1, VR128:$src2, u8imm:$src3),
          "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, Sched<[WriteFShuffle]>, VEX_4V, VEX_L;
let mayLoad = 1 in
def VINSERTF128rm : AVXAIi8<0x18, MRMSrcMem, (outs VR256:$dst),
          (ins VR256:$src1, f128mem:$src2, u8imm:$src3),
          "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, Sched<[WriteFShuffleLd, ReadAfterLd]>, VEX_4V, VEX_L;
}

let Predicates = [HasAVX, NoVLX] in {
def : Pat<(vinsert128_insert:$ins (v8f32 VR256:$src1), (v4f32 VR128:$src2),
                                   (iPTR imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsert128_imm VR256:$ins))>;
def : Pat<(vinsert128_insert:$ins (v4f64 VR256:$src1), (v2f64 VR128:$src2),
                                   (iPTR imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsert128_imm VR256:$ins))>;

def : Pat<(vinsert128_insert:$ins (v8f32 VR256:$src1), (loadv4f32 addr:$src2),
                                   (iPTR imm)),
          (VINSERTF128rm VR256:$src1, addr:$src2,
                         (INSERT_get_vinsert128_imm VR256:$ins))>;
def : Pat<(vinsert128_insert:$ins (v4f64 VR256:$src1), (loadv2f64 addr:$src2),
                                   (iPTR imm)),
          (VINSERTF128rm VR256:$src1, addr:$src2,
                         (INSERT_get_vinsert128_imm VR256:$ins))>;
}
let Predicates = [HasAVX1Only] in {
def : Pat<(vinsert128_insert:$ins (v4i64 VR256:$src1), (v2i64 VR128:$src2),
                                   (iPTR imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsert128_imm VR256:$ins))>;
def : Pat<(vinsert128_insert:$ins (v8i32 VR256:$src1), (v4i32 VR128:$src2),
                                   (iPTR imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsert128_imm VR256:$ins))>;
def : Pat<(vinsert128_insert:$ins (v32i8 VR256:$src1), (v16i8 VR128:$src2),
                                   (iPTR imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsert128_imm VR256:$ins))>;
def : Pat<(vinsert128_insert:$ins (v16i16 VR256:$src1), (v8i16 VR128:$src2),
                                   (iPTR imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsert128_imm VR256:$ins))>;

def : Pat<(vinsert128_insert:$ins (v4i64 VR256:$src1), (loadv2i64 addr:$src2),
                                   (iPTR imm)),
          (VINSERTF128rm VR256:$src1, addr:$src2,
                         (INSERT_get_vinsert128_imm VR256:$ins))>;
def : Pat<(vinsert128_insert:$ins (v8i32 VR256:$src1),
                                  (bc_v4i32 (loadv2i64 addr:$src2)),
                                   (iPTR imm)),
          (VINSERTF128rm VR256:$src1, addr:$src2,
                         (INSERT_get_vinsert128_imm VR256:$ins))>;
def : Pat<(vinsert128_insert:$ins (v32i8 VR256:$src1),
                                  (bc_v16i8 (loadv2i64 addr:$src2)),
                                   (iPTR imm)),
          (VINSERTF128rm VR256:$src1, addr:$src2,
                         (INSERT_get_vinsert128_imm VR256:$ins))>;
def : Pat<(vinsert128_insert:$ins (v16i16 VR256:$src1),
                                  (bc_v8i16 (loadv2i64 addr:$src2)),
                                   (iPTR imm)),
          (VINSERTF128rm VR256:$src1, addr:$src2,
                         (INSERT_get_vinsert128_imm VR256:$ins))>;
}
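// Illustrative assembly for the insert patterns above:
//   vinsertf128 $1, %xmm1, %ymm2, %ymm0   # ymm0 = { low128(ymm2), xmm1 }
// An immediate of 0 replaces the low 128-bit lane instead, with the untouched
// lane copied from the first source.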
//===----------------------------------------------------------------------===//
// VEXTRACTF128 - Extract packed floating-point values
//
let hasSideEffects = 0, ExeDomain = SSEPackedSingle in {
def VEXTRACTF128rr : AVXAIi8<0x19, MRMDestReg, (outs VR128:$dst),
          (ins VR256:$src1, u8imm:$src2),
          "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
          []>, Sched<[WriteFShuffle]>, VEX, VEX_L;
let mayStore = 1 in
def VEXTRACTF128mr : AVXAIi8<0x19, MRMDestMem, (outs),
          (ins f128mem:$dst, VR256:$src1, u8imm:$src2),
          "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
          []>, Sched<[WriteStore]>, VEX, VEX_L;
}

// AVX1 patterns
let Predicates = [HasAVX] in {
def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
          (v4f32 (VEXTRACTF128rr
                    (v8f32 VR256:$src1),
                    (EXTRACT_get_vextract128_imm VR128:$ext)))>;
def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
          (v2f64 (VEXTRACTF128rr
                    (v4f64 VR256:$src1),
                    (EXTRACT_get_vextract128_imm VR128:$ext)))>;

def : Pat<(store (v4f32 (vextract128_extract:$ext (v8f32 VR256:$src1),
                                                  (iPTR imm))), addr:$dst),
          (VEXTRACTF128mr addr:$dst, VR256:$src1,
                          (EXTRACT_get_vextract128_imm VR128:$ext))>;
def : Pat<(store (v2f64 (vextract128_extract:$ext (v4f64 VR256:$src1),
                                                  (iPTR imm))), addr:$dst),
          (VEXTRACTF128mr addr:$dst, VR256:$src1,
                          (EXTRACT_get_vextract128_imm VR128:$ext))>;
}
let Predicates = [HasAVX1Only] in {
def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
          (v2i64 (VEXTRACTF128rr
                    (v4i64 VR256:$src1),
                    (EXTRACT_get_vextract128_imm VR128:$ext)))>;
def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
          (v4i32 (VEXTRACTF128rr
                    (v8i32 VR256:$src1),
                    (EXTRACT_get_vextract128_imm VR128:$ext)))>;
def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
          (v8i16 (VEXTRACTF128rr
                    (v16i16 VR256:$src1),
                    (EXTRACT_get_vextract128_imm VR128:$ext)))>;
def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
          (v16i8 (VEXTRACTF128rr
                    (v32i8 VR256:$src1),
                    (EXTRACT_get_vextract128_imm VR128:$ext)))>;

def : Pat<(alignedstore (v2i64 (vextract128_extract:$ext (v4i64 VR256:$src1),
                                                          (iPTR imm))), addr:$dst),
          (VEXTRACTF128mr addr:$dst, VR256:$src1,
                          (EXTRACT_get_vextract128_imm VR128:$ext))>;
def : Pat<(alignedstore (v4i32 (vextract128_extract:$ext (v8i32 VR256:$src1),
                                                          (iPTR imm))), addr:$dst),
          (VEXTRACTF128mr addr:$dst, VR256:$src1,
                          (EXTRACT_get_vextract128_imm VR128:$ext))>;
def : Pat<(alignedstore (v8i16 (vextract128_extract:$ext (v16i16 VR256:$src1),
                                                          (iPTR imm))), addr:$dst),
          (VEXTRACTF128mr addr:$dst, VR256:$src1,
                          (EXTRACT_get_vextract128_imm VR128:$ext))>;
def : Pat<(alignedstore (v16i8 (vextract128_extract:$ext (v32i8 VR256:$src1),
                                                          (iPTR imm))), addr:$dst),
          (VEXTRACTF128mr addr:$dst, VR256:$src1,
                          (EXTRACT_get_vextract128_imm VR128:$ext))>;
}
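// Illustrative assembly: the register form extracts a 128-bit lane, and the
// memory form stores it directly:
//   vextractf128 $1, %ymm0, %xmm1    # upper lane of ymm0 -> xmm1
//   vextractf128 $1, %ymm0, (%rdi)   # upper lane of ymm0 -> memory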
//===----------------------------------------------------------------------===//
// VMASKMOV - Conditional SIMD Packed Loads and Stores
//
multiclass avx_movmask_rm<bits<8> opc_rm, bits<8> opc_mr, string OpcodeStr,
                          Intrinsic IntLd, Intrinsic IntLd256,
                          Intrinsic IntSt, Intrinsic IntSt256> {
  def rm  : AVX8I<opc_rm, MRMSrcMem, (outs VR128:$dst),
             (ins VR128:$src1, f128mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR128:$dst, (IntLd addr:$src2, VR128:$src1))]>,
             VEX_4V;
  def Yrm : AVX8I<opc_rm, MRMSrcMem, (outs VR256:$dst),
             (ins VR256:$src1, f256mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR256:$dst, (IntLd256 addr:$src2, VR256:$src1))]>,
             VEX_4V, VEX_L;
  def mr  : AVX8I<opc_mr, MRMDestMem, (outs),
             (ins f128mem:$dst, VR128:$src1, VR128:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(IntSt addr:$dst, VR128:$src1, VR128:$src2)]>, VEX_4V;
  def Ymr : AVX8I<opc_mr, MRMDestMem, (outs),
             (ins f256mem:$dst, VR256:$src1, VR256:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(IntSt256 addr:$dst, VR256:$src1, VR256:$src2)]>, VEX_4V, VEX_L;
}

let ExeDomain = SSEPackedSingle in
defm VMASKMOVPS : avx_movmask_rm<0x2C, 0x2E, "vmaskmovps",
                                 int_x86_avx_maskload_ps,
                                 int_x86_avx_maskload_ps_256,
                                 int_x86_avx_maskstore_ps,
                                 int_x86_avx_maskstore_ps_256>;
let ExeDomain = SSEPackedDouble in
defm VMASKMOVPD : avx_movmask_rm<0x2D, 0x2F, "vmaskmovpd",
                                 int_x86_avx_maskload_pd,
                                 int_x86_avx_maskload_pd_256,
                                 int_x86_avx_maskstore_pd,
                                 int_x86_avx_maskstore_pd_256>;
//===----------------------------------------------------------------------===//
// VPERMIL - Permute Single and Double Floating-Point Values
//
multiclass avx_permil<bits<8> opc_rm, bits<8> opc_rmi, string OpcodeStr,
                      RegisterClass RC, X86MemOperand x86memop_f,
                      X86MemOperand x86memop_i, PatFrag i_frag,
                      Intrinsic IntVar, ValueType vt> {
  def rr  : AVX8I<opc_rm, MRMSrcReg, (outs RC:$dst),
             (ins RC:$src1, RC:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst, (IntVar RC:$src1, RC:$src2))]>, VEX_4V,
             Sched<[WriteFShuffle]>;
  def rm  : AVX8I<opc_rm, MRMSrcMem, (outs RC:$dst),
             (ins RC:$src1, x86memop_i:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst, (IntVar RC:$src1,
                             (bitconvert (i_frag addr:$src2))))]>, VEX_4V,
             Sched<[WriteFShuffleLd, ReadAfterLd]>;

  let Predicates = [HasAVX, NoVLX] in {
    def ri  : AVXAIi8<opc_rmi, MRMSrcReg, (outs RC:$dst),
             (ins RC:$src1, u8imm:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst, (vt (X86VPermilpi RC:$src1, (i8 imm:$src2))))]>, VEX,
             Sched<[WriteFShuffle]>;
    def mi  : AVXAIi8<opc_rmi, MRMSrcMem, (outs RC:$dst),
             (ins x86memop_f:$src1, u8imm:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst,
               (vt (X86VPermilpi (load addr:$src1), (i8 imm:$src2))))]>, VEX,
             Sched<[WriteFShuffleLd]>;
  }// Predicates = [HasAVX, NoVLX]
}
let ExeDomain = SSEPackedSingle in {
  defm VPERMILPS  : avx_permil<0x0C, 0x04, "vpermilps", VR128, f128mem, i128mem,
                               loadv2i64, int_x86_avx_vpermilvar_ps, v4f32>;
  defm VPERMILPSY : avx_permil<0x0C, 0x04, "vpermilps", VR256, f256mem, i256mem,
                               loadv4i64, int_x86_avx_vpermilvar_ps_256, v8f32>, VEX_L;
}
let ExeDomain = SSEPackedDouble in {
  defm VPERMILPD  : avx_permil<0x0D, 0x05, "vpermilpd", VR128, f128mem, i128mem,
                               loadv2i64, int_x86_avx_vpermilvar_pd, v2f64>;
  defm VPERMILPDY : avx_permil<0x0D, 0x05, "vpermilpd", VR256, f256mem, i256mem,
                               loadv4i64, int_x86_avx_vpermilvar_pd_256, v4f64>, VEX_L;
}
let Predicates = [HasAVX, NoVLX] in {
def : Pat<(v8f32 (X86VPermilpv VR256:$src1, (v8i32 VR256:$src2))),
          (VPERMILPSYrr VR256:$src1, VR256:$src2)>;
def : Pat<(v8f32 (X86VPermilpv VR256:$src1, (bc_v8i32 (loadv4i64 addr:$src2)))),
          (VPERMILPSYrm VR256:$src1, addr:$src2)>;
def : Pat<(v4f64 (X86VPermilpv VR256:$src1, (v4i64 VR256:$src2))),
          (VPERMILPDYrr VR256:$src1, VR256:$src2)>;
def : Pat<(v4f64 (X86VPermilpv VR256:$src1, (loadv4i64 addr:$src2))),
          (VPERMILPDYrm VR256:$src1, addr:$src2)>;

def : Pat<(v8i32 (X86VPermilpi VR256:$src1, (i8 imm:$imm))),
          (VPERMILPSYri VR256:$src1, imm:$imm)>;
def : Pat<(v4i64 (X86VPermilpi VR256:$src1, (i8 imm:$imm))),
          (VPERMILPDYri VR256:$src1, imm:$imm)>;
def : Pat<(v8i32 (X86VPermilpi (bc_v8i32 (loadv4i64 addr:$src1)),
                               (i8 imm:$imm))),
          (VPERMILPSYmi addr:$src1, imm:$imm)>;
def : Pat<(v4i64 (X86VPermilpi (loadv4i64 addr:$src1), (i8 imm:$imm))),
          (VPERMILPDYmi addr:$src1, imm:$imm)>;

def : Pat<(v4f32 (X86VPermilpv VR128:$src1, (v4i32 VR128:$src2))),
          (VPERMILPSrr VR128:$src1, VR128:$src2)>;
def : Pat<(v4f32 (X86VPermilpv VR128:$src1, (bc_v4i32 (loadv2i64 addr:$src2)))),
          (VPERMILPSrm VR128:$src1, addr:$src2)>;
def : Pat<(v2f64 (X86VPermilpv VR128:$src1, (v2i64 VR128:$src2))),
          (VPERMILPDrr VR128:$src1, VR128:$src2)>;
def : Pat<(v2f64 (X86VPermilpv VR128:$src1, (loadv2i64 addr:$src2))),
          (VPERMILPDrm VR128:$src1, addr:$src2)>;

def : Pat<(v2i64 (X86VPermilpi VR128:$src1, (i8 imm:$imm))),
          (VPERMILPDri VR128:$src1, imm:$imm)>;
def : Pat<(v2i64 (X86VPermilpi (loadv2i64 addr:$src1), (i8 imm:$imm))),
          (VPERMILPDmi addr:$src1, imm:$imm)>;
}
//===----------------------------------------------------------------------===//
// VPERM2F128 - Permute Floating-Point Values in 128-bit chunks
//
let ExeDomain = SSEPackedSingle in {
def VPERM2F128rr : AVXAIi8<0x06, MRMSrcReg, (outs VR256:$dst),
          (ins VR256:$src1, VR256:$src2, u8imm:$src3),
          "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          [(set VR256:$dst, (v8f32 (X86VPerm2x128 VR256:$src1, VR256:$src2,
                                    (i8 imm:$src3))))]>, VEX_4V, VEX_L,
          Sched<[WriteFShuffle]>;
def VPERM2F128rm : AVXAIi8<0x06, MRMSrcMem, (outs VR256:$dst),
          (ins VR256:$src1, f256mem:$src2, u8imm:$src3),
          "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          [(set VR256:$dst, (X86VPerm2x128 VR256:$src1, (loadv8f32 addr:$src2),
                             (i8 imm:$src3)))]>, VEX_4V, VEX_L,
          Sched<[WriteFShuffleLd, ReadAfterLd]>;
}
let Predicates = [HasAVX] in {
def : Pat<(v4f64 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v4f64 (X86VPerm2x128 VR256:$src1,
                  (loadv4f64 addr:$src2), (i8 imm:$imm))),
          (VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>;
}
let Predicates = [HasAVX1Only] in {
def : Pat<(v8i32 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v4i64 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v32i8 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v16i16 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;

def : Pat<(v8i32 (X86VPerm2x128 VR256:$src1,
                  (bc_v8i32 (loadv4i64 addr:$src2)), (i8 imm:$imm))),
          (VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>;
def : Pat<(v4i64 (X86VPerm2x128 VR256:$src1,
                  (loadv4i64 addr:$src2), (i8 imm:$imm))),
          (VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>;
def : Pat<(v32i8 (X86VPerm2x128 VR256:$src1,
                  (bc_v32i8 (loadv4i64 addr:$src2)), (i8 imm:$imm))),
          (VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>;
def : Pat<(v16i16 (X86VPerm2x128 VR256:$src1,
                   (bc_v16i16 (loadv4i64 addr:$src2)), (i8 imm:$imm))),
          (VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>;
}
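// VPERM2F128 immediate encoding, for reference: bits 1:0 pick the source of
// the low destination lane (0-1 = a lane of $src1, 2-3 = a lane of $src2),
// bits 5:4 do the same for the high lane, and bits 3 and 7 zero the
// corresponding lane; e.g. imm 0x01 swaps the two lanes of $src1.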
//===----------------------------------------------------------------------===//
// VZERO - Zero YMM registers
//
let Defs = [YMM0, YMM1, YMM2, YMM3, YMM4, YMM5, YMM6, YMM7,
            YMM8, YMM9, YMM10, YMM11, YMM12, YMM13, YMM14, YMM15] in {
  // Zero All YMM registers
  def VZEROALL : I<0x77, RawFrm, (outs), (ins), "vzeroall",
                   [(int_x86_avx_vzeroall)]>, PS, VEX, VEX_L, Requires<[HasAVX]>;

  // Zero Upper bits of YMM registers
  def VZEROUPPER : I<0x77, RawFrm, (outs), (ins), "vzeroupper",
                     [(int_x86_avx_vzeroupper)]>, PS, VEX, Requires<[HasAVX]>;
}
//===----------------------------------------------------------------------===//
// Half precision conversion instructions
//===----------------------------------------------------------------------===//
multiclass f16c_ph2ps<RegisterClass RC, X86MemOperand x86memop, Intrinsic Int> {
  def rr : I<0x13, MRMSrcReg, (outs RC:$dst), (ins VR128:$src),
             "vcvtph2ps\t{$src, $dst|$dst, $src}",
             [(set RC:$dst, (Int VR128:$src))]>,
             T8PD, VEX, Sched<[WriteCvtF2F]>;
  let hasSideEffects = 0, mayLoad = 1 in
  def rm : I<0x13, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
             "vcvtph2ps\t{$src, $dst|$dst, $src}", []>, T8PD, VEX,
             Sched<[WriteCvtF2FLd]>;
}

multiclass f16c_ps2ph<RegisterClass RC, X86MemOperand x86memop, Intrinsic Int> {
  def rr : Ii8<0x1D, MRMDestReg, (outs VR128:$dst),
               (ins RC:$src1, i32u8imm:$src2),
               "vcvtps2ph\t{$src2, $src1, $dst|$dst, $src1, $src2}",
               [(set VR128:$dst, (Int RC:$src1, imm:$src2))]>,
               TAPD, VEX, Sched<[WriteCvtF2F]>;
  let hasSideEffects = 0, mayStore = 1,
      SchedRW = [WriteCvtF2FLd, WriteRMW] in
  def mr : Ii8<0x1D, MRMDestMem, (outs),
               (ins x86memop:$dst, RC:$src1, i32u8imm:$src2),
               "vcvtps2ph\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
               TAPD, VEX;
}
let Predicates = [HasF16C] in {
  defm VCVTPH2PS  : f16c_ph2ps<VR128, f64mem, int_x86_vcvtph2ps_128>;
  defm VCVTPH2PSY : f16c_ph2ps<VR256, f128mem, int_x86_vcvtph2ps_256>, VEX_L;
  defm VCVTPS2PH  : f16c_ps2ph<VR128, f64mem, int_x86_vcvtps2ph_128>;
  defm VCVTPS2PHY : f16c_ps2ph<VR256, f128mem, int_x86_vcvtps2ph_256>, VEX_L;

  // Pattern match vcvtph2ps of a scalar i64 load.
  def : Pat<(int_x86_vcvtph2ps_128 (vzmovl_v2i64 addr:$src)),
            (VCVTPH2PSrm addr:$src)>;
  def : Pat<(int_x86_vcvtph2ps_128 (vzload_v2i64 addr:$src)),
            (VCVTPH2PSrm addr:$src)>;

  def : Pat<(store (f64 (extractelt (bc_v2f64 (v8i16
                    (int_x86_vcvtps2ph_128 VR128:$src1, i32:$src2))), (iPTR 0))),
                   addr:$dst),
            (VCVTPS2PHmr addr:$dst, VR128:$src1, imm:$src2)>;
  def : Pat<(store (i64 (extractelt (bc_v2i64 (v8i16
                    (int_x86_vcvtps2ph_128 VR128:$src1, i32:$src2))), (iPTR 0))),
                   addr:$dst),
            (VCVTPS2PHmr addr:$dst, VR128:$src1, imm:$src2)>;
  def : Pat<(store (v8i16 (int_x86_vcvtps2ph_256 VR256:$src1, i32:$src2)),
                   addr:$dst),
            (VCVTPS2PHYmr addr:$dst, VR256:$src1, imm:$src2)>;
}
// Patterns for matching conversions from float to half-float and vice versa.
let Predicates = [HasF16C] in {
  def : Pat<(fp_to_f16 FR32:$src),
            (i16 (EXTRACT_SUBREG (VMOVPDI2DIrr (VCVTPS2PHrr
              (COPY_TO_REGCLASS FR32:$src, VR128), 0)), sub_16bit))>;

  def : Pat<(f16_to_fp GR16:$src),
            (f32 (COPY_TO_REGCLASS (VCVTPH2PSrr
              (COPY_TO_REGCLASS (MOVSX32rr16 GR16:$src), VR128)), FR32))>;

  def : Pat<(f16_to_fp (i16 (fp_to_f16 FR32:$src))),
            (f32 (COPY_TO_REGCLASS (VCVTPH2PSrr
              (VCVTPS2PHrr (COPY_TO_REGCLASS FR32:$src, VR128), 0)), FR32))>;
}
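// Note on the rounding control used above: the immediate 0 passed to
// VCVTPS2PHrr leaves imm8 bit 2 clear, so the float-to-half conversion rounds
// according to the current MXCSR rounding mode rather than a statically
// encoded one.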
//===----------------------------------------------------------------------===//
// AVX2 Instructions
//===----------------------------------------------------------------------===//

/// AVX2_binop_rmi - AVX2 binary operator with 8-bit immediate
multiclass AVX2_binop_rmi<bits<8> opc, string OpcodeStr, SDNode OpNode,
                          ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
                          X86MemOperand x86memop> {
  let isCommutable = 1 in
  def rri : AVX2AIi8<opc, MRMSrcReg, (outs RC:$dst),
        (ins RC:$src1, RC:$src2, u8imm:$src3),
        !strconcat(OpcodeStr,
            "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
        [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2, imm:$src3)))]>,
        Sched<[WriteBlend]>, VEX_4V;
  def rmi : AVX2AIi8<opc, MRMSrcMem, (outs RC:$dst),
        (ins RC:$src1, x86memop:$src2, u8imm:$src3),
        !strconcat(OpcodeStr,
            "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
        [(set RC:$dst,
          (OpVT (OpNode RC:$src1,
                 (bitconvert (memop_frag addr:$src2)), imm:$src3)))]>,
        Sched<[WriteBlendLd, ReadAfterLd]>, VEX_4V;
}

defm VPBLENDD : AVX2_binop_rmi<0x02, "vpblendd", X86Blendi, v4i32,
                               VR128, loadv2i64, i128mem>;
defm VPBLENDDY : AVX2_binop_rmi<0x02, "vpblendd", X86Blendi, v8i32,
                                VR256, loadv4i64, i256mem>, VEX_L;
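
// For vpblendd, bit i of the immediate selects result element i: clear picks
// the element from $src1, set picks it from $src2. E.g. (AT&T syntax)
// vpblendd $0x3, %xmm1, %xmm0, %xmm2 takes elements 0-1 from %xmm1 and
// elements 2-3 from %xmm0.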

//===----------------------------------------------------------------------===//
// VPBROADCAST - Load from memory and broadcast to all elements of the
//               destination operand
//===----------------------------------------------------------------------===//
multiclass avx2_broadcast<bits<8> opc, string OpcodeStr,
                          X86MemOperand x86memop, PatFrag ld_frag,
                          ValueType OpVT128, ValueType OpVT256, Predicate prd> {
  let Predicates = [HasAVX2, prd] in {
    def rr : AVX28I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst,
                      (OpVT128 (X86VBroadcast (OpVT128 VR128:$src))))]>,
                    Sched<[WriteShuffle]>, VEX;
    def rm : AVX28I<opc, MRMSrcMem, (outs VR128:$dst), (ins x86memop:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst,
                      (OpVT128 (X86VBroadcast (ld_frag addr:$src))))]>,
                    Sched<[WriteLoad]>, VEX;
    def Yrr : AVX28I<opc, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
                     !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                     [(set VR256:$dst,
                       (OpVT256 (X86VBroadcast (OpVT128 VR128:$src))))]>,
                     Sched<[WriteShuffle256]>, VEX, VEX_L;
    def Yrm : AVX28I<opc, MRMSrcMem, (outs VR256:$dst), (ins x86memop:$src),
                     !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                     [(set VR256:$dst,
                       (OpVT256 (X86VBroadcast (ld_frag addr:$src))))]>,
                     Sched<[WriteLoad]>, VEX, VEX_L;

    // Provide aliases for broadcast from the same register class that
    // automatically do the extract of the low 128 bits.
    def : Pat<(OpVT256 (X86VBroadcast (OpVT256 VR256:$src))),
              (!cast<Instruction>(NAME#"Yrr")
                (OpVT128 (EXTRACT_SUBREG (OpVT256 VR256:$src), sub_xmm)))>;
  }
}

defm VPBROADCASTB : avx2_broadcast<0x78, "vpbroadcastb", i8mem, loadi8,
                                   v16i8, v32i8, NoVLX_Or_NoBWI>;
defm VPBROADCASTW : avx2_broadcast<0x79, "vpbroadcastw", i16mem, loadi16,
                                   v8i16, v16i16, NoVLX_Or_NoBWI>;
defm VPBROADCASTD : avx2_broadcast<0x58, "vpbroadcastd", i32mem, loadi32,
                                   v4i32, v8i32, NoVLX>;
defm VPBROADCASTQ : avx2_broadcast<0x59, "vpbroadcastq", i64mem, loadi64,
                                   v2i64, v4i64, NoVLX>;
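
// e.g. vpbroadcastd %xmm0, %ymm1 replicates the low doubleword of %xmm0 into
// all eight doubleword elements of %ymm1; the memory forms load one element
// and splat it the same way.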

let Predicates = [HasAVX2] in {
  // loadi16 is tricky to fold because isTypeDesirableForOp (justifiably)
  // returns false for i16. This means we'll encounter truncated i32 loads
  // instead; match them here.
  def : Pat<(v8i16 (X86VBroadcast (i16 (trunc (i32 (load addr:$src)))))),
            (VPBROADCASTWrm addr:$src)>;
  def : Pat<(v16i16 (X86VBroadcast (i16 (trunc (i32 (load addr:$src)))))),
            (VPBROADCASTWYrm addr:$src)>;
  def : Pat<(v8i16 (X86VBroadcast
              (i16 (trunc (i32 (zextloadi16 addr:$src)))))),
            (VPBROADCASTWrm addr:$src)>;
  def : Pat<(v16i16 (X86VBroadcast
              (i16 (trunc (i32 (zextloadi16 addr:$src)))))),
            (VPBROADCASTWYrm addr:$src)>;

  // Provide aliases for broadcast from the same register class that
  // automatically do the extract of the low 128 bits.
  def : Pat<(v8f32 (X86VBroadcast (v8f32 VR256:$src))),
            (VBROADCASTSSYrr (v4f32 (EXTRACT_SUBREG (v8f32 VR256:$src),
                                                    sub_xmm)))>;
  def : Pat<(v4f64 (X86VBroadcast (v4f64 VR256:$src))),
            (VBROADCASTSDYrr (v2f64 (EXTRACT_SUBREG (v4f64 VR256:$src),
                                                    sub_xmm)))>;

  // Provide a fallback in case the load node used by the patterns above has
  // additional users, which would otherwise prevent those patterns from
  // being selected.
  let AddedComplexity = 20 in {
    def : Pat<(v4f32 (X86VBroadcast FR32:$src)),
              (VBROADCASTSSrr (COPY_TO_REGCLASS FR32:$src, VR128))>;
    def : Pat<(v8f32 (X86VBroadcast FR32:$src)),
              (VBROADCASTSSYrr (COPY_TO_REGCLASS FR32:$src, VR128))>;
    def : Pat<(v4f64 (X86VBroadcast FR64:$src)),
              (VBROADCASTSDYrr (COPY_TO_REGCLASS FR64:$src, VR128))>;

    def : Pat<(v4i32 (X86VBroadcast GR32:$src)),
              (VBROADCASTSSrr (COPY_TO_REGCLASS GR32:$src, VR128))>;
    def : Pat<(v8i32 (X86VBroadcast GR32:$src)),
              (VBROADCASTSSYrr (COPY_TO_REGCLASS GR32:$src, VR128))>;
    def : Pat<(v4i64 (X86VBroadcast GR64:$src)),
              (VBROADCASTSDYrr (COPY_TO_REGCLASS GR64:$src, VR128))>;

    def : Pat<(v16i8 (X86VBroadcast GR8:$src)),
              (VPBROADCASTBrr (COPY_TO_REGCLASS
                               (i32 (SUBREG_TO_REG (i32 0), GR8:$src, sub_8bit)),
                               VR128))>;
    def : Pat<(v32i8 (X86VBroadcast GR8:$src)),
              (VPBROADCASTBYrr (COPY_TO_REGCLASS
                                (i32 (SUBREG_TO_REG (i32 0), GR8:$src, sub_8bit)),
                                VR128))>;
    def : Pat<(v8i16 (X86VBroadcast GR16:$src)),
              (VPBROADCASTWrr (COPY_TO_REGCLASS
                               (i32 (SUBREG_TO_REG (i32 0), GR16:$src, sub_16bit)),
                               VR128))>;
    def : Pat<(v16i16 (X86VBroadcast GR16:$src)),
              (VPBROADCASTWYrr (COPY_TO_REGCLASS
                                (i32 (SUBREG_TO_REG (i32 0), GR16:$src, sub_16bit)),
                                VR128))>;

    // The patterns for VPBROADCASTD are not needed because they would match
    // the exact same thing as the VBROADCASTSS patterns.

    def : Pat<(v2i64 (X86VBroadcast GR64:$src)),
              (VPBROADCASTQrr (COPY_TO_REGCLASS GR64:$src, VR128))>;
    // The v4i64 pattern is not needed because VBROADCASTSDYrr already matches.
  }
}

// AVX1 broadcast patterns
let Predicates = [HasAVX1Only] in {
  def : Pat<(v8i32 (X86VBroadcast (loadi32 addr:$src))),
            (VBROADCASTSSYrm addr:$src)>;
  def : Pat<(v4i64 (X86VBroadcast (loadi64 addr:$src))),
            (VBROADCASTSDYrm addr:$src)>;
  def : Pat<(v4i32 (X86VBroadcast (loadi32 addr:$src))),
            (VBROADCASTSSrm addr:$src)>;
}

let Predicates = [HasAVX] in {
  // Provide a fallback in case the load node used by the patterns above has
  // additional users, which would otherwise prevent those patterns from
  // being selected.
  let AddedComplexity = 20 in {
    // 128-bit broadcasts:
    def : Pat<(v4f32 (X86VBroadcast FR32:$src)),
              (VPSHUFDri (COPY_TO_REGCLASS FR32:$src, VR128), 0)>;
    def : Pat<(v8f32 (X86VBroadcast FR32:$src)),
              (VINSERTF128rr (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)),
                (VPSHUFDri (COPY_TO_REGCLASS FR32:$src, VR128), 0), sub_xmm),
                (VPSHUFDri (COPY_TO_REGCLASS FR32:$src, VR128), 0), 1)>;
    def : Pat<(v4f64 (X86VBroadcast FR64:$src)),
              (VINSERTF128rr (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)),
                (VPSHUFDri (COPY_TO_REGCLASS FR64:$src, VR128), 0x44), sub_xmm),
                (VPSHUFDri (COPY_TO_REGCLASS FR64:$src, VR128), 0x44), 1)>;

    def : Pat<(v4i32 (X86VBroadcast GR32:$src)),
              (VPSHUFDri (COPY_TO_REGCLASS GR32:$src, VR128), 0)>;
    def : Pat<(v8i32 (X86VBroadcast GR32:$src)),
              (VINSERTF128rr (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)),
                (VPSHUFDri (COPY_TO_REGCLASS GR32:$src, VR128), 0), sub_xmm),
                (VPSHUFDri (COPY_TO_REGCLASS GR32:$src, VR128), 0), 1)>;
    def : Pat<(v4i64 (X86VBroadcast GR64:$src)),
              (VINSERTF128rr (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)),
                (VPSHUFDri (COPY_TO_REGCLASS GR64:$src, VR128), 0x44), sub_xmm),
                (VPSHUFDri (COPY_TO_REGCLASS GR64:$src, VR128), 0x44), 1)>;
  }

  def : Pat<(v2f64 (X86VBroadcast f64:$src)),
            (VMOVDDUPrr (COPY_TO_REGCLASS FR64:$src, VR128))>;
  def : Pat<(v2i64 (X86VBroadcast i64:$src)),
            (VMOVDDUPrr (COPY_TO_REGCLASS GR64:$src, VR128))>;
}
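
// Without AVX2 there is no register-to-register broadcast instruction, so the
// patterns above synthesize a splat; e.g. an FR32 splat to v8f32 becomes
// vpshufd $0 (replicate within the low 128 bits) followed by vinsertf128 $1
// to duplicate that half into the upper lane.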

//===----------------------------------------------------------------------===//
// VPERM - Permute instructions
//===----------------------------------------------------------------------===//

multiclass avx2_perm<bits<8> opc, string OpcodeStr, PatFrag mem_frag,
                     ValueType OpVT, X86FoldableSchedWrite Sched> {
  def Yrr : AVX28I<opc, MRMSrcReg, (outs VR256:$dst),
                   (ins VR256:$src1, VR256:$src2),
                   !strconcat(OpcodeStr,
                       "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   [(set VR256:$dst,
                     (OpVT (X86VPermv VR256:$src1, VR256:$src2)))]>,
                   Sched<[Sched]>, VEX_4V, VEX_L;
  def Yrm : AVX28I<opc, MRMSrcMem, (outs VR256:$dst),
                   (ins VR256:$src1, i256mem:$src2),
                   !strconcat(OpcodeStr,
                       "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   [(set VR256:$dst,
                     (OpVT (X86VPermv VR256:$src1,
                            (bitconvert (mem_frag addr:$src2)))))]>,
                   Sched<[Sched.Folded, ReadAfterLd]>, VEX_4V, VEX_L;
}

defm VPERMD : avx2_perm<0x36, "vpermd", loadv4i64, v8i32, WriteShuffle256>;
let ExeDomain = SSEPackedSingle in
defm VPERMPS : avx2_perm<0x16, "vpermps", loadv8f32, v8f32, WriteFShuffle256>;

multiclass avx2_perm_imm<bits<8> opc, string OpcodeStr, PatFrag mem_frag,
                         ValueType OpVT, X86FoldableSchedWrite Sched> {
  def Yri : AVX2AIi8<opc, MRMSrcReg, (outs VR256:$dst),
                     (ins VR256:$src1, u8imm:$src2),
                     !strconcat(OpcodeStr,
                         "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                     [(set VR256:$dst,
                       (OpVT (X86VPermi VR256:$src1, (i8 imm:$src2))))]>,
                     Sched<[Sched]>, VEX, VEX_L;
  def Ymi : AVX2AIi8<opc, MRMSrcMem, (outs VR256:$dst),
                     (ins i256mem:$src1, u8imm:$src2),
                     !strconcat(OpcodeStr,
                         "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                     [(set VR256:$dst,
                       (OpVT (X86VPermi (mem_frag addr:$src1),
                              (i8 imm:$src2))))]>,
                     Sched<[Sched.Folded, ReadAfterLd]>, VEX, VEX_L;
}

defm VPERMQ : avx2_perm_imm<0x00, "vpermq", loadv4i64, v4i64,
                            WriteShuffle256>, VEX_W;
let ExeDomain = SSEPackedDouble in
defm VPERMPD : avx2_perm_imm<0x01, "vpermpd", loadv4f64, v4f64,
                             WriteFShuffle256>, VEX_W;
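
// Each 2-bit field of the vpermq/vpermpd immediate selects the source qword
// for the corresponding destination element; e.g. vpermq $0x1b, %ymm0, %ymm1
// (0x1b = 0b00011011) reverses the four qwords.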

//===----------------------------------------------------------------------===//
// VPERM2I128 - Permute Integer Values in 128-bit chunks
//===----------------------------------------------------------------------===//
def VPERM2I128rr : AVX2AIi8<0x46, MRMSrcReg, (outs VR256:$dst),
          (ins VR256:$src1, VR256:$src2, u8imm:$src3),
          "vperm2i128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          [(set VR256:$dst, (v4i64 (X86VPerm2x128 VR256:$src1, VR256:$src2,
                            (i8 imm:$src3))))]>, Sched<[WriteShuffle256]>,
          VEX_4V, VEX_L;
def VPERM2I128rm : AVX2AIi8<0x46, MRMSrcMem, (outs VR256:$dst),
          (ins VR256:$src1, f256mem:$src2, u8imm:$src3),
          "vperm2i128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          [(set VR256:$dst, (X86VPerm2x128 VR256:$src1, (loadv4i64 addr:$src2),
                            (i8 imm:$src3)))]>,
          Sched<[WriteShuffle256Ld, ReadAfterLd]>, VEX_4V, VEX_L;
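
// Immediate selector: bits 1:0 choose the 128-bit half written to the low
// half of $dst (0/1 = low/high half of $src1, 2/3 = low/high half of $src2),
// bits 5:4 do the same for the high half, and setting bit 3 or bit 7 zeroes
// the corresponding destination half instead.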

let Predicates = [HasAVX2] in {
  def : Pat<(v8i32 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
            (VPERM2I128rr VR256:$src1, VR256:$src2, imm:$imm)>;
  def : Pat<(v32i8 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
            (VPERM2I128rr VR256:$src1, VR256:$src2, imm:$imm)>;
  def : Pat<(v16i16 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
            (VPERM2I128rr VR256:$src1, VR256:$src2, imm:$imm)>;

  def : Pat<(v32i8 (X86VPerm2x128 VR256:$src1, (bc_v32i8 (loadv4i64 addr:$src2)),
                    (i8 imm:$imm))),
            (VPERM2I128rm VR256:$src1, addr:$src2, imm:$imm)>;
  def : Pat<(v16i16 (X86VPerm2x128 VR256:$src1,
                     (bc_v16i16 (loadv4i64 addr:$src2)), (i8 imm:$imm))),
            (VPERM2I128rm VR256:$src1, addr:$src2, imm:$imm)>;
  def : Pat<(v8i32 (X86VPerm2x128 VR256:$src1, (bc_v8i32 (loadv4i64 addr:$src2)),
                    (i8 imm:$imm))),
            (VPERM2I128rm VR256:$src1, addr:$src2, imm:$imm)>;
}

//===----------------------------------------------------------------------===//
// VINSERTI128 - Insert packed integer values
//===----------------------------------------------------------------------===//

let hasSideEffects = 0 in {
def VINSERTI128rr : AVX2AIi8<0x38, MRMSrcReg, (outs VR256:$dst),
          (ins VR256:$src1, VR128:$src2, u8imm:$src3),
          "vinserti128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, Sched<[WriteShuffle256]>, VEX_4V, VEX_L;
let mayLoad = 1 in
def VINSERTI128rm : AVX2AIi8<0x38, MRMSrcMem, (outs VR256:$dst),
          (ins VR256:$src1, i128mem:$src2, u8imm:$src3),
          "vinserti128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, Sched<[WriteShuffle256Ld, ReadAfterLd]>, VEX_4V, VEX_L;
}
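
// Bit 0 of the vinserti128 immediate selects which 128-bit half of the
// destination is replaced: e.g. vinserti128 $1, %xmm1, %ymm0, %ymm2 copies
// %ymm0 and overwrites its upper half with %xmm1.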

let Predicates = [HasAVX2, NoVLX] in {
  def : Pat<(vinsert128_insert:$ins (v4i64 VR256:$src1), (v2i64 VR128:$src2),
                                    (iPTR imm)),
            (VINSERTI128rr VR256:$src1, VR128:$src2,
                           (INSERT_get_vinsert128_imm VR256:$ins))>;
  def : Pat<(vinsert128_insert:$ins (v8i32 VR256:$src1), (v4i32 VR128:$src2),
                                    (iPTR imm)),
            (VINSERTI128rr VR256:$src1, VR128:$src2,
                           (INSERT_get_vinsert128_imm VR256:$ins))>;
  def : Pat<(vinsert128_insert:$ins (v32i8 VR256:$src1), (v16i8 VR128:$src2),
                                    (iPTR imm)),
            (VINSERTI128rr VR256:$src1, VR128:$src2,
                           (INSERT_get_vinsert128_imm VR256:$ins))>;
  def : Pat<(vinsert128_insert:$ins (v16i16 VR256:$src1), (v8i16 VR128:$src2),
                                    (iPTR imm)),
            (VINSERTI128rr VR256:$src1, VR128:$src2,
                           (INSERT_get_vinsert128_imm VR256:$ins))>;

  def : Pat<(vinsert128_insert:$ins (v4i64 VR256:$src1), (loadv2i64 addr:$src2),
                                    (iPTR imm)),
            (VINSERTI128rm VR256:$src1, addr:$src2,
                           (INSERT_get_vinsert128_imm VR256:$ins))>;
  def : Pat<(vinsert128_insert:$ins (v8i32 VR256:$src1),
                                    (bc_v4i32 (loadv2i64 addr:$src2)),
                                    (iPTR imm)),
            (VINSERTI128rm VR256:$src1, addr:$src2,
                           (INSERT_get_vinsert128_imm VR256:$ins))>;
  def : Pat<(vinsert128_insert:$ins (v32i8 VR256:$src1),
                                    (bc_v16i8 (loadv2i64 addr:$src2)),
                                    (iPTR imm)),
            (VINSERTI128rm VR256:$src1, addr:$src2,
                           (INSERT_get_vinsert128_imm VR256:$ins))>;
  def : Pat<(vinsert128_insert:$ins (v16i16 VR256:$src1),
                                    (bc_v8i16 (loadv2i64 addr:$src2)),
                                    (iPTR imm)),
            (VINSERTI128rm VR256:$src1, addr:$src2,
                           (INSERT_get_vinsert128_imm VR256:$ins))>;
}

//===----------------------------------------------------------------------===//
// VEXTRACTI128 - Extract packed integer values
//===----------------------------------------------------------------------===//

def VEXTRACTI128rr : AVX2AIi8<0x39, MRMDestReg, (outs VR128:$dst),
          (ins VR256:$src1, u8imm:$src2),
          "vextracti128\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
          Sched<[WriteShuffle256]>, VEX, VEX_L;
let hasSideEffects = 0, mayStore = 1 in
def VEXTRACTI128mr : AVX2AIi8<0x39, MRMDestMem, (outs),
          (ins i128mem:$dst, VR256:$src1, u8imm:$src2),
          "vextracti128\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
          Sched<[WriteStore]>, VEX, VEX_L;
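
// Bit 0 of the vextracti128 immediate selects the source half: $0 extracts
// the low 128 bits of $src1, $1 the high 128 bits.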

let Predicates = [HasAVX2] in {
  def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
            (v2i64 (VEXTRACTI128rr
                    (v4i64 VR256:$src1),
                    (EXTRACT_get_vextract128_imm VR128:$ext)))>;
  def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
            (v4i32 (VEXTRACTI128rr
                    (v8i32 VR256:$src1),
                    (EXTRACT_get_vextract128_imm VR128:$ext)))>;
  def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
            (v8i16 (VEXTRACTI128rr
                    (v16i16 VR256:$src1),
                    (EXTRACT_get_vextract128_imm VR128:$ext)))>;
  def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
            (v16i8 (VEXTRACTI128rr
                    (v32i8 VR256:$src1),
                    (EXTRACT_get_vextract128_imm VR128:$ext)))>;

  def : Pat<(store (v2i64 (vextract128_extract:$ext (v4i64 VR256:$src1),
                           (iPTR imm))), addr:$dst),
            (VEXTRACTI128mr addr:$dst, VR256:$src1,
                            (EXTRACT_get_vextract128_imm VR128:$ext))>;
  def : Pat<(store (v4i32 (vextract128_extract:$ext (v8i32 VR256:$src1),
                           (iPTR imm))), addr:$dst),
            (VEXTRACTI128mr addr:$dst, VR256:$src1,
                            (EXTRACT_get_vextract128_imm VR128:$ext))>;
  def : Pat<(store (v8i16 (vextract128_extract:$ext (v16i16 VR256:$src1),
                           (iPTR imm))), addr:$dst),
            (VEXTRACTI128mr addr:$dst, VR256:$src1,
                            (EXTRACT_get_vextract128_imm VR128:$ext))>;
  def : Pat<(store (v16i8 (vextract128_extract:$ext (v32i8 VR256:$src1),
                           (iPTR imm))), addr:$dst),
            (VEXTRACTI128mr addr:$dst, VR256:$src1,
                            (EXTRACT_get_vextract128_imm VR128:$ext))>;
}

//===----------------------------------------------------------------------===//
// VPMASKMOV - Conditional SIMD Integer Packed Loads and Stores
//===----------------------------------------------------------------------===//

multiclass avx2_pmovmask<string OpcodeStr,
                         Intrinsic IntLd128, Intrinsic IntLd256,
                         Intrinsic IntSt128, Intrinsic IntSt256> {
  def rm  : AVX28I<0x8c, MRMSrcMem, (outs VR128:$dst),
             (ins VR128:$src1, i128mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR128:$dst, (IntLd128 addr:$src2, VR128:$src1))]>, VEX_4V;
  def Yrm : AVX28I<0x8c, MRMSrcMem, (outs VR256:$dst),
             (ins VR256:$src1, i256mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR256:$dst, (IntLd256 addr:$src2, VR256:$src1))]>,
             VEX_4V, VEX_L;
  def mr  : AVX28I<0x8e, MRMDestMem, (outs),
             (ins i128mem:$dst, VR128:$src1, VR128:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(IntSt128 addr:$dst, VR128:$src1, VR128:$src2)]>, VEX_4V;
  def Ymr : AVX28I<0x8e, MRMDestMem, (outs),
             (ins i256mem:$dst, VR256:$src1, VR256:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(IntSt256 addr:$dst, VR256:$src1, VR256:$src2)]>, VEX_4V, VEX_L;
}

defm VPMASKMOVD : avx2_pmovmask<"vpmaskmovd",
                                int_x86_avx2_maskload_d,
                                int_x86_avx2_maskload_d_256,
                                int_x86_avx2_maskstore_d,
                                int_x86_avx2_maskstore_d_256>;
defm VPMASKMOVQ : avx2_pmovmask<"vpmaskmovq",
                                int_x86_avx2_maskload_q,
                                int_x86_avx2_maskload_q_256,
                                int_x86_avx2_maskstore_q,
                                int_x86_avx2_maskstore_q_256>, VEX_W;
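
// Mask semantics: only the most significant bit of each mask element is
// tested. Load forms zero the elements whose mask bit is clear; store forms
// leave the corresponding memory untouched, and faults on masked-off
// elements are suppressed.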

def: Pat<(X86mstore addr:$ptr, (v8i32 VR256:$mask), (v8f32 VR256:$src)),
         (VMASKMOVPSYmr addr:$ptr, VR256:$mask, VR256:$src)>;

def: Pat<(X86mstore addr:$ptr, (v8i32 VR256:$mask), (v8i32 VR256:$src)),
         (VPMASKMOVDYmr addr:$ptr, VR256:$mask, VR256:$src)>;

def: Pat<(X86mstore addr:$ptr, (v4i32 VR128:$mask), (v4f32 VR128:$src)),
         (VMASKMOVPSmr addr:$ptr, VR128:$mask, VR128:$src)>;

def: Pat<(X86mstore addr:$ptr, (v4i32 VR128:$mask), (v4i32 VR128:$src)),
         (VPMASKMOVDmr addr:$ptr, VR128:$mask, VR128:$src)>;

def: Pat<(v8f32 (masked_load addr:$ptr, (v8i32 VR256:$mask), undef)),
         (VMASKMOVPSYrm VR256:$mask, addr:$ptr)>;

def: Pat<(v8f32 (masked_load addr:$ptr, (v8i32 VR256:$mask),
                 (bc_v8f32 (v8i32 immAllZerosV)))),
         (VMASKMOVPSYrm VR256:$mask, addr:$ptr)>;

def: Pat<(v8f32 (masked_load addr:$ptr, (v8i32 VR256:$mask), (v8f32 VR256:$src0))),
         (VBLENDVPSYrr VR256:$src0, (VMASKMOVPSYrm VR256:$mask, addr:$ptr),
                       VR256:$mask)>;

def: Pat<(v8i32 (masked_load addr:$ptr, (v8i32 VR256:$mask), undef)),
         (VPMASKMOVDYrm VR256:$mask, addr:$ptr)>;

def: Pat<(v8i32 (masked_load addr:$ptr, (v8i32 VR256:$mask), (v8i32 immAllZerosV))),
         (VPMASKMOVDYrm VR256:$mask, addr:$ptr)>;

def: Pat<(v8i32 (masked_load addr:$ptr, (v8i32 VR256:$mask), (v8i32 VR256:$src0))),
         (VBLENDVPSYrr VR256:$src0, (VPMASKMOVDYrm VR256:$mask, addr:$ptr),
                       VR256:$mask)>;

def: Pat<(v4f32 (masked_load addr:$ptr, (v4i32 VR128:$mask), undef)),
         (VMASKMOVPSrm VR128:$mask, addr:$ptr)>;

def: Pat<(v4f32 (masked_load addr:$ptr, (v4i32 VR128:$mask),
                 (bc_v4f32 (v4i32 immAllZerosV)))),
         (VMASKMOVPSrm VR128:$mask, addr:$ptr)>;

def: Pat<(v4f32 (masked_load addr:$ptr, (v4i32 VR128:$mask), (v4f32 VR128:$src0))),
         (VBLENDVPSrr VR128:$src0, (VMASKMOVPSrm VR128:$mask, addr:$ptr),
                      VR128:$mask)>;

def: Pat<(v4i32 (masked_load addr:$ptr, (v4i32 VR128:$mask), undef)),
         (VPMASKMOVDrm VR128:$mask, addr:$ptr)>;

def: Pat<(v4i32 (masked_load addr:$ptr, (v4i32 VR128:$mask), (v4i32 immAllZerosV))),
         (VPMASKMOVDrm VR128:$mask, addr:$ptr)>;

def: Pat<(v4i32 (masked_load addr:$ptr, (v4i32 VR128:$mask), (v4i32 VR128:$src0))),
         (VBLENDVPSrr VR128:$src0, (VPMASKMOVDrm VR128:$mask, addr:$ptr),
                      VR128:$mask)>;
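
// Note: the hardware masked load always zeroes disabled elements, so a
// masked_load with a non-zero pass-through value ($src0) is lowered above as
// a masked load followed by vblendvps/vblendvpd to merge $src0 back in.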

def: Pat<(X86mstore addr:$ptr, (v4i64 VR256:$mask), (v4f64 VR256:$src)),
         (VMASKMOVPDYmr addr:$ptr, VR256:$mask, VR256:$src)>;

def: Pat<(X86mstore addr:$ptr, (v4i64 VR256:$mask), (v4i64 VR256:$src)),
         (VPMASKMOVQYmr addr:$ptr, VR256:$mask, VR256:$src)>;

def: Pat<(v4f64 (masked_load addr:$ptr, (v4i64 VR256:$mask), undef)),
         (VMASKMOVPDYrm VR256:$mask, addr:$ptr)>;

def: Pat<(v4f64 (masked_load addr:$ptr, (v4i64 VR256:$mask),
                 (v4f64 immAllZerosV))),
         (VMASKMOVPDYrm VR256:$mask, addr:$ptr)>;

def: Pat<(v4f64 (masked_load addr:$ptr, (v4i64 VR256:$mask), (v4f64 VR256:$src0))),
         (VBLENDVPDYrr VR256:$src0, (VMASKMOVPDYrm VR256:$mask, addr:$ptr),
                       VR256:$mask)>;

def: Pat<(v4i64 (masked_load addr:$ptr, (v4i64 VR256:$mask), undef)),
         (VPMASKMOVQYrm VR256:$mask, addr:$ptr)>;

def: Pat<(v4i64 (masked_load addr:$ptr, (v4i64 VR256:$mask),
                 (bc_v4i64 (v8i32 immAllZerosV)))),
         (VPMASKMOVQYrm VR256:$mask, addr:$ptr)>;

def: Pat<(v4i64 (masked_load addr:$ptr, (v4i64 VR256:$mask), (v4i64 VR256:$src0))),
         (VBLENDVPDYrr VR256:$src0, (VPMASKMOVQYrm VR256:$mask, addr:$ptr),
                       VR256:$mask)>;

def: Pat<(X86mstore addr:$ptr, (v2i64 VR128:$mask), (v2f64 VR128:$src)),
         (VMASKMOVPDmr addr:$ptr, VR128:$mask, VR128:$src)>;

def: Pat<(X86mstore addr:$ptr, (v2i64 VR128:$mask), (v2i64 VR128:$src)),
         (VPMASKMOVQmr addr:$ptr, VR128:$mask, VR128:$src)>;

def: Pat<(v2f64 (masked_load addr:$ptr, (v2i64 VR128:$mask), undef)),
         (VMASKMOVPDrm VR128:$mask, addr:$ptr)>;

def: Pat<(v2f64 (masked_load addr:$ptr, (v2i64 VR128:$mask),
                 (v2f64 immAllZerosV))),
         (VMASKMOVPDrm VR128:$mask, addr:$ptr)>;

def: Pat<(v2f64 (masked_load addr:$ptr, (v2i64 VR128:$mask), (v2f64 VR128:$src0))),
         (VBLENDVPDrr VR128:$src0, (VMASKMOVPDrm VR128:$mask, addr:$ptr),
                      VR128:$mask)>;

def: Pat<(v2i64 (masked_load addr:$ptr, (v2i64 VR128:$mask), undef)),
         (VPMASKMOVQrm VR128:$mask, addr:$ptr)>;

def: Pat<(v2i64 (masked_load addr:$ptr, (v2i64 VR128:$mask),
                 (bc_v2i64 (v4i32 immAllZerosV)))),
         (VPMASKMOVQrm VR128:$mask, addr:$ptr)>;

def: Pat<(v2i64 (masked_load addr:$ptr, (v2i64 VR128:$mask), (v2i64 VR128:$src0))),
         (VBLENDVPDrr VR128:$src0, (VPMASKMOVQrm VR128:$mask, addr:$ptr),
                      VR128:$mask)>;

//===----------------------------------------------------------------------===//
// Variable Bit Shifts
//===----------------------------------------------------------------------===//

multiclass avx2_var_shift<bits<8> opc, string OpcodeStr, SDNode OpNode,
                          ValueType vt128, ValueType vt256> {
  def rr : AVX28I<opc, MRMSrcReg, (outs VR128:$dst),
             (ins VR128:$src1, VR128:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR128:$dst,
               (vt128 (OpNode VR128:$src1, (vt128 VR128:$src2))))]>,
             VEX_4V, Sched<[WriteVarVecShift]>;
  def rm : AVX28I<opc, MRMSrcMem, (outs VR128:$dst),
             (ins VR128:$src1, i128mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR128:$dst,
               (vt128 (OpNode VR128:$src1,
                       (vt128 (bitconvert (loadv2i64 addr:$src2))))))]>,
             VEX_4V, Sched<[WriteVarVecShiftLd, ReadAfterLd]>;
  def Yrr : AVX28I<opc, MRMSrcReg, (outs VR256:$dst),
             (ins VR256:$src1, VR256:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR256:$dst,
               (vt256 (OpNode VR256:$src1, (vt256 VR256:$src2))))]>,
             VEX_4V, VEX_L, Sched<[WriteVarVecShift]>;
  def Yrm : AVX28I<opc, MRMSrcMem, (outs VR256:$dst),
             (ins VR256:$src1, i256mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR256:$dst,
               (vt256 (OpNode VR256:$src1,
                       (vt256 (bitconvert (loadv4i64 addr:$src2))))))]>,
             VEX_4V, VEX_L, Sched<[WriteVarVecShiftLd, ReadAfterLd]>;
}

let Predicates = [HasAVX2, NoVLX] in {
  defm VPSLLVD : avx2_var_shift<0x47, "vpsllvd", shl, v4i32, v8i32>;
  defm VPSLLVQ : avx2_var_shift<0x47, "vpsllvq", shl, v2i64, v4i64>, VEX_W;
  defm VPSRLVD : avx2_var_shift<0x45, "vpsrlvd", srl, v4i32, v8i32>;
  defm VPSRLVQ : avx2_var_shift<0x45, "vpsrlvq", srl, v2i64, v4i64>, VEX_W;
  defm VPSRAVD : avx2_var_shift<0x46, "vpsravd", sra, v4i32, v8i32>;
}
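
// These take a per-element shift count from the second source. For the
// logical forms (vpsllv*/vpsrlv*) a count larger than the element width
// yields zero; vpsravd instead fills the element with copies of the sign bit.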

//===----------------------------------------------------------------------===//
// VGATHER - GATHER Operations
multiclass avx2_gather<bits<8> opc, string OpcodeStr, RegisterClass RC256,
                       X86MemOperand memop128, X86MemOperand memop256> {
  def rm  : AVX28I<opc, MRMSrcMem, (outs VR128:$dst, VR128:$mask_wb),
            (ins VR128:$src1, memop128:$src2, VR128:$mask),
            !strconcat(OpcodeStr,
              "\t{$mask, $src2, $dst|$dst, $src2, $mask}"),
            []>, VEX_4VOp3;
  def Yrm : AVX28I<opc, MRMSrcMem, (outs RC256:$dst, RC256:$mask_wb),
            (ins RC256:$src1, memop256:$src2, RC256:$mask),
            !strconcat(OpcodeStr,
              "\t{$mask, $src2, $dst|$dst, $src2, $mask}"),
            []>, VEX_4VOp3, VEX_L;
}

let mayLoad = 1, Constraints
  = "@earlyclobber $dst,@earlyclobber $mask_wb, $src1 = $dst, $mask = $mask_wb"
  in {
  defm VPGATHERDQ : avx2_gather<0x90, "vpgatherdq", VR256, vx64mem, vx64mem>, VEX_W;
  defm VPGATHERQQ : avx2_gather<0x91, "vpgatherqq", VR256, vx64mem, vy64mem>, VEX_W;
  defm VPGATHERDD : avx2_gather<0x90, "vpgatherdd", VR256, vx32mem, vy32mem>;
  defm VPGATHERQD : avx2_gather<0x91, "vpgatherqd", VR128, vx32mem, vy32mem>;

  let ExeDomain = SSEPackedDouble in {
    defm VGATHERDPD : avx2_gather<0x92, "vgatherdpd", VR256, vx64mem, vx64mem>, VEX_W;
    defm VGATHERQPD : avx2_gather<0x93, "vgatherqpd", VR256, vx64mem, vy64mem>, VEX_W;
  }

  let ExeDomain = SSEPackedSingle in {
    defm VGATHERDPS : avx2_gather<0x92, "vgatherdps", VR256, vx32mem, vy32mem>;
    defm VGATHERQPS : avx2_gather<0x93, "vgatherqps", VR128, vx32mem, vy32mem>;
  }
}
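
// Gathers produce two results: the gathered data and a written-back mask
// whose elements are cleared as the corresponding loads complete. The
// destination, index and mask registers must all be distinct (the hardware
// faults otherwise), hence the @earlyclobber markers combined with the
// tied-operand constraints above.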

//===----------------------------------------------------------------------===//
// Extra selection patterns for FR128, f128, f128mem
//===----------------------------------------------------------------------===//

// movaps is shorter than movdqa. movaps is in SSE and movdqa is in SSE2.
def : Pat<(store (f128 FR128:$src), addr:$dst),
          (MOVAPSmr addr:$dst, (COPY_TO_REGCLASS (f128 FR128:$src), VR128))>;

def : Pat<(loadf128 addr:$src),
          (COPY_TO_REGCLASS (MOVAPSrm addr:$src), FR128)>;
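
// For reference: movaps (0F 28) lacks the 0x66 prefix of movdqa (66 0F 6F),
// so the movaps form of each copy above is one byte shorter.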

// andps is shorter than andpd or pand. andps is in SSE; andpd/pand are in SSE2.
def : Pat<(X86fand FR128:$src1, (loadf128 addr:$src2)),
          (COPY_TO_REGCLASS
           (ANDPSrm (COPY_TO_REGCLASS FR128:$src1, VR128), f128mem:$src2),
           FR128)>;

def : Pat<(X86fand FR128:$src1, FR128:$src2),
          (COPY_TO_REGCLASS
           (ANDPSrr (COPY_TO_REGCLASS FR128:$src1, VR128),
                    (COPY_TO_REGCLASS FR128:$src2, VR128)), FR128)>;

def : Pat<(and FR128:$src1, FR128:$src2),
          (COPY_TO_REGCLASS
           (ANDPSrr (COPY_TO_REGCLASS FR128:$src1, VR128),
                    (COPY_TO_REGCLASS FR128:$src2, VR128)), FR128)>;
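
// Encoding note: andps is 0F 54, while andpd (66 0F 54) and pand (66 0F DB)
// both need a 0x66 prefix, so the andps form saves a byte; the same holds
// for the orps and xorps patterns below.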

def : Pat<(X86for FR128:$src1, (loadf128 addr:$src2)),
          (COPY_TO_REGCLASS
           (ORPSrm (COPY_TO_REGCLASS FR128:$src1, VR128), f128mem:$src2),
           FR128)>;

def : Pat<(X86for FR128:$src1, FR128:$src2),
          (COPY_TO_REGCLASS
           (ORPSrr (COPY_TO_REGCLASS FR128:$src1, VR128),
                   (COPY_TO_REGCLASS FR128:$src2, VR128)), FR128)>;

def : Pat<(or FR128:$src1, FR128:$src2),
          (COPY_TO_REGCLASS
           (ORPSrr (COPY_TO_REGCLASS FR128:$src1, VR128),
                   (COPY_TO_REGCLASS FR128:$src2, VR128)), FR128)>;

def : Pat<(X86fxor FR128:$src1, (loadf128 addr:$src2)),
          (COPY_TO_REGCLASS
           (XORPSrm (COPY_TO_REGCLASS FR128:$src1, VR128), f128mem:$src2),
           FR128)>;

def : Pat<(X86fxor FR128:$src1, FR128:$src2),
          (COPY_TO_REGCLASS
           (XORPSrr (COPY_TO_REGCLASS FR128:$src1, VR128),
                    (COPY_TO_REGCLASS FR128:$src2, VR128)), FR128)>;

def : Pat<(xor FR128:$src1, FR128:$src2),
          (COPY_TO_REGCLASS
           (XORPSrr (COPY_TO_REGCLASS FR128:$src1, VR128),
                    (COPY_TO_REGCLASS FR128:$src2, VR128)), FR128)>;