//===-- X86InstrSSE.td - SSE Instruction Set ---------------*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the X86 SSE instruction set, defining the instructions,
// and properties of the instructions which are needed for code generation,
// machine code emission, and analysis.
//
//===----------------------------------------------------------------------===//
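
// The OpndItins / SizeItins / ShiftOpndItins helper classes below bundle the
// instruction itinerary classes for the register-register and register-memory
// (and, for shifts, register-immediate) forms of an operation, together with
// the SchedWrite resource used by the newer machine scheduler models.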

class OpndItins<InstrItinClass arg_rr, InstrItinClass arg_rm> {
  InstrItinClass rr = arg_rr;
  InstrItinClass rm = arg_rm;
  // InstrSchedModel info.
  X86FoldableSchedWrite Sched = WriteFAdd;
}

class SizeItins<OpndItins arg_s, OpndItins arg_d> {
  OpndItins s = arg_s;
  OpndItins d = arg_d;
}

class ShiftOpndItins<InstrItinClass arg_rr, InstrItinClass arg_rm,
                     InstrItinClass arg_ri> {
  InstrItinClass rr = arg_rr;
  InstrItinClass rm = arg_rm;
  InstrItinClass ri = arg_ri;
}

let Sched = WriteFAdd in {
def SSE_ALU_F32S : OpndItins<
  IIC_SSE_ALU_F32S_RR, IIC_SSE_ALU_F32S_RM
>;

def SSE_ALU_F64S : OpndItins<
  IIC_SSE_ALU_F64S_RR, IIC_SSE_ALU_F64S_RM
>;
}

def SSE_ALU_ITINS_S : SizeItins<
  SSE_ALU_F32S, SSE_ALU_F64S
>;

let Sched = WriteFMul in {
def SSE_MUL_F32S : OpndItins<
  IIC_SSE_MUL_F32S_RR, IIC_SSE_MUL_F64S_RM
>;

def SSE_MUL_F64S : OpndItins<
  IIC_SSE_MUL_F64S_RR, IIC_SSE_MUL_F64S_RM
>;
}

def SSE_MUL_ITINS_S : SizeItins<
  SSE_MUL_F32S, SSE_MUL_F64S
>;

let Sched = WriteFDiv in {
def SSE_DIV_F32S : OpndItins<
  IIC_SSE_DIV_F32S_RR, IIC_SSE_DIV_F64S_RM
>;

def SSE_DIV_F64S : OpndItins<
  IIC_SSE_DIV_F64S_RR, IIC_SSE_DIV_F64S_RM
>;
}

def SSE_DIV_ITINS_S : SizeItins<
  SSE_DIV_F32S, SSE_DIV_F64S
>;

let Sched = WriteFAdd in {
def SSE_ALU_F32P : OpndItins<
  IIC_SSE_ALU_F32P_RR, IIC_SSE_ALU_F32P_RM
>;

def SSE_ALU_F64P : OpndItins<
  IIC_SSE_ALU_F64P_RR, IIC_SSE_ALU_F64P_RM
>;
}

def SSE_ALU_ITINS_P : SizeItins<
  SSE_ALU_F32P, SSE_ALU_F64P
>;

let Sched = WriteFMul in {
def SSE_MUL_F32P : OpndItins<
  IIC_SSE_MUL_F32P_RR, IIC_SSE_MUL_F64P_RM
>;

def SSE_MUL_F64P : OpndItins<
  IIC_SSE_MUL_F64P_RR, IIC_SSE_MUL_F64P_RM
>;
}

def SSE_MUL_ITINS_P : SizeItins<
  SSE_MUL_F32P, SSE_MUL_F64P
>;

let Sched = WriteFDiv in {
def SSE_DIV_F32P : OpndItins<
  IIC_SSE_DIV_F32P_RR, IIC_SSE_DIV_F64P_RM
>;

def SSE_DIV_F64P : OpndItins<
  IIC_SSE_DIV_F64P_RR, IIC_SSE_DIV_F64P_RM
>;
}

def SSE_DIV_ITINS_P : SizeItins<
  SSE_DIV_F32P, SSE_DIV_F64P
>;

let Sched = WriteVecLogic in
def SSE_VEC_BIT_ITINS_P : OpndItins<
  IIC_SSE_BIT_P_RR, IIC_SSE_BIT_P_RM
>;

def SSE_BIT_ITINS_P : OpndItins<
  IIC_SSE_BIT_P_RR, IIC_SSE_BIT_P_RM
>;

let Sched = WriteVecALU in {
def SSE_INTALU_ITINS_P : OpndItins<
  IIC_SSE_INTALU_P_RR, IIC_SSE_INTALU_P_RM
>;

def SSE_INTALUQ_ITINS_P : OpndItins<
  IIC_SSE_INTALUQ_P_RR, IIC_SSE_INTALUQ_P_RM
>;
}

let Sched = WriteVecIMul in
def SSE_INTMUL_ITINS_P : OpndItins<
  IIC_SSE_INTMUL_P_RR, IIC_SSE_INTMUL_P_RM
>;

def SSE_INTSHIFT_ITINS_P : ShiftOpndItins<
  IIC_SSE_INTSH_P_RR, IIC_SSE_INTSH_P_RM, IIC_SSE_INTSH_P_RI
>;

def SSE_MOVA_ITINS : OpndItins<
  IIC_SSE_MOVA_P_RR, IIC_SSE_MOVA_P_RM
>;

def SSE_MOVU_ITINS : OpndItins<
  IIC_SSE_MOVU_P_RR, IIC_SSE_MOVU_P_RM
>;

def SSE_DPPD_ITINS : OpndItins<
  IIC_SSE_DPPD_RR, IIC_SSE_DPPD_RM
>;

def SSE_DPPS_ITINS : OpndItins<
  IIC_SSE_DPPS_RR, IIC_SSE_DPPD_RM
>;

def DEFAULT_ITINS : OpndItins<
  IIC_ALU_NONMEM, IIC_ALU_MEM
>;

def SSE_EXTRACT_ITINS : OpndItins<
  IIC_SSE_EXTRACTPS_RR, IIC_SSE_EXTRACTPS_RM
>;

def SSE_INSERT_ITINS : OpndItins<
  IIC_SSE_INSERTPS_RR, IIC_SSE_INSERTPS_RM
>;

let Sched = WriteMPSAD in
def SSE_MPSADBW_ITINS : OpndItins<
  IIC_SSE_MPSADBW_RR, IIC_SSE_MPSADBW_RM
>;

let Sched = WriteVecIMul in
def SSE_PMULLD_ITINS : OpndItins<
  IIC_SSE_PMULLD_RR, IIC_SSE_PMULLD_RM
>;

// Definitions for backward compatibility.
// The instructions mapped onto these definitions use a different itinerary
// than the actual scheduling model.
let Sched = WriteShuffle in
def DEFAULT_ITINS_SHUFFLESCHED : OpndItins<
  IIC_ALU_NONMEM, IIC_ALU_MEM
>;

let Sched = WriteVecIMul in
def DEFAULT_ITINS_VECIMULSCHED : OpndItins<
  IIC_ALU_NONMEM, IIC_ALU_MEM
>;

let Sched = WriteShuffle in
def SSE_INTALU_ITINS_SHUFF_P : OpndItins<
  IIC_SSE_INTALU_P_RR, IIC_SSE_INTALU_P_RM
>;

let Sched = WriteMPSAD in
def DEFAULT_ITINS_MPSADSCHED : OpndItins<
  IIC_ALU_NONMEM, IIC_ALU_MEM
>;

let Sched = WriteFBlend in
def DEFAULT_ITINS_FBLENDSCHED : OpndItins<
  IIC_ALU_NONMEM, IIC_ALU_MEM
>;

let Sched = WriteBlend in
def DEFAULT_ITINS_BLENDSCHED : OpndItins<
  IIC_ALU_NONMEM, IIC_ALU_MEM
>;

let Sched = WriteVarBlend in
def DEFAULT_ITINS_VARBLENDSCHED : OpndItins<
  IIC_ALU_NONMEM, IIC_ALU_MEM
>;

let Sched = WriteFBlend in
def SSE_INTALU_ITINS_FBLEND_P : OpndItins<
  IIC_SSE_INTALU_P_RR, IIC_SSE_INTALU_P_RM
>;

let Sched = WriteBlend in
def SSE_INTALU_ITINS_BLEND_P : OpndItins<
  IIC_SSE_INTALU_P_RR, IIC_SSE_INTALU_P_RM
>;

//===----------------------------------------------------------------------===//
// SSE 1 & 2 Instructions Classes
//===----------------------------------------------------------------------===//

/// sse12_fp_scalar - SSE 1 & 2 scalar instructions class
multiclass sse12_fp_scalar<bits<8> opc, string OpcodeStr, SDNode OpNode,
                           RegisterClass RC, X86MemOperand x86memop,
                           OpndItins itins,
                           bit Is2Addr = 1> {
  let isCommutable = 1 in {
  def rr : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
              !if(Is2Addr,
                  !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                  !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
              [(set RC:$dst, (OpNode RC:$src1, RC:$src2))], itins.rr>,
              Sched<[itins.Sched]>;
  }
  def rm : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
              !if(Is2Addr,
                  !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                  !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
              [(set RC:$dst, (OpNode RC:$src1, (load addr:$src2)))], itins.rm>,
              Sched<[itins.Sched.Folded, ReadAfterLd]>;
}

/// sse12_fp_scalar_int - SSE 1 & 2 scalar instructions intrinsics class
multiclass sse12_fp_scalar_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
                               string asm, string SSEVer, string FPSizeStr,
                               Operand memopr, ComplexPattern mem_cpat,
                               OpndItins itins, bit Is2Addr = 1> {
let isCodeGenOnly = 1 in {
  def rr_Int : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (!cast<Intrinsic>(
                 !strconcat("int_x86_sse", SSEVer, "_", OpcodeStr, FPSizeStr))
             RC:$src1, RC:$src2))], itins.rr>,
       Sched<[itins.Sched]>;
  def rm_Int : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, memopr:$src2),
       !if(Is2Addr,
           !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (!cast<Intrinsic>(!strconcat("int_x86_sse",
                                          SSEVer, "_", OpcodeStr, FPSizeStr))
             RC:$src1, mem_cpat:$src2))], itins.rm>,
       Sched<[itins.Sched.Folded, ReadAfterLd]>;
}
}

/// sse12_fp_packed - SSE 1 & 2 packed instructions class
multiclass sse12_fp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
                           RegisterClass RC, ValueType vt,
                           X86MemOperand x86memop, PatFrag mem_frag,
                           Domain d, OpndItins itins, bit Is2Addr = 1> {
  let isCommutable = 1 in
  def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
              !if(Is2Addr,
                  !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                  !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
              [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], itins.rr, d>,
              Sched<[itins.Sched]>;
  def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
              !if(Is2Addr,
                  !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                  !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
              [(set RC:$dst, (OpNode RC:$src1, (mem_frag addr:$src2)))],
              itins.rm, d>,
              Sched<[itins.Sched.Folded, ReadAfterLd]>;
}

/// sse12_fp_packed_logical_rm - SSE 1 & 2 packed instructions class
multiclass sse12_fp_packed_logical_rm<bits<8> opc, RegisterClass RC, Domain d,
                                      string OpcodeStr, X86MemOperand x86memop,
                                      list<dag> pat_rr, list<dag> pat_rm,
                                      bit Is2Addr = 1> {
  let isCommutable = 1, hasSideEffects = 0 in
  def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       pat_rr, NoItinerary, d>,
       Sched<[WriteVecLogic]>;
  def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       pat_rm, NoItinerary, d>,
       Sched<[WriteVecLogicLd, ReadAfterLd]>;
}

//===----------------------------------------------------------------------===//
// Non-instruction patterns
//===----------------------------------------------------------------------===//
334 // A vector extract of the first f32/f64 position is a subregister copy
335 def : Pat<(f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
336 (COPY_TO_REGCLASS (v4f32 VR128:$src), FR32)>;
337 def : Pat<(f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
338 (COPY_TO_REGCLASS (v2f64 VR128:$src), FR64)>;
340 // A 128-bit subvector extract from the first 256-bit vector position
341 // is a subregister copy that needs no instruction.
342 def : Pat<(v4i32 (extract_subvector (v8i32 VR256:$src), (iPTR 0))),
343 (v4i32 (EXTRACT_SUBREG (v8i32 VR256:$src), sub_xmm))>;
344 def : Pat<(v4f32 (extract_subvector (v8f32 VR256:$src), (iPTR 0))),
345 (v4f32 (EXTRACT_SUBREG (v8f32 VR256:$src), sub_xmm))>;
347 def : Pat<(v2i64 (extract_subvector (v4i64 VR256:$src), (iPTR 0))),
348 (v2i64 (EXTRACT_SUBREG (v4i64 VR256:$src), sub_xmm))>;
349 def : Pat<(v2f64 (extract_subvector (v4f64 VR256:$src), (iPTR 0))),
350 (v2f64 (EXTRACT_SUBREG (v4f64 VR256:$src), sub_xmm))>;
352 def : Pat<(v8i16 (extract_subvector (v16i16 VR256:$src), (iPTR 0))),
353 (v8i16 (EXTRACT_SUBREG (v16i16 VR256:$src), sub_xmm))>;
354 def : Pat<(v16i8 (extract_subvector (v32i8 VR256:$src), (iPTR 0))),
355 (v16i8 (EXTRACT_SUBREG (v32i8 VR256:$src), sub_xmm))>;
357 // A 128-bit subvector insert to the first 256-bit vector position
358 // is a subregister copy that needs no instruction.
359 let AddedComplexity = 25 in { // to give priority over vinsertf128rm
360 def : Pat<(insert_subvector undef, (v2i64 VR128:$src), (iPTR 0)),
361 (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
362 def : Pat<(insert_subvector undef, (v2f64 VR128:$src), (iPTR 0)),
363 (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
364 def : Pat<(insert_subvector undef, (v4i32 VR128:$src), (iPTR 0)),
365 (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
366 def : Pat<(insert_subvector undef, (v4f32 VR128:$src), (iPTR 0)),
367 (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
368 def : Pat<(insert_subvector undef, (v8i16 VR128:$src), (iPTR 0)),
369 (INSERT_SUBREG (v16i16 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
370 def : Pat<(insert_subvector undef, (v16i8 VR128:$src), (iPTR 0)),
          (INSERT_SUBREG (v32i8 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
}
374 // Implicitly promote a 32-bit scalar to a vector.
375 def : Pat<(v4f32 (scalar_to_vector FR32:$src)),
376 (COPY_TO_REGCLASS FR32:$src, VR128)>;
377 def : Pat<(v8f32 (scalar_to_vector FR32:$src)),
378 (COPY_TO_REGCLASS FR32:$src, VR128)>;
379 // Implicitly promote a 64-bit scalar to a vector.
380 def : Pat<(v2f64 (scalar_to_vector FR64:$src)),
381 (COPY_TO_REGCLASS FR64:$src, VR128)>;
382 def : Pat<(v4f64 (scalar_to_vector FR64:$src)),
383 (COPY_TO_REGCLASS FR64:$src, VR128)>;
385 // Bitcasts between 128-bit vector types. Return the original type since
386 // no instruction is needed for the conversion
387 let Predicates = [HasSSE2] in {
388 def : Pat<(v2i64 (bitconvert (v4i32 VR128:$src))), (v2i64 VR128:$src)>;
389 def : Pat<(v2i64 (bitconvert (v8i16 VR128:$src))), (v2i64 VR128:$src)>;
390 def : Pat<(v2i64 (bitconvert (v16i8 VR128:$src))), (v2i64 VR128:$src)>;
391 def : Pat<(v2i64 (bitconvert (v2f64 VR128:$src))), (v2i64 VR128:$src)>;
392 def : Pat<(v2i64 (bitconvert (v4f32 VR128:$src))), (v2i64 VR128:$src)>;
393 def : Pat<(v4i32 (bitconvert (v2i64 VR128:$src))), (v4i32 VR128:$src)>;
394 def : Pat<(v4i32 (bitconvert (v8i16 VR128:$src))), (v4i32 VR128:$src)>;
395 def : Pat<(v4i32 (bitconvert (v16i8 VR128:$src))), (v4i32 VR128:$src)>;
396 def : Pat<(v4i32 (bitconvert (v2f64 VR128:$src))), (v4i32 VR128:$src)>;
397 def : Pat<(v4i32 (bitconvert (v4f32 VR128:$src))), (v4i32 VR128:$src)>;
398 def : Pat<(v8i16 (bitconvert (v2i64 VR128:$src))), (v8i16 VR128:$src)>;
399 def : Pat<(v8i16 (bitconvert (v4i32 VR128:$src))), (v8i16 VR128:$src)>;
400 def : Pat<(v8i16 (bitconvert (v16i8 VR128:$src))), (v8i16 VR128:$src)>;
401 def : Pat<(v8i16 (bitconvert (v2f64 VR128:$src))), (v8i16 VR128:$src)>;
402 def : Pat<(v8i16 (bitconvert (v4f32 VR128:$src))), (v8i16 VR128:$src)>;
403 def : Pat<(v16i8 (bitconvert (v2i64 VR128:$src))), (v16i8 VR128:$src)>;
404 def : Pat<(v16i8 (bitconvert (v4i32 VR128:$src))), (v16i8 VR128:$src)>;
405 def : Pat<(v16i8 (bitconvert (v8i16 VR128:$src))), (v16i8 VR128:$src)>;
406 def : Pat<(v16i8 (bitconvert (v2f64 VR128:$src))), (v16i8 VR128:$src)>;
407 def : Pat<(v16i8 (bitconvert (v4f32 VR128:$src))), (v16i8 VR128:$src)>;
408 def : Pat<(v4f32 (bitconvert (v2i64 VR128:$src))), (v4f32 VR128:$src)>;
409 def : Pat<(v4f32 (bitconvert (v4i32 VR128:$src))), (v4f32 VR128:$src)>;
410 def : Pat<(v4f32 (bitconvert (v8i16 VR128:$src))), (v4f32 VR128:$src)>;
411 def : Pat<(v4f32 (bitconvert (v16i8 VR128:$src))), (v4f32 VR128:$src)>;
412 def : Pat<(v4f32 (bitconvert (v2f64 VR128:$src))), (v4f32 VR128:$src)>;
413 def : Pat<(v2f64 (bitconvert (v2i64 VR128:$src))), (v2f64 VR128:$src)>;
414 def : Pat<(v2f64 (bitconvert (v4i32 VR128:$src))), (v2f64 VR128:$src)>;
415 def : Pat<(v2f64 (bitconvert (v8i16 VR128:$src))), (v2f64 VR128:$src)>;
416 def : Pat<(v2f64 (bitconvert (v16i8 VR128:$src))), (v2f64 VR128:$src)>;
def : Pat<(v2f64 (bitconvert (v4f32 VR128:$src))), (v2f64 VR128:$src)>;
}
420 // Bitcasts between 256-bit vector types. Return the original type since
421 // no instruction is needed for the conversion
422 let Predicates = [HasAVX] in {
423 def : Pat<(v4f64 (bitconvert (v8f32 VR256:$src))), (v4f64 VR256:$src)>;
424 def : Pat<(v4f64 (bitconvert (v8i32 VR256:$src))), (v4f64 VR256:$src)>;
425 def : Pat<(v4f64 (bitconvert (v4i64 VR256:$src))), (v4f64 VR256:$src)>;
426 def : Pat<(v4f64 (bitconvert (v16i16 VR256:$src))), (v4f64 VR256:$src)>;
427 def : Pat<(v4f64 (bitconvert (v32i8 VR256:$src))), (v4f64 VR256:$src)>;
428 def : Pat<(v8f32 (bitconvert (v8i32 VR256:$src))), (v8f32 VR256:$src)>;
429 def : Pat<(v8f32 (bitconvert (v4i64 VR256:$src))), (v8f32 VR256:$src)>;
430 def : Pat<(v8f32 (bitconvert (v4f64 VR256:$src))), (v8f32 VR256:$src)>;
431 def : Pat<(v8f32 (bitconvert (v32i8 VR256:$src))), (v8f32 VR256:$src)>;
432 def : Pat<(v8f32 (bitconvert (v16i16 VR256:$src))), (v8f32 VR256:$src)>;
433 def : Pat<(v4i64 (bitconvert (v8f32 VR256:$src))), (v4i64 VR256:$src)>;
434 def : Pat<(v4i64 (bitconvert (v8i32 VR256:$src))), (v4i64 VR256:$src)>;
435 def : Pat<(v4i64 (bitconvert (v4f64 VR256:$src))), (v4i64 VR256:$src)>;
436 def : Pat<(v4i64 (bitconvert (v32i8 VR256:$src))), (v4i64 VR256:$src)>;
437 def : Pat<(v4i64 (bitconvert (v16i16 VR256:$src))), (v4i64 VR256:$src)>;
438 def : Pat<(v32i8 (bitconvert (v4f64 VR256:$src))), (v32i8 VR256:$src)>;
439 def : Pat<(v32i8 (bitconvert (v4i64 VR256:$src))), (v32i8 VR256:$src)>;
440 def : Pat<(v32i8 (bitconvert (v8f32 VR256:$src))), (v32i8 VR256:$src)>;
441 def : Pat<(v32i8 (bitconvert (v8i32 VR256:$src))), (v32i8 VR256:$src)>;
442 def : Pat<(v32i8 (bitconvert (v16i16 VR256:$src))), (v32i8 VR256:$src)>;
443 def : Pat<(v8i32 (bitconvert (v32i8 VR256:$src))), (v8i32 VR256:$src)>;
444 def : Pat<(v8i32 (bitconvert (v16i16 VR256:$src))), (v8i32 VR256:$src)>;
445 def : Pat<(v8i32 (bitconvert (v8f32 VR256:$src))), (v8i32 VR256:$src)>;
446 def : Pat<(v8i32 (bitconvert (v4i64 VR256:$src))), (v8i32 VR256:$src)>;
447 def : Pat<(v8i32 (bitconvert (v4f64 VR256:$src))), (v8i32 VR256:$src)>;
448 def : Pat<(v16i16 (bitconvert (v8f32 VR256:$src))), (v16i16 VR256:$src)>;
449 def : Pat<(v16i16 (bitconvert (v8i32 VR256:$src))), (v16i16 VR256:$src)>;
450 def : Pat<(v16i16 (bitconvert (v4i64 VR256:$src))), (v16i16 VR256:$src)>;
451 def : Pat<(v16i16 (bitconvert (v4f64 VR256:$src))), (v16i16 VR256:$src)>;
def : Pat<(v16i16 (bitconvert (v32i8 VR256:$src))), (v16i16 VR256:$src)>;
}

// Alias instructions that map fld0 to xorps for SSE or vxorps for AVX.
// This is expanded by ExpandPostRAPseudos.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
    isPseudo = 1, SchedRW = [WriteZero] in {
  def FsFLD0SS : I<0, Pseudo, (outs FR32:$dst), (ins), "",
                   [(set FR32:$dst, fp32imm0)]>, Requires<[HasSSE1]>;
  def FsFLD0SD : I<0, Pseudo, (outs FR64:$dst), (ins), "",
                   [(set FR64:$dst, fpimm0)]>, Requires<[HasSSE2]>;
}

//===----------------------------------------------------------------------===//
// AVX & SSE - Zero/One Vectors
//===----------------------------------------------------------------------===//

// Alias instruction that maps zero vector to pxor / xorp* for SSE.
// This is expanded by ExpandPostRAPseudos to an xorps / vxorps, and then
// swizzled by ExecutionDepsFix to pxor.
// We set canFoldAsLoad because this can be converted to a constant-pool
// load of an all-zeros value if folding it would be beneficial.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
    isPseudo = 1, SchedRW = [WriteZero] in {
def V_SET0 : I<0, Pseudo, (outs VR128:$dst), (ins), "",
               [(set VR128:$dst, (v4f32 immAllZerosV))]>;
}

def : Pat<(v2f64 immAllZerosV), (V_SET0)>;
def : Pat<(v4i32 immAllZerosV), (V_SET0)>;
def : Pat<(v2i64 immAllZerosV), (V_SET0)>;
def : Pat<(v8i16 immAllZerosV), (V_SET0)>;
def : Pat<(v16i8 immAllZerosV), (V_SET0)>;

// The same as above, but for AVX. The 256-bit AVX1 ISA doesn't support PI
// (packed integer), and doesn't need it because on Sandy Bridge the register
// is set to zero at the rename stage without using any execution unit, so
// SET0PSY and SET0PDY can be used for vector int instructions without penalty.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
    isPseudo = 1, Predicates = [HasAVX], SchedRW = [WriteZero] in {
def AVX_SET0 : I<0, Pseudo, (outs VR256:$dst), (ins), "",
                 [(set VR256:$dst, (v8f32 immAllZerosV))]>;
}

let Predicates = [HasAVX] in
  def : Pat<(v4f64 immAllZerosV), (AVX_SET0)>;

let Predicates = [HasAVX2] in {
  def : Pat<(v4i64 immAllZerosV), (AVX_SET0)>;
  def : Pat<(v8i32 immAllZerosV), (AVX_SET0)>;
  def : Pat<(v16i16 immAllZerosV), (AVX_SET0)>;
  def : Pat<(v32i8 immAllZerosV), (AVX_SET0)>;
}

// AVX1 has no support for 256-bit integer instructions, but since the 128-bit
// VPXOR instruction writes zero to its upper part, it's safe to build zeros.
let Predicates = [HasAVX1Only] in {
def : Pat<(v32i8 immAllZerosV), (SUBREG_TO_REG (i8 0), (V_SET0), sub_xmm)>;
def : Pat<(bc_v32i8 (v8f32 immAllZerosV)),
          (SUBREG_TO_REG (i8 0), (V_SET0), sub_xmm)>;

def : Pat<(v16i16 immAllZerosV), (SUBREG_TO_REG (i16 0), (V_SET0), sub_xmm)>;
def : Pat<(bc_v16i16 (v8f32 immAllZerosV)),
          (SUBREG_TO_REG (i16 0), (V_SET0), sub_xmm)>;

def : Pat<(v8i32 immAllZerosV), (SUBREG_TO_REG (i32 0), (V_SET0), sub_xmm)>;
def : Pat<(bc_v8i32 (v8f32 immAllZerosV)),
          (SUBREG_TO_REG (i32 0), (V_SET0), sub_xmm)>;

def : Pat<(v4i64 immAllZerosV), (SUBREG_TO_REG (i64 0), (V_SET0), sub_xmm)>;
def : Pat<(bc_v4i64 (v8f32 immAllZerosV)),
          (SUBREG_TO_REG (i64 0), (V_SET0), sub_xmm)>;
}

// We set canFoldAsLoad because this can be converted to a constant-pool
// load of an all-ones value if folding it would be beneficial.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
    isPseudo = 1, SchedRW = [WriteZero] in {
  def V_SETALLONES : I<0, Pseudo, (outs VR128:$dst), (ins), "",
                       [(set VR128:$dst, (v4i32 immAllOnesV))]>;
  let Predicates = [HasAVX2] in
  def AVX2_SETALLONES : I<0, Pseudo, (outs VR256:$dst), (ins), "",
                          [(set VR256:$dst, (v8i32 immAllOnesV))]>;
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move FP Scalar Instructions
//
// Move Instructions. Register-to-register movss/movsd is not used for FR32/64
// register copies because it's a partial register update; register-to-register
// movss/movsd is not modeled as an INSERT_SUBREG because INSERT_SUBREG requires
// that the insert be implementable in terms of a copy, and, as just mentioned,
// we don't use movss/movsd for copies.
//===----------------------------------------------------------------------===//

multiclass sse12_move_rr<RegisterClass RC, SDNode OpNode, ValueType vt,
                         X86MemOperand x86memop, string base_opc,
                         string asm_opr, Domain d = GenericDomain> {
  def rr : SI<0x10, MRMSrcReg, (outs VR128:$dst),
              (ins VR128:$src1, RC:$src2),
              !strconcat(base_opc, asm_opr),
              [(set VR128:$dst, (vt (OpNode VR128:$src1,
                                     (scalar_to_vector RC:$src2))))],
              IIC_SSE_MOV_S_RR, d>, Sched<[WriteFShuffle]>;

  // For the disassembler
  let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in
  def rr_REV : SI<0x11, MRMDestReg, (outs VR128:$dst),
                  (ins VR128:$src1, RC:$src2),
                  !strconcat(base_opc, asm_opr),
                  [], IIC_SSE_MOV_S_RR>, Sched<[WriteFShuffle]>;
}
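
// sse12_move instantiates both the VEX-encoded AVX form (V-prefixed names) and
// the legacy SSE form, which ties $src1 to $dst, together with the matching
// register-to-memory store variants.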

multiclass sse12_move<RegisterClass RC, SDNode OpNode, ValueType vt,
                      X86MemOperand x86memop, string OpcodeStr,
                      Domain d = GenericDomain> {
  // AVX
  defm V#NAME : sse12_move_rr<RC, OpNode, vt, x86memop, OpcodeStr,
                              "\t{$src2, $src1, $dst|$dst, $src1, $src2}", d>,
                              VEX_4V, VEX_LIG;

  def V#NAME#mr : SI<0x11, MRMDestMem, (outs), (ins x86memop:$dst, RC:$src),
                     !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                     [(store RC:$src, addr:$dst)], IIC_SSE_MOV_S_MR, d>,
                     VEX, VEX_LIG, Sched<[WriteStore]>;
  // SSE1 & 2
  let Constraints = "$src1 = $dst" in {
    defm NAME : sse12_move_rr<RC, OpNode, vt, x86memop, OpcodeStr,
                              "\t{$src2, $dst|$dst, $src2}", d>;
  }

  def NAME#mr : SI<0x11, MRMDestMem, (outs), (ins x86memop:$dst, RC:$src),
                   !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                   [(store RC:$src, addr:$dst)], IIC_SSE_MOV_S_MR, d>,
                   Sched<[WriteStore]>;
}

// Loading from memory automatically zeros the upper bits.
multiclass sse12_move_rm<RegisterClass RC, X86MemOperand x86memop,
                         PatFrag mem_pat, string OpcodeStr,
                         Domain d = GenericDomain> {
  def V#NAME#rm : SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
                     !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                     [(set RC:$dst, (mem_pat addr:$src))],
                     IIC_SSE_MOV_S_RM, d>, VEX, VEX_LIG, Sched<[WriteLoad]>;
  def NAME#rm : SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
                   !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                   [(set RC:$dst, (mem_pat addr:$src))],
                   IIC_SSE_MOV_S_RM, d>, Sched<[WriteLoad]>;
}

defm MOVSS : sse12_move<FR32, X86Movss, v4f32, f32mem, "movss",
                        SSEPackedSingle>, XS;
defm MOVSD : sse12_move<FR64, X86Movsd, v2f64, f64mem, "movsd",
                        SSEPackedDouble>, XD;

let canFoldAsLoad = 1, isReMaterializable = 1 in {
  defm MOVSS : sse12_move_rm<FR32, f32mem, loadf32, "movss",
                             SSEPackedSingle>, XS;

  let AddedComplexity = 20 in
    defm MOVSD : sse12_move_rm<FR64, f64mem, loadf64, "movsd",
                               SSEPackedDouble>, XD;
}
620 let Predicates = [UseAVX] in {
621 let AddedComplexity = 20 in {
622 // MOVSSrm zeros the high parts of the register; represent this
623 // with SUBREG_TO_REG. The AVX versions also write: DST[255:128] <- 0
624 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
625 (COPY_TO_REGCLASS (VMOVSSrm addr:$src), VR128)>;
626 def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
627 (COPY_TO_REGCLASS (VMOVSSrm addr:$src), VR128)>;
628 def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
629 (COPY_TO_REGCLASS (VMOVSSrm addr:$src), VR128)>;
631 // MOVSDrm zeros the high parts of the register; represent this
632 // with SUBREG_TO_REG. The AVX versions also write: DST[255:128] <- 0
633 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
634 (COPY_TO_REGCLASS (VMOVSDrm addr:$src), VR128)>;
635 def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
636 (COPY_TO_REGCLASS (VMOVSDrm addr:$src), VR128)>;
637 def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
638 (COPY_TO_REGCLASS (VMOVSDrm addr:$src), VR128)>;
639 def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
640 (COPY_TO_REGCLASS (VMOVSDrm addr:$src), VR128)>;
641 def : Pat<(v2f64 (X86vzload addr:$src)),
642 (COPY_TO_REGCLASS (VMOVSDrm addr:$src), VR128)>;

  // Represent the same patterns above but in the form they appear for
  // 256-bit types.
646 def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
647 (v4i32 (scalar_to_vector (loadi32 addr:$src))), (iPTR 0)))),
648 (SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_xmm)>;
649 def : Pat<(v8f32 (X86vzmovl (insert_subvector undef,
650 (v4f32 (scalar_to_vector (loadf32 addr:$src))), (iPTR 0)))),
651 (SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_xmm)>;
652 def : Pat<(v4f64 (X86vzmovl (insert_subvector undef,
653 (v2f64 (scalar_to_vector (loadf64 addr:$src))), (iPTR 0)))),
654 (SUBREG_TO_REG (i32 0), (VMOVSDrm addr:$src), sub_xmm)>;
656 def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
657 (v2i64 (scalar_to_vector (loadi64 addr:$src))), (iPTR 0)))),
658 (SUBREG_TO_REG (i64 0), (VMOVSDrm addr:$src), sub_xmm)>;

  // Extract and store.
  def : Pat<(store (f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
                   addr:$dst),
            (VMOVSSmr addr:$dst, (COPY_TO_REGCLASS (v4f32 VR128:$src), FR32))>;
  def : Pat<(store (f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
                   addr:$dst),
            (VMOVSDmr addr:$dst, (COPY_TO_REGCLASS (v2f64 VR128:$src), FR64))>;
668 // Shuffle with VMOVSS
669 def : Pat<(v4i32 (X86Movss VR128:$src1, VR128:$src2)),
670 (VMOVSSrr (v4i32 VR128:$src1),
671 (COPY_TO_REGCLASS (v4i32 VR128:$src2), FR32))>;
672 def : Pat<(v4f32 (X86Movss VR128:$src1, VR128:$src2)),
673 (VMOVSSrr (v4f32 VR128:$src1),
674 (COPY_TO_REGCLASS (v4f32 VR128:$src2), FR32))>;

  // 256-bit variants
  def : Pat<(v8i32 (X86Movss VR256:$src1, VR256:$src2)),
            (SUBREG_TO_REG (i32 0),
              (VMOVSSrr (EXTRACT_SUBREG (v8i32 VR256:$src1), sub_xmm),
                        (EXTRACT_SUBREG (v8i32 VR256:$src2), sub_xmm)),
              sub_xmm)>;
  def : Pat<(v8f32 (X86Movss VR256:$src1, VR256:$src2)),
            (SUBREG_TO_REG (i32 0),
              (VMOVSSrr (EXTRACT_SUBREG (v8f32 VR256:$src1), sub_xmm),
                        (EXTRACT_SUBREG (v8f32 VR256:$src2), sub_xmm)),
              sub_xmm)>;
688 // Shuffle with VMOVSD
689 def : Pat<(v2i64 (X86Movsd VR128:$src1, VR128:$src2)),
690 (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
691 def : Pat<(v2f64 (X86Movsd VR128:$src1, VR128:$src2)),
692 (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
693 def : Pat<(v4f32 (X86Movsd VR128:$src1, VR128:$src2)),
694 (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
695 def : Pat<(v4i32 (X86Movsd VR128:$src1, VR128:$src2)),
696 (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;

  // 256-bit variants
  def : Pat<(v4i64 (X86Movsd VR256:$src1, VR256:$src2)),
            (SUBREG_TO_REG (i32 0),
              (VMOVSDrr (EXTRACT_SUBREG (v4i64 VR256:$src1), sub_xmm),
                        (EXTRACT_SUBREG (v4i64 VR256:$src2), sub_xmm)),
              sub_xmm)>;
  def : Pat<(v4f64 (X86Movsd VR256:$src1, VR256:$src2)),
            (SUBREG_TO_REG (i32 0),
              (VMOVSDrr (EXTRACT_SUBREG (v4f64 VR256:$src1), sub_xmm),
                        (EXTRACT_SUBREG (v4f64 VR256:$src2), sub_xmm)),
              sub_xmm)>;

  // FIXME: Instead of an X86Movlps there should be an X86Movsd here, the
  // problem is during lowering, where it's not possible to recognize the fold
  // because it has two uses through a bitcast. One use disappears at isel time
  // and the fold opportunity reappears.
  def : Pat<(v2f64 (X86Movlpd VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v2i64 (X86Movlpd VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v4f32 (X86Movlps VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v4i32 (X86Movlps VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
}
724 let Predicates = [UseSSE1] in {
725 let Predicates = [NoSSE41], AddedComplexity = 15 in {
726 // Move scalar to XMM zero-extended, zeroing a VR128 then do a
727 // MOVSS to the lower bits.
728 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32:$src)))),
729 (MOVSSrr (v4f32 (V_SET0)), FR32:$src)>;
730 def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
731 (MOVSSrr (v4f32 (V_SET0)), (COPY_TO_REGCLASS VR128:$src, FR32))>;
732 def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
            (MOVSSrr (v4i32 (V_SET0)), (COPY_TO_REGCLASS VR128:$src, FR32))>;
  }
736 let AddedComplexity = 20 in {
737 // MOVSSrm already zeros the high parts of the register.
738 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
739 (COPY_TO_REGCLASS (MOVSSrm addr:$src), VR128)>;
740 def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
741 (COPY_TO_REGCLASS (MOVSSrm addr:$src), VR128)>;
742 def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
            (COPY_TO_REGCLASS (MOVSSrm addr:$src), VR128)>;
  }

  // Extract and store.
  def : Pat<(store (f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
                   addr:$dst),
            (MOVSSmr addr:$dst, (COPY_TO_REGCLASS VR128:$src, FR32))>;
751 // Shuffle with MOVSS
752 def : Pat<(v4i32 (X86Movss VR128:$src1, VR128:$src2)),
753 (MOVSSrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR32))>;
754 def : Pat<(v4f32 (X86Movss VR128:$src1, VR128:$src2)),
            (MOVSSrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR32))>;
}
758 let Predicates = [UseSSE2] in {
759 let Predicates = [NoSSE41], AddedComplexity = 15 in {
760 // Move scalar to XMM zero-extended, zeroing a VR128 then do a
761 // MOVSD to the lower bits.
762 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64:$src)))),
            (MOVSDrr (v2f64 (V_SET0)), FR64:$src)>;
  }
766 let AddedComplexity = 20 in {
767 // MOVSDrm already zeros the high parts of the register.
768 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
769 (COPY_TO_REGCLASS (MOVSDrm addr:$src), VR128)>;
770 def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
771 (COPY_TO_REGCLASS (MOVSDrm addr:$src), VR128)>;
772 def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
773 (COPY_TO_REGCLASS (MOVSDrm addr:$src), VR128)>;
774 def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
775 (COPY_TO_REGCLASS (MOVSDrm addr:$src), VR128)>;
776 def : Pat<(v2f64 (X86vzload addr:$src)),
            (COPY_TO_REGCLASS (MOVSDrm addr:$src), VR128)>;
  }

  // Extract and store.
  def : Pat<(store (f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
                   addr:$dst),
            (MOVSDmr addr:$dst, (COPY_TO_REGCLASS VR128:$src, FR64))>;
785 // Shuffle with MOVSD
786 def : Pat<(v2i64 (X86Movsd VR128:$src1, VR128:$src2)),
787 (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
788 def : Pat<(v2f64 (X86Movsd VR128:$src1, VR128:$src2)),
789 (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
790 def : Pat<(v4f32 (X86Movsd VR128:$src1, VR128:$src2)),
791 (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
792 def : Pat<(v4i32 (X86Movsd VR128:$src1, VR128:$src2)),
793 (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;

  // FIXME: Instead of an X86Movlps there should be an X86Movsd here, the
  // problem is during lowering, where it's not possible to recognize the fold
  // because it has two uses through a bitcast. One use disappears at isel time
  // and the fold opportunity reappears.
799 def : Pat<(v2f64 (X86Movlpd VR128:$src1, VR128:$src2)),
800 (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
801 def : Pat<(v2i64 (X86Movlpd VR128:$src1, VR128:$src2)),
802 (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
803 def : Pat<(v4f32 (X86Movlps VR128:$src1, VR128:$src2)),
804 (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
805 def : Pat<(v4i32 (X86Movlps VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move Aligned/Unaligned FP Instructions
//===----------------------------------------------------------------------===//

multiclass sse12_mov_packed<bits<8> opc, RegisterClass RC,
                            X86MemOperand x86memop, PatFrag ld_frag,
                            string asm, Domain d,
                            OpndItins itins,
                            bit IsReMaterializable = 1> {
let hasSideEffects = 0 in
  def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
              !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [], itins.rr, d>,
           Sched<[WriteFShuffle]>;
let canFoldAsLoad = 1, isReMaterializable = IsReMaterializable in
  def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
              !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
              [(set RC:$dst, (ld_frag addr:$src))], itins.rm, d>,
           Sched<[WriteLoad]>;
}
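
// The defm blocks below instantiate the aligned (movaps/movapd) and unaligned
// (movups/movupd) packed moves for 128-bit and 256-bit registers; the
// V-prefixed variants are the VEX-encoded AVX forms.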

let Predicates = [HasAVX, NoVLX] in {
defm VMOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
                                "movaps", SSEPackedSingle, SSE_MOVA_ITINS>,
                                PS, VEX;
defm VMOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
                                "movapd", SSEPackedDouble, SSE_MOVA_ITINS>,
                                PD, VEX;
defm VMOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
                                "movups", SSEPackedSingle, SSE_MOVU_ITINS>,
                                PS, VEX;
defm VMOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
                                "movupd", SSEPackedDouble, SSE_MOVU_ITINS, 0>,
                                PD, VEX;

defm VMOVAPSY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv8f32,
                                 "movaps", SSEPackedSingle, SSE_MOVA_ITINS>,
                                 PS, VEX, VEX_L;
defm VMOVAPDY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv4f64,
                                 "movapd", SSEPackedDouble, SSE_MOVA_ITINS>,
                                 PD, VEX, VEX_L;
defm VMOVUPSY : sse12_mov_packed<0x10, VR256, f256mem, loadv8f32,
                                 "movups", SSEPackedSingle, SSE_MOVU_ITINS>,
                                 PS, VEX, VEX_L;
defm VMOVUPDY : sse12_mov_packed<0x10, VR256, f256mem, loadv4f64,
                                 "movupd", SSEPackedDouble, SSE_MOVU_ITINS, 0>,
                                 PD, VEX, VEX_L;
}

let Predicates = [UseSSE1] in {
defm MOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
                               "movaps", SSEPackedSingle, SSE_MOVA_ITINS>,
                               PS;
defm MOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
                               "movups", SSEPackedSingle, SSE_MOVU_ITINS>,
                               PS;
}

let Predicates = [UseSSE2] in {
defm MOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
                               "movapd", SSEPackedDouble, SSE_MOVA_ITINS>,
                               PD;
defm MOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
                               "movupd", SSEPackedDouble, SSE_MOVU_ITINS, 0>,
                               PD;
}
874 let SchedRW = [WriteStore], Predicates = [HasAVX, NoVLX] in {
875 def VMOVAPSmr : VPSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
876 "movaps\t{$src, $dst|$dst, $src}",
877 [(alignedstore (v4f32 VR128:$src), addr:$dst)],
878 IIC_SSE_MOVA_P_MR>, VEX;
879 def VMOVAPDmr : VPDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
880 "movapd\t{$src, $dst|$dst, $src}",
881 [(alignedstore (v2f64 VR128:$src), addr:$dst)],
882 IIC_SSE_MOVA_P_MR>, VEX;
883 def VMOVUPSmr : VPSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
884 "movups\t{$src, $dst|$dst, $src}",
885 [(store (v4f32 VR128:$src), addr:$dst)],
886 IIC_SSE_MOVU_P_MR>, VEX;
887 def VMOVUPDmr : VPDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
888 "movupd\t{$src, $dst|$dst, $src}",
889 [(store (v2f64 VR128:$src), addr:$dst)],
890 IIC_SSE_MOVU_P_MR>, VEX;
891 def VMOVAPSYmr : VPSI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
892 "movaps\t{$src, $dst|$dst, $src}",
893 [(alignedstore256 (v8f32 VR256:$src), addr:$dst)],
894 IIC_SSE_MOVA_P_MR>, VEX, VEX_L;
895 def VMOVAPDYmr : VPDI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
896 "movapd\t{$src, $dst|$dst, $src}",
897 [(alignedstore256 (v4f64 VR256:$src), addr:$dst)],
898 IIC_SSE_MOVA_P_MR>, VEX, VEX_L;
899 def VMOVUPSYmr : VPSI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
900 "movups\t{$src, $dst|$dst, $src}",
901 [(store (v8f32 VR256:$src), addr:$dst)],
902 IIC_SSE_MOVU_P_MR>, VEX, VEX_L;
903 def VMOVUPDYmr : VPDI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
904 "movupd\t{$src, $dst|$dst, $src}",
905 [(store (v4f64 VR256:$src), addr:$dst)],
                      IIC_SSE_MOVU_P_MR>, VEX, VEX_L;
}

// For disassembler
let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0,
    SchedRW = [WriteFShuffle] in {
  def VMOVAPSrr_REV : VPSI<0x29, MRMDestReg, (outs VR128:$dst),
                           (ins VR128:$src),
                           "movaps\t{$src, $dst|$dst, $src}", [],
                           IIC_SSE_MOVA_P_RR>, VEX;
  def VMOVAPDrr_REV : VPDI<0x29, MRMDestReg, (outs VR128:$dst),
                           (ins VR128:$src),
                           "movapd\t{$src, $dst|$dst, $src}", [],
                           IIC_SSE_MOVA_P_RR>, VEX;
  def VMOVUPSrr_REV : VPSI<0x11, MRMDestReg, (outs VR128:$dst),
                           (ins VR128:$src),
                           "movups\t{$src, $dst|$dst, $src}", [],
                           IIC_SSE_MOVU_P_RR>, VEX;
  def VMOVUPDrr_REV : VPDI<0x11, MRMDestReg, (outs VR128:$dst),
                           (ins VR128:$src),
                           "movupd\t{$src, $dst|$dst, $src}", [],
                           IIC_SSE_MOVU_P_RR>, VEX;
  def VMOVAPSYrr_REV : VPSI<0x29, MRMDestReg, (outs VR256:$dst),
                            (ins VR256:$src),
                            "movaps\t{$src, $dst|$dst, $src}", [],
                            IIC_SSE_MOVA_P_RR>, VEX, VEX_L;
  def VMOVAPDYrr_REV : VPDI<0x29, MRMDestReg, (outs VR256:$dst),
                            (ins VR256:$src),
                            "movapd\t{$src, $dst|$dst, $src}", [],
                            IIC_SSE_MOVA_P_RR>, VEX, VEX_L;
  def VMOVUPSYrr_REV : VPSI<0x11, MRMDestReg, (outs VR256:$dst),
                            (ins VR256:$src),
                            "movups\t{$src, $dst|$dst, $src}", [],
                            IIC_SSE_MOVU_P_RR>, VEX, VEX_L;
  def VMOVUPDYrr_REV : VPDI<0x11, MRMDestReg, (outs VR256:$dst),
                            (ins VR256:$src),
                            "movupd\t{$src, $dst|$dst, $src}", [],
                            IIC_SSE_MOVU_P_RR>, VEX, VEX_L;
}
946 let Predicates = [HasAVX] in {
947 def : Pat<(v8i32 (X86vzmovl
948 (insert_subvector undef, (v4i32 VR128:$src), (iPTR 0)))),
949 (SUBREG_TO_REG (i32 0), (VMOVAPSrr VR128:$src), sub_xmm)>;
950 def : Pat<(v4i64 (X86vzmovl
951 (insert_subvector undef, (v2i64 VR128:$src), (iPTR 0)))),
952 (SUBREG_TO_REG (i32 0), (VMOVAPSrr VR128:$src), sub_xmm)>;
953 def : Pat<(v8f32 (X86vzmovl
954 (insert_subvector undef, (v4f32 VR128:$src), (iPTR 0)))),
955 (SUBREG_TO_REG (i32 0), (VMOVAPSrr VR128:$src), sub_xmm)>;
956 def : Pat<(v4f64 (X86vzmovl
957 (insert_subvector undef, (v2f64 VR128:$src), (iPTR 0)))),
          (SUBREG_TO_REG (i32 0), (VMOVAPSrr VR128:$src), sub_xmm)>;
}
962 def : Pat<(int_x86_avx_storeu_ps_256 addr:$dst, VR256:$src),
963 (VMOVUPSYmr addr:$dst, VR256:$src)>;
964 def : Pat<(int_x86_avx_storeu_pd_256 addr:$dst, VR256:$src),
965 (VMOVUPDYmr addr:$dst, VR256:$src)>;
967 let SchedRW = [WriteStore] in {
968 def MOVAPSmr : PSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
969 "movaps\t{$src, $dst|$dst, $src}",
970 [(alignedstore (v4f32 VR128:$src), addr:$dst)],
972 def MOVAPDmr : PDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
973 "movapd\t{$src, $dst|$dst, $src}",
974 [(alignedstore (v2f64 VR128:$src), addr:$dst)],
976 def MOVUPSmr : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
977 "movups\t{$src, $dst|$dst, $src}",
978 [(store (v4f32 VR128:$src), addr:$dst)],
980 def MOVUPDmr : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
981 "movupd\t{$src, $dst|$dst, $src}",
982 [(store (v2f64 VR128:$src), addr:$dst)],
987 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0,
988 SchedRW = [WriteFShuffle] in {
989 def MOVAPSrr_REV : PSI<0x29, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
990 "movaps\t{$src, $dst|$dst, $src}", [],
992 def MOVAPDrr_REV : PDI<0x29, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
993 "movapd\t{$src, $dst|$dst, $src}", [],
995 def MOVUPSrr_REV : PSI<0x11, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
996 "movups\t{$src, $dst|$dst, $src}", [],
998 def MOVUPDrr_REV : PDI<0x11, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
999 "movupd\t{$src, $dst|$dst, $src}", [],
1003 let Predicates = [HasAVX] in {
1004 def : Pat<(int_x86_sse_storeu_ps addr:$dst, VR128:$src),
1005 (VMOVUPSmr addr:$dst, VR128:$src)>;
1006 def : Pat<(int_x86_sse2_storeu_pd addr:$dst, VR128:$src),
          (VMOVUPDmr addr:$dst, VR128:$src)>;
}
1010 let Predicates = [UseSSE1] in
1011 def : Pat<(int_x86_sse_storeu_ps addr:$dst, VR128:$src),
1012 (MOVUPSmr addr:$dst, VR128:$src)>;
1013 let Predicates = [UseSSE2] in
1014 def : Pat<(int_x86_sse2_storeu_pd addr:$dst, VR128:$src),
1015 (MOVUPDmr addr:$dst, VR128:$src)>;
1017 // Use vmovaps/vmovups for AVX integer load/store.
1018 let Predicates = [HasAVX, NoVLX] in {
1019 // 128-bit load/store
1020 def : Pat<(alignedloadv2i64 addr:$src),
1021 (VMOVAPSrm addr:$src)>;
1022 def : Pat<(loadv2i64 addr:$src),
1023 (VMOVUPSrm addr:$src)>;
1025 def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
1026 (VMOVAPSmr addr:$dst, VR128:$src)>;
1027 def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
1028 (VMOVAPSmr addr:$dst, VR128:$src)>;
1029 def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
1030 (VMOVAPSmr addr:$dst, VR128:$src)>;
1031 def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
1032 (VMOVAPSmr addr:$dst, VR128:$src)>;
1033 def : Pat<(store (v2i64 VR128:$src), addr:$dst),
1034 (VMOVUPSmr addr:$dst, VR128:$src)>;
1035 def : Pat<(store (v4i32 VR128:$src), addr:$dst),
1036 (VMOVUPSmr addr:$dst, VR128:$src)>;
1037 def : Pat<(store (v8i16 VR128:$src), addr:$dst),
1038 (VMOVUPSmr addr:$dst, VR128:$src)>;
1039 def : Pat<(store (v16i8 VR128:$src), addr:$dst),
1040 (VMOVUPSmr addr:$dst, VR128:$src)>;
1042 // 256-bit load/store
1043 def : Pat<(alignedloadv4i64 addr:$src),
1044 (VMOVAPSYrm addr:$src)>;
1045 def : Pat<(loadv4i64 addr:$src),
1046 (VMOVUPSYrm addr:$src)>;
1047 def : Pat<(alignedstore256 (v4i64 VR256:$src), addr:$dst),
1048 (VMOVAPSYmr addr:$dst, VR256:$src)>;
1049 def : Pat<(alignedstore256 (v8i32 VR256:$src), addr:$dst),
1050 (VMOVAPSYmr addr:$dst, VR256:$src)>;
1051 def : Pat<(alignedstore256 (v16i16 VR256:$src), addr:$dst),
1052 (VMOVAPSYmr addr:$dst, VR256:$src)>;
1053 def : Pat<(alignedstore256 (v32i8 VR256:$src), addr:$dst),
1054 (VMOVAPSYmr addr:$dst, VR256:$src)>;
1055 def : Pat<(store (v4i64 VR256:$src), addr:$dst),
1056 (VMOVUPSYmr addr:$dst, VR256:$src)>;
1057 def : Pat<(store (v8i32 VR256:$src), addr:$dst),
1058 (VMOVUPSYmr addr:$dst, VR256:$src)>;
1059 def : Pat<(store (v16i16 VR256:$src), addr:$dst),
1060 (VMOVUPSYmr addr:$dst, VR256:$src)>;
1061 def : Pat<(store (v32i8 VR256:$src), addr:$dst),
1062 (VMOVUPSYmr addr:$dst, VR256:$src)>;

  // Special patterns for storing subvector extracts of the lower 128 bits.
  // It's cheaper to just use VMOVAPS/VMOVUPS instead of VEXTRACTF128mr.
1066 def : Pat<(alignedstore (v2f64 (extract_subvector
1067 (v4f64 VR256:$src), (iPTR 0))), addr:$dst),
1068 (VMOVAPDmr addr:$dst, (v2f64 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
1069 def : Pat<(alignedstore (v4f32 (extract_subvector
1070 (v8f32 VR256:$src), (iPTR 0))), addr:$dst),
1071 (VMOVAPSmr addr:$dst, (v4f32 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
1072 def : Pat<(alignedstore (v2i64 (extract_subvector
1073 (v4i64 VR256:$src), (iPTR 0))), addr:$dst),
1074 (VMOVAPDmr addr:$dst, (v2i64 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
1075 def : Pat<(alignedstore (v4i32 (extract_subvector
1076 (v8i32 VR256:$src), (iPTR 0))), addr:$dst),
1077 (VMOVAPSmr addr:$dst, (v4i32 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
1078 def : Pat<(alignedstore (v8i16 (extract_subvector
1079 (v16i16 VR256:$src), (iPTR 0))), addr:$dst),
1080 (VMOVAPSmr addr:$dst, (v8i16 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
1081 def : Pat<(alignedstore (v16i8 (extract_subvector
1082 (v32i8 VR256:$src), (iPTR 0))), addr:$dst),
1083 (VMOVAPSmr addr:$dst, (v16i8 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
1085 def : Pat<(store (v2f64 (extract_subvector
1086 (v4f64 VR256:$src), (iPTR 0))), addr:$dst),
1087 (VMOVUPDmr addr:$dst, (v2f64 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
1088 def : Pat<(store (v4f32 (extract_subvector
1089 (v8f32 VR256:$src), (iPTR 0))), addr:$dst),
1090 (VMOVUPSmr addr:$dst, (v4f32 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
1091 def : Pat<(store (v2i64 (extract_subvector
1092 (v4i64 VR256:$src), (iPTR 0))), addr:$dst),
1093 (VMOVUPDmr addr:$dst, (v2i64 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
1094 def : Pat<(store (v4i32 (extract_subvector
1095 (v8i32 VR256:$src), (iPTR 0))), addr:$dst),
1096 (VMOVUPSmr addr:$dst, (v4i32 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
1097 def : Pat<(store (v8i16 (extract_subvector
1098 (v16i16 VR256:$src), (iPTR 0))), addr:$dst),
1099 (VMOVUPSmr addr:$dst, (v8i16 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
1100 def : Pat<(store (v16i8 (extract_subvector
1101 (v32i8 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVUPSmr addr:$dst, (v16i8 (EXTRACT_SUBREG VR256:$src, sub_xmm)))>;
}
1105 // Use movaps / movups for SSE integer load / store (one byte shorter).
1106 // The instructions selected below are then converted to MOVDQA/MOVDQU
1107 // during the SSE domain pass.
1108 let Predicates = [UseSSE1] in {
1109 def : Pat<(alignedloadv2i64 addr:$src),
1110 (MOVAPSrm addr:$src)>;
1111 def : Pat<(loadv2i64 addr:$src),
1112 (MOVUPSrm addr:$src)>;
1114 def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
1115 (MOVAPSmr addr:$dst, VR128:$src)>;
1116 def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
1117 (MOVAPSmr addr:$dst, VR128:$src)>;
1118 def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
1119 (MOVAPSmr addr:$dst, VR128:$src)>;
1120 def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
1121 (MOVAPSmr addr:$dst, VR128:$src)>;
1122 def : Pat<(store (v2i64 VR128:$src), addr:$dst),
1123 (MOVUPSmr addr:$dst, VR128:$src)>;
1124 def : Pat<(store (v4i32 VR128:$src), addr:$dst),
1125 (MOVUPSmr addr:$dst, VR128:$src)>;
1126 def : Pat<(store (v8i16 VR128:$src), addr:$dst),
1127 (MOVUPSmr addr:$dst, VR128:$src)>;
1128 def : Pat<(store (v16i8 VR128:$src), addr:$dst),
            (MOVUPSmr addr:$dst, VR128:$src)>;
}
1132 // Alias instruction to load FR32 or FR64 from f128mem using movaps. Upper
1133 // bits are disregarded. FIXME: Set encoding to pseudo!
1134 let canFoldAsLoad = 1, isReMaterializable = 1, SchedRW = [WriteLoad] in {
1135 let isCodeGenOnly = 1 in {
1136 def FsVMOVAPSrm : VPSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
1137 "movaps\t{$src, $dst|$dst, $src}",
1138 [(set FR32:$dst, (alignedloadfsf32 addr:$src))],
1139 IIC_SSE_MOVA_P_RM>, VEX;
1140 def FsVMOVAPDrm : VPDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
1141 "movapd\t{$src, $dst|$dst, $src}",
1142 [(set FR64:$dst, (alignedloadfsf64 addr:$src))],
1143 IIC_SSE_MOVA_P_RM>, VEX;
1144 def FsMOVAPSrm : PSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
1145 "movaps\t{$src, $dst|$dst, $src}",
                     [(set FR32:$dst, (alignedloadfsf32 addr:$src))],
                     IIC_SSE_MOVA_P_RM>;
1148 def FsMOVAPDrm : PDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
1149 "movapd\t{$src, $dst|$dst, $src}",
                     [(set FR64:$dst, (alignedloadfsf64 addr:$src))],
                     IIC_SSE_MOVA_P_RM>;
}
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move Low packed FP Instructions
//===----------------------------------------------------------------------===//

multiclass sse12_mov_hilo_packed_base<bits<8>opc, SDNode psnode, SDNode pdnode,
                                      string base_opc, string asm_opr,
                                      InstrItinClass itin> {
  def PSrm : PI<opc, MRMSrcMem,
                (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
                !strconcat(base_opc, "s", asm_opr),
                [(set VR128:$dst,
                  (psnode VR128:$src1,
                    (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))))],
                itin, SSEPackedSingle>, PS,
                Sched<[WriteFShuffleLd, ReadAfterLd]>;

  def PDrm : PI<opc, MRMSrcMem,
                (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
                !strconcat(base_opc, "d", asm_opr),
                [(set VR128:$dst, (v2f64 (pdnode VR128:$src1,
                  (scalar_to_vector (loadf64 addr:$src2)))))],
                itin, SSEPackedDouble>, PD,
                Sched<[WriteFShuffleLd, ReadAfterLd]>;
}
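
// sse12_mov_hilo_packed wraps the base class above to create both the
// VEX-encoded (V-prefixed) AVX form and the legacy SSE form, which ties
// $src1 to $dst.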

multiclass sse12_mov_hilo_packed<bits<8>opc, SDNode psnode, SDNode pdnode,
                                 string base_opc, InstrItinClass itin> {
  defm V#NAME : sse12_mov_hilo_packed_base<opc, psnode, pdnode, base_opc,
                                    "\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                                    itin>, VEX_4V;

  let Constraints = "$src1 = $dst" in
    defm NAME : sse12_mov_hilo_packed_base<opc, psnode, pdnode, base_opc,
                                    "\t{$src2, $dst|$dst, $src2}",
                                    itin>;
}

let AddedComplexity = 20 in {
  defm MOVL : sse12_mov_hilo_packed<0x12, X86Movlps, X86Movlpd, "movlp",
                                    IIC_SSE_MOV_LH>;
}
1198 let SchedRW = [WriteStore] in {
1199 def VMOVLPSmr : VPSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
1200 "movlps\t{$src, $dst|$dst, $src}",
1201 [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
1202 (iPTR 0))), addr:$dst)],
1203 IIC_SSE_MOV_LH>, VEX;
1204 def VMOVLPDmr : VPDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
1205 "movlpd\t{$src, $dst|$dst, $src}",
1206 [(store (f64 (vector_extract (v2f64 VR128:$src),
1207 (iPTR 0))), addr:$dst)],
1208 IIC_SSE_MOV_LH>, VEX;
1209 def MOVLPSmr : PSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
1210 "movlps\t{$src, $dst|$dst, $src}",
1211 [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
                                 (iPTR 0))), addr:$dst)],
                      IIC_SSE_MOV_LH>;
1214 def MOVLPDmr : PDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
1215 "movlpd\t{$src, $dst|$dst, $src}",
1216 [(store (f64 (vector_extract (v2f64 VR128:$src),
                                 (iPTR 0))), addr:$dst)],
                      IIC_SSE_MOV_LH>;
}
1221 let Predicates = [HasAVX] in {
1222 // Shuffle with VMOVLPS
1223 def : Pat<(v4f32 (X86Movlps VR128:$src1, (load addr:$src2))),
1224 (VMOVLPSrm VR128:$src1, addr:$src2)>;
1225 def : Pat<(v4i32 (X86Movlps VR128:$src1, (load addr:$src2))),
1226 (VMOVLPSrm VR128:$src1, addr:$src2)>;
1228 // Shuffle with VMOVLPD
1229 def : Pat<(v2f64 (X86Movlpd VR128:$src1, (load addr:$src2))),
1230 (VMOVLPDrm VR128:$src1, addr:$src2)>;
1231 def : Pat<(v2i64 (X86Movlpd VR128:$src1, (load addr:$src2))),
1232 (VMOVLPDrm VR128:$src1, addr:$src2)>;
1233 def : Pat<(v2f64 (X86Movsd VR128:$src1,
1234 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
1235 (VMOVLPDrm VR128:$src1, addr:$src2)>;

  // Store patterns
  def : Pat<(store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)),
                   addr:$src1),
            (VMOVLPSmr addr:$src1, VR128:$src2)>;
  def : Pat<(store (v4i32 (X86Movlps
                   (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2)), addr:$src1),
            (VMOVLPSmr addr:$src1, VR128:$src2)>;
  def : Pat<(store (v2f64 (X86Movlpd (load addr:$src1), VR128:$src2)),
                   addr:$src1),
            (VMOVLPDmr addr:$src1, VR128:$src2)>;
  def : Pat<(store (v2i64 (X86Movlpd (load addr:$src1), VR128:$src2)),
                   addr:$src1),
            (VMOVLPDmr addr:$src1, VR128:$src2)>;
}
1252 let Predicates = [UseSSE1] in {
1253 // (store (vector_shuffle (load addr), v2, <4, 5, 2, 3>), addr) using MOVLPS
1254 def : Pat<(store (i64 (vector_extract (bc_v2i64 (v4f32 VR128:$src2)),
1255 (iPTR 0))), addr:$src1),
1256 (MOVLPSmr addr:$src1, VR128:$src2)>;
1258 // Shuffle with MOVLPS
1259 def : Pat<(v4f32 (X86Movlps VR128:$src1, (load addr:$src2))),
1260 (MOVLPSrm VR128:$src1, addr:$src2)>;
1261 def : Pat<(v4i32 (X86Movlps VR128:$src1, (load addr:$src2))),
1262 (MOVLPSrm VR128:$src1, addr:$src2)>;
1263 def : Pat<(X86Movlps VR128:$src1,
1264 (bc_v4f32 (v2i64 (scalar_to_vector (loadi64 addr:$src2))))),
1265 (MOVLPSrm VR128:$src1, addr:$src2)>;

  // Store patterns
  def : Pat<(store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)),
                   addr:$src1),
            (MOVLPSmr addr:$src1, VR128:$src2)>;
  def : Pat<(store (v4i32 (X86Movlps
                   (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2)),
                   addr:$src1),
            (MOVLPSmr addr:$src1, VR128:$src2)>;
}
1277 let Predicates = [UseSSE2] in {
1278 // Shuffle with MOVLPD
1279 def : Pat<(v2f64 (X86Movlpd VR128:$src1, (load addr:$src2))),
1280 (MOVLPDrm VR128:$src1, addr:$src2)>;
1281 def : Pat<(v2i64 (X86Movlpd VR128:$src1, (load addr:$src2))),
1282 (MOVLPDrm VR128:$src1, addr:$src2)>;
1283 def : Pat<(v2f64 (X86Movsd VR128:$src1,
1284 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
1285 (MOVLPDrm VR128:$src1, addr:$src2)>;

  // Store patterns
  def : Pat<(store (v2f64 (X86Movlpd (load addr:$src1), VR128:$src2)),
                   addr:$src1),
            (MOVLPDmr addr:$src1, VR128:$src2)>;
  def : Pat<(store (v2i64 (X86Movlpd (load addr:$src1), VR128:$src2)),
                   addr:$src1),
            (MOVLPDmr addr:$src1, VR128:$src2)>;
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move Hi packed FP Instructions
//===----------------------------------------------------------------------===//

let AddedComplexity = 20 in {
  defm MOVH : sse12_mov_hilo_packed<0x16, X86Movlhps, X86Movlhpd, "movhp",
                                    IIC_SSE_MOV_LH>;
}
1305 let SchedRW = [WriteStore] in {
1306 // v2f64 extract element 1 is always custom lowered to unpack high to low
1307 // and extract element 0 so the non-store version isn't too horrible.
1308 def VMOVHPSmr : VPSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
1309 "movhps\t{$src, $dst|$dst, $src}",
1310 [(store (f64 (vector_extract
1311 (X86Unpckh (bc_v2f64 (v4f32 VR128:$src)),
1312 (bc_v2f64 (v4f32 VR128:$src))),
1313 (iPTR 0))), addr:$dst)], IIC_SSE_MOV_LH>, VEX;
1314 def VMOVHPDmr : VPDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
1315 "movhpd\t{$src, $dst|$dst, $src}",
1316 [(store (f64 (vector_extract
1317 (v2f64 (X86Unpckh VR128:$src, VR128:$src)),
1318 (iPTR 0))), addr:$dst)], IIC_SSE_MOV_LH>, VEX;
1319 def MOVHPSmr : PSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
1320 "movhps\t{$src, $dst|$dst, $src}",
1321 [(store (f64 (vector_extract
1322 (X86Unpckh (bc_v2f64 (v4f32 VR128:$src)),
1323 (bc_v2f64 (v4f32 VR128:$src))),
1324 (iPTR 0))), addr:$dst)], IIC_SSE_MOV_LH>;
1325 def MOVHPDmr : PDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
1326 "movhpd\t{$src, $dst|$dst, $src}",
1327 [(store (f64 (vector_extract
1328 (v2f64 (X86Unpckh VR128:$src, VR128:$src)),
                              (iPTR 0))), addr:$dst)], IIC_SSE_MOV_LH>;
}
1332 let Predicates = [HasAVX] in {
1334 def : Pat<(X86Movlhps VR128:$src1,
1335 (bc_v4f32 (v2i64 (scalar_to_vector (loadi64 addr:$src2))))),
1336 (VMOVHPSrm VR128:$src1, addr:$src2)>;
1337 def : Pat<(X86Movlhps VR128:$src1,
1338 (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
1339 (VMOVHPSrm VR128:$src1, addr:$src2)>;

  // FIXME: Instead of X86Unpckl, there should be an X86Movlhpd here, the
  // problem is during lowering, where it's not possible to recognize the load
  // fold because it has two uses through a bitcast. One use disappears at isel
  // time and the fold opportunity reappears.
1347 def : Pat<(v2f64 (X86Unpckl VR128:$src1,
1348 (scalar_to_vector (loadf64 addr:$src2)))),
1349 (VMOVHPDrm VR128:$src1, addr:$src2)>;
  // Also handle an i64 load because that may get selected as a faster way to
  // load the value.
1352 def : Pat<(v2f64 (X86Unpckl VR128:$src1,
1353 (bc_v2f64 (v2i64 (scalar_to_vector (loadi64 addr:$src2)))))),
1354 (VMOVHPDrm VR128:$src1, addr:$src2)>;
1356 def : Pat<(store (f64 (vector_extract
1357 (v2f64 (X86VPermilpi VR128:$src, (i8 1))),
1358 (iPTR 0))), addr:$dst),
            (VMOVHPDmr addr:$dst, VR128:$src)>;
}
1362 let Predicates = [UseSSE1] in {
1364 def : Pat<(X86Movlhps VR128:$src1,
1365 (bc_v4f32 (v2i64 (scalar_to_vector (loadi64 addr:$src2))))),
1366 (MOVHPSrm VR128:$src1, addr:$src2)>;
1367 def : Pat<(X86Movlhps VR128:$src1,
1368 (bc_v4f32 (v2i64 (X86vzload addr:$src2)))),
            (MOVHPSrm VR128:$src1, addr:$src2)>;
}
1372 let Predicates = [UseSSE2] in {

  // FIXME: Instead of X86Unpckl, there should be an X86Movlhpd here, the
  // problem is during lowering, where it's not possible to recognize the load
  // fold because it has two uses through a bitcast. One use disappears at isel
  // time and the fold opportunity reappears.
1379 def : Pat<(v2f64 (X86Unpckl VR128:$src1,
1380 (scalar_to_vector (loadf64 addr:$src2)))),
1381 (MOVHPDrm VR128:$src1, addr:$src2)>;
  // Also handle an i64 load because that may get selected as a faster way to
  // load the value.
1384 def : Pat<(v2f64 (X86Unpckl VR128:$src1,
1385 (bc_v2f64 (v2i64 (scalar_to_vector (loadi64 addr:$src2)))))),
1386 (MOVHPDrm VR128:$src1, addr:$src2)>;
1388 def : Pat<(store (f64 (vector_extract
1389 (v2f64 (X86Shufp VR128:$src, VR128:$src, (i8 1))),
1390 (iPTR 0))), addr:$dst),
            (MOVHPDmr addr:$dst, VR128:$src)>;
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move Low to High and High to Low packed FP Instructions
//===----------------------------------------------------------------------===//
1398 let AddedComplexity = 20, Predicates = [UseAVX] in {
1399 def VMOVLHPSrr : VPSI<0x16, MRMSrcReg, (outs VR128:$dst),
1400 (ins VR128:$src1, VR128:$src2),
1401 "movlhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1403 (v4f32 (X86Movlhps VR128:$src1, VR128:$src2)))],
1405 VEX_4V, Sched<[WriteFShuffle]>;
1406 def VMOVHLPSrr : VPSI<0x12, MRMSrcReg, (outs VR128:$dst),
1407 (ins VR128:$src1, VR128:$src2),
1408 "movhlps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1409 [(set VR128:$dst,
1410 (v4f32 (X86Movhlps VR128:$src1, VR128:$src2)))],
1411 IIC_SSE_MOV_LH>,
1412 VEX_4V, Sched<[WriteFShuffle]>;
1413 }
1414 let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
1415 def MOVLHPSrr : PSI<0x16, MRMSrcReg, (outs VR128:$dst),
1416 (ins VR128:$src1, VR128:$src2),
1417 "movlhps\t{$src2, $dst|$dst, $src2}",
1418 [(set VR128:$dst,
1419 (v4f32 (X86Movlhps VR128:$src1, VR128:$src2)))],
1420 IIC_SSE_MOV_LH>, Sched<[WriteFShuffle]>;
1421 def MOVHLPSrr : PSI<0x12, MRMSrcReg, (outs VR128:$dst),
1422 (ins VR128:$src1, VR128:$src2),
1423 "movhlps\t{$src2, $dst|$dst, $src2}",
1424 [(set VR128:$dst,
1425 (v4f32 (X86Movhlps VR128:$src1, VR128:$src2)))],
1426 IIC_SSE_MOV_LH>, Sched<[WriteFShuffle]>;
1427 }
1429 let Predicates = [UseAVX] in {
1431 def : Pat<(v4i32 (X86Movlhps VR128:$src1, VR128:$src2)),
1432 (VMOVLHPSrr VR128:$src1, VR128:$src2)>;
1433 def : Pat<(v2i64 (X86Movlhps VR128:$src1, VR128:$src2)),
1434 (VMOVLHPSrr (v2i64 VR128:$src1), VR128:$src2)>;
1437 def : Pat<(v4i32 (X86Movhlps VR128:$src1, VR128:$src2)),
1438 (VMOVHLPSrr VR128:$src1, VR128:$src2)>;
1439 }
1441 let Predicates = [UseSSE1] in {
1443 def : Pat<(v4i32 (X86Movlhps VR128:$src1, VR128:$src2)),
1444 (MOVLHPSrr VR128:$src1, VR128:$src2)>;
1445 def : Pat<(v2i64 (X86Movlhps VR128:$src1, VR128:$src2)),
1446 (MOVLHPSrr (v2i64 VR128:$src1), VR128:$src2)>;
1449 def : Pat<(v4i32 (X86Movhlps VR128:$src1, VR128:$src2)),
1450 (MOVHLPSrr VR128:$src1, VR128:$src2)>;
1451 }
1453 //===----------------------------------------------------------------------===//
1454 // SSE 1 & 2 - Conversion Instructions
1455 //===----------------------------------------------------------------------===//
1457 def SSE_CVT_PD : OpndItins<
1458 IIC_SSE_CVT_PD_RR, IIC_SSE_CVT_PD_RM
1459 >;
1461 let Sched = WriteCvtI2F in
1462 def SSE_CVT_PS : OpndItins<
1463 IIC_SSE_CVT_PS_RR, IIC_SSE_CVT_PS_RM
1464 >;
1466 let Sched = WriteCvtI2F in
1467 def SSE_CVT_Scalar : OpndItins<
1468 IIC_SSE_CVT_Scalar_RR, IIC_SSE_CVT_Scalar_RM
1469 >;
1471 let Sched = WriteCvtF2I in
1472 def SSE_CVT_SS2SI_32 : OpndItins<
1473 IIC_SSE_CVT_SS2SI32_RR, IIC_SSE_CVT_SS2SI32_RM
1474 >;
1476 let Sched = WriteCvtF2I in
1477 def SSE_CVT_SS2SI_64 : OpndItins<
1478 IIC_SSE_CVT_SS2SI64_RR, IIC_SSE_CVT_SS2SI64_RM
1479 >;
1481 let Sched = WriteCvtF2I in
1482 def SSE_CVT_SD2SI : OpndItins<
1483 IIC_SSE_CVT_SD2SI_RR, IIC_SSE_CVT_SD2SI_RM
1484 >;
1486 multiclass sse12_cvt_s<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
1487 SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
1488 string asm, OpndItins itins> {
1489 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
1490 [(set DstRC:$dst, (OpNode SrcRC:$src))],
1491 itins.rr>, Sched<[itins.Sched]>;
1492 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
1493 [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))],
1494 itins.rm>, Sched<[itins.Sched.Folded]>;
1495 }
1497 multiclass sse12_cvt_p<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
1498 X86MemOperand x86memop, string asm, Domain d,
1499 OpndItins itins> {
1500 let hasSideEffects = 0 in {
1501 def rr : I<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
1502 [], itins.rr, d>, Sched<[itins.Sched]>;
1504 def rm : I<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
1505 [], itins.rm, d>, Sched<[itins.Sched.Folded]>;
1506 }
1507 }
1509 multiclass sse12_vcvt_avx<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
1510 X86MemOperand x86memop, string asm> {
1511 let hasSideEffects = 0, Predicates = [UseAVX] in {
1512 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src),
1513 !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>,
1514 Sched<[WriteCvtI2F]>;
1515 let mayLoad = 1 in
1516 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
1517 (ins DstRC:$src1, x86memop:$src),
1518 !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>,
1519 Sched<[WriteCvtI2FLd, ReadAfterLd]>;
1520 } // hasSideEffects = 0
1521 }
1523 let Predicates = [UseAVX] in {
1524 defm VCVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
1525 "cvttss2si\t{$src, $dst|$dst, $src}",
1526 SSE_CVT_SS2SI_32>,
1527 XS, VEX, VEX_LIG;
1528 defm VCVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
1529 "cvttss2si\t{$src, $dst|$dst, $src}",
1530 SSE_CVT_SS2SI_64>,
1531 XS, VEX, VEX_W, VEX_LIG;
1532 defm VCVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
1533 "cvttsd2si\t{$src, $dst|$dst, $src}",
1534 SSE_CVT_SD2SI>,
1535 XD, VEX, VEX_LIG;
1536 defm VCVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
1537 "cvttsd2si\t{$src, $dst|$dst, $src}",
1538 SSE_CVT_SD2SI>,
1539 XD, VEX, VEX_W, VEX_LIG;
1541 def : InstAlias<"vcvttss2si{l}\t{$src, $dst|$dst, $src}",
1542 (VCVTTSS2SIrr GR32:$dst, FR32:$src), 0>;
1543 def : InstAlias<"vcvttss2si{l}\t{$src, $dst|$dst, $src}",
1544 (VCVTTSS2SIrm GR32:$dst, f32mem:$src), 0>;
1545 def : InstAlias<"vcvttsd2si{l}\t{$src, $dst|$dst, $src}",
1546 (VCVTTSD2SIrr GR32:$dst, FR64:$src), 0>;
1547 def : InstAlias<"vcvttsd2si{l}\t{$src, $dst|$dst, $src}",
1548 (VCVTTSD2SIrm GR32:$dst, f64mem:$src), 0>;
1549 def : InstAlias<"vcvttss2si{q}\t{$src, $dst|$dst, $src}",
1550 (VCVTTSS2SI64rr GR64:$dst, FR32:$src), 0>;
1551 def : InstAlias<"vcvttss2si{q}\t{$src, $dst|$dst, $src}",
1552 (VCVTTSS2SI64rm GR64:$dst, f32mem:$src), 0>;
1553 def : InstAlias<"vcvttsd2si{q}\t{$src, $dst|$dst, $src}",
1554 (VCVTTSD2SI64rr GR64:$dst, FR64:$src), 0>;
1555 def : InstAlias<"vcvttsd2si{q}\t{$src, $dst|$dst, $src}",
1556 (VCVTTSD2SI64rm GR64:$dst, f64mem:$src), 0>;
1558 // The assembler can recognize rr 64-bit instructions by seeing a rxx
1559 // register, but the same isn't true when only using memory operands, so
1560 // provide explicit "l" and "q" assembly forms to disambiguate the operand
1561 // size where it is appropriate to do so.
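// For example (AT&T syntax), "vcvtsi2ss %eax, %xmm1, %xmm0" and
// "vcvtsi2ss %rax, %xmm1, %xmm0" are unambiguous, but with a memory source the
// integer width can only come from a mnemonic suffix, roughly:
//   vcvtsi2ssl (%rax), %xmm1, %xmm0    // 32-bit memory source
//   vcvtsi2ssq (%rax), %xmm1, %xmm0    // 64-bit memory source
// (illustrative assembly only; the {l}/{q} spellings are defined just below).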
1562 defm VCVTSI2SS : sse12_vcvt_avx<0x2A, GR32, FR32, i32mem, "cvtsi2ss{l}">,
1563 XS, VEX_4V, VEX_LIG;
1564 defm VCVTSI2SS64 : sse12_vcvt_avx<0x2A, GR64, FR32, i64mem, "cvtsi2ss{q}">,
1565 XS, VEX_4V, VEX_W, VEX_LIG;
1566 defm VCVTSI2SD : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sd{l}">,
1567 XD, VEX_4V, VEX_LIG;
1568 defm VCVTSI2SD64 : sse12_vcvt_avx<0x2A, GR64, FR64, i64mem, "cvtsi2sd{q}">,
1569 XD, VEX_4V, VEX_W, VEX_LIG;
1570 }
1571 let Predicates = [UseAVX] in {
1572 def : InstAlias<"vcvtsi2ss\t{$src, $src1, $dst|$dst, $src1, $src}",
1573 (VCVTSI2SSrm FR64:$dst, FR64:$src1, i32mem:$src), 0>;
1574 def : InstAlias<"vcvtsi2sd\t{$src, $src1, $dst|$dst, $src1, $src}",
1575 (VCVTSI2SDrm FR64:$dst, FR64:$src1, i32mem:$src), 0>;
1577 def : Pat<(f32 (sint_to_fp (loadi32 addr:$src))),
1578 (VCVTSI2SSrm (f32 (IMPLICIT_DEF)), addr:$src)>;
1579 def : Pat<(f32 (sint_to_fp (loadi64 addr:$src))),
1580 (VCVTSI2SS64rm (f32 (IMPLICIT_DEF)), addr:$src)>;
1581 def : Pat<(f64 (sint_to_fp (loadi32 addr:$src))),
1582 (VCVTSI2SDrm (f64 (IMPLICIT_DEF)), addr:$src)>;
1583 def : Pat<(f64 (sint_to_fp (loadi64 addr:$src))),
1584 (VCVTSI2SD64rm (f64 (IMPLICIT_DEF)), addr:$src)>;
1586 def : Pat<(f32 (sint_to_fp GR32:$src)),
1587 (VCVTSI2SSrr (f32 (IMPLICIT_DEF)), GR32:$src)>;
1588 def : Pat<(f32 (sint_to_fp GR64:$src)),
1589 (VCVTSI2SS64rr (f32 (IMPLICIT_DEF)), GR64:$src)>;
1590 def : Pat<(f64 (sint_to_fp GR32:$src)),
1591 (VCVTSI2SDrr (f64 (IMPLICIT_DEF)), GR32:$src)>;
1592 def : Pat<(f64 (sint_to_fp GR64:$src)),
1593 (VCVTSI2SD64rr (f64 (IMPLICIT_DEF)), GR64:$src)>;
1594 }
1596 defm CVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
1597 "cvttss2si\t{$src, $dst|$dst, $src}",
1598 SSE_CVT_SS2SI_32>, XS;
1599 defm CVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
1600 "cvttss2si\t{$src, $dst|$dst, $src}",
1601 SSE_CVT_SS2SI_64>, XS, REX_W;
1602 defm CVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
1603 "cvttsd2si\t{$src, $dst|$dst, $src}",
1604 SSE_CVT_SD2SI>, XD;
1605 defm CVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
1606 "cvttsd2si\t{$src, $dst|$dst, $src}",
1607 SSE_CVT_SD2SI>, XD, REX_W;
1608 defm CVTSI2SS : sse12_cvt_s<0x2A, GR32, FR32, sint_to_fp, i32mem, loadi32,
1609 "cvtsi2ss{l}\t{$src, $dst|$dst, $src}",
1610 SSE_CVT_Scalar>, XS;
1611 defm CVTSI2SS64 : sse12_cvt_s<0x2A, GR64, FR32, sint_to_fp, i64mem, loadi64,
1612 "cvtsi2ss{q}\t{$src, $dst|$dst, $src}",
1613 SSE_CVT_Scalar>, XS, REX_W;
1614 defm CVTSI2SD : sse12_cvt_s<0x2A, GR32, FR64, sint_to_fp, i32mem, loadi32,
1615 "cvtsi2sd{l}\t{$src, $dst|$dst, $src}",
1616 SSE_CVT_Scalar>, XD;
1617 defm CVTSI2SD64 : sse12_cvt_s<0x2A, GR64, FR64, sint_to_fp, i64mem, loadi64,
1618 "cvtsi2sd{q}\t{$src, $dst|$dst, $src}",
1619 SSE_CVT_Scalar>, XD, REX_W;
1621 def : InstAlias<"cvttss2si{l}\t{$src, $dst|$dst, $src}",
1622 (CVTTSS2SIrr GR32:$dst, FR32:$src), 0>;
1623 def : InstAlias<"cvttss2si{l}\t{$src, $dst|$dst, $src}",
1624 (CVTTSS2SIrm GR32:$dst, f32mem:$src), 0>;
1625 def : InstAlias<"cvttsd2si{l}\t{$src, $dst|$dst, $src}",
1626 (CVTTSD2SIrr GR32:$dst, FR64:$src), 0>;
1627 def : InstAlias<"cvttsd2si{l}\t{$src, $dst|$dst, $src}",
1628 (CVTTSD2SIrm GR32:$dst, f64mem:$src), 0>;
1629 def : InstAlias<"cvttss2si{q}\t{$src, $dst|$dst, $src}",
1630 (CVTTSS2SI64rr GR64:$dst, FR32:$src), 0>;
1631 def : InstAlias<"cvttss2si{q}\t{$src, $dst|$dst, $src}",
1632 (CVTTSS2SI64rm GR64:$dst, f32mem:$src), 0>;
1633 def : InstAlias<"cvttsd2si{q}\t{$src, $dst|$dst, $src}",
1634 (CVTTSD2SI64rr GR64:$dst, FR64:$src), 0>;
1635 def : InstAlias<"cvttsd2si{q}\t{$src, $dst|$dst, $src}",
1636 (CVTTSD2SI64rm GR64:$dst, f64mem:$src), 0>;
1638 def : InstAlias<"cvtsi2ss\t{$src, $dst|$dst, $src}",
1639 (CVTSI2SSrm FR64:$dst, i32mem:$src), 0>;
1640 def : InstAlias<"cvtsi2sd\t{$src, $dst|$dst, $src}",
1641 (CVTSI2SDrm FR64:$dst, i32mem:$src), 0>;
1643 // Conversion Instructions Intrinsics - Match intrinsics which expect MM
1644 // and/or XMM operand(s).
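// For instance, a C-level helper such as _mm_cvtss_si32(v) is assumed to reach
// the backend as the llvm.x86.sse.cvtss2si intrinsic carrying an XMM (VR128)
// operand, which the sse12_cvt_sint definitions below select directly instead
// of going through a generic fp_to_sint pattern.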
1646 multiclass sse12_cvt_sint<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
1647 Intrinsic Int, Operand memop, ComplexPattern mem_cpat,
1648 string asm, OpndItins itins> {
1649 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
1650 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
1651 [(set DstRC:$dst, (Int SrcRC:$src))], itins.rr>,
1652 Sched<[itins.Sched]>;
1653 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins memop:$src),
1654 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
1655 [(set DstRC:$dst, (Int mem_cpat:$src))], itins.rm>,
1656 Sched<[itins.Sched.Folded]>;
1657 }
1659 multiclass sse12_cvt_sint_3addr<bits<8> opc, RegisterClass SrcRC,
1660 RegisterClass DstRC, Intrinsic Int, X86MemOperand x86memop,
1661 PatFrag ld_frag, string asm, OpndItins itins,
1662 bit Is2Addr = 1> {
1663 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src2),
1664 !if(Is2Addr,
1665 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
1666 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
1667 [(set DstRC:$dst, (Int DstRC:$src1, SrcRC:$src2))],
1668 itins.rr>, Sched<[itins.Sched]>;
1669 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
1670 (ins DstRC:$src1, x86memop:$src2),
1671 !if(Is2Addr,
1672 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
1673 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
1674 [(set DstRC:$dst, (Int DstRC:$src1, (ld_frag addr:$src2)))],
1675 itins.rm>, Sched<[itins.Sched.Folded, ReadAfterLd]>;
1676 }
1678 let Predicates = [UseAVX] in {
1679 defm VCVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32,
1680 int_x86_sse2_cvtsd2si, sdmem, sse_load_f64, "cvtsd2si",
1681 SSE_CVT_SD2SI>, XD, VEX, VEX_LIG;
1682 defm VCVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64,
1683 int_x86_sse2_cvtsd2si64, sdmem, sse_load_f64, "cvtsd2si",
1684 SSE_CVT_SD2SI>, XD, VEX, VEX_W, VEX_LIG;
1685 }
1686 defm CVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
1687 sdmem, sse_load_f64, "cvtsd2si", SSE_CVT_SD2SI>, XD;
1688 defm CVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse2_cvtsd2si64,
1689 sdmem, sse_load_f64, "cvtsd2si", SSE_CVT_SD2SI>, XD, REX_W;
1692 let isCodeGenOnly = 1 in {
1693 let Predicates = [UseAVX] in {
1694 defm Int_VCVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
1695 int_x86_sse_cvtsi2ss, i32mem, loadi32, "cvtsi2ss{l}",
1696 SSE_CVT_Scalar, 0>, XS, VEX_4V;
1697 defm Int_VCVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
1698 int_x86_sse_cvtsi642ss, i64mem, loadi64, "cvtsi2ss{q}",
1699 SSE_CVT_Scalar, 0>, XS, VEX_4V,
1700 VEX_W;
1701 defm Int_VCVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
1702 int_x86_sse2_cvtsi2sd, i32mem, loadi32, "cvtsi2sd{l}",
1703 SSE_CVT_Scalar, 0>, XD, VEX_4V;
1704 defm Int_VCVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
1705 int_x86_sse2_cvtsi642sd, i64mem, loadi64, "cvtsi2sd{q}",
1706 SSE_CVT_Scalar, 0>, XD,
1707 VEX_4V, VEX_W;
1708 }
1709 let Constraints = "$src1 = $dst" in {
1710 defm Int_CVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
1711 int_x86_sse_cvtsi2ss, i32mem, loadi32,
1712 "cvtsi2ss{l}", SSE_CVT_Scalar>, XS;
1713 defm Int_CVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
1714 int_x86_sse_cvtsi642ss, i64mem, loadi64,
1715 "cvtsi2ss{q}", SSE_CVT_Scalar>, XS, REX_W;
1716 defm Int_CVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
1717 int_x86_sse2_cvtsi2sd, i32mem, loadi32,
1718 "cvtsi2sd{l}", SSE_CVT_Scalar>, XD;
1719 defm Int_CVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
1720 int_x86_sse2_cvtsi642sd, i64mem, loadi64,
1721 "cvtsi2sd{q}", SSE_CVT_Scalar>, XD, REX_W;
1722 }
1723 } // isCodeGenOnly = 1
1727 // Aliases for intrinsics
1728 let isCodeGenOnly = 1 in {
1729 let Predicates = [UseAVX] in {
1730 defm Int_VCVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
1731 ssmem, sse_load_f32, "cvttss2si",
1732 SSE_CVT_SS2SI_32>, XS, VEX;
1733 defm Int_VCVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
1734 int_x86_sse_cvttss2si64, ssmem, sse_load_f32,
1735 "cvttss2si", SSE_CVT_SS2SI_64>,
1736 XS, VEX, VEX_W;
1737 defm Int_VCVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
1738 sdmem, sse_load_f64, "cvttsd2si",
1739 SSE_CVT_SD2SI>, XD, VEX;
1740 defm Int_VCVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
1741 int_x86_sse2_cvttsd2si64, sdmem, sse_load_f64,
1742 "cvttsd2si", SSE_CVT_SD2SI>,
1743 XD, VEX, VEX_W;
1744 }
1745 defm Int_CVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
1746 ssmem, sse_load_f32, "cvttss2si",
1747 SSE_CVT_SS2SI_32>, XS;
1748 defm Int_CVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
1749 int_x86_sse_cvttss2si64, ssmem, sse_load_f32,
1750 "cvttss2si", SSE_CVT_SS2SI_64>, XS, REX_W;
1751 defm Int_CVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
1752 sdmem, sse_load_f64, "cvttsd2si",
1753 SSE_CVT_SD2SI>, XD;
1754 defm Int_CVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
1755 int_x86_sse2_cvttsd2si64, sdmem, sse_load_f64,
1756 "cvttsd2si", SSE_CVT_SD2SI>, XD, REX_W;
1757 } // isCodeGenOnly = 1
1759 let Predicates = [UseAVX] in {
1760 defm VCVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si,
1761 ssmem, sse_load_f32, "cvtss2si",
1762 SSE_CVT_SS2SI_32>, XS, VEX, VEX_LIG;
1763 defm VCVTSS2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse_cvtss2si64,
1764 ssmem, sse_load_f32, "cvtss2si",
1765 SSE_CVT_SS2SI_64>, XS, VEX, VEX_W, VEX_LIG;
1766 }
1767 defm CVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si,
1768 ssmem, sse_load_f32, "cvtss2si",
1769 SSE_CVT_SS2SI_32>, XS;
1770 defm CVTSS2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse_cvtss2si64,
1771 ssmem, sse_load_f32, "cvtss2si",
1772 SSE_CVT_SS2SI_64>, XS, REX_W;
1774 defm VCVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, i128mem,
1775 "vcvtdq2ps\t{$src, $dst|$dst, $src}",
1776 SSEPackedSingle, SSE_CVT_PS>,
1777 PS, VEX, Requires<[HasAVX]>;
1778 defm VCVTDQ2PSY : sse12_cvt_p<0x5B, VR256, VR256, i256mem,
1779 "vcvtdq2ps\t{$src, $dst|$dst, $src}",
1780 SSEPackedSingle, SSE_CVT_PS>,
1781 PS, VEX, VEX_L, Requires<[HasAVX]>;
1783 defm CVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, i128mem,
1784 "cvtdq2ps\t{$src, $dst|$dst, $src}",
1785 SSEPackedSingle, SSE_CVT_PS>,
1786 PS, Requires<[UseSSE2]>;
1788 let Predicates = [UseAVX] in {
1789 def : InstAlias<"vcvtss2si{l}\t{$src, $dst|$dst, $src}",
1790 (VCVTSS2SIrr GR32:$dst, VR128:$src), 0>;
1791 def : InstAlias<"vcvtss2si{l}\t{$src, $dst|$dst, $src}",
1792 (VCVTSS2SIrm GR32:$dst, ssmem:$src), 0>;
1793 def : InstAlias<"vcvtsd2si{l}\t{$src, $dst|$dst, $src}",
1794 (VCVTSD2SIrr GR32:$dst, VR128:$src), 0>;
1795 def : InstAlias<"vcvtsd2si{l}\t{$src, $dst|$dst, $src}",
1796 (VCVTSD2SIrm GR32:$dst, sdmem:$src), 0>;
1797 def : InstAlias<"vcvtss2si{q}\t{$src, $dst|$dst, $src}",
1798 (VCVTSS2SI64rr GR64:$dst, VR128:$src), 0>;
1799 def : InstAlias<"vcvtss2si{q}\t{$src, $dst|$dst, $src}",
1800 (VCVTSS2SI64rm GR64:$dst, ssmem:$src), 0>;
1801 def : InstAlias<"vcvtsd2si{q}\t{$src, $dst|$dst, $src}",
1802 (VCVTSD2SI64rr GR64:$dst, VR128:$src), 0>;
1803 def : InstAlias<"vcvtsd2si{q}\t{$src, $dst|$dst, $src}",
1804 (VCVTSD2SI64rm GR64:$dst, sdmem:$src), 0>;
1805 }
1807 def : InstAlias<"cvtss2si{l}\t{$src, $dst|$dst, $src}",
1808 (CVTSS2SIrr GR32:$dst, VR128:$src), 0>;
1809 def : InstAlias<"cvtss2si{l}\t{$src, $dst|$dst, $src}",
1810 (CVTSS2SIrm GR32:$dst, ssmem:$src), 0>;
1811 def : InstAlias<"cvtsd2si{l}\t{$src, $dst|$dst, $src}",
1812 (CVTSD2SIrr GR32:$dst, VR128:$src), 0>;
1813 def : InstAlias<"cvtsd2si{l}\t{$src, $dst|$dst, $src}",
1814 (CVTSD2SIrm GR32:$dst, sdmem:$src), 0>;
1815 def : InstAlias<"cvtss2si{q}\t{$src, $dst|$dst, $src}",
1816 (CVTSS2SI64rr GR64:$dst, VR128:$src), 0>;
1817 def : InstAlias<"cvtss2si{q}\t{$src, $dst|$dst, $src}",
1818 (CVTSS2SI64rm GR64:$dst, ssmem:$src), 0>;
1819 def : InstAlias<"cvtsd2si{q}\t{$src, $dst|$dst, $src}",
1820 (CVTSD2SI64rr GR64:$dst, VR128:$src), 0>;
1821 def : InstAlias<"cvtsd2si{q}\t{$src, $dst|$dst, $src}",
1822 (CVTSD2SI64rm GR64:$dst, sdmem:$src), 0>;
1826 // Convert scalar double to scalar single
1827 let hasSideEffects = 0, Predicates = [UseAVX] in {
1828 def VCVTSD2SSrr : VSDI<0x5A, MRMSrcReg, (outs FR32:$dst),
1829 (ins FR64:$src1, FR64:$src2),
1830 "cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}", [],
1831 IIC_SSE_CVT_Scalar_RR>, VEX_4V, VEX_LIG,
1832 Sched<[WriteCvtF2F]>;
1834 def VCVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst),
1835 (ins FR64:$src1, f64mem:$src2),
1836 "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1837 [], IIC_SSE_CVT_Scalar_RM>,
1838 XD, Requires<[HasAVX, OptForSize]>, VEX_4V, VEX_LIG,
1839 Sched<[WriteCvtF2FLd, ReadAfterLd]>;
1840 }
1842 def : Pat<(f32 (fround FR64:$src)), (VCVTSD2SSrr FR64:$src, FR64:$src)>,
1843 Requires<[UseAVX]>;
1845 def CVTSD2SSrr : SDI<0x5A, MRMSrcReg, (outs FR32:$dst), (ins FR64:$src),
1846 "cvtsd2ss\t{$src, $dst|$dst, $src}",
1847 [(set FR32:$dst, (fround FR64:$src))],
1848 IIC_SSE_CVT_Scalar_RR>, Sched<[WriteCvtF2F]>;
1849 def CVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst), (ins f64mem:$src),
1850 "cvtsd2ss\t{$src, $dst|$dst, $src}",
1851 [(set FR32:$dst, (fround (loadf64 addr:$src)))],
1852 IIC_SSE_CVT_Scalar_RM>, XD,
1854 Requires<[UseSSE2, OptForSize]>, Sched<[WriteCvtF2FLd]>;
1856 let isCodeGenOnly = 1 in {
1857 def Int_VCVTSD2SSrr: I<0x5A, MRMSrcReg,
1858 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1859 "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1860 [(set VR128:$dst,
1861 (int_x86_sse2_cvtsd2ss VR128:$src1, VR128:$src2))],
1862 IIC_SSE_CVT_Scalar_RR>, XD, VEX_4V, Requires<[UseAVX]>,
1863 Sched<[WriteCvtF2F]>;
1864 def Int_VCVTSD2SSrm: I<0x5A, MRMSrcMem,
1865 (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2),
1866 "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1867 [(set VR128:$dst, (int_x86_sse2_cvtsd2ss
1868 VR128:$src1, sse_load_f64:$src2))],
1869 IIC_SSE_CVT_Scalar_RM>, XD, VEX_4V, Requires<[UseAVX]>,
1870 Sched<[WriteCvtF2FLd, ReadAfterLd]>;
1872 let Constraints = "$src1 = $dst" in {
1873 def Int_CVTSD2SSrr: I<0x5A, MRMSrcReg,
1874 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1875 "cvtsd2ss\t{$src2, $dst|$dst, $src2}",
1876 [(set VR128:$dst,
1877 (int_x86_sse2_cvtsd2ss VR128:$src1, VR128:$src2))],
1878 IIC_SSE_CVT_Scalar_RR>, XD, Requires<[UseSSE2]>,
1879 Sched<[WriteCvtF2F]>;
1880 def Int_CVTSD2SSrm: I<0x5A, MRMSrcMem,
1881 (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2),
1882 "cvtsd2ss\t{$src2, $dst|$dst, $src2}",
1883 [(set VR128:$dst, (int_x86_sse2_cvtsd2ss
1884 VR128:$src1, sse_load_f64:$src2))],
1885 IIC_SSE_CVT_Scalar_RM>, XD, Requires<[UseSSE2]>,
1886 Sched<[WriteCvtF2FLd, ReadAfterLd]>;
1887 }
1888 } // isCodeGenOnly = 1
1890 // Convert scalar single to scalar double
1891 // SSE2 instructions with XS prefix
1892 let hasSideEffects = 0, Predicates = [UseAVX] in {
1893 def VCVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst),
1894 (ins FR32:$src1, FR32:$src2),
1895 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1896 [], IIC_SSE_CVT_Scalar_RR>,
1897 XS, Requires<[HasAVX]>, VEX_4V, VEX_LIG,
1898 Sched<[WriteCvtF2F]>;
1900 def VCVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst),
1901 (ins FR32:$src1, f32mem:$src2),
1902 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1903 [], IIC_SSE_CVT_Scalar_RM>,
1904 XS, VEX_4V, VEX_LIG, Requires<[HasAVX, OptForSize]>,
1905 Sched<[WriteCvtF2FLd, ReadAfterLd]>;
1906 }
1908 def : Pat<(f64 (fextend FR32:$src)),
1909 (VCVTSS2SDrr FR32:$src, FR32:$src)>, Requires<[UseAVX]>;
1910 def : Pat<(fextend (loadf32 addr:$src)),
1911 (VCVTSS2SDrm (f32 (IMPLICIT_DEF)), addr:$src)>, Requires<[UseAVX]>;
1913 def : Pat<(extloadf32 addr:$src),
1914 (VCVTSS2SDrm (f32 (IMPLICIT_DEF)), addr:$src)>,
1915 Requires<[UseAVX, OptForSize]>;
1916 def : Pat<(extloadf32 addr:$src),
1917 (VCVTSS2SDrr (f32 (IMPLICIT_DEF)), (VMOVSSrm addr:$src))>,
1918 Requires<[UseAVX, OptForSpeed]>;
1920 def CVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst), (ins FR32:$src),
1921 "cvtss2sd\t{$src, $dst|$dst, $src}",
1922 [(set FR64:$dst, (fextend FR32:$src))],
1923 IIC_SSE_CVT_Scalar_RR>, XS,
1924 Requires<[UseSSE2]>, Sched<[WriteCvtF2F]>;
1925 def CVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst), (ins f32mem:$src),
1926 "cvtss2sd\t{$src, $dst|$dst, $src}",
1927 [(set FR64:$dst, (extloadf32 addr:$src))],
1928 IIC_SSE_CVT_Scalar_RM>, XS,
1929 Requires<[UseSSE2, OptForSize]>, Sched<[WriteCvtF2FLd]>;
1931 // extload f32 -> f64. This matches load+fextend because we have a hack in
1932 // the isel (PreprocessForFPConvert) that can introduce loads after dag
1933 // combine.
1934 // Since these loads aren't folded into the fextend, we have to match it
1935 // explicitly here.
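// As a rough illustration (assumed IR shape, not taken from this file), a
// float load whose only user is an fpext:
//   %f = load float, float* %p
//   %d = fpext float %f to double
// is what the (fextend (loadf32 ...)) / (extloadf32 ...) patterns below match.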
1936 def : Pat<(fextend (loadf32 addr:$src)),
1937 (CVTSS2SDrm addr:$src)>, Requires<[UseSSE2]>;
1938 def : Pat<(extloadf32 addr:$src),
1939 (CVTSS2SDrr (MOVSSrm addr:$src))>, Requires<[UseSSE2, OptForSpeed]>;
1941 let isCodeGenOnly = 1 in {
1942 def Int_VCVTSS2SDrr: I<0x5A, MRMSrcReg,
1943 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1944 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1945 [(set VR128:$dst,
1946 (int_x86_sse2_cvtss2sd VR128:$src1, VR128:$src2))],
1947 IIC_SSE_CVT_Scalar_RR>, XS, VEX_4V, Requires<[UseAVX]>,
1948 Sched<[WriteCvtF2F]>;
1949 def Int_VCVTSS2SDrm: I<0x5A, MRMSrcMem,
1950 (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2),
1951 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1952 [(set VR128:$dst,
1953 (int_x86_sse2_cvtss2sd VR128:$src1, sse_load_f32:$src2))],
1954 IIC_SSE_CVT_Scalar_RM>, XS, VEX_4V, Requires<[UseAVX]>,
1955 Sched<[WriteCvtF2FLd, ReadAfterLd]>;
1956 let Constraints = "$src1 = $dst" in { // SSE2 instructions with XS prefix
1957 def Int_CVTSS2SDrr: I<0x5A, MRMSrcReg,
1958 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1959 "cvtss2sd\t{$src2, $dst|$dst, $src2}",
1960 [(set VR128:$dst,
1961 (int_x86_sse2_cvtss2sd VR128:$src1, VR128:$src2))],
1962 IIC_SSE_CVT_Scalar_RR>, XS, Requires<[UseSSE2]>,
1963 Sched<[WriteCvtF2F]>;
1964 def Int_CVTSS2SDrm: I<0x5A, MRMSrcMem,
1965 (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2),
1966 "cvtss2sd\t{$src2, $dst|$dst, $src2}",
1967 [(set VR128:$dst,
1968 (int_x86_sse2_cvtss2sd VR128:$src1, sse_load_f32:$src2))],
1969 IIC_SSE_CVT_Scalar_RM>, XS, Requires<[UseSSE2]>,
1970 Sched<[WriteCvtF2FLd, ReadAfterLd]>;
1971 }
1972 } // isCodeGenOnly = 1
1974 // Convert packed single/double fp to doubleword
1975 def VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1976 "cvtps2dq\t{$src, $dst|$dst, $src}",
1977 [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))],
1978 IIC_SSE_CVT_PS_RR>, VEX, Sched<[WriteCvtF2I]>;
1979 def VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1980 "cvtps2dq\t{$src, $dst|$dst, $src}",
1981 [(set VR128:$dst,
1982 (int_x86_sse2_cvtps2dq (loadv4f32 addr:$src)))],
1983 IIC_SSE_CVT_PS_RM>, VEX, Sched<[WriteCvtF2ILd]>;
1984 def VCVTPS2DQYrr : VPDI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
1985 "cvtps2dq\t{$src, $dst|$dst, $src}",
1986 [(set VR256:$dst,
1987 (int_x86_avx_cvt_ps2dq_256 VR256:$src))],
1988 IIC_SSE_CVT_PS_RR>, VEX, VEX_L, Sched<[WriteCvtF2I]>;
1989 def VCVTPS2DQYrm : VPDI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
1990 "cvtps2dq\t{$src, $dst|$dst, $src}",
1991 [(set VR256:$dst,
1992 (int_x86_avx_cvt_ps2dq_256 (loadv8f32 addr:$src)))],
1993 IIC_SSE_CVT_PS_RM>, VEX, VEX_L, Sched<[WriteCvtF2ILd]>;
1994 def CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1995 "cvtps2dq\t{$src, $dst|$dst, $src}",
1996 [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))],
1997 IIC_SSE_CVT_PS_RR>, Sched<[WriteCvtF2I]>;
1998 def CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1999 "cvtps2dq\t{$src, $dst|$dst, $src}",
2000 [(set VR128:$dst,
2001 (int_x86_sse2_cvtps2dq (memopv4f32 addr:$src)))],
2002 IIC_SSE_CVT_PS_RM>, Sched<[WriteCvtF2ILd]>;
2005 // Convert Packed Double FP to Packed DW Integers
2006 let Predicates = [HasAVX] in {
2007 // The assembler can recognize rr 256-bit instructions by seeing a ymm
2008 // register, but the same isn't true when using memory operands instead.
2009 // Provide other assembly rr and rm forms to address this explicitly.
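// For example, "vcvtpd2dq %ymm0, %xmm0" is self-describing, but with a memory
// source "vcvtpd2dq (%rax), %xmm0" could name a 128-bit or a 256-bit load; the
// explicit "vcvtpd2dqx"/"vcvtpd2dq{y}" spellings below resolve that
// (illustrative AT&T-syntax assembly only).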
2010 def VCVTPD2DQrr : SDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2011 "vcvtpd2dq\t{$src, $dst|$dst, $src}",
2012 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
2013 VEX, Sched<[WriteCvtF2I]>;
2016 def : InstAlias<"vcvtpd2dqx\t{$src, $dst|$dst, $src}",
2017 (VCVTPD2DQrr VR128:$dst, VR128:$src), 0>;
2018 def VCVTPD2DQXrm : SDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2019 "vcvtpd2dqx\t{$src, $dst|$dst, $src}",
2020 [(set VR128:$dst,
2021 (int_x86_sse2_cvtpd2dq (loadv2f64 addr:$src)))]>, VEX,
2022 Sched<[WriteCvtF2ILd]>;
2025 def VCVTPD2DQYrr : SDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
2026 "vcvtpd2dq{y}\t{$src, $dst|$dst, $src}",
2027 [(set VR128:$dst,
2028 (int_x86_avx_cvt_pd2dq_256 VR256:$src))]>, VEX, VEX_L,
2029 Sched<[WriteCvtF2I]>;
2030 def VCVTPD2DQYrm : SDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
2031 "vcvtpd2dq{y}\t{$src, $dst|$dst, $src}",
2032 [(set VR128:$dst,
2033 (int_x86_avx_cvt_pd2dq_256 (loadv4f64 addr:$src)))]>,
2034 VEX, VEX_L, Sched<[WriteCvtF2ILd]>;
2035 def : InstAlias<"vcvtpd2dq\t{$src, $dst|$dst, $src}",
2036 (VCVTPD2DQYrr VR128:$dst, VR256:$src), 0>;
2037 }
2039 def CVTPD2DQrm : SDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2040 "cvtpd2dq\t{$src, $dst|$dst, $src}",
2041 [(set VR128:$dst,
2042 (int_x86_sse2_cvtpd2dq (memopv2f64 addr:$src)))],
2043 IIC_SSE_CVT_PD_RM>, Sched<[WriteCvtF2ILd]>;
2044 def CVTPD2DQrr : SDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2045 "cvtpd2dq\t{$src, $dst|$dst, $src}",
2046 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))],
2047 IIC_SSE_CVT_PD_RR>, Sched<[WriteCvtF2I]>;
2049 // Convert with truncation packed single/double fp to doubleword
2050 // SSE2 packed instructions with XS prefix
2051 def VCVTTPS2DQrr : VS2SI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2052 "cvttps2dq\t{$src, $dst|$dst, $src}",
2053 [(set VR128:$dst,
2054 (int_x86_sse2_cvttps2dq VR128:$src))],
2055 IIC_SSE_CVT_PS_RR>, VEX, Sched<[WriteCvtF2I]>;
2056 def VCVTTPS2DQrm : VS2SI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2057 "cvttps2dq\t{$src, $dst|$dst, $src}",
2058 [(set VR128:$dst, (int_x86_sse2_cvttps2dq
2059 (loadv4f32 addr:$src)))],
2060 IIC_SSE_CVT_PS_RM>, VEX, Sched<[WriteCvtF2ILd]>;
2061 def VCVTTPS2DQYrr : VS2SI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
2062 "cvttps2dq\t{$src, $dst|$dst, $src}",
2063 [(set VR256:$dst,
2064 (int_x86_avx_cvtt_ps2dq_256 VR256:$src))],
2065 IIC_SSE_CVT_PS_RR>, VEX, VEX_L, Sched<[WriteCvtF2I]>;
2066 def VCVTTPS2DQYrm : VS2SI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
2067 "cvttps2dq\t{$src, $dst|$dst, $src}",
2068 [(set VR256:$dst, (int_x86_avx_cvtt_ps2dq_256
2069 (loadv8f32 addr:$src)))],
2070 IIC_SSE_CVT_PS_RM>, VEX, VEX_L,
2071 Sched<[WriteCvtF2ILd]>;
2073 def CVTTPS2DQrr : S2SI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2074 "cvttps2dq\t{$src, $dst|$dst, $src}",
2075 [(set VR128:$dst, (int_x86_sse2_cvttps2dq VR128:$src))],
2076 IIC_SSE_CVT_PS_RR>, Sched<[WriteCvtF2I]>;
2077 def CVTTPS2DQrm : S2SI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2078 "cvttps2dq\t{$src, $dst|$dst, $src}",
2079 [(set VR128:$dst,
2080 (int_x86_sse2_cvttps2dq (memopv4f32 addr:$src)))],
2081 IIC_SSE_CVT_PS_RM>, Sched<[WriteCvtF2ILd]>;
2083 let Predicates = [HasAVX] in {
2084 def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
2085 (VCVTDQ2PSrr VR128:$src)>;
2086 def : Pat<(v4f32 (sint_to_fp (bc_v4i32 (loadv2i64 addr:$src)))),
2087 (VCVTDQ2PSrm addr:$src)>;
2089 def : Pat<(int_x86_sse2_cvtdq2ps VR128:$src),
2090 (VCVTDQ2PSrr VR128:$src)>;
2091 def : Pat<(int_x86_sse2_cvtdq2ps (bc_v4i32 (loadv2i64 addr:$src))),
2092 (VCVTDQ2PSrm addr:$src)>;
2094 def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
2095 (VCVTTPS2DQrr VR128:$src)>;
2096 def : Pat<(v4i32 (fp_to_sint (loadv4f32 addr:$src))),
2097 (VCVTTPS2DQrm addr:$src)>;
2099 def : Pat<(v8f32 (sint_to_fp (v8i32 VR256:$src))),
2100 (VCVTDQ2PSYrr VR256:$src)>;
2101 def : Pat<(v8f32 (sint_to_fp (bc_v8i32 (loadv4i64 addr:$src)))),
2102 (VCVTDQ2PSYrm addr:$src)>;
2104 def : Pat<(v8i32 (fp_to_sint (v8f32 VR256:$src))),
2105 (VCVTTPS2DQYrr VR256:$src)>;
2106 def : Pat<(v8i32 (fp_to_sint (loadv8f32 addr:$src))),
2107 (VCVTTPS2DQYrm addr:$src)>;
2108 }
2110 let Predicates = [UseSSE2] in {
2111 def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
2112 (CVTDQ2PSrr VR128:$src)>;
2113 def : Pat<(v4f32 (sint_to_fp (bc_v4i32 (memopv2i64 addr:$src)))),
2114 (CVTDQ2PSrm addr:$src)>;
2116 def : Pat<(int_x86_sse2_cvtdq2ps VR128:$src),
2117 (CVTDQ2PSrr VR128:$src)>;
2118 def : Pat<(int_x86_sse2_cvtdq2ps (bc_v4i32 (memopv2i64 addr:$src))),
2119 (CVTDQ2PSrm addr:$src)>;
2121 def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
2122 (CVTTPS2DQrr VR128:$src)>;
2123 def : Pat<(v4i32 (fp_to_sint (memopv4f32 addr:$src))),
2124 (CVTTPS2DQrm addr:$src)>;
2125 }
2127 def VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2128 "cvttpd2dq\t{$src, $dst|$dst, $src}",
2129 [(set VR128:$dst,
2130 (int_x86_sse2_cvttpd2dq VR128:$src))],
2131 IIC_SSE_CVT_PD_RR>, VEX, Sched<[WriteCvtF2I]>;
2133 // The assembler can recognize rr 256-bit instructions by seeing a ymm
2134 // register, but the same isn't true when using memory operands instead.
2135 // Provide other assembly rr and rm forms to address this explicitly.
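// For example, with a memory operand "vcvttpd2dq (%rax), %xmm0" does not say
// whether the source is 128-bit or 256-bit, so the "vcvttpd2dqx" and
// "vcvttpd2dq{y}" spellings below make the width explicit (illustrative
// assembly only).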
2138 def : InstAlias<"vcvttpd2dqx\t{$src, $dst|$dst, $src}",
2139 (VCVTTPD2DQrr VR128:$dst, VR128:$src), 0>;
2140 def VCVTTPD2DQXrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2141 "cvttpd2dqx\t{$src, $dst|$dst, $src}",
2142 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
2143 (loadv2f64 addr:$src)))],
2144 IIC_SSE_CVT_PD_RM>, VEX, Sched<[WriteCvtF2ILd]>;
2147 def VCVTTPD2DQYrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
2148 "cvttpd2dq{y}\t{$src, $dst|$dst, $src}",
2149 [(set VR128:$dst,
2150 (int_x86_avx_cvtt_pd2dq_256 VR256:$src))],
2151 IIC_SSE_CVT_PD_RR>, VEX, VEX_L, Sched<[WriteCvtF2I]>;
2152 def VCVTTPD2DQYrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
2153 "cvttpd2dq{y}\t{$src, $dst|$dst, $src}",
2154 [(set VR128:$dst,
2155 (int_x86_avx_cvtt_pd2dq_256 (loadv4f64 addr:$src)))],
2156 IIC_SSE_CVT_PD_RM>, VEX, VEX_L, Sched<[WriteCvtF2ILd]>;
2157 def : InstAlias<"vcvttpd2dq\t{$src, $dst|$dst, $src}",
2158 (VCVTTPD2DQYrr VR128:$dst, VR256:$src), 0>;
2160 let Predicates = [HasAVX] in {
2161 def : Pat<(v4i32 (fp_to_sint (v4f64 VR256:$src))),
2162 (VCVTTPD2DQYrr VR256:$src)>;
2163 def : Pat<(v4i32 (fp_to_sint (loadv4f64 addr:$src))),
2164 (VCVTTPD2DQYrm addr:$src)>;
2165 } // Predicates = [HasAVX]
2167 def CVTTPD2DQrr : PDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2168 "cvttpd2dq\t{$src, $dst|$dst, $src}",
2169 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))],
2170 IIC_SSE_CVT_PD_RR>, Sched<[WriteCvtF2I]>;
2171 def CVTTPD2DQrm : PDI<0xE6, MRMSrcMem, (outs VR128:$dst),(ins f128mem:$src),
2172 "cvttpd2dq\t{$src, $dst|$dst, $src}",
2173 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
2174 (memopv2f64 addr:$src)))],
2175 IIC_SSE_CVT_PD_RM>,
2176 Sched<[WriteCvtF2ILd]>;
2178 // Convert packed single to packed double
2179 let Predicates = [HasAVX] in {
2180 // SSE2 instructions without OpSize prefix
2181 def VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2182 "vcvtps2pd\t{$src, $dst|$dst, $src}",
2183 [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))],
2184 IIC_SSE_CVT_PD_RR>, PS, VEX, Sched<[WriteCvtF2F]>;
2185 def VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
2186 "vcvtps2pd\t{$src, $dst|$dst, $src}",
2187 [(set VR128:$dst, (v2f64 (extloadv2f32 addr:$src)))],
2188 IIC_SSE_CVT_PD_RM>, PS, VEX, Sched<[WriteCvtF2FLd]>;
2189 def VCVTPS2PDYrr : I<0x5A, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
2190 "vcvtps2pd\t{$src, $dst|$dst, $src}",
2191 [(set VR256:$dst,
2192 (int_x86_avx_cvt_ps2_pd_256 VR128:$src))],
2193 IIC_SSE_CVT_PD_RR>, PS, VEX, VEX_L, Sched<[WriteCvtF2F]>;
2194 def VCVTPS2PDYrm : I<0x5A, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
2195 "vcvtps2pd\t{$src, $dst|$dst, $src}",
2196 [(set VR256:$dst,
2197 (int_x86_avx_cvt_ps2_pd_256 (loadv4f32 addr:$src)))],
2198 IIC_SSE_CVT_PD_RM>, PS, VEX, VEX_L, Sched<[WriteCvtF2FLd]>;
2199 }
2201 let Predicates = [UseSSE2] in {
2202 def CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2203 "cvtps2pd\t{$src, $dst|$dst, $src}",
2204 [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))],
2205 IIC_SSE_CVT_PD_RR>, PS, Sched<[WriteCvtF2F]>;
2206 def CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
2207 "cvtps2pd\t{$src, $dst|$dst, $src}",
2208 [(set VR128:$dst, (v2f64 (extloadv2f32 addr:$src)))],
2209 IIC_SSE_CVT_PD_RM>, PS, Sched<[WriteCvtF2FLd]>;
2210 }
2212 // Convert Packed DW Integers to Packed Double FP
2213 let Predicates = [HasAVX] in {
2214 let hasSideEffects = 0, mayLoad = 1 in
2215 def VCVTDQ2PDrm : S2SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
2216 "vcvtdq2pd\t{$src, $dst|$dst, $src}",
2217 []>, VEX, Sched<[WriteCvtI2FLd]>;
2218 def VCVTDQ2PDrr : S2SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2219 "vcvtdq2pd\t{$src, $dst|$dst, $src}",
2220 [(set VR128:$dst,
2221 (int_x86_sse2_cvtdq2pd VR128:$src))]>, VEX,
2222 Sched<[WriteCvtI2F]>;
2223 def VCVTDQ2PDYrm : S2SI<0xE6, MRMSrcMem, (outs VR256:$dst), (ins i128mem:$src),
2224 "vcvtdq2pd\t{$src, $dst|$dst, $src}",
2225 [(set VR256:$dst,
2226 (int_x86_avx_cvtdq2_pd_256
2227 (bitconvert (loadv2i64 addr:$src))))]>, VEX, VEX_L,
2228 Sched<[WriteCvtI2FLd]>;
2229 def VCVTDQ2PDYrr : S2SI<0xE6, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
2230 "vcvtdq2pd\t{$src, $dst|$dst, $src}",
2231 [(set VR256:$dst,
2232 (int_x86_avx_cvtdq2_pd_256 VR128:$src))]>, VEX, VEX_L,
2233 Sched<[WriteCvtI2F]>;
2234 }
2236 let hasSideEffects = 0, mayLoad = 1 in
2237 def CVTDQ2PDrm : S2SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
2238 "cvtdq2pd\t{$src, $dst|$dst, $src}", [],
2239 IIC_SSE_CVT_PD_RM>, Sched<[WriteCvtI2FLd]>;
2240 def CVTDQ2PDrr : S2SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2241 "cvtdq2pd\t{$src, $dst|$dst, $src}",
2242 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))],
2243 IIC_SSE_CVT_PD_RR>, Sched<[WriteCvtI2F]>;
2245 // AVX 256-bit register conversion intrinsics
2246 let Predicates = [HasAVX] in {
2247 def : Pat<(v4f64 (sint_to_fp (v4i32 VR128:$src))),
2248 (VCVTDQ2PDYrr VR128:$src)>;
2249 def : Pat<(v4f64 (sint_to_fp (bc_v4i32 (loadv2i64 addr:$src)))),
2250 (VCVTDQ2PDYrm addr:$src)>;
2251 } // Predicates = [HasAVX]
2253 // Convert packed double to packed single
2254 // The assembler can recognize rr 256-bit instructions by seeing a ymm
2255 // register, but the same isn't true when using memory operands instead.
2256 // Provide other assembly rr and rm forms to address this explicitly.
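// For example, "vcvtpd2ps (%rax), %xmm0" leaves the source width ambiguous,
// so the "vcvtpd2psx" and "vcvtpd2ps{y}" spellings below make it explicit
// (illustrative assembly only).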
2257 def VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2258 "cvtpd2ps\t{$src, $dst|$dst, $src}",
2259 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))],
2260 IIC_SSE_CVT_PD_RR>, VEX, Sched<[WriteCvtF2F]>;
2263 def : InstAlias<"vcvtpd2psx\t{$src, $dst|$dst, $src}",
2264 (VCVTPD2PSrr VR128:$dst, VR128:$src), 0>;
2265 def VCVTPD2PSXrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2266 "cvtpd2psx\t{$src, $dst|$dst, $src}",
2267 [(set VR128:$dst,
2268 (int_x86_sse2_cvtpd2ps (loadv2f64 addr:$src)))],
2269 IIC_SSE_CVT_PD_RM>, VEX, Sched<[WriteCvtF2FLd]>;
2272 def VCVTPD2PSYrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
2273 "cvtpd2ps{y}\t{$src, $dst|$dst, $src}",
2274 [(set VR128:$dst,
2275 (int_x86_avx_cvt_pd2_ps_256 VR256:$src))],
2276 IIC_SSE_CVT_PD_RR>, VEX, VEX_L, Sched<[WriteCvtF2F]>;
2277 def VCVTPD2PSYrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
2278 "cvtpd2ps{y}\t{$src, $dst|$dst, $src}",
2279 [(set VR128:$dst,
2280 (int_x86_avx_cvt_pd2_ps_256 (loadv4f64 addr:$src)))],
2281 IIC_SSE_CVT_PD_RM>, VEX, VEX_L, Sched<[WriteCvtF2FLd]>;
2282 def : InstAlias<"vcvtpd2ps\t{$src, $dst|$dst, $src}",
2283 (VCVTPD2PSYrr VR128:$dst, VR256:$src), 0>;
2285 def CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2286 "cvtpd2ps\t{$src, $dst|$dst, $src}",
2287 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))],
2288 IIC_SSE_CVT_PD_RR>, Sched<[WriteCvtF2F]>;
2289 def CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2290 "cvtpd2ps\t{$src, $dst|$dst, $src}",
2291 [(set VR128:$dst,
2292 (int_x86_sse2_cvtpd2ps (memopv2f64 addr:$src)))],
2293 IIC_SSE_CVT_PD_RM>, Sched<[WriteCvtF2FLd]>;
2296 // AVX 256-bit register conversion intrinsics
2297 // FIXME: Migrate SSE conversion intrinsics matching to use patterns as below
2298 // whenever possible to avoid declaring two versions of each one.
2299 let Predicates = [HasAVX] in {
2300 def : Pat<(int_x86_avx_cvtdq2_ps_256 VR256:$src),
2301 (VCVTDQ2PSYrr VR256:$src)>;
2302 def : Pat<(int_x86_avx_cvtdq2_ps_256 (bitconvert (loadv4i64 addr:$src))),
2303 (VCVTDQ2PSYrm addr:$src)>;
2305 // Match fround and fextend for 128/256-bit conversions
2306 def : Pat<(v4f32 (X86vfpround (v2f64 VR128:$src))),
2307 (VCVTPD2PSrr VR128:$src)>;
2308 def : Pat<(v4f32 (X86vfpround (loadv2f64 addr:$src))),
2309 (VCVTPD2PSXrm addr:$src)>;
2310 def : Pat<(v4f32 (fround (v4f64 VR256:$src))),
2311 (VCVTPD2PSYrr VR256:$src)>;
2312 def : Pat<(v4f32 (fround (loadv4f64 addr:$src))),
2313 (VCVTPD2PSYrm addr:$src)>;
2315 def : Pat<(v2f64 (X86vfpext (v4f32 VR128:$src))),
2316 (VCVTPS2PDrr VR128:$src)>;
2317 def : Pat<(v4f64 (fextend (v4f32 VR128:$src))),
2318 (VCVTPS2PDYrr VR128:$src)>;
2319 def : Pat<(v4f64 (extloadv4f32 addr:$src)),
2320 (VCVTPS2PDYrm addr:$src)>;
2321 }
2323 let Predicates = [UseSSE2] in {
2324 // Match fround and fextend for 128 conversions
2325 def : Pat<(v4f32 (X86vfpround (v2f64 VR128:$src))),
2326 (CVTPD2PSrr VR128:$src)>;
2327 def : Pat<(v4f32 (X86vfpround (memopv2f64 addr:$src))),
2328 (CVTPD2PSrm addr:$src)>;
2330 def : Pat<(v2f64 (X86vfpext (v4f32 VR128:$src))),
2331 (CVTPS2PDrr VR128:$src)>;
2332 }
2334 //===----------------------------------------------------------------------===//
2335 // SSE 1 & 2 - Compare Instructions
2336 //===----------------------------------------------------------------------===//
2338 // sse12_cmp_scalar - sse 1 & 2 compare scalar instructions
2339 multiclass sse12_cmp_scalar<RegisterClass RC, X86MemOperand x86memop,
2340 Operand CC, SDNode OpNode, ValueType VT,
2341 PatFrag ld_frag, string asm, string asm_alt,
2342 OpndItins itins, ImmLeaf immLeaf> {
2343 def rr : SIi8<0xC2, MRMSrcReg,
2344 (outs RC:$dst), (ins RC:$src1, RC:$src2, CC:$cc), asm,
2345 [(set RC:$dst, (OpNode (VT RC:$src1), RC:$src2, immLeaf:$cc))],
2346 itins.rr>, Sched<[itins.Sched]>;
2347 def rm : SIi8<0xC2, MRMSrcMem,
2348 (outs RC:$dst), (ins RC:$src1, x86memop:$src2, CC:$cc), asm,
2349 [(set RC:$dst, (OpNode (VT RC:$src1),
2350 (ld_frag addr:$src2), immLeaf:$cc))],
2351 itins.rm>,
2352 Sched<[itins.Sched.Folded, ReadAfterLd]>;
2354 // Accept explicit immediate argument form instead of comparison code.
2355 let isAsmParserOnly = 1, hasSideEffects = 0 in {
2356 def rr_alt : SIi8<0xC2, MRMSrcReg, (outs RC:$dst),
2357 (ins RC:$src1, RC:$src2, u8imm:$cc), asm_alt, [],
2358 IIC_SSE_ALU_F32S_RR>, Sched<[itins.Sched]>;
2359 let mayLoad = 1 in
2360 def rm_alt : SIi8<0xC2, MRMSrcMem, (outs RC:$dst),
2361 (ins RC:$src1, x86memop:$src2, u8imm:$cc), asm_alt, [],
2362 IIC_SSE_ALU_F32S_RM>,
2363 Sched<[itins.Sched.Folded, ReadAfterLd]>;
2364 }
2365 }
2367 defm VCMPSS : sse12_cmp_scalar<FR32, f32mem, AVXCC, X86cmps, f32, loadf32,
2368 "cmp${cc}ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2369 "cmpss\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
2370 SSE_ALU_F32S, i8immZExt5>, XS, VEX_4V, VEX_LIG;
2371 defm VCMPSD : sse12_cmp_scalar<FR64, f64mem, AVXCC, X86cmps, f64, loadf64,
2372 "cmp${cc}sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2373 "cmpsd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
2374 SSE_ALU_F32S, i8immZExt5>, // same latency as 32 bit compare
2375 XD, VEX_4V, VEX_LIG;
2377 let Constraints = "$src1 = $dst" in {
2378 defm CMPSS : sse12_cmp_scalar<FR32, f32mem, SSECC, X86cmps, f32, loadf32,
2379 "cmp${cc}ss\t{$src2, $dst|$dst, $src2}",
2380 "cmpss\t{$cc, $src2, $dst|$dst, $src2, $cc}", SSE_ALU_F32S,
2381 i8immZExt3>, XS;
2382 defm CMPSD : sse12_cmp_scalar<FR64, f64mem, SSECC, X86cmps, f64, loadf64,
2383 "cmp${cc}sd\t{$src2, $dst|$dst, $src2}",
2384 "cmpsd\t{$cc, $src2, $dst|$dst, $src2, $cc}",
2385 SSE_ALU_F64S, i8immZExt3>, XD;
2386 }
2388 multiclass sse12_cmp_scalar_int<X86MemOperand x86memop, Operand CC,
2389 Intrinsic Int, string asm, OpndItins itins,
2390 ImmLeaf immLeaf> {
2391 def rr : SIi8<0xC2, MRMSrcReg, (outs VR128:$dst),
2392 (ins VR128:$src1, VR128:$src, CC:$cc), asm,
2393 [(set VR128:$dst, (Int VR128:$src1,
2394 VR128:$src, immLeaf:$cc))],
2395 itins.rr>,
2396 Sched<[itins.Sched]>;
2397 def rm : SIi8<0xC2, MRMSrcMem, (outs VR128:$dst),
2398 (ins VR128:$src1, x86memop:$src, CC:$cc), asm,
2399 [(set VR128:$dst, (Int VR128:$src1,
2400 (load addr:$src), immLeaf:$cc))],
2401 itins.rm>,
2402 Sched<[itins.Sched.Folded, ReadAfterLd]>;
2403 }
2405 let isCodeGenOnly = 1 in {
2406 // Aliases to match intrinsics which expect XMM operand(s).
2407 defm Int_VCMPSS : sse12_cmp_scalar_int<f32mem, AVXCC, int_x86_sse_cmp_ss,
2408 "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}",
2409 SSE_ALU_F32S, i8immZExt5>,
2410 XS, VEX_4V;
2411 defm Int_VCMPSD : sse12_cmp_scalar_int<f64mem, AVXCC, int_x86_sse2_cmp_sd,
2412 "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}",
2413 SSE_ALU_F32S, i8immZExt5>, // same latency as f32
2414 XD, VEX_4V;
2415 let Constraints = "$src1 = $dst" in {
2416 defm Int_CMPSS : sse12_cmp_scalar_int<f32mem, SSECC, int_x86_sse_cmp_ss,
2417 "cmp${cc}ss\t{$src, $dst|$dst, $src}",
2418 SSE_ALU_F32S, i8immZExt3>, XS;
2419 defm Int_CMPSD : sse12_cmp_scalar_int<f64mem, SSECC, int_x86_sse2_cmp_sd,
2420 "cmp${cc}sd\t{$src, $dst|$dst, $src}",
2421 SSE_ALU_F64S, i8immZExt3>,
2422 XD;
2423 }
2424 }
2427 // sse12_ord_cmp - Unordered/Ordered scalar fp compare and set EFLAGS
2428 multiclass sse12_ord_cmp<bits<8> opc, RegisterClass RC, SDNode OpNode,
2429 ValueType vt, X86MemOperand x86memop,
2430 PatFrag ld_frag, string OpcodeStr> {
2431 def rr: SI<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
2432 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
2433 [(set EFLAGS, (OpNode (vt RC:$src1), RC:$src2))],
2434 IIC_SSE_COMIS_RR>,
2435 Sched<[WriteFAdd]>;
2436 def rm: SI<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
2437 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
2438 [(set EFLAGS, (OpNode (vt RC:$src1),
2439 (ld_frag addr:$src2)))],
2440 IIC_SSE_COMIS_RM>,
2441 Sched<[WriteFAddLd, ReadAfterLd]>;
2442 }
2444 let Defs = [EFLAGS] in {
2445 defm VUCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
2446 "ucomiss">, PS, VEX, VEX_LIG;
2447 defm VUCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
2448 "ucomisd">, PD, VEX, VEX_LIG;
2449 let Pattern = []<dag> in {
2450 defm VCOMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
2451 "comiss">, PS, VEX, VEX_LIG;
2452 defm VCOMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
2453 "comisd">, PD, VEX, VEX_LIG;
2454 }
2456 let isCodeGenOnly = 1 in {
2457 defm Int_VUCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
2458 load, "ucomiss">, PS, VEX;
2459 defm Int_VUCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
2460 load, "ucomisd">, PD, VEX;
2462 defm Int_VCOMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem,
2463 load, "comiss">, PS, VEX;
2464 defm Int_VCOMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem,
2465 load, "comisd">, PD, VEX;
2466 }
2467 defm UCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
2468 "ucomiss">, PS;
2469 defm UCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
2470 "ucomisd">, PD;
2472 let Pattern = []<dag> in {
2473 defm COMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
2474 "comiss">, PS;
2475 defm COMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
2476 "comisd">, PD;
2477 }
2479 let isCodeGenOnly = 1 in {
2480 defm Int_UCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
2481 load, "ucomiss">, PS;
2482 defm Int_UCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
2483 load, "ucomisd">, PD;
2485 defm Int_COMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem, load,
2486 "comiss">, PS;
2487 defm Int_COMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem, load,
2488 "comisd">, PD;
2489 }
2490 } // Defs = [EFLAGS]
2492 // sse12_cmp_packed - sse 1 & 2 compare packed instructions
2493 multiclass sse12_cmp_packed<RegisterClass RC, X86MemOperand x86memop,
2494 Operand CC, Intrinsic Int, string asm,
2495 string asm_alt, Domain d, ImmLeaf immLeaf,
2496 PatFrag ld_frag, OpndItins itins = SSE_ALU_F32P> {
2497 let isCommutable = 1 in
2498 def rri : PIi8<0xC2, MRMSrcReg,
2499 (outs RC:$dst), (ins RC:$src1, RC:$src2, CC:$cc), asm,
2500 [(set RC:$dst, (Int RC:$src1, RC:$src2, immLeaf:$cc))],
2501 itins.rr, d>,
2502 Sched<[WriteFAdd]>;
2503 def rmi : PIi8<0xC2, MRMSrcMem,
2504 (outs RC:$dst), (ins RC:$src1, x86memop:$src2, CC:$cc), asm,
2505 [(set RC:$dst, (Int RC:$src1, (ld_frag addr:$src2), immLeaf:$cc))],
2506 itins.rm, d>,
2507 Sched<[WriteFAddLd, ReadAfterLd]>;
2509 // Accept explicit immediate argument form instead of comparison code.
2510 let isAsmParserOnly = 1, hasSideEffects = 0 in {
2511 def rri_alt : PIi8<0xC2, MRMSrcReg,
2512 (outs RC:$dst), (ins RC:$src1, RC:$src2, u8imm:$cc),
2513 asm_alt, [], itins.rr, d>, Sched<[WriteFAdd]>;
2515 def rmi_alt : PIi8<0xC2, MRMSrcMem,
2516 (outs RC:$dst), (ins RC:$src1, x86memop:$src2, u8imm:$cc),
2517 asm_alt, [], itins.rm, d>,
2518 Sched<[WriteFAddLd, ReadAfterLd]>;
2519 }
2520 }
2522 defm VCMPPS : sse12_cmp_packed<VR128, f128mem, AVXCC, int_x86_sse_cmp_ps,
2523 "cmp${cc}ps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2524 "cmpps\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
2525 SSEPackedSingle, i8immZExt5, loadv4f32>, PS, VEX_4V;
2526 defm VCMPPD : sse12_cmp_packed<VR128, f128mem, AVXCC, int_x86_sse2_cmp_pd,
2527 "cmp${cc}pd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2528 "cmppd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
2529 SSEPackedDouble, i8immZExt5, loadv2f64>, PD, VEX_4V;
2530 defm VCMPPSY : sse12_cmp_packed<VR256, f256mem, AVXCC, int_x86_avx_cmp_ps_256,
2531 "cmp${cc}ps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2532 "cmpps\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
2533 SSEPackedSingle, i8immZExt5, loadv8f32>, PS, VEX_4V, VEX_L;
2534 defm VCMPPDY : sse12_cmp_packed<VR256, f256mem, AVXCC, int_x86_avx_cmp_pd_256,
2535 "cmp${cc}pd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2536 "cmppd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
2537 SSEPackedDouble, i8immZExt5, loadv4f64>, PD, VEX_4V, VEX_L;
2538 let Constraints = "$src1 = $dst" in {
2539 defm CMPPS : sse12_cmp_packed<VR128, f128mem, SSECC, int_x86_sse_cmp_ps,
2540 "cmp${cc}ps\t{$src2, $dst|$dst, $src2}",
2541 "cmpps\t{$cc, $src2, $dst|$dst, $src2, $cc}",
2542 SSEPackedSingle, i8immZExt5, memopv4f32, SSE_ALU_F32P>, PS;
2543 defm CMPPD : sse12_cmp_packed<VR128, f128mem, SSECC, int_x86_sse2_cmp_pd,
2544 "cmp${cc}pd\t{$src2, $dst|$dst, $src2}",
2545 "cmppd\t{$cc, $src2, $dst|$dst, $src2, $cc}",
2546 SSEPackedDouble, i8immZExt5, memopv2f64, SSE_ALU_F64P>, PD;
2547 }
2549 let Predicates = [HasAVX] in {
2550 def : Pat<(v4i32 (X86cmpp (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
2551 (VCMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>;
2552 def : Pat<(v4i32 (X86cmpp (v4f32 VR128:$src1), (loadv4f32 addr:$src2), imm:$cc)),
2553 (VCMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>;
2554 def : Pat<(v2i64 (X86cmpp (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
2555 (VCMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
2556 def : Pat<(v2i64 (X86cmpp (v2f64 VR128:$src1), (loadv2f64 addr:$src2), imm:$cc)),
2557 (VCMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;
2559 def : Pat<(v8i32 (X86cmpp (v8f32 VR256:$src1), VR256:$src2, imm:$cc)),
2560 (VCMPPSYrri (v8f32 VR256:$src1), (v8f32 VR256:$src2), imm:$cc)>;
2561 def : Pat<(v8i32 (X86cmpp (v8f32 VR256:$src1), (loadv8f32 addr:$src2), imm:$cc)),
2562 (VCMPPSYrmi (v8f32 VR256:$src1), addr:$src2, imm:$cc)>;
2563 def : Pat<(v4i64 (X86cmpp (v4f64 VR256:$src1), VR256:$src2, imm:$cc)),
2564 (VCMPPDYrri VR256:$src1, VR256:$src2, imm:$cc)>;
2565 def : Pat<(v4i64 (X86cmpp (v4f64 VR256:$src1), (loadv4f64 addr:$src2), imm:$cc)),
2566 (VCMPPDYrmi VR256:$src1, addr:$src2, imm:$cc)>;
2567 }
2569 let Predicates = [UseSSE1] in {
2570 def : Pat<(v4i32 (X86cmpp (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
2571 (CMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>;
2572 def : Pat<(v4i32 (X86cmpp (v4f32 VR128:$src1), (memopv4f32 addr:$src2), imm:$cc)),
2573 (CMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>;
2574 }
2576 let Predicates = [UseSSE2] in {
2577 def : Pat<(v2i64 (X86cmpp (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
2578 (CMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
2579 def : Pat<(v2i64 (X86cmpp (v2f64 VR128:$src1), (memopv2f64 addr:$src2), imm:$cc)),
2580 (CMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;
2581 }
2583 //===----------------------------------------------------------------------===//
2584 // SSE 1 & 2 - Shuffle Instructions
2585 //===----------------------------------------------------------------------===//
2587 /// sse12_shuffle - sse 1 & 2 fp shuffle instructions
2588 multiclass sse12_shuffle<RegisterClass RC, X86MemOperand x86memop,
2589 ValueType vt, string asm, PatFrag mem_frag,
2590 Domain d> {
2591 def rmi : PIi8<0xC6, MRMSrcMem, (outs RC:$dst),
2592 (ins RC:$src1, x86memop:$src2, u8imm:$src3), asm,
2593 [(set RC:$dst, (vt (X86Shufp RC:$src1, (mem_frag addr:$src2),
2594 (i8 imm:$src3))))], IIC_SSE_SHUFP, d>,
2595 Sched<[WriteFShuffleLd, ReadAfterLd]>;
2596 def rri : PIi8<0xC6, MRMSrcReg, (outs RC:$dst),
2597 (ins RC:$src1, RC:$src2, u8imm:$src3), asm,
2598 [(set RC:$dst, (vt (X86Shufp RC:$src1, RC:$src2,
2599 (i8 imm:$src3))))], IIC_SSE_SHUFP, d>,
2600 Sched<[WriteFShuffle]>;
2601 }
2603 defm VSHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
2604 "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
2605 loadv4f32, SSEPackedSingle>, PS, VEX_4V;
2606 defm VSHUFPSY : sse12_shuffle<VR256, f256mem, v8f32,
2607 "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
2608 loadv8f32, SSEPackedSingle>, PS, VEX_4V, VEX_L;
2609 defm VSHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
2610 "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
2611 loadv2f64, SSEPackedDouble>, PD, VEX_4V;
2612 defm VSHUFPDY : sse12_shuffle<VR256, f256mem, v4f64,
2613 "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
2614 loadv4f64, SSEPackedDouble>, PD, VEX_4V, VEX_L;
2616 let Constraints = "$src1 = $dst" in {
2617 defm SHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
2618 "shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}",
2619 memopv4f32, SSEPackedSingle>, PS;
2620 defm SHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
2621 "shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}",
2622 memopv2f64, SSEPackedDouble>, PD;
2623 }
2625 let Predicates = [HasAVX] in {
2626 def : Pat<(v4i32 (X86Shufp VR128:$src1,
2627 (bc_v4i32 (loadv2i64 addr:$src2)), (i8 imm:$imm))),
2628 (VSHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
2629 def : Pat<(v4i32 (X86Shufp VR128:$src1, VR128:$src2, (i8 imm:$imm))),
2630 (VSHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
2632 def : Pat<(v2i64 (X86Shufp VR128:$src1,
2633 (loadv2i64 addr:$src2), (i8 imm:$imm))),
2634 (VSHUFPDrmi VR128:$src1, addr:$src2, imm:$imm)>;
2635 def : Pat<(v2i64 (X86Shufp VR128:$src1, VR128:$src2, (i8 imm:$imm))),
2636 (VSHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;
2639 def : Pat<(v8i32 (X86Shufp VR256:$src1, VR256:$src2, (i8 imm:$imm))),
2640 (VSHUFPSYrri VR256:$src1, VR256:$src2, imm:$imm)>;
2641 def : Pat<(v8i32 (X86Shufp VR256:$src1,
2642 (bc_v8i32 (loadv4i64 addr:$src2)), (i8 imm:$imm))),
2643 (VSHUFPSYrmi VR256:$src1, addr:$src2, imm:$imm)>;
2645 def : Pat<(v4i64 (X86Shufp VR256:$src1, VR256:$src2, (i8 imm:$imm))),
2646 (VSHUFPDYrri VR256:$src1, VR256:$src2, imm:$imm)>;
2647 def : Pat<(v4i64 (X86Shufp VR256:$src1,
2648 (loadv4i64 addr:$src2), (i8 imm:$imm))),
2649 (VSHUFPDYrmi VR256:$src1, addr:$src2, imm:$imm)>;
2650 }
2652 let Predicates = [UseSSE1] in {
2653 def : Pat<(v4i32 (X86Shufp VR128:$src1,
2654 (bc_v4i32 (memopv2i64 addr:$src2)), (i8 imm:$imm))),
2655 (SHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
2656 def : Pat<(v4i32 (X86Shufp VR128:$src1, VR128:$src2, (i8 imm:$imm))),
2657 (SHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
2658 }
2660 let Predicates = [UseSSE2] in {
2661 // Generic SHUFPD patterns
2662 def : Pat<(v2i64 (X86Shufp VR128:$src1,
2663 (memopv2i64 addr:$src2), (i8 imm:$imm))),
2664 (SHUFPDrmi VR128:$src1, addr:$src2, imm:$imm)>;
2665 def : Pat<(v2i64 (X86Shufp VR128:$src1, VR128:$src2, (i8 imm:$imm))),
2666 (SHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;
2667 }
2669 //===----------------------------------------------------------------------===//
2670 // SSE 1 & 2 - Unpack FP Instructions
2671 //===----------------------------------------------------------------------===//
2673 /// sse12_unpack_interleave - sse 1 & 2 fp unpack and interleave
2674 multiclass sse12_unpack_interleave<bits<8> opc, SDNode OpNode, ValueType vt,
2675 PatFrag mem_frag, RegisterClass RC,
2676 X86MemOperand x86memop, string asm,
2677 Domain d> {
2678 def rr : PI<opc, MRMSrcReg,
2679 (outs RC:$dst), (ins RC:$src1, RC:$src2),
2680 asm, [(set RC:$dst,
2681 (vt (OpNode RC:$src1, RC:$src2)))],
2682 IIC_SSE_UNPCK, d>, Sched<[WriteFShuffle]>;
2683 def rm : PI<opc, MRMSrcMem,
2684 (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
2685 asm, [(set RC:$dst,
2686 (vt (OpNode RC:$src1,
2687 (mem_frag addr:$src2))))],
2688 IIC_SSE_UNPCK, d>,
2689 Sched<[WriteFShuffleLd, ReadAfterLd]>;
2690 }
2692 defm VUNPCKHPS: sse12_unpack_interleave<0x15, X86Unpckh, v4f32, loadv4f32,
2693 VR128, f128mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2694 SSEPackedSingle>, PS, VEX_4V;
2695 defm VUNPCKHPD: sse12_unpack_interleave<0x15, X86Unpckh, v2f64, loadv2f64,
2696 VR128, f128mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2697 SSEPackedDouble>, PD, VEX_4V;
2698 defm VUNPCKLPS: sse12_unpack_interleave<0x14, X86Unpckl, v4f32, loadv4f32,
2699 VR128, f128mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2700 SSEPackedSingle>, PS, VEX_4V;
2701 defm VUNPCKLPD: sse12_unpack_interleave<0x14, X86Unpckl, v2f64, loadv2f64,
2702 VR128, f128mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2703 SSEPackedDouble>, PD, VEX_4V;
2705 defm VUNPCKHPSY: sse12_unpack_interleave<0x15, X86Unpckh, v8f32, loadv8f32,
2706 VR256, f256mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2707 SSEPackedSingle>, PS, VEX_4V, VEX_L;
2708 defm VUNPCKHPDY: sse12_unpack_interleave<0x15, X86Unpckh, v4f64, loadv4f64,
2709 VR256, f256mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2710 SSEPackedDouble>, PD, VEX_4V, VEX_L;
2711 defm VUNPCKLPSY: sse12_unpack_interleave<0x14, X86Unpckl, v8f32, loadv8f32,
2712 VR256, f256mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2713 SSEPackedSingle>, PS, VEX_4V, VEX_L;
2714 defm VUNPCKLPDY: sse12_unpack_interleave<0x14, X86Unpckl, v4f64, loadv4f64,
2715 VR256, f256mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2716 SSEPackedDouble>, PD, VEX_4V, VEX_L;
2718 let Constraints = "$src1 = $dst" in {
2719 defm UNPCKHPS: sse12_unpack_interleave<0x15, X86Unpckh, v4f32, memopv4f32,
2720 VR128, f128mem, "unpckhps\t{$src2, $dst|$dst, $src2}",
2721 SSEPackedSingle>, PS;
2722 defm UNPCKHPD: sse12_unpack_interleave<0x15, X86Unpckh, v2f64, memopv2f64,
2723 VR128, f128mem, "unpckhpd\t{$src2, $dst|$dst, $src2}",
2724 SSEPackedDouble>, PD;
2725 defm UNPCKLPS: sse12_unpack_interleave<0x14, X86Unpckl, v4f32, memopv4f32,
2726 VR128, f128mem, "unpcklps\t{$src2, $dst|$dst, $src2}",
2727 SSEPackedSingle>, PS;
2728 defm UNPCKLPD: sse12_unpack_interleave<0x14, X86Unpckl, v2f64, memopv2f64,
2729 VR128, f128mem, "unpcklpd\t{$src2, $dst|$dst, $src2}",
2730 SSEPackedDouble>, PD;
2731 } // Constraints = "$src1 = $dst"
2733 let Predicates = [HasAVX1Only] in {
2734 def : Pat<(v8i32 (X86Unpckl VR256:$src1, (bc_v8i32 (loadv4i64 addr:$src2)))),
2735 (VUNPCKLPSYrm VR256:$src1, addr:$src2)>;
2736 def : Pat<(v8i32 (X86Unpckl VR256:$src1, VR256:$src2)),
2737 (VUNPCKLPSYrr VR256:$src1, VR256:$src2)>;
2738 def : Pat<(v8i32 (X86Unpckh VR256:$src1, (bc_v8i32 (loadv4i64 addr:$src2)))),
2739 (VUNPCKHPSYrm VR256:$src1, addr:$src2)>;
2740 def : Pat<(v8i32 (X86Unpckh VR256:$src1, VR256:$src2)),
2741 (VUNPCKHPSYrr VR256:$src1, VR256:$src2)>;
2743 def : Pat<(v4i64 (X86Unpckl VR256:$src1, (loadv4i64 addr:$src2))),
2744 (VUNPCKLPDYrm VR256:$src1, addr:$src2)>;
2745 def : Pat<(v4i64 (X86Unpckl VR256:$src1, VR256:$src2)),
2746 (VUNPCKLPDYrr VR256:$src1, VR256:$src2)>;
2747 def : Pat<(v4i64 (X86Unpckh VR256:$src1, (loadv4i64 addr:$src2))),
2748 (VUNPCKHPDYrm VR256:$src1, addr:$src2)>;
2749 def : Pat<(v4i64 (X86Unpckh VR256:$src1, VR256:$src2)),
2750 (VUNPCKHPDYrr VR256:$src1, VR256:$src2)>;
2753 //===----------------------------------------------------------------------===//
2754 // SSE 1 & 2 - Extract Floating-Point Sign mask
2755 //===----------------------------------------------------------------------===//
2757 /// sse12_extr_sign_mask - sse 1 & 2 extract floating-point sign mask
2758 multiclass sse12_extr_sign_mask<RegisterClass RC, Intrinsic Int, string asm,
2759 Domain d> {
2760 def rr : PI<0x50, MRMSrcReg, (outs GR32orGR64:$dst), (ins RC:$src),
2761 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
2762 [(set GR32orGR64:$dst, (Int RC:$src))], IIC_SSE_MOVMSK, d>,
2763 Sched<[WriteVecLogic]>;
2764 }
2766 let Predicates = [HasAVX] in {
2767 defm VMOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps,
2768 "movmskps", SSEPackedSingle>, PS, VEX;
2769 defm VMOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd,
2770 "movmskpd", SSEPackedDouble>, PD, VEX;
2771 defm VMOVMSKPSY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_ps_256,
2772 "movmskps", SSEPackedSingle>, PS,
2774 defm VMOVMSKPDY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_pd_256,
2775 "movmskpd", SSEPackedDouble>, PD,
2778 def : Pat<(i32 (X86fgetsign FR32:$src)),
2779 (VMOVMSKPSrr (COPY_TO_REGCLASS FR32:$src, VR128))>;
2780 def : Pat<(i64 (X86fgetsign FR32:$src)),
2781 (SUBREG_TO_REG (i64 0),
2782 (VMOVMSKPSrr (COPY_TO_REGCLASS FR32:$src, VR128)), sub_32bit)>;
2783 def : Pat<(i32 (X86fgetsign FR64:$src)),
2784 (VMOVMSKPDrr (COPY_TO_REGCLASS FR64:$src, VR128))>;
2785 def : Pat<(i64 (X86fgetsign FR64:$src)),
2786 (SUBREG_TO_REG (i64 0),
2787 (VMOVMSKPDrr (COPY_TO_REGCLASS FR64:$src, VR128)), sub_32bit)>;
2788 }
2790 defm MOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps, "movmskps",
2791 SSEPackedSingle>, PS;
2792 defm MOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd, "movmskpd",
2793 SSEPackedDouble>, PD;
2795 def : Pat<(i32 (X86fgetsign FR32:$src)),
2796 (MOVMSKPSrr (COPY_TO_REGCLASS FR32:$src, VR128))>,
2797 Requires<[UseSSE1]>;
2798 def : Pat<(i64 (X86fgetsign FR32:$src)),
2799 (SUBREG_TO_REG (i64 0),
2800 (MOVMSKPSrr (COPY_TO_REGCLASS FR32:$src, VR128)), sub_32bit)>,
2801 Requires<[UseSSE1]>;
2802 def : Pat<(i32 (X86fgetsign FR64:$src)),
2803 (MOVMSKPDrr (COPY_TO_REGCLASS FR64:$src, VR128))>,
2804 Requires<[UseSSE2]>;
2805 def : Pat<(i64 (X86fgetsign FR64:$src)),
2806 (SUBREG_TO_REG (i64 0),
2807 (MOVMSKPDrr (COPY_TO_REGCLASS FR64:$src, VR128)), sub_32bit)>,
2808 Requires<[UseSSE2]>;
2810 //===---------------------------------------------------------------------===//
2811 // SSE2 - Packed Integer Logical Instructions
2812 //===---------------------------------------------------------------------===//
2814 let ExeDomain = SSEPackedInt in { // SSE integer instructions
2816 /// PDI_binop_rm - Simple SSE2 binary operator.
2817 multiclass PDI_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
2818 ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
2819 X86MemOperand x86memop, OpndItins itins,
2820 bit IsCommutable, bit Is2Addr> {
2821 let isCommutable = IsCommutable in
2822 def rr : PDI<opc, MRMSrcReg, (outs RC:$dst),
2823 (ins RC:$src1, RC:$src2),
2824 !if(Is2Addr,
2825 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2826 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2827 [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2)))], itins.rr>,
2828 Sched<[itins.Sched]>;
2829 def rm : PDI<opc, MRMSrcMem, (outs RC:$dst),
2830 (ins RC:$src1, x86memop:$src2),
2831 !if(Is2Addr,
2832 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2833 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2834 [(set RC:$dst, (OpVT (OpNode RC:$src1,
2835 (bitconvert (memop_frag addr:$src2)))))],
2836 itins.rm>,
2837 Sched<[itins.Sched.Folded, ReadAfterLd]>;
2838 }
2839 } // ExeDomain = SSEPackedInt
2841 multiclass PDI_binop_all<bits<8> opc, string OpcodeStr, SDNode Opcode,
2842 ValueType OpVT128, ValueType OpVT256,
2843 OpndItins itins, bit IsCommutable = 0> {
2844 let Predicates = [HasAVX, NoVLX] in
2845 defm V#NAME : PDI_binop_rm<opc, !strconcat("v", OpcodeStr), Opcode, OpVT128,
2846 VR128, loadv2i64, i128mem, itins, IsCommutable, 0>, VEX_4V;
2848 let Constraints = "$src1 = $dst" in
2849 defm NAME : PDI_binop_rm<opc, OpcodeStr, Opcode, OpVT128, VR128,
2850 memopv2i64, i128mem, itins, IsCommutable, 1>;
2852 let Predicates = [HasAVX2, NoVLX] in
2853 defm V#NAME#Y : PDI_binop_rm<opc, !strconcat("v", OpcodeStr), Opcode,
2854 OpVT256, VR256, loadv4i64, i256mem, itins,
2855 IsCommutable, 0>, VEX_4V, VEX_L;
2856 }
2858 // These are ordered here for pattern ordering requirements with the fp versions
2860 defm PAND : PDI_binop_all<0xDB, "pand", and, v2i64, v4i64,
2861 SSE_VEC_BIT_ITINS_P, 1>;
2862 defm POR : PDI_binop_all<0xEB, "por", or, v2i64, v4i64,
2863 SSE_VEC_BIT_ITINS_P, 1>;
2864 defm PXOR : PDI_binop_all<0xEF, "pxor", xor, v2i64, v4i64,
2865 SSE_VEC_BIT_ITINS_P, 1>;
2866 defm PANDN : PDI_binop_all<0xDF, "pandn", X86andnp, v2i64, v4i64,
2867 SSE_VEC_BIT_ITINS_P, 0>;
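// For reference: each PDI_binop_all instantiation above expands into the
// legacy SSE2 pair (e.g. PANDrr/PANDrm), the VEX-encoded 128-bit pair
// (VPANDrr/VPANDrm) guarded by [HasAVX, NoVLX], and the 256-bit pair
// (VPANDYrr/VPANDYrm) guarded by [HasAVX2, NoVLX].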
2869 //===----------------------------------------------------------------------===//
2870 // SSE 1 & 2 - Logical Instructions
2871 //===----------------------------------------------------------------------===//
2873 // Multiclass for scalars using the X86 logical operation aliases for FP.
2874 multiclass sse12_fp_packed_scalar_logical_alias<
2875 bits<8> opc, string OpcodeStr, SDNode OpNode, OpndItins itins> {
2876 defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
2877 FR32, f32, f128mem, loadf32, SSEPackedSingle, itins, 0>,
2878 PS, VEX_4V;
2880 defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
2881 FR64, f64, f128mem, loadf64, SSEPackedDouble, itins, 0>,
2882 PD, VEX_4V;
2884 let Constraints = "$src1 = $dst" in {
2885 defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, FR32,
2886 f32, f128mem, memopfsf32, SSEPackedSingle, itins>,
2887 PS;
2889 defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, FR64,
2890 f64, f128mem, memopfsf64, SSEPackedDouble, itins>,
2891 PD;
2892 }
2893 }
2895 let isCodeGenOnly = 1 in {
2896 defm FsAND : sse12_fp_packed_scalar_logical_alias<0x54, "and", X86fand,
2897 SSE_BIT_ITINS_P>;
2898 defm FsOR : sse12_fp_packed_scalar_logical_alias<0x56, "or", X86for,
2899 SSE_BIT_ITINS_P>;
2900 defm FsXOR : sse12_fp_packed_scalar_logical_alias<0x57, "xor", X86fxor,
2901 SSE_BIT_ITINS_P>;
2903 let isCommutable = 0 in
2904 defm FsANDN : sse12_fp_packed_scalar_logical_alias<0x55, "andn", X86fandn,
2905 SSE_BIT_ITINS_P>;
2906 }
2908 // Multiclass for vectors using the X86 logical operation aliases for FP.
2909 multiclass sse12_fp_packed_vector_logical_alias<
2910 bits<8> opc, string OpcodeStr, SDNode OpNode, OpndItins itins> {
2911 let Predicates = [HasAVX, NoVLX] in {
2912 defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
2913 VR128, v4f32, f128mem, loadv4f32, SSEPackedSingle, itins, 0>,
2914 PS, VEX_4V;
2916 defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
2917 VR128, v2f64, f128mem, loadv2f64, SSEPackedDouble, itins, 0>,
2918 PD, VEX_4V;
2919 }
2921 let Constraints = "$src1 = $dst" in {
2922 defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR128,
2923 v4f32, f128mem, memopv4f32, SSEPackedSingle, itins>,
2924 PS;
2926 defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR128,
2927 v2f64, f128mem, memopv2f64, SSEPackedDouble, itins>,
2928 PD;
2929 }
2930 }
2932 let isCodeGenOnly = 1 in {
2933 defm FvAND : sse12_fp_packed_vector_logical_alias<0x54, "and", X86fand,
2934 SSE_BIT_ITINS_P>;
2935 defm FvOR : sse12_fp_packed_vector_logical_alias<0x56, "or", X86for,
2936 SSE_BIT_ITINS_P>;
2937 defm FvXOR : sse12_fp_packed_vector_logical_alias<0x57, "xor", X86fxor,
2938 SSE_BIT_ITINS_P>;
2940 let isCommutable = 0 in
2941 defm FvANDN : sse12_fp_packed_vector_logical_alias<0x55, "andn", X86fandn,
2942 SSE_BIT_ITINS_P>;
2943 }
2945 /// sse12_fp_packed_logical - SSE 1 & 2 packed FP logical ops
2947 multiclass sse12_fp_packed_logical<bits<8> opc, string OpcodeStr,
2948 SDNode OpNode> {
2949 let Predicates = [HasAVX, NoVLX] in {
2950 defm V#NAME#PSY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedSingle,
2951 !strconcat(OpcodeStr, "ps"), f256mem,
2952 [(set VR256:$dst, (v4i64 (OpNode VR256:$src1, VR256:$src2)))],
2953 [(set VR256:$dst, (OpNode (bc_v4i64 (v8f32 VR256:$src1)),
2954 (loadv4i64 addr:$src2)))], 0>, PS, VEX_4V, VEX_L;
2956 defm V#NAME#PDY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedDouble,
2957 !strconcat(OpcodeStr, "pd"), f256mem,
2958 [(set VR256:$dst, (OpNode (bc_v4i64 (v4f64 VR256:$src1)),
2959 (bc_v4i64 (v4f64 VR256:$src2))))],
2960 [(set VR256:$dst, (OpNode (bc_v4i64 (v4f64 VR256:$src1)),
2961 (loadv4i64 addr:$src2)))], 0>,
2962 PD, VEX_4V, VEX_L;
2964 // In AVX no need to add a pattern for 128-bit logical rr ps, because they
2965 // are all promoted to v2i64, and the patterns are covered by the int
2966 // version. This is needed in SSE only, because v2i64 isn't supported on
2967 // SSE1, but only on SSE2.
2968 defm V#NAME#PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
2969 !strconcat(OpcodeStr, "ps"), f128mem, [],
2970 [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
2971 (loadv2i64 addr:$src2)))], 0>, PS, VEX_4V;
2973 defm V#NAME#PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
2974 !strconcat(OpcodeStr, "pd"), f128mem,
2975 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
2976 (bc_v2i64 (v2f64 VR128:$src2))))],
2977 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
2978 (loadv2i64 addr:$src2)))], 0>,
2979 PD, VEX_4V;
2980 }
2982 let Constraints = "$src1 = $dst" in {
2983 defm PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
2984 !strconcat(OpcodeStr, "ps"), f128mem,
2985 [(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))],
2986 [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
2987 (memopv2i64 addr:$src2)))]>, PS;
2989 defm PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
2990 !strconcat(OpcodeStr, "pd"), f128mem,
2991 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
2992 (bc_v2i64 (v2f64 VR128:$src2))))],
2993 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
2994 (memopv2i64 addr:$src2)))]>, PD;
2995 }
2996 }
2998 defm AND : sse12_fp_packed_logical<0x54, "and", and>;
2999 defm OR : sse12_fp_packed_logical<0x56, "or", or>;
3000 defm XOR : sse12_fp_packed_logical<0x57, "xor", xor>;
3001 let isCommutable = 0 in
3002 defm ANDN : sse12_fp_packed_logical<0x55, "andn", X86andnp>;
3004 // AVX1 requires type coercions in order to fold loads directly into logical
3005 // instructions.
3006 let Predicates = [HasAVX1Only] in {
3007 def : Pat<(bc_v8f32 (and VR256:$src1, (loadv4i64 addr:$src2))),
3008 (VANDPSYrm VR256:$src1, addr:$src2)>;
3009 def : Pat<(bc_v8f32 (or VR256:$src1, (loadv4i64 addr:$src2))),
3010 (VORPSYrm VR256:$src1, addr:$src2)>;
3011 def : Pat<(bc_v8f32 (xor VR256:$src1, (loadv4i64 addr:$src2))),
3012 (VXORPSYrm VR256:$src1, addr:$src2)>;
3013 def : Pat<(bc_v8f32 (X86andnp VR256:$src1, (loadv4i64 addr:$src2))),
3014 (VANDNPSYrm VR256:$src1, addr:$src2)>;
3017 //===----------------------------------------------------------------------===//
3018 // SSE 1 & 2 - Arithmetic Instructions
3019 //===----------------------------------------------------------------------===//
3021 /// basic_sse12_fp_binop_xxx - SSE 1 & 2 binops come in both scalar and
3022 /// vector forms.
3024 /// In addition, we also have a special variant of the scalar form here to
3025 /// represent the associated intrinsic operation. This form is unlike the
3026 /// plain scalar form, in that it takes an entire vector (instead of a scalar)
3027 /// and leaves the top elements unmodified (therefore these cannot be commuted).
3029 /// These three forms can each be reg+reg or reg+mem.
3032 /// FIXME: once all 256-bit intrinsics are matched, cleanup and refactor those
3033 /// classes.
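// For reference: a single instantiation such as the ADD defm below therefore
// produces the ADDPS/ADDPD/ADDSS/ADDSD families, their VEX-encoded V* forms,
// the 256-bit VADDPSY/VADDPDY forms, and the intrinsic (*_Int) scalar variants.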
3034 multiclass basic_sse12_fp_binop_p<bits<8> opc, string OpcodeStr,
3035 SDNode OpNode, SizeItins itins> {
3036 let Predicates = [HasAVX, NoVLX] in {
3037 defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
3038 VR128, v4f32, f128mem, loadv4f32,
3039 SSEPackedSingle, itins.s, 0>, PS, VEX_4V;
3040 defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
3041 VR128, v2f64, f128mem, loadv2f64,
3042 SSEPackedDouble, itins.d, 0>, PD, VEX_4V;
3044 defm V#NAME#PSY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"),
3045 OpNode, VR256, v8f32, f256mem, loadv8f32,
3046 SSEPackedSingle, itins.s, 0>, PS, VEX_4V, VEX_L;
3047 defm V#NAME#PDY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"),
3048 OpNode, VR256, v4f64, f256mem, loadv4f64,
3049 SSEPackedDouble, itins.d, 0>, PD, VEX_4V, VEX_L;
3052 let Constraints = "$src1 = $dst" in {
3053 defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR128,
3054 v4f32, f128mem, memopv4f32, SSEPackedSingle,
3055 itins.s>, PS;
3056 defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR128,
3057 v2f64, f128mem, memopv2f64, SSEPackedDouble,
3058 itins.d>, PD;
3059 }
3060 }
3062 multiclass basic_sse12_fp_binop_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
3063 SizeItins itins> {
3064 defm V#NAME#SS : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "ss"),
3065 OpNode, FR32, f32mem, itins.s, 0>, XS, VEX_4V, VEX_LIG;
3066 defm V#NAME#SD : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "sd"),
3067 OpNode, FR64, f64mem, itins.d, 0>, XD, VEX_4V, VEX_LIG;
3069 let Constraints = "$src1 = $dst" in {
3070 defm SS : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "ss"),
3071 OpNode, FR32, f32mem, itins.s>, XS;
3072 defm SD : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "sd"),
3073 OpNode, FR64, f64mem, itins.d>, XD;
3074 }
3075 }
3077 multiclass basic_sse12_fp_binop_s_int<bits<8> opc, string OpcodeStr,
3078 SizeItins itins> {
3079 defm V#NAME#SS : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
3080 !strconcat(OpcodeStr, "ss"), "", "_ss", ssmem, sse_load_f32,
3081 itins.s, 0>, XS, VEX_4V, VEX_LIG;
3082 defm V#NAME#SD : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
3083 !strconcat(OpcodeStr, "sd"), "2", "_sd", sdmem, sse_load_f64,
3084 itins.d, 0>, XD, VEX_4V, VEX_LIG;
3086 let Constraints = "$src1 = $dst" in {
3087 defm SS : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
3088 !strconcat(OpcodeStr, "ss"), "", "_ss", ssmem, sse_load_f32,
3090 defm SD : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
3091 !strconcat(OpcodeStr, "sd"), "2", "_sd", sdmem, sse_load_f64,
3096 // Binary Arithmetic instructions
3097 defm ADD : basic_sse12_fp_binop_p<0x58, "add", fadd, SSE_ALU_ITINS_P>,
3098 basic_sse12_fp_binop_s<0x58, "add", fadd, SSE_ALU_ITINS_S>,
3099 basic_sse12_fp_binop_s_int<0x58, "add", SSE_ALU_ITINS_S>;
3100 defm MUL : basic_sse12_fp_binop_p<0x59, "mul", fmul, SSE_MUL_ITINS_P>,
3101 basic_sse12_fp_binop_s<0x59, "mul", fmul, SSE_MUL_ITINS_S>,
3102 basic_sse12_fp_binop_s_int<0x59, "mul", SSE_MUL_ITINS_S>;
3103 let isCommutable = 0 in {
3104 defm SUB : basic_sse12_fp_binop_p<0x5C, "sub", fsub, SSE_ALU_ITINS_P>,
3105 basic_sse12_fp_binop_s<0x5C, "sub", fsub, SSE_ALU_ITINS_S>,
3106 basic_sse12_fp_binop_s_int<0x5C, "sub", SSE_ALU_ITINS_S>;
3107 defm DIV : basic_sse12_fp_binop_p<0x5E, "div", fdiv, SSE_DIV_ITINS_P>,
3108 basic_sse12_fp_binop_s<0x5E, "div", fdiv, SSE_DIV_ITINS_S>,
3109 basic_sse12_fp_binop_s_int<0x5E, "div", SSE_DIV_ITINS_S>;
3110 defm MAX : basic_sse12_fp_binop_p<0x5F, "max", X86fmax, SSE_ALU_ITINS_P>,
3111 basic_sse12_fp_binop_s<0x5F, "max", X86fmax, SSE_ALU_ITINS_S>,
3112 basic_sse12_fp_binop_s_int<0x5F, "max", SSE_ALU_ITINS_S>;
3113 defm MIN : basic_sse12_fp_binop_p<0x5D, "min", X86fmin, SSE_ALU_ITINS_P>,
3114 basic_sse12_fp_binop_s<0x5D, "min", X86fmin, SSE_ALU_ITINS_S>,
3115 basic_sse12_fp_binop_s_int<0x5D, "min", SSE_ALU_ITINS_S>;
3118 let isCodeGenOnly = 1 in {
3119 defm MAXC: basic_sse12_fp_binop_p<0x5F, "max", X86fmaxc, SSE_ALU_ITINS_P>,
3120 basic_sse12_fp_binop_s<0x5F, "max", X86fmaxc, SSE_ALU_ITINS_S>;
3121 defm MINC: basic_sse12_fp_binop_p<0x5D, "min", X86fminc, SSE_ALU_ITINS_P>,
3122 basic_sse12_fp_binop_s<0x5D, "min", X86fminc, SSE_ALU_ITINS_S>;
3125 // Patterns used to select SSE scalar fp arithmetic instructions from
3126 // either:
3127 //
3128 // (1) a scalar fp operation followed by a blend
3129 //
3130 // The effect is that the backend no longer emits unnecessary vector
3131 // insert instructions immediately after SSE scalar fp instructions
3132 // like addss or mulss.
3133 //
3134 // For example, given the following code:
3135 //   __m128 foo(__m128 A, __m128 B) {
3136 //     A[0] += B[0];
3137 //     return A;
3138 //   }
3139 //
3140 // Previously we generated:
3141 //   addss %xmm0, %xmm1
3142 //   movss %xmm1, %xmm0
3143 //
3144 // We now generate:
3145 //   addss %xmm1, %xmm0
3146 //
3147 // (2) a vector packed single/double fp operation followed by a vector insert
3148 //
3149 // The effect is that the backend converts the packed fp instruction
3150 // followed by a vector insert into a single SSE scalar fp instruction.
3151 //
3152 // For example, given the following code:
3153 //   __m128 foo(__m128 A, __m128 B) {
3154 //     __m128 C = A + B;
3155 //     return (__m128) {C[0], A[1], A[2], A[3]};
3156 //   }
3157 //
3158 // Previously we generated:
3159 //   addps %xmm0, %xmm1
3160 //   movss %xmm1, %xmm0
3161 //
3162 // We now generate:
3163 //   addss %xmm1, %xmm0
3165 // TODO: Some canonicalization in lowering would simplify the number of
3166 // patterns we have to try to match. In particular, the reversed order blends
3167 // seem unnecessary.
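// As a concrete illustration of case (1): with Op = fadd and OpcPrefix = "ADD",
// the UseSSE1 pattern below turns
//   (v4f32 (X86Movss $dst, (scalar_to_vector (fadd (extractelt $dst, 0), $src))))
// into a single ADDSSrr_Int, after copying the FR32 operand into VR128.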
3168 multiclass scalar_math_f32_patterns<SDNode Op, string OpcPrefix> {
3169 let Predicates = [UseSSE1] in {
3170 // extracted scalar math op with insert via movss
3171 def : Pat<(v4f32 (X86Movss (v4f32 VR128:$dst), (v4f32 (scalar_to_vector
3172 (Op (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
3173 FR32:$src))))),
3174 (!cast<I>(OpcPrefix#SSrr_Int) v4f32:$dst,
3175 (COPY_TO_REGCLASS FR32:$src, VR128))>;
3177 // vector math op with insert via movss
3178 def : Pat<(v4f32 (X86Movss (v4f32 VR128:$dst),
3179 (Op (v4f32 VR128:$dst), (v4f32 VR128:$src)))),
3180 (!cast<I>(OpcPrefix#SSrr_Int) v4f32:$dst, v4f32:$src)>;
3183 // With SSE 4.1, insertps/blendi are preferred to movsd, so match those too.
3184 let Predicates = [UseSSE41] in {
3185 // extracted scalar math op with insert via insertps
3186 def : Pat<(v4f32 (X86insertps (v4f32 VR128:$dst), (v4f32 (scalar_to_vector
3187 (Op (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
3188 FR32:$src))), (iPTR 0))),
3189 (!cast<I>(OpcPrefix#SSrr_Int) v4f32:$dst,
3190 (COPY_TO_REGCLASS FR32:$src, VR128))>;
3192 // extracted scalar math op with insert via blend
3193 def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$dst), (v4f32 (scalar_to_vector
3194 (Op (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
3195 FR32:$src))), (i8 1))),
3196 (!cast<I>(OpcPrefix#SSrr_Int) v4f32:$dst,
3197 (COPY_TO_REGCLASS FR32:$src, VR128))>;
3199 // vector math op with insert via blend
3200 def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$dst),
3201 (Op (v4f32 VR128:$dst), (v4f32 VR128:$src)), (i8 1))),
3202 (!cast<I>(OpcPrefix#SSrr_Int)v4f32:$dst, v4f32:$src)>;
3206 // Repeat everything for AVX, except for the movss + scalar combo...
3207 // because that one shouldn't occur with AVX codegen?
3208 let Predicates = [HasAVX] in {
3209 // extracted scalar math op with insert via insertps
3210 def : Pat<(v4f32 (X86insertps (v4f32 VR128:$dst), (v4f32 (scalar_to_vector
3211 (Op (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
3212 FR32:$src))), (iPTR 0))),
3213 (!cast<I>("V"#OpcPrefix#SSrr_Int) v4f32:$dst,
3214 (COPY_TO_REGCLASS FR32:$src, VR128))>;
3216 // extracted scalar math op with insert via blend
3217 def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$dst), (v4f32 (scalar_to_vector
3218 (Op (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
3219 FR32:$src))), (i8 1))),
3220 (!cast<I>("V"#OpcPrefix#SSrr_Int) v4f32:$dst,
3221 (COPY_TO_REGCLASS FR32:$src, VR128))>;
3223 // vector math op with insert via movss
3224 def : Pat<(v4f32 (X86Movss (v4f32 VR128:$dst),
3225 (Op (v4f32 VR128:$dst), (v4f32 VR128:$src)))),
3226 (!cast<I>("V"#OpcPrefix#SSrr_Int) v4f32:$dst, v4f32:$src)>;
3228 // vector math op with insert via blend
3229 def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$dst),
3230 (Op (v4f32 VR128:$dst), (v4f32 VR128:$src)), (i8 1))),
3231 (!cast<I>("V"#OpcPrefix#SSrr_Int) v4f32:$dst, v4f32:$src)>;
3235 defm : scalar_math_f32_patterns<fadd, "ADD">;
3236 defm : scalar_math_f32_patterns<fsub, "SUB">;
3237 defm : scalar_math_f32_patterns<fmul, "MUL">;
3238 defm : scalar_math_f32_patterns<fdiv, "DIV">;
3240 multiclass scalar_math_f64_patterns<SDNode Op, string OpcPrefix> {
3241 let Predicates = [UseSSE2] in {
3242 // extracted scalar math op with insert via movsd
3243 def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst), (v2f64 (scalar_to_vector
3244 (Op (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
3245 FR64:$src))))),
3246 (!cast<I>(OpcPrefix#SDrr_Int) v2f64:$dst,
3247 (COPY_TO_REGCLASS FR64:$src, VR128))>;
3249 // vector math op with insert via movsd
3250 def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst),
3251 (Op (v2f64 VR128:$dst), (v2f64 VR128:$src)))),
3252 (!cast<I>(OpcPrefix#SDrr_Int) v2f64:$dst, v2f64:$src)>;
3255 // With SSE 4.1, blendi is preferred to movsd, so match those too.
3256 let Predicates = [UseSSE41] in {
3257 // extracted scalar math op with insert via blend
3258 def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$dst), (v2f64 (scalar_to_vector
3259 (Op (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
3260 FR64:$src))), (i8 1))),
3261 (!cast<I>(OpcPrefix#SDrr_Int) v2f64:$dst,
3262 (COPY_TO_REGCLASS FR64:$src, VR128))>;
3264 // vector math op with insert via blend
3265 def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$dst),
3266 (Op (v2f64 VR128:$dst), (v2f64 VR128:$src)), (i8 1))),
3267 (!cast<I>(OpcPrefix#SDrr_Int) v2f64:$dst, v2f64:$src)>;
3269 // vector math op with insert via blend (reversed order)
3270 def : Pat<(v2f64 (X86Blendi
3271 (Op (v2f64 VR128:$dst), (v2f64 VR128:$src)),
3272 (v2f64 VR128:$dst), (i8 2))),
3273 (!cast<I>(OpcPrefix#SDrr_Int) v2f64:$dst, v2f64:$src)>;
3276 // Repeat everything for AVX and add one more pattern
3277 // (the scalar + blend reversed order) for good measure.
3278 let Predicates = [HasAVX] in {
3279 // extracted scalar math op with insert via movsd
3280 def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst), (v2f64 (scalar_to_vector
3281 (Op (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
3282 FR64:$src))))),
3283 (!cast<I>("V"#OpcPrefix#SDrr_Int) v2f64:$dst,
3284 (COPY_TO_REGCLASS FR64:$src, VR128))>;
3286 // extracted scalar math op with insert via blend
3287 def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$dst), (v2f64 (scalar_to_vector
3288 (Op (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
3289 FR64:$src))), (i8 1))),
3290 (!cast<I>("V"#OpcPrefix#SDrr_Int) v2f64:$dst,
3291 (COPY_TO_REGCLASS FR64:$src, VR128))>;
3293 // extracted scalar math op with insert via blend (reversed order)
3294 def : Pat<(v2f64 (X86Blendi (v2f64 (scalar_to_vector
3295 (Op (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
3296 FR64:$src))), (v2f64 VR128:$dst), (i8 2))),
3297 (!cast<I>("V"#OpcPrefix#SDrr_Int) v2f64:$dst,
3298 (COPY_TO_REGCLASS FR64:$src, VR128))>;
3300 // vector math op with insert via movsd
3301 def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst),
3302 (Op (v2f64 VR128:$dst), (v2f64 VR128:$src)))),
3303 (!cast<I>("V"#OpcPrefix#SDrr_Int) v2f64:$dst, v2f64:$src)>;
3305 // vector math op with insert via blend
3306 def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$dst),
3307 (Op (v2f64 VR128:$dst), (v2f64 VR128:$src)), (i8 1))),
3308 (!cast<I>("V"#OpcPrefix#SDrr_Int) v2f64:$dst, v2f64:$src)>;
3310 // vector math op with insert via blend (reversed order)
3311 def : Pat<(v2f64 (X86Blendi
3312 (Op (v2f64 VR128:$dst), (v2f64 VR128:$src)),
3313 (v2f64 VR128:$dst), (i8 2))),
3314 (!cast<I>("V"#OpcPrefix#SDrr_Int) v2f64:$dst, v2f64:$src)>;
3318 defm : scalar_math_f64_patterns<fadd, "ADD">;
3319 defm : scalar_math_f64_patterns<fsub, "SUB">;
3320 defm : scalar_math_f64_patterns<fmul, "MUL">;
3321 defm : scalar_math_f64_patterns<fdiv, "DIV">;
3323 /// Unop Arithmetic
3324 ///
3325 /// In addition, we also have a special variant of the scalar form here to
3326 /// represent the associated intrinsic operation. This form is unlike the
3327 /// plain scalar form, in that it takes an entire vector (instead of a
3328 /// scalar) and leaves the top elements undefined.
3330 /// And, we have a special variant form for a full-vector intrinsic form.
3332 let Sched = WriteFSqrt in {
3333 def SSE_SQRTPS : OpndItins<
3334 IIC_SSE_SQRTPS_RR, IIC_SSE_SQRTPS_RM
3337 def SSE_SQRTSS : OpndItins<
3338 IIC_SSE_SQRTSS_RR, IIC_SSE_SQRTSS_RM
3341 def SSE_SQRTPD : OpndItins<
3342 IIC_SSE_SQRTPD_RR, IIC_SSE_SQRTPD_RM
3345 def SSE_SQRTSD : OpndItins<
3346 IIC_SSE_SQRTSD_RR, IIC_SSE_SQRTSD_RM
3350 let Sched = WriteFRsqrt in {
3351 def SSE_RSQRTPS : OpndItins<
3352 IIC_SSE_RSQRTPS_RR, IIC_SSE_RSQRTPS_RM
3355 def SSE_RSQRTSS : OpndItins<
3356 IIC_SSE_RSQRTSS_RR, IIC_SSE_RSQRTSS_RM
3360 let Sched = WriteFRcp in {
3361 def SSE_RCPP : OpndItins<
3362 IIC_SSE_RCPP_RR, IIC_SSE_RCPP_RM
3365 def SSE_RCPS : OpndItins<
3366 IIC_SSE_RCPS_RR, IIC_SSE_RCPS_RM
3370 /// sse1_fp_unop_s - SSE1 unops in scalar form
3371 /// For the non-AVX defs, we need $src1 to be tied to $dst because
3372 /// the HW instructions are 2 operand / destructive.
3373 multiclass sse1_fp_unop_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
3374 OpndItins itins> {
3375 let Predicates = [HasAVX], hasSideEffects = 0 in {
3376 def V#NAME#SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst),
3377 (ins FR32:$src1, FR32:$src2),
3378 !strconcat("v", OpcodeStr,
3379 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3380 []>, VEX_4V, VEX_LIG, Sched<[itins.Sched]>;
3381 let mayLoad = 1 in {
3382 def V#NAME#SSm : SSI<opc, MRMSrcMem, (outs FR32:$dst),
3383 (ins FR32:$src1,f32mem:$src2),
3384 !strconcat("v", OpcodeStr,
3385 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3386 []>, VEX_4V, VEX_LIG,
3387 Sched<[itins.Sched.Folded, ReadAfterLd]>;
3388 let isCodeGenOnly = 1 in
3389 def V#NAME#SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst),
3390 (ins VR128:$src1, ssmem:$src2),
3391 !strconcat("v", OpcodeStr,
3392 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3393 []>, VEX_4V, VEX_LIG,
3394 Sched<[itins.Sched.Folded, ReadAfterLd]>;
3395 }
3396 }
3398 def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
3399 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
3400 [(set FR32:$dst, (OpNode FR32:$src))]>, Sched<[itins.Sched]>;
3401 // For scalar unary operations, fold a load into the operation
3402 // only in OptForSize mode. It eliminates an instruction, but it also
3403 // eliminates a whole-register clobber (the load), so it introduces a
3404 // partial register update condition.
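// For instance, without folding we load with "movss (mem), %xmm0" (which
// writes the whole register) and then use "sqrtss %xmm0, %xmm0"; the folded
// "sqrtss (mem), %xmm0" writes only the low 32 bits and therefore depends on
// the stale upper bits of %xmm0.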
3405 def SSm : I<opc, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
3406 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
3407 [(set FR32:$dst, (OpNode (load addr:$src)))], itins.rm>, XS,
3408 Requires<[UseSSE1, OptForSize]>, Sched<[itins.Sched.Folded]>;
3409 let isCodeGenOnly = 1, Constraints = "$src1 = $dst" in {
3410 def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst),
3411 (ins VR128:$src1, VR128:$src2),
3412 !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
3413 [], itins.rr>, Sched<[itins.Sched]>;
3414 let mayLoad = 1, hasSideEffects = 0 in
3415 def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst),
3416 (ins VR128:$src1, ssmem:$src2),
3417 !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
3418 [], itins.rm>, Sched<[itins.Sched.Folded, ReadAfterLd]>;
3419 }
3420 }
3422 /// sse1_fp_unop_p - SSE1 unops in packed form.
3423 multiclass sse1_fp_unop_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
3424 OpndItins itins> {
3425 let Predicates = [HasAVX] in {
3426 def V#NAME#PSr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3427 !strconcat("v", OpcodeStr,
3428 "ps\t{$src, $dst|$dst, $src}"),
3429 [(set VR128:$dst, (v4f32 (OpNode VR128:$src)))],
3430 itins.rr>, VEX, Sched<[itins.Sched]>;
3431 def V#NAME#PSm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3432 !strconcat("v", OpcodeStr,
3433 "ps\t{$src, $dst|$dst, $src}"),
3434 [(set VR128:$dst, (OpNode (loadv4f32 addr:$src)))],
3435 itins.rm>, VEX, Sched<[itins.Sched.Folded]>;
3436 def V#NAME#PSYr : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
3437 !strconcat("v", OpcodeStr,
3438 "ps\t{$src, $dst|$dst, $src}"),
3439 [(set VR256:$dst, (v8f32 (OpNode VR256:$src)))],
3440 itins.rr>, VEX, VEX_L, Sched<[itins.Sched]>;
3441 def V#NAME#PSYm : PSI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
3442 !strconcat("v", OpcodeStr,
3443 "ps\t{$src, $dst|$dst, $src}"),
3444 [(set VR256:$dst, (OpNode (loadv8f32 addr:$src)))],
3445 itins.rm>, VEX, VEX_L, Sched<[itins.Sched.Folded]>;
3446 }
3448 def PSr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3449 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
3450 [(set VR128:$dst, (v4f32 (OpNode VR128:$src)))], itins.rr>,
3451 Sched<[itins.Sched]>;
3452 def PSm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3453 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
3454 [(set VR128:$dst, (OpNode (memopv4f32 addr:$src)))], itins.rm>,
3455 Sched<[itins.Sched.Folded]>;
3456 }
3458 /// sse1_fp_unop_p_int - SSE1 intrinsics unops in packed forms.
3459 multiclass sse1_fp_unop_p_int<bits<8> opc, string OpcodeStr,
3460 Intrinsic V4F32Int, Intrinsic V8F32Int,
3461 OpndItins itins> {
3462 let isCodeGenOnly = 1 in {
3463 let Predicates = [HasAVX] in {
3464 def V#NAME#PSr_Int : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3465 !strconcat("v", OpcodeStr,
3466 "ps\t{$src, $dst|$dst, $src}"),
3467 [(set VR128:$dst, (V4F32Int VR128:$src))],
3468 itins.rr>, VEX, Sched<[itins.Sched]>;
3469 def V#NAME#PSm_Int : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3470 !strconcat("v", OpcodeStr,
3471 "ps\t{$src, $dst|$dst, $src}"),
3472 [(set VR128:$dst, (V4F32Int (loadv4f32 addr:$src)))],
3473 itins.rm>, VEX, Sched<[itins.Sched.Folded]>;
3474 def V#NAME#PSYr_Int : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
3475 !strconcat("v", OpcodeStr,
3476 "ps\t{$src, $dst|$dst, $src}"),
3477 [(set VR256:$dst, (V8F32Int VR256:$src))],
3478 itins.rr>, VEX, VEX_L, Sched<[itins.Sched]>;
3479 def V#NAME#PSYm_Int : PSI<opc, MRMSrcMem, (outs VR256:$dst),
3480 (ins f256mem:$src),
3481 !strconcat("v", OpcodeStr,
3482 "ps\t{$src, $dst|$dst, $src}"),
3483 [(set VR256:$dst, (V8F32Int (loadv8f32 addr:$src)))],
3484 itins.rm>, VEX, VEX_L, Sched<[itins.Sched.Folded]>;
3485 }
3487 def PSr_Int : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3488 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
3489 [(set VR128:$dst, (V4F32Int VR128:$src))],
3490 itins.rr>, Sched<[itins.Sched]>;
3491 def PSm_Int : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3492 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
3493 [(set VR128:$dst, (V4F32Int (memopv4f32 addr:$src)))],
3494 itins.rm>, Sched<[itins.Sched.Folded]>;
3495 } // isCodeGenOnly = 1
3496 }
3498 /// sse2_fp_unop_s - SSE2 unops in scalar form.
3499 // FIXME: Combine the following sse2 classes with the sse1 classes above.
3500 // The only usage of these is for SQRT[S/P]D. See sse12_fp_binop* for example.
3501 multiclass sse2_fp_unop_s<bits<8> opc, string OpcodeStr,
3502 SDNode OpNode, OpndItins itins> {
3503 let Predicates = [HasAVX], hasSideEffects = 0 in {
3504 def V#NAME#SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst),
3505 (ins FR64:$src1, FR64:$src2),
3506 !strconcat("v", OpcodeStr,
3507 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3508 []>, VEX_4V, VEX_LIG, Sched<[itins.Sched]>;
3509 let mayLoad = 1 in {
3510 def V#NAME#SDm : SDI<opc, MRMSrcMem, (outs FR64:$dst),
3511 (ins FR64:$src1,f64mem:$src2),
3512 !strconcat("v", OpcodeStr,
3513 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3514 []>, VEX_4V, VEX_LIG,
3515 Sched<[itins.Sched.Folded, ReadAfterLd]>;
3516 let isCodeGenOnly = 1 in
3517 def V#NAME#SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst),
3518 (ins VR128:$src1, sdmem:$src2),
3519 !strconcat("v", OpcodeStr,
3520 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3521 []>, VEX_4V, VEX_LIG,
3522 Sched<[itins.Sched.Folded, ReadAfterLd]>;
3523 }
3524 }
3526 def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
3527 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
3528 [(set FR64:$dst, (OpNode FR64:$src))], itins.rr>,
3529 Sched<[itins.Sched]>;
3530 // See the comments in sse1_fp_unop_s for why this is OptForSize.
3531 def SDm : I<opc, MRMSrcMem, (outs FR64:$dst), (ins f64mem:$src),
3532 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
3533 [(set FR64:$dst, (OpNode (load addr:$src)))], itins.rm>, XD,
3534 Requires<[UseSSE2, OptForSize]>, Sched<[itins.Sched.Folded]>;
3535 let isCodeGenOnly = 1, Constraints = "$src1 = $dst" in {
3536 def SDr_Int :
3537 SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
3538 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
3539 [], itins.rr>, Sched<[itins.Sched]>;
3541 let mayLoad = 1, hasSideEffects = 0 in
3542 def SDm_Int :
3543 SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2),
3544 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
3545 [], itins.rm>, Sched<[itins.Sched.Folded, ReadAfterLd]>;
3546 } // isCodeGenOnly, Constraints
3547 }
3549 /// sse2_fp_unop_p - SSE2 unops in vector forms.
3550 multiclass sse2_fp_unop_p<bits<8> opc, string OpcodeStr,
3551 SDNode OpNode, OpndItins itins> {
3552 let Predicates = [HasAVX] in {
3553 def V#NAME#PDr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3554 !strconcat("v", OpcodeStr,
3555 "pd\t{$src, $dst|$dst, $src}"),
3556 [(set VR128:$dst, (v2f64 (OpNode VR128:$src)))],
3557 itins.rr>, VEX, Sched<[itins.Sched]>;
3558 def V#NAME#PDm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3559 !strconcat("v", OpcodeStr,
3560 "pd\t{$src, $dst|$dst, $src}"),
3561 [(set VR128:$dst, (OpNode (loadv2f64 addr:$src)))],
3562 itins.rm>, VEX, Sched<[itins.Sched.Folded]>;
3563 def V#NAME#PDYr : PDI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
3564 !strconcat("v", OpcodeStr,
3565 "pd\t{$src, $dst|$dst, $src}"),
3566 [(set VR256:$dst, (v4f64 (OpNode VR256:$src)))],
3567 itins.rr>, VEX, VEX_L, Sched<[itins.Sched]>;
3568 def V#NAME#PDYm : PDI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
3569 !strconcat("v", OpcodeStr,
3570 "pd\t{$src, $dst|$dst, $src}"),
3571 [(set VR256:$dst, (OpNode (loadv4f64 addr:$src)))],
3572 itins.rm>, VEX, VEX_L, Sched<[itins.Sched.Folded]>;
3573 }
3575 def PDr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3576 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
3577 [(set VR128:$dst, (v2f64 (OpNode VR128:$src)))], itins.rr>,
3578 Sched<[itins.Sched]>;
3579 def PDm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3580 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
3581 [(set VR128:$dst, (OpNode (memopv2f64 addr:$src)))], itins.rm>,
3582 Sched<[itins.Sched.Folded]>;
3583 }
3586 defm SQRT : sse1_fp_unop_s<0x51, "sqrt", fsqrt, SSE_SQRTSS>,
3587 sse1_fp_unop_p<0x51, "sqrt", fsqrt, SSE_SQRTPS>,
3588 sse2_fp_unop_s<0x51, "sqrt", fsqrt, SSE_SQRTSD>,
3589 sse2_fp_unop_p<0x51, "sqrt", fsqrt, SSE_SQRTPD>;
3591 // Reciprocal approximations. Note that these typically require refinement
3592 // in order to obtain suitable precision.
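// For example, a single Newton-Raphson step refines an rcpps estimate x0 of
// 1/a as x1 = x0 * (2 - a * x0), and an rsqrtps estimate y0 of 1/sqrt(a) as
// y1 = y0 * (1.5 - 0.5 * a * y0 * y0), roughly doubling the number of
// accurate bits.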
3593 defm RSQRT : sse1_fp_unop_s<0x52, "rsqrt", X86frsqrt, SSE_RSQRTSS>,
3594 sse1_fp_unop_p<0x52, "rsqrt", X86frsqrt, SSE_RSQRTPS>,
3595 sse1_fp_unop_p_int<0x52, "rsqrt", int_x86_sse_rsqrt_ps,
3596 int_x86_avx_rsqrt_ps_256, SSE_RSQRTPS>;
3597 defm RCP : sse1_fp_unop_s<0x53, "rcp", X86frcp, SSE_RCPS>,
3598 sse1_fp_unop_p<0x53, "rcp", X86frcp, SSE_RCPP>,
3599 sse1_fp_unop_p_int<0x53, "rcp", int_x86_sse_rcp_ps,
3600 int_x86_avx_rcp_ps_256, SSE_RCPP>;
3602 let Predicates = [UseAVX] in {
3603 def : Pat<(f32 (fsqrt FR32:$src)),
3604 (VSQRTSSr (f32 (IMPLICIT_DEF)), FR32:$src)>, Requires<[HasAVX]>;
3605 def : Pat<(f32 (fsqrt (load addr:$src))),
3606 (VSQRTSSm (f32 (IMPLICIT_DEF)), addr:$src)>,
3607 Requires<[HasAVX, OptForSize]>;
3608 def : Pat<(f64 (fsqrt FR64:$src)),
3609 (VSQRTSDr (f64 (IMPLICIT_DEF)), FR64:$src)>, Requires<[HasAVX]>;
3610 def : Pat<(f64 (fsqrt (load addr:$src))),
3611 (VSQRTSDm (f64 (IMPLICIT_DEF)), addr:$src)>,
3612 Requires<[HasAVX, OptForSize]>;
3614 def : Pat<(f32 (X86frsqrt FR32:$src)),
3615 (VRSQRTSSr (f32 (IMPLICIT_DEF)), FR32:$src)>, Requires<[HasAVX]>;
3616 def : Pat<(f32 (X86frsqrt (load addr:$src))),
3617 (VRSQRTSSm (f32 (IMPLICIT_DEF)), addr:$src)>,
3618 Requires<[HasAVX, OptForSize]>;
3620 def : Pat<(f32 (X86frcp FR32:$src)),
3621 (VRCPSSr (f32 (IMPLICIT_DEF)), FR32:$src)>, Requires<[HasAVX]>;
3622 def : Pat<(f32 (X86frcp (load addr:$src))),
3623 (VRCPSSm (f32 (IMPLICIT_DEF)), addr:$src)>,
3624 Requires<[HasAVX, OptForSize]>;
3626 let Predicates = [UseAVX] in {
3627 def : Pat<(int_x86_sse_sqrt_ss VR128:$src),
3628 (COPY_TO_REGCLASS (VSQRTSSr (f32 (IMPLICIT_DEF)),
3629 (COPY_TO_REGCLASS VR128:$src, FR32)),
3630 VR128)>;
3631 def : Pat<(int_x86_sse_sqrt_ss sse_load_f32:$src),
3632 (VSQRTSSm_Int (v4f32 (IMPLICIT_DEF)), sse_load_f32:$src)>;
3634 def : Pat<(int_x86_sse2_sqrt_sd VR128:$src),
3635 (COPY_TO_REGCLASS (VSQRTSDr (f64 (IMPLICIT_DEF)),
3636 (COPY_TO_REGCLASS VR128:$src, FR64)),
3637 VR128)>;
3638 def : Pat<(int_x86_sse2_sqrt_sd sse_load_f64:$src),
3639 (VSQRTSDm_Int (v2f64 (IMPLICIT_DEF)), sse_load_f64:$src)>;
3642 let Predicates = [HasAVX] in {
3643 def : Pat<(int_x86_sse_rsqrt_ss VR128:$src),
3644 (COPY_TO_REGCLASS (VRSQRTSSr (f32 (IMPLICIT_DEF)),
3645 (COPY_TO_REGCLASS VR128:$src, FR32)),
3646 VR128)>;
3647 def : Pat<(int_x86_sse_rsqrt_ss sse_load_f32:$src),
3648 (VRSQRTSSm_Int (v4f32 (IMPLICIT_DEF)), sse_load_f32:$src)>;
3650 def : Pat<(int_x86_sse_rcp_ss VR128:$src),
3651 (COPY_TO_REGCLASS (VRCPSSr (f32 (IMPLICIT_DEF)),
3652 (COPY_TO_REGCLASS VR128:$src, FR32)),
3653 VR128)>;
3654 def : Pat<(int_x86_sse_rcp_ss sse_load_f32:$src),
3655 (VRCPSSm_Int (v4f32 (IMPLICIT_DEF)), sse_load_f32:$src)>;
3658 // These are unary operations, but they are modeled as having 2 source operands
3659 // because the high elements of the destination are unchanged in SSE.
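// E.g. "rsqrtss %xmm1, %xmm0" replaces only element 0 of %xmm0, so the
// intrinsic patterns below pass the input register twice: once as the tied
// operand that supplies the untouched upper elements and once as the actual
// source operand.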
3660 let Predicates = [UseSSE1] in {
3661 def : Pat<(int_x86_sse_rsqrt_ss VR128:$src),
3662 (RSQRTSSr_Int VR128:$src, VR128:$src)>;
3663 def : Pat<(int_x86_sse_rcp_ss VR128:$src),
3664 (RCPSSr_Int VR128:$src, VR128:$src)>;
3665 def : Pat<(int_x86_sse_sqrt_ss VR128:$src),
3666 (SQRTSSr_Int VR128:$src, VR128:$src)>;
3667 def : Pat<(int_x86_sse2_sqrt_sd VR128:$src),
3668 (SQRTSDr_Int VR128:$src, VR128:$src)>;
3671 // There is no f64 version of the reciprocal approximation instructions.
3673 //===----------------------------------------------------------------------===//
3674 // SSE 1 & 2 - Non-temporal stores
3675 //===----------------------------------------------------------------------===//
3677 let AddedComplexity = 400 in { // Prefer non-temporal versions
3678 let SchedRW = [WriteStore] in {
3679 let Predicates = [HasAVX, NoVLX] in {
3680 def VMOVNTPSmr : VPSI<0x2B, MRMDestMem, (outs),
3681 (ins f128mem:$dst, VR128:$src),
3682 "movntps\t{$src, $dst|$dst, $src}",
3683 [(alignednontemporalstore (v4f32 VR128:$src),
3684 addr:$dst)],
3685 IIC_SSE_MOVNT>, VEX;
3686 def VMOVNTPDmr : VPDI<0x2B, MRMDestMem, (outs),
3687 (ins f128mem:$dst, VR128:$src),
3688 "movntpd\t{$src, $dst|$dst, $src}",
3689 [(alignednontemporalstore (v2f64 VR128:$src),
3690 addr:$dst)],
3691 IIC_SSE_MOVNT>, VEX;
3693 let ExeDomain = SSEPackedInt in
3694 def VMOVNTDQmr : VPDI<0xE7, MRMDestMem, (outs),
3695 (ins f128mem:$dst, VR128:$src),
3696 "movntdq\t{$src, $dst|$dst, $src}",
3697 [(alignednontemporalstore (v2i64 VR128:$src),
3698 addr:$dst)],
3699 IIC_SSE_MOVNT>, VEX;
3701 def VMOVNTPSYmr : VPSI<0x2B, MRMDestMem, (outs),
3702 (ins f256mem:$dst, VR256:$src),
3703 "movntps\t{$src, $dst|$dst, $src}",
3704 [(alignednontemporalstore (v8f32 VR256:$src),
3705 addr:$dst)],
3706 IIC_SSE_MOVNT>, VEX, VEX_L;
3707 def VMOVNTPDYmr : VPDI<0x2B, MRMDestMem, (outs),
3708 (ins f256mem:$dst, VR256:$src),
3709 "movntpd\t{$src, $dst|$dst, $src}",
3710 [(alignednontemporalstore (v4f64 VR256:$src),
3711 addr:$dst)],
3712 IIC_SSE_MOVNT>, VEX, VEX_L;
3713 let ExeDomain = SSEPackedInt in
3714 def VMOVNTDQYmr : VPDI<0xE7, MRMDestMem, (outs),
3715 (ins f256mem:$dst, VR256:$src),
3716 "movntdq\t{$src, $dst|$dst, $src}",
3717 [(alignednontemporalstore (v4i64 VR256:$src),
3718 addr:$dst)],
3719 IIC_SSE_MOVNT>, VEX, VEX_L;
3720 }
3722 def MOVNTPSmr : PSI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
3723 "movntps\t{$src, $dst|$dst, $src}",
3724 [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)],
3725 IIC_SSE_MOVNT>;
3726 def MOVNTPDmr : PDI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
3727 "movntpd\t{$src, $dst|$dst, $src}",
3728 [(alignednontemporalstore (v2f64 VR128:$src), addr:$dst)],
3729 IIC_SSE_MOVNT>;
3731 let ExeDomain = SSEPackedInt in
3732 def MOVNTDQmr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
3733 "movntdq\t{$src, $dst|$dst, $src}",
3734 [(alignednontemporalstore (v2i64 VR128:$src), addr:$dst)],
3735 IIC_SSE_MOVNT>;
3737 // There is no AVX form for instructions below this point
3738 def MOVNTImr : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
3739 "movnti{l}\t{$src, $dst|$dst, $src}",
3740 [(nontemporalstore (i32 GR32:$src), addr:$dst)],
3741 IIC_SSE_MOVNT>,
3742 PS, Requires<[HasSSE2]>;
3743 def MOVNTI_64mr : RI<0xC3, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
3744 "movnti{q}\t{$src, $dst|$dst, $src}",
3745 [(nontemporalstore (i64 GR64:$src), addr:$dst)],
3746 IIC_SSE_MOVNT>,
3747 PS, Requires<[HasSSE2]>;
3748 } // SchedRW = [WriteStore]
3750 let Predicates = [HasAVX, NoVLX] in {
3751 def : Pat<(alignednontemporalstore (v4i32 VR128:$src), addr:$dst),
3752 (VMOVNTPSmr addr:$dst, VR128:$src)>;
3753 }
3755 def : Pat<(alignednontemporalstore (v4i32 VR128:$src), addr:$dst),
3756 (MOVNTPSmr addr:$dst, VR128:$src)>;
3758 } // AddedComplexity
3760 //===----------------------------------------------------------------------===//
3761 // SSE 1 & 2 - Prefetch and memory fence
3762 //===----------------------------------------------------------------------===//
3764 // Prefetch intrinsic.
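// The third and fourth intrinsic operands below select the locality hint and
// cache type: locality 3/2/1/0 maps to prefetcht0/t1/t2/nta respectively, and
// cache type 1 requests a data prefetch.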
3765 let Predicates = [HasSSE1], SchedRW = [WriteLoad] in {
3766 def PREFETCHT0 : I<0x18, MRM1m, (outs), (ins i8mem:$src),
3767 "prefetcht0\t$src", [(prefetch addr:$src, imm, (i32 3), (i32 1))],
3768 IIC_SSE_PREFETCH>, TB;
3769 def PREFETCHT1 : I<0x18, MRM2m, (outs), (ins i8mem:$src),
3770 "prefetcht1\t$src", [(prefetch addr:$src, imm, (i32 2), (i32 1))],
3771 IIC_SSE_PREFETCH>, TB;
3772 def PREFETCHT2 : I<0x18, MRM3m, (outs), (ins i8mem:$src),
3773 "prefetcht2\t$src", [(prefetch addr:$src, imm, (i32 1), (i32 1))],
3774 IIC_SSE_PREFETCH>, TB;
3775 def PREFETCHNTA : I<0x18, MRM0m, (outs), (ins i8mem:$src),
3776 "prefetchnta\t$src", [(prefetch addr:$src, imm, (i32 0), (i32 1))],
3777 IIC_SSE_PREFETCH>, TB;
3780 // FIXME: How should flush instruction be modeled?
3781 let SchedRW = [WriteLoad] in {
3783 def CLFLUSH : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
3784 "clflush\t$src", [(int_x86_sse2_clflush addr:$src)],
3785 IIC_SSE_PREFETCH>, PS, Requires<[HasSSE2]>;
3788 let SchedRW = [WriteNop] in {
3789 // Pause. This "instruction" is encoded as "rep; nop", so even though it
3790 // was introduced with SSE2, it's backward compatible.
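// (Its encoding, 0xF3 0x90, is literally "rep nop", which pre-SSE2 CPUs
// simply execute as a nop.)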
3791 def PAUSE : I<0x90, RawFrm, (outs), (ins),
3792 "pause", [(int_x86_sse2_pause)], IIC_SSE_PAUSE>,
3793 OBXS, Requires<[HasSSE2]>;
3796 let SchedRW = [WriteFence] in {
3797 // Load, store, and memory fence
3798 def SFENCE : I<0xAE, MRM_F8, (outs), (ins),
3799 "sfence", [(int_x86_sse_sfence)], IIC_SSE_SFENCE>,
3800 PS, Requires<[HasSSE1]>;
3801 def LFENCE : I<0xAE, MRM_E8, (outs), (ins),
3802 "lfence", [(int_x86_sse2_lfence)], IIC_SSE_LFENCE>,
3803 TB, Requires<[HasSSE2]>;
3804 def MFENCE : I<0xAE, MRM_F0, (outs), (ins),
3805 "mfence", [(int_x86_sse2_mfence)], IIC_SSE_MFENCE>,
3806 TB, Requires<[HasSSE2]>;
3809 def : Pat<(X86SFence), (SFENCE)>;
3810 def : Pat<(X86LFence), (LFENCE)>;
3811 def : Pat<(X86MFence), (MFENCE)>;
3813 //===----------------------------------------------------------------------===//
3814 // SSE 1 & 2 - Load/Store XCSR register
3815 //===----------------------------------------------------------------------===//
3817 def VLDMXCSR : VPSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
3818 "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)],
3819 IIC_SSE_LDMXCSR>, VEX, Sched<[WriteLoad]>;
3820 def VSTMXCSR : VPSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
3821 "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)],
3822 IIC_SSE_STMXCSR>, VEX, Sched<[WriteStore]>;
3824 let Predicates = [UseSSE1] in {
3825 def LDMXCSR : I<0xAE, MRM2m, (outs), (ins i32mem:$src),
3826 "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)],
3827 IIC_SSE_LDMXCSR>, TB, Sched<[WriteLoad]>;
3828 def STMXCSR : I<0xAE, MRM3m, (outs), (ins i32mem:$dst),
3829 "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)],
3830 IIC_SSE_STMXCSR>, TB, Sched<[WriteStore]>;
3833 //===---------------------------------------------------------------------===//
3834 // SSE2 - Move Aligned/Unaligned Packed Integer Instructions
3835 //===---------------------------------------------------------------------===//
3837 let ExeDomain = SSEPackedInt in { // SSE integer instructions
3839 let hasSideEffects = 0, SchedRW = [WriteMove] in {
3840 def VMOVDQArr : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3841 "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_RR>,
3842 VEX;
3843 def VMOVDQAYrr : VPDI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
3844 "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_RR>,
3845 VEX, VEX_L;
3846 def VMOVDQUrr : VSSI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3847 "movdqu\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVU_P_RR>,
3848 VEX;
3849 def VMOVDQUYrr : VSSI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
3850 "movdqu\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVU_P_RR>,
3851 VEX, VEX_L;
3852 }
3854 // For Disassembler
3855 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0,
3856 SchedRW = [WriteMove] in {
3857 def VMOVDQArr_REV : VPDI<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
3858 "movdqa\t{$src, $dst|$dst, $src}", [],
3859 IIC_SSE_MOVA_P_RR>,
3860 VEX;
3861 def VMOVDQAYrr_REV : VPDI<0x7F, MRMDestReg, (outs VR256:$dst), (ins VR256:$src),
3862 "movdqa\t{$src, $dst|$dst, $src}", [],
3863 IIC_SSE_MOVA_P_RR>, VEX, VEX_L;
3864 def VMOVDQUrr_REV : VSSI<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
3865 "movdqu\t{$src, $dst|$dst, $src}", [],
3866 IIC_SSE_MOVU_P_RR>,
3867 VEX;
3868 def VMOVDQUYrr_REV : VSSI<0x7F, MRMDestReg, (outs VR256:$dst), (ins VR256:$src),
3869 "movdqu\t{$src, $dst|$dst, $src}", [],
3870 IIC_SSE_MOVU_P_RR>, VEX, VEX_L;
3871 }
3873 let canFoldAsLoad = 1, mayLoad = 1, isReMaterializable = 1,
3874 hasSideEffects = 0, SchedRW = [WriteLoad] in {
3875 def VMOVDQArm : VPDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3876 "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_RM>,
3877 VEX;
3878 def VMOVDQAYrm : VPDI<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
3879 "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_RM>,
3880 VEX, VEX_L;
3881 let Predicates = [HasAVX] in {
3882 def VMOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3883 "vmovdqu\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVU_P_RM>,
3884 XS, VEX;
3885 def VMOVDQUYrm : I<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
3886 "vmovdqu\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVU_P_RM>,
3887 XS, VEX, VEX_L;
3888 }
3889 }
3891 let mayStore = 1, hasSideEffects = 0, SchedRW = [WriteStore] in {
3892 def VMOVDQAmr : VPDI<0x7F, MRMDestMem, (outs),
3893 (ins i128mem:$dst, VR128:$src),
3894 "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_MR>,
3895 VEX;
3896 def VMOVDQAYmr : VPDI<0x7F, MRMDestMem, (outs),
3897 (ins i256mem:$dst, VR256:$src),
3898 "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_MR>,
3899 VEX, VEX_L;
3900 let Predicates = [HasAVX] in {
3901 def VMOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
3902 "vmovdqu\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVU_P_MR>,
3903 XS, VEX;
3904 def VMOVDQUYmr : I<0x7F, MRMDestMem, (outs), (ins i256mem:$dst, VR256:$src),
3905 "vmovdqu\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVU_P_MR>,
3906 XS, VEX, VEX_L;
3907 }
3908 }
3910 let SchedRW = [WriteMove] in {
3911 let hasSideEffects = 0 in
3912 def MOVDQArr : PDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3913 "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_RR>;
3915 def MOVDQUrr : I<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3916 "movdqu\t{$src, $dst|$dst, $src}",
3917 [], IIC_SSE_MOVU_P_RR>, XS, Requires<[UseSSE2]>;
3920 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in {
3921 def MOVDQArr_REV : PDI<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
3922 "movdqa\t{$src, $dst|$dst, $src}", [],
3923 IIC_SSE_MOVA_P_RR>;
3925 def MOVDQUrr_REV : I<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
3926 "movdqu\t{$src, $dst|$dst, $src}",
3927 [], IIC_SSE_MOVU_P_RR>, XS, Requires<[UseSSE2]>;
3928 }
3929 } // SchedRW
3931 let canFoldAsLoad = 1, mayLoad = 1, isReMaterializable = 1,
3932 hasSideEffects = 0, SchedRW = [WriteLoad] in {
3933 def MOVDQArm : PDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3934 "movdqa\t{$src, $dst|$dst, $src}",
3935 [/*(set VR128:$dst, (alignedloadv2i64 addr:$src))*/],
3936 IIC_SSE_MOVA_P_RM>;
3937 def MOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3938 "movdqu\t{$src, $dst|$dst, $src}",
3939 [/*(set VR128:$dst, (loadv2i64 addr:$src))*/],
3940 IIC_SSE_MOVU_P_RM>,
3941 XS, Requires<[UseSSE2]>;
3942 }
3944 let mayStore = 1, hasSideEffects = 0, SchedRW = [WriteStore] in {
3945 def MOVDQAmr : PDI<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
3946 "movdqa\t{$src, $dst|$dst, $src}",
3947 [/*(alignedstore (v2i64 VR128:$src), addr:$dst)*/],
3948 IIC_SSE_MOVA_P_MR>;
3949 def MOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
3950 "movdqu\t{$src, $dst|$dst, $src}",
3951 [/*(store (v2i64 VR128:$src), addr:$dst)*/],
3952 IIC_SSE_MOVU_P_MR>,
3953 XS, Requires<[UseSSE2]>;
3954 }
3956 } // ExeDomain = SSEPackedInt
3958 let Predicates = [HasAVX] in {
3959 def : Pat<(int_x86_sse2_storeu_dq addr:$dst, VR128:$src),
3960 (VMOVDQUmr addr:$dst, VR128:$src)>;
3961 def : Pat<(int_x86_avx_storeu_dq_256 addr:$dst, VR256:$src),
3962 (VMOVDQUYmr addr:$dst, VR256:$src)>;
3963 }
3964 let Predicates = [UseSSE2] in
3965 def : Pat<(int_x86_sse2_storeu_dq addr:$dst, VR128:$src),
3966 (MOVDQUmr addr:$dst, VR128:$src)>;
3968 //===---------------------------------------------------------------------===//
3969 // SSE2 - Packed Integer Arithmetic Instructions
3970 //===---------------------------------------------------------------------===//
3972 let Sched = WriteVecIMul in
3973 def SSE_PMADD : OpndItins<
3974 IIC_SSE_PMADD, IIC_SSE_PMADD
3977 let ExeDomain = SSEPackedInt in { // SSE integer instructions
3979 multiclass PDI_binop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId,
3980 RegisterClass RC, PatFrag memop_frag,
3981 X86MemOperand x86memop,
3982 OpndItins itins,
3983 bit IsCommutable = 0,
3984 bit Is2Addr = 1> {
3985 let isCommutable = IsCommutable in
3986 def rr : PDI<opc, MRMSrcReg, (outs RC:$dst),
3987 (ins RC:$src1, RC:$src2),
3988 !if(Is2Addr,
3989 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3990 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3991 [(set RC:$dst, (IntId RC:$src1, RC:$src2))], itins.rr>,
3992 Sched<[itins.Sched]>;
3993 def rm : PDI<opc, MRMSrcMem, (outs RC:$dst),
3994 (ins RC:$src1, x86memop:$src2),
3995 !if(Is2Addr,
3996 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3997 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3998 [(set RC:$dst, (IntId RC:$src1, (bitconvert (memop_frag addr:$src2))))],
3999 itins.rm>, Sched<[itins.Sched.Folded, ReadAfterLd]>;
4000 }
4002 multiclass PDI_binop_all_int<bits<8> opc, string OpcodeStr, Intrinsic IntId128,
4003 Intrinsic IntId256, OpndItins itins,
4004 bit IsCommutable = 0> {
4005 let Predicates = [HasAVX] in
4006 defm V#NAME : PDI_binop_rm_int<opc, !strconcat("v", OpcodeStr), IntId128,
4007 VR128, loadv2i64, i128mem, itins,
4008 IsCommutable, 0>, VEX_4V;
4010 let Constraints = "$src1 = $dst" in
4011 defm NAME : PDI_binop_rm_int<opc, OpcodeStr, IntId128, VR128, memopv2i64,
4012 i128mem, itins, IsCommutable, 1>;
4014 let Predicates = [HasAVX2] in
4015 defm V#NAME#Y : PDI_binop_rm_int<opc, !strconcat("v", OpcodeStr), IntId256,
4016 VR256, loadv4i64, i256mem, itins,
4017 IsCommutable, 0>, VEX_4V, VEX_L;
4020 multiclass PDI_binop_rmi<bits<8> opc, bits<8> opc2, Format ImmForm,
4021 string OpcodeStr, SDNode OpNode,
4022 SDNode OpNode2, RegisterClass RC,
4023 ValueType DstVT, ValueType SrcVT, PatFrag bc_frag,
4024 PatFrag ld_frag, ShiftOpndItins itins,
4025 bit Is2Addr = 1> {
4026 // src2 is always 128-bit
4027 def rr : PDI<opc, MRMSrcReg, (outs RC:$dst),
4028 (ins RC:$src1, VR128:$src2),
4029 !if(Is2Addr,
4030 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4031 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4032 [(set RC:$dst, (DstVT (OpNode RC:$src1, (SrcVT VR128:$src2))))],
4033 itins.rr>, Sched<[WriteVecShift]>;
4034 def rm : PDI<opc, MRMSrcMem, (outs RC:$dst),
4035 (ins RC:$src1, i128mem:$src2),
4036 !if(Is2Addr,
4037 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4038 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4039 [(set RC:$dst, (DstVT (OpNode RC:$src1,
4040 (bc_frag (ld_frag addr:$src2)))))], itins.rm>,
4041 Sched<[WriteVecShiftLd, ReadAfterLd]>;
4042 def ri : PDIi8<opc2, ImmForm, (outs RC:$dst),
4043 (ins RC:$src1, u8imm:$src2),
4044 !if(Is2Addr,
4045 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4046 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4047 [(set RC:$dst, (DstVT (OpNode2 RC:$src1, (i8 imm:$src2))))], itins.ri>,
4048 Sched<[WriteVecShift]>;
4049 }
4051 /// PDI_binop_rm2 - Simple SSE2 binary operator with different src and dst types
4052 multiclass PDI_binop_rm2<bits<8> opc, string OpcodeStr, SDNode OpNode,
4053 ValueType DstVT, ValueType SrcVT, RegisterClass RC,
4054 PatFrag memop_frag, X86MemOperand x86memop,
4055 OpndItins itins,
4056 bit IsCommutable = 0, bit Is2Addr = 1> {
4057 let isCommutable = IsCommutable in
4058 def rr : PDI<opc, MRMSrcReg, (outs RC:$dst),
4059 (ins RC:$src1, RC:$src2),
4060 !if(Is2Addr,
4061 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4062 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4063 [(set RC:$dst, (DstVT (OpNode (SrcVT RC:$src1), RC:$src2)))]>,
4064 Sched<[itins.Sched]>;
4065 def rm : PDI<opc, MRMSrcMem, (outs RC:$dst),
4066 (ins RC:$src1, x86memop:$src2),
4067 !if(Is2Addr,
4068 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4069 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4070 [(set RC:$dst, (DstVT (OpNode (SrcVT RC:$src1),
4071 (bitconvert (memop_frag addr:$src2)))))]>,
4072 Sched<[itins.Sched.Folded, ReadAfterLd]>;
4073 }
4074 } // ExeDomain = SSEPackedInt
4076 defm PADDB : PDI_binop_all<0xFC, "paddb", add, v16i8, v32i8,
4077 SSE_INTALU_ITINS_P, 1>;
4078 defm PADDW : PDI_binop_all<0xFD, "paddw", add, v8i16, v16i16,
4079 SSE_INTALU_ITINS_P, 1>;
4080 defm PADDD : PDI_binop_all<0xFE, "paddd", add, v4i32, v8i32,
4081 SSE_INTALU_ITINS_P, 1>;
4082 defm PADDQ : PDI_binop_all<0xD4, "paddq", add, v2i64, v4i64,
4083 SSE_INTALUQ_ITINS_P, 1>;
4084 defm PMULLW : PDI_binop_all<0xD5, "pmullw", mul, v8i16, v16i16,
4085 SSE_INTMUL_ITINS_P, 1>;
4086 defm PMULHUW : PDI_binop_all<0xE4, "pmulhuw", mulhu, v8i16, v16i16,
4087 SSE_INTMUL_ITINS_P, 1>;
4088 defm PMULHW : PDI_binop_all<0xE5, "pmulhw", mulhs, v8i16, v16i16,
4089 SSE_INTMUL_ITINS_P, 1>;
4090 defm PSUBB : PDI_binop_all<0xF8, "psubb", sub, v16i8, v32i8,
4091 SSE_INTALU_ITINS_P, 0>;
4092 defm PSUBW : PDI_binop_all<0xF9, "psubw", sub, v8i16, v16i16,
4093 SSE_INTALU_ITINS_P, 0>;
4094 defm PSUBD : PDI_binop_all<0xFA, "psubd", sub, v4i32, v8i32,
4095 SSE_INTALU_ITINS_P, 0>;
4096 defm PSUBQ : PDI_binop_all<0xFB, "psubq", sub, v2i64, v4i64,
4097 SSE_INTALUQ_ITINS_P, 0>;
4098 defm PSUBUSB : PDI_binop_all<0xD8, "psubusb", X86subus, v16i8, v32i8,
4099 SSE_INTALU_ITINS_P, 0>;
4100 defm PSUBUSW : PDI_binop_all<0xD9, "psubusw", X86subus, v8i16, v16i16,
4101 SSE_INTALU_ITINS_P, 0>;
4102 defm PMINUB : PDI_binop_all<0xDA, "pminub", X86umin, v16i8, v32i8,
4103 SSE_INTALU_ITINS_P, 1>;
4104 defm PMINSW : PDI_binop_all<0xEA, "pminsw", X86smin, v8i16, v16i16,
4105 SSE_INTALU_ITINS_P, 1>;
4106 defm PMAXUB : PDI_binop_all<0xDE, "pmaxub", X86umax, v16i8, v32i8,
4107 SSE_INTALU_ITINS_P, 1>;
4108 defm PMAXSW : PDI_binop_all<0xEE, "pmaxsw", X86smax, v8i16, v16i16,
4109 SSE_INTALU_ITINS_P, 1>;
4111 // Intrinsic forms
4112 defm PSUBSB : PDI_binop_all_int<0xE8, "psubsb", int_x86_sse2_psubs_b,
4113 int_x86_avx2_psubs_b, SSE_INTALU_ITINS_P, 0>;
4114 defm PSUBSW : PDI_binop_all_int<0xE9, "psubsw" , int_x86_sse2_psubs_w,
4115 int_x86_avx2_psubs_w, SSE_INTALU_ITINS_P, 0>;
4116 defm PADDSB : PDI_binop_all_int<0xEC, "paddsb" , int_x86_sse2_padds_b,
4117 int_x86_avx2_padds_b, SSE_INTALU_ITINS_P, 1>;
4118 defm PADDSW : PDI_binop_all_int<0xED, "paddsw" , int_x86_sse2_padds_w,
4119 int_x86_avx2_padds_w, SSE_INTALU_ITINS_P, 1>;
4120 defm PADDUSB : PDI_binop_all_int<0xDC, "paddusb", int_x86_sse2_paddus_b,
4121 int_x86_avx2_paddus_b, SSE_INTALU_ITINS_P, 1>;
4122 defm PADDUSW : PDI_binop_all_int<0xDD, "paddusw", int_x86_sse2_paddus_w,
4123 int_x86_avx2_paddus_w, SSE_INTALU_ITINS_P, 1>;
4124 defm PMADDWD : PDI_binop_all_int<0xF5, "pmaddwd", int_x86_sse2_pmadd_wd,
4125 int_x86_avx2_pmadd_wd, SSE_PMADD, 1>;
4126 defm PAVGB : PDI_binop_all_int<0xE0, "pavgb", int_x86_sse2_pavg_b,
4127 int_x86_avx2_pavg_b, SSE_INTALU_ITINS_P, 1>;
4128 defm PAVGW : PDI_binop_all_int<0xE3, "pavgw", int_x86_sse2_pavg_w,
4129 int_x86_avx2_pavg_w, SSE_INTALU_ITINS_P, 1>;
4130 defm PSADBW : PDI_binop_all_int<0xF6, "psadbw", int_x86_sse2_psad_bw,
4131 int_x86_avx2_psad_bw, SSE_PMADD, 1>;
4133 let Predicates = [HasAVX] in
4134 defm VPMULUDQ : PDI_binop_rm2<0xF4, "vpmuludq", X86pmuludq, v2i64, v4i32, VR128,
4135 loadv2i64, i128mem, SSE_INTMUL_ITINS_P, 1, 0>,
4136 VEX_4V;
4137 let Predicates = [HasAVX2] in
4138 defm VPMULUDQY : PDI_binop_rm2<0xF4, "vpmuludq", X86pmuludq, v4i64, v8i32,
4139 VR256, loadv4i64, i256mem,
4140 SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V, VEX_L;
4141 let Constraints = "$src1 = $dst" in
4142 defm PMULUDQ : PDI_binop_rm2<0xF4, "pmuludq", X86pmuludq, v2i64, v4i32, VR128,
4143 memopv2i64, i128mem, SSE_INTMUL_ITINS_P, 1>;
4145 //===---------------------------------------------------------------------===//
4146 // SSE2 - Packed Integer Logical Instructions
4147 //===---------------------------------------------------------------------===//
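// Two kinds of shifts are defined below: per-element shifts (PSLLW/D/Q,
// PSRLW/D/Q, PSRAW/D) whose count comes from an immediate or from the low
// 64 bits of an XMM register, and whole-register byte shifts (PSLLDQ/PSRLDQ)
// whose immediate counts bytes. Logical shifts by a count >= the element
// width produce zero, while arithmetic shifts clamp the count and fill with
// copies of the sign bit.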
4149 let Predicates = [HasAVX] in {
4150 defm VPSLLW : PDI_binop_rmi<0xF1, 0x71, MRM6r, "vpsllw", X86vshl, X86vshli,
4151 VR128, v8i16, v8i16, bc_v8i16, loadv2i64,
4152 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
4153 defm VPSLLD : PDI_binop_rmi<0xF2, 0x72, MRM6r, "vpslld", X86vshl, X86vshli,
4154 VR128, v4i32, v4i32, bc_v4i32, loadv2i64,
4155 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
4156 defm VPSLLQ : PDI_binop_rmi<0xF3, 0x73, MRM6r, "vpsllq", X86vshl, X86vshli,
4157 VR128, v2i64, v2i64, bc_v2i64, loadv2i64,
4158 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
4160 defm VPSRLW : PDI_binop_rmi<0xD1, 0x71, MRM2r, "vpsrlw", X86vsrl, X86vsrli,
4161 VR128, v8i16, v8i16, bc_v8i16, loadv2i64,
4162 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
4163 defm VPSRLD : PDI_binop_rmi<0xD2, 0x72, MRM2r, "vpsrld", X86vsrl, X86vsrli,
4164 VR128, v4i32, v4i32, bc_v4i32, loadv2i64,
4165 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
4166 defm VPSRLQ : PDI_binop_rmi<0xD3, 0x73, MRM2r, "vpsrlq", X86vsrl, X86vsrli,
4167 VR128, v2i64, v2i64, bc_v2i64, loadv2i64,
4168 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
4170 defm VPSRAW : PDI_binop_rmi<0xE1, 0x71, MRM4r, "vpsraw", X86vsra, X86vsrai,
4171 VR128, v8i16, v8i16, bc_v8i16, loadv2i64,
4172 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
4173 defm VPSRAD : PDI_binop_rmi<0xE2, 0x72, MRM4r, "vpsrad", X86vsra, X86vsrai,
4174 VR128, v4i32, v4i32, bc_v4i32, loadv2i64,
4175 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
4177 let ExeDomain = SSEPackedInt, SchedRW = [WriteVecShift], hasSideEffects = 0 in {
4178 // 128-bit logical shifts.
4179 def VPSLLDQri : PDIi8<0x73, MRM7r,
4180 (outs VR128:$dst), (ins VR128:$src1, i32u8imm:$src2),
4181 "vpslldq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
4183 def VPSRLDQri : PDIi8<0x73, MRM3r,
4184 (outs VR128:$dst), (ins VR128:$src1, i32u8imm:$src2),
4185 "vpsrldq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
4187 // PSRADQri doesn't exist in SSE[1-3].
4189 } // Predicates = [HasAVX]
4191 let Predicates = [HasAVX2] in {
4192 defm VPSLLWY : PDI_binop_rmi<0xF1, 0x71, MRM6r, "vpsllw", X86vshl, X86vshli,
4193 VR256, v16i16, v8i16, bc_v8i16, loadv2i64,
4194 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
4195 defm VPSLLDY : PDI_binop_rmi<0xF2, 0x72, MRM6r, "vpslld", X86vshl, X86vshli,
4196 VR256, v8i32, v4i32, bc_v4i32, loadv2i64,
4197 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
4198 defm VPSLLQY : PDI_binop_rmi<0xF3, 0x73, MRM6r, "vpsllq", X86vshl, X86vshli,
4199 VR256, v4i64, v2i64, bc_v2i64, loadv2i64,
4200 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
4202 defm VPSRLWY : PDI_binop_rmi<0xD1, 0x71, MRM2r, "vpsrlw", X86vsrl, X86vsrli,
4203 VR256, v16i16, v8i16, bc_v8i16, loadv2i64,
4204 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
4205 defm VPSRLDY : PDI_binop_rmi<0xD2, 0x72, MRM2r, "vpsrld", X86vsrl, X86vsrli,
4206 VR256, v8i32, v4i32, bc_v4i32, loadv2i64,
4207 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
4208 defm VPSRLQY : PDI_binop_rmi<0xD3, 0x73, MRM2r, "vpsrlq", X86vsrl, X86vsrli,
4209 VR256, v4i64, v2i64, bc_v2i64, loadv2i64,
4210 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
4212 defm VPSRAWY : PDI_binop_rmi<0xE1, 0x71, MRM4r, "vpsraw", X86vsra, X86vsrai,
4213 VR256, v16i16, v8i16, bc_v8i16, loadv2i64,
4214 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
4215 defm VPSRADY : PDI_binop_rmi<0xE2, 0x72, MRM4r, "vpsrad", X86vsra, X86vsrai,
4216 VR256, v8i32, v4i32, bc_v4i32, loadv2i64,
4217 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
4219 let ExeDomain = SSEPackedInt, SchedRW = [WriteVecShift] in {
4220 // 256-bit logical shifts.
4221 def VPSLLDQYri : PDIi8<0x73, MRM7r,
4222 (outs VR256:$dst), (ins VR256:$src1, i32u8imm:$src2),
4223 "vpslldq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
4225 (int_x86_avx2_psll_dq_bs VR256:$src1, imm:$src2))]>,
4227 def VPSRLDQYri : PDIi8<0x73, MRM3r,
4228 (outs VR256:$dst), (ins VR256:$src1, i32u8imm:$src2),
4229 "vpsrldq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
4231 (int_x86_avx2_psrl_dq_bs VR256:$src1, imm:$src2))]>,
4233 // PSRADQYri doesn't exist in SSE[1-3].
4235 } // Predicates = [HasAVX2]
4237 let Constraints = "$src1 = $dst" in {
4238 defm PSLLW : PDI_binop_rmi<0xF1, 0x71, MRM6r, "psllw", X86vshl, X86vshli,
4239 VR128, v8i16, v8i16, bc_v8i16, memopv2i64,
4240 SSE_INTSHIFT_ITINS_P>;
4241 defm PSLLD : PDI_binop_rmi<0xF2, 0x72, MRM6r, "pslld", X86vshl, X86vshli,
4242 VR128, v4i32, v4i32, bc_v4i32, memopv2i64,
4243 SSE_INTSHIFT_ITINS_P>;
4244 defm PSLLQ : PDI_binop_rmi<0xF3, 0x73, MRM6r, "psllq", X86vshl, X86vshli,
4245 VR128, v2i64, v2i64, bc_v2i64, memopv2i64,
4246 SSE_INTSHIFT_ITINS_P>;
4248 defm PSRLW : PDI_binop_rmi<0xD1, 0x71, MRM2r, "psrlw", X86vsrl, X86vsrli,
4249 VR128, v8i16, v8i16, bc_v8i16, memopv2i64,
4250 SSE_INTSHIFT_ITINS_P>;
4251 defm PSRLD : PDI_binop_rmi<0xD2, 0x72, MRM2r, "psrld", X86vsrl, X86vsrli,
4252 VR128, v4i32, v4i32, bc_v4i32, memopv2i64,
4253 SSE_INTSHIFT_ITINS_P>;
4254 defm PSRLQ : PDI_binop_rmi<0xD3, 0x73, MRM2r, "psrlq", X86vsrl, X86vsrli,
4255 VR128, v2i64, v2i64, bc_v2i64, memopv2i64,
4256 SSE_INTSHIFT_ITINS_P>;
4258 defm PSRAW : PDI_binop_rmi<0xE1, 0x71, MRM4r, "psraw", X86vsra, X86vsrai,
4259 VR128, v8i16, v8i16, bc_v8i16, memopv2i64,
4260 SSE_INTSHIFT_ITINS_P>;
4261 defm PSRAD : PDI_binop_rmi<0xE2, 0x72, MRM4r, "psrad", X86vsra, X86vsrai,
4262 VR128, v4i32, v4i32, bc_v4i32, memopv2i64,
4263 SSE_INTSHIFT_ITINS_P>;
4265 let ExeDomain = SSEPackedInt, SchedRW = [WriteVecShift], hasSideEffects = 0 in {
4266 // 128-bit logical shifts.
4267 def PSLLDQri : PDIi8<0x73, MRM7r,
4268 (outs VR128:$dst), (ins VR128:$src1, i32u8imm:$src2),
4269 "pslldq\t{$src2, $dst|$dst, $src2}",
4270 [], IIC_SSE_INTSHDQ_P_RI>;
4271 def PSRLDQri : PDIi8<0x73, MRM3r,
4272 (outs VR128:$dst), (ins VR128:$src1, i32u8imm:$src2),
4273 "psrldq\t{$src2, $dst|$dst, $src2}",
4274 [], IIC_SSE_INTSHDQ_P_RI>;
4275 // PSRADQri doesn't exist in SSE[1-3].
4277 } // Constraints = "$src1 = $dst"
4279 let Predicates = [HasAVX] in {
4280 def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
4281 (VPSLLDQri VR128:$src1, (BYTE_imm imm:$src2))>;
4282 def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
4283 (VPSRLDQri VR128:$src1, (BYTE_imm imm:$src2))>;
4284 def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
4285 (VPSRLDQri VR128:$src1, (BYTE_imm imm:$src2))>;
4287 // Shift up / down and insert zeros.
4288 def : Pat<(v2i64 (X86vshldq VR128:$src, (i8 imm:$amt))),
4289 (VPSLLDQri VR128:$src, (BYTE_imm imm:$amt))>;
4290 def : Pat<(v2i64 (X86vshrdq VR128:$src, (i8 imm:$amt))),
4291 (VPSRLDQri VR128:$src, (BYTE_imm imm:$amt))>;
4294 let Predicates = [HasAVX2] in {
4295 def : Pat<(int_x86_avx2_psll_dq VR256:$src1, imm:$src2),
4296 (VPSLLDQYri VR256:$src1, (BYTE_imm imm:$src2))>;
4297 def : Pat<(int_x86_avx2_psrl_dq VR256:$src1, imm:$src2),
4298 (VPSRLDQYri VR256:$src1, (BYTE_imm imm:$src2))>;
4301 let Predicates = [UseSSE2] in {
4302 def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
4303 (PSLLDQri VR128:$src1, (BYTE_imm imm:$src2))>;
4304 def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
4305 (PSRLDQri VR128:$src1, (BYTE_imm imm:$src2))>;
4306 def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
4307 (PSRLDQri VR128:$src1, (BYTE_imm imm:$src2))>;
4309 // Shift up / down and insert zeros.
4310 def : Pat<(v2i64 (X86vshldq VR128:$src, (i8 imm:$amt))),
4311 (PSLLDQri VR128:$src, (BYTE_imm imm:$amt))>;
4312 def : Pat<(v2i64 (X86vshrdq VR128:$src, (i8 imm:$amt))),
4313 (PSRLDQri VR128:$src, (BYTE_imm imm:$amt))>;
4316 //===---------------------------------------------------------------------===//
4317 // SSE2 - Packed Integer Comparison Instructions
4318 //===---------------------------------------------------------------------===//
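// Each comparison produces a per-element mask: all ones where the predicate
// holds and all zeros elsewhere. PCMPEQ* tests bitwise equality; PCMPGT*
// compares the elements as signed integers.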
4320 defm PCMPEQB : PDI_binop_all<0x74, "pcmpeqb", X86pcmpeq, v16i8, v32i8,
4321 SSE_INTALU_ITINS_P, 1>;
4322 defm PCMPEQW : PDI_binop_all<0x75, "pcmpeqw", X86pcmpeq, v8i16, v16i16,
4323 SSE_INTALU_ITINS_P, 1>;
4324 defm PCMPEQD : PDI_binop_all<0x76, "pcmpeqd", X86pcmpeq, v4i32, v8i32,
4325 SSE_INTALU_ITINS_P, 1>;
4326 defm PCMPGTB : PDI_binop_all<0x64, "pcmpgtb", X86pcmpgt, v16i8, v32i8,
4327 SSE_INTALU_ITINS_P, 0>;
4328 defm PCMPGTW : PDI_binop_all<0x65, "pcmpgtw", X86pcmpgt, v8i16, v16i16,
4329 SSE_INTALU_ITINS_P, 0>;
4330 defm PCMPGTD : PDI_binop_all<0x66, "pcmpgtd", X86pcmpgt, v4i32, v8i32,
4331 SSE_INTALU_ITINS_P, 0>;
4333 //===---------------------------------------------------------------------===//
4334 // SSE2 - Packed Integer Shuffle Instructions
4335 //===---------------------------------------------------------------------===//
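// The 8-bit immediate selects source elements with 2-bit fields; for PSHUFD,
// dst[i] = src[(imm >> 2*i) & 3]. PSHUFLW shuffles only the four low words
// and copies the high quadword unchanged, while PSHUFHW does the opposite.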
4337 let ExeDomain = SSEPackedInt in {
4338 multiclass sse2_pshuffle<string OpcodeStr, ValueType vt128, ValueType vt256,
4340 let Predicates = [HasAVX] in {
4341 def V#NAME#ri : Ii8<0x70, MRMSrcReg, (outs VR128:$dst),
4342 (ins VR128:$src1, u8imm:$src2),
4343 !strconcat("v", OpcodeStr,
4344 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4346 (vt128 (OpNode VR128:$src1, (i8 imm:$src2))))],
4347 IIC_SSE_PSHUF_RI>, VEX, Sched<[WriteShuffle]>;
4348 def V#NAME#mi : Ii8<0x70, MRMSrcMem, (outs VR128:$dst),
4349 (ins i128mem:$src1, u8imm:$src2),
4350 !strconcat("v", OpcodeStr,
4351 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4353 (vt128 (OpNode (bitconvert (loadv2i64 addr:$src1)),
4354 (i8 imm:$src2))))], IIC_SSE_PSHUF_MI>, VEX,
4355 Sched<[WriteShuffleLd]>;
4358 let Predicates = [HasAVX2] in {
4359 def V#NAME#Yri : Ii8<0x70, MRMSrcReg, (outs VR256:$dst),
4360 (ins VR256:$src1, u8imm:$src2),
4361 !strconcat("v", OpcodeStr,
4362 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4364 (vt256 (OpNode VR256:$src1, (i8 imm:$src2))))],
4365 IIC_SSE_PSHUF_RI>, VEX, VEX_L, Sched<[WriteShuffle]>;
4366 def V#NAME#Ymi : Ii8<0x70, MRMSrcMem, (outs VR256:$dst),
4367 (ins i256mem:$src1, u8imm:$src2),
4368 !strconcat("v", OpcodeStr,
4369 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4371 (vt256 (OpNode (bitconvert (loadv4i64 addr:$src1)),
4372 (i8 imm:$src2))))], IIC_SSE_PSHUF_MI>, VEX, VEX_L,
4373 Sched<[WriteShuffleLd]>;
4376 let Predicates = [UseSSE2] in {
4377 def ri : Ii8<0x70, MRMSrcReg,
4378 (outs VR128:$dst), (ins VR128:$src1, u8imm:$src2),
4379 !strconcat(OpcodeStr,
4380 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4382 (vt128 (OpNode VR128:$src1, (i8 imm:$src2))))],
4383 IIC_SSE_PSHUF_RI>, Sched<[WriteShuffle]>;
4384 def mi : Ii8<0x70, MRMSrcMem,
4385 (outs VR128:$dst), (ins i128mem:$src1, u8imm:$src2),
4386 !strconcat(OpcodeStr,
4387 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4389 (vt128 (OpNode (bitconvert (memopv2i64 addr:$src1)),
4390 (i8 imm:$src2))))], IIC_SSE_PSHUF_MI>,
4391 Sched<[WriteShuffleLd, ReadAfterLd]>;
4394 } // ExeDomain = SSEPackedInt
4396 defm PSHUFD : sse2_pshuffle<"pshufd", v4i32, v8i32, X86PShufd>, PD;
4397 defm PSHUFHW : sse2_pshuffle<"pshufhw", v8i16, v16i16, X86PShufhw>, XS;
4398 defm PSHUFLW : sse2_pshuffle<"pshuflw", v8i16, v16i16, X86PShuflw>, XD;
4400 let Predicates = [HasAVX] in {
4401 def : Pat<(v4f32 (X86PShufd (loadv4f32 addr:$src1), (i8 imm:$imm))),
4402 (VPSHUFDmi addr:$src1, imm:$imm)>;
4403 def : Pat<(v4f32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
4404 (VPSHUFDri VR128:$src1, imm:$imm)>;
4407 let Predicates = [UseSSE2] in {
4408 def : Pat<(v4f32 (X86PShufd (memopv4f32 addr:$src1), (i8 imm:$imm))),
4409 (PSHUFDmi addr:$src1, imm:$imm)>;
4410 def : Pat<(v4f32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
4411 (PSHUFDri VR128:$src1, imm:$imm)>;
4414 //===---------------------------------------------------------------------===//
4415 // Packed Integer Pack Instructions (SSE & AVX)
4416 //===---------------------------------------------------------------------===//
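// PACKSSWB/PACKSSDW narrow each source element to half its width with signed
// saturation; PACKUSWB and the SSE4.1 PACKUSDW saturate to the unsigned
// range. $src1 supplies the low half of the result and $src2 the high half
// (within each 128-bit lane for the 256-bit AVX2 forms).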
4418 let ExeDomain = SSEPackedInt in {
4419 multiclass sse2_pack<bits<8> opc, string OpcodeStr, ValueType OutVT,
4420 ValueType ArgVT, SDNode OpNode, PatFrag bc_frag,
4421 PatFrag ld_frag, bit Is2Addr = 1> {
4422 def rr : PDI<opc, MRMSrcReg,
4423 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
4425 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4426 !strconcat(OpcodeStr,
4427 "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4429 (OutVT (OpNode (ArgVT VR128:$src1), VR128:$src2)))]>,
4430 Sched<[WriteShuffle]>;
4431 def rm : PDI<opc, MRMSrcMem,
4432 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
4434 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4435 !strconcat(OpcodeStr,
4436 "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4438 (OutVT (OpNode VR128:$src1,
4439 (bc_frag (ld_frag addr:$src2)))))]>,
4440 Sched<[WriteShuffleLd, ReadAfterLd]>;
4443 multiclass sse2_pack_y<bits<8> opc, string OpcodeStr, ValueType OutVT,
4444 ValueType ArgVT, SDNode OpNode, PatFrag bc_frag> {
4445 def Yrr : PDI<opc, MRMSrcReg,
4446 (outs VR256:$dst), (ins VR256:$src1, VR256:$src2),
4447 !strconcat(OpcodeStr,
4448 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4450 (OutVT (OpNode (ArgVT VR256:$src1), VR256:$src2)))]>,
4451 Sched<[WriteShuffle]>;
4452 def Yrm : PDI<opc, MRMSrcMem,
4453 (outs VR256:$dst), (ins VR256:$src1, i256mem:$src2),
4454 !strconcat(OpcodeStr,
4455 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4457 (OutVT (OpNode VR256:$src1,
4458 (bc_frag (loadv4i64 addr:$src2)))))]>,
4459 Sched<[WriteShuffleLd, ReadAfterLd]>;
4462 multiclass sse4_pack<bits<8> opc, string OpcodeStr, ValueType OutVT,
4463 ValueType ArgVT, SDNode OpNode, PatFrag bc_frag,
4464 PatFrag ld_frag, bit Is2Addr = 1> {
4465 def rr : SS48I<opc, MRMSrcReg,
4466 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
4468 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4469 !strconcat(OpcodeStr,
4470 "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4472 (OutVT (OpNode (ArgVT VR128:$src1), VR128:$src2)))]>,
4473 Sched<[WriteShuffle]>;
4474 def rm : SS48I<opc, MRMSrcMem,
4475 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
4477 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4478 !strconcat(OpcodeStr,
4479 "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4481 (OutVT (OpNode VR128:$src1,
4482 (bc_frag (ld_frag addr:$src2)))))]>,
4483 Sched<[WriteShuffleLd, ReadAfterLd]>;
4486 multiclass sse4_pack_y<bits<8> opc, string OpcodeStr, ValueType OutVT,
4487 ValueType ArgVT, SDNode OpNode, PatFrag bc_frag> {
4488 def Yrr : SS48I<opc, MRMSrcReg,
4489 (outs VR256:$dst), (ins VR256:$src1, VR256:$src2),
4490 !strconcat(OpcodeStr,
4491 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4493 (OutVT (OpNode (ArgVT VR256:$src1), VR256:$src2)))]>,
4494 Sched<[WriteShuffle]>;
4495 def Yrm : SS48I<opc, MRMSrcMem,
4496 (outs VR256:$dst), (ins VR256:$src1, i256mem:$src2),
4497 !strconcat(OpcodeStr,
4498 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4500 (OutVT (OpNode VR256:$src1,
4501 (bc_frag (loadv4i64 addr:$src2)))))]>,
4502 Sched<[WriteShuffleLd, ReadAfterLd]>;
4505 let Predicates = [HasAVX] in {
4506 defm VPACKSSWB : sse2_pack<0x63, "vpacksswb", v16i8, v8i16, X86Packss,
4507 bc_v8i16, loadv2i64, 0>, VEX_4V;
4508 defm VPACKSSDW : sse2_pack<0x6B, "vpackssdw", v8i16, v4i32, X86Packss,
4509 bc_v4i32, loadv2i64, 0>, VEX_4V;
4511 defm VPACKUSWB : sse2_pack<0x67, "vpackuswb", v16i8, v8i16, X86Packus,
4512 bc_v8i16, loadv2i64, 0>, VEX_4V;
4513 defm VPACKUSDW : sse4_pack<0x2B, "vpackusdw", v8i16, v4i32, X86Packus,
4514 bc_v4i32, loadv2i64, 0>, VEX_4V;
4517 let Predicates = [HasAVX2] in {
4518 defm VPACKSSWB : sse2_pack_y<0x63, "vpacksswb", v32i8, v16i16, X86Packss,
4519 bc_v16i16>, VEX_4V, VEX_L;
4520 defm VPACKSSDW : sse2_pack_y<0x6B, "vpackssdw", v16i16, v8i32, X86Packss,
4521 bc_v8i32>, VEX_4V, VEX_L;
4523 defm VPACKUSWB : sse2_pack_y<0x67, "vpackuswb", v32i8, v16i16, X86Packus,
4524 bc_v16i16>, VEX_4V, VEX_L;
4525 defm VPACKUSDW : sse4_pack_y<0x2B, "vpackusdw", v16i16, v8i32, X86Packus,
4526 bc_v8i32>, VEX_4V, VEX_L;
4529 let Constraints = "$src1 = $dst" in {
4530 defm PACKSSWB : sse2_pack<0x63, "packsswb", v16i8, v8i16, X86Packss,
4531 bc_v8i16, memopv2i64>;
4532 defm PACKSSDW : sse2_pack<0x6B, "packssdw", v8i16, v4i32, X86Packss,
4533 bc_v4i32, memopv2i64>;
4535 defm PACKUSWB : sse2_pack<0x67, "packuswb", v16i8, v8i16, X86Packus,
4536 bc_v8i16, memopv2i64>;
4538 let Predicates = [HasSSE41] in
4539 defm PACKUSDW : sse4_pack<0x2B, "packusdw", v8i16, v4i32, X86Packus,
4540 bc_v4i32, memopv2i64>;
4542 } // ExeDomain = SSEPackedInt
4544 //===---------------------------------------------------------------------===//
4545 // SSE2 - Packed Integer Unpack Instructions
4546 //===---------------------------------------------------------------------===//
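// The unpack instructions interleave elements from the two sources, e.g.
// PUNPCKLBW produces { a0, b0, a1, b1, ... } from the low halves and
// PUNPCKHBW does the same from the high halves. The 256-bit AVX2 forms
// interleave within each 128-bit lane independently.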
4548 let ExeDomain = SSEPackedInt in {
4549 multiclass sse2_unpack<bits<8> opc, string OpcodeStr, ValueType vt,
4550 SDNode OpNode, PatFrag bc_frag, PatFrag ld_frag,
4552 def rr : PDI<opc, MRMSrcReg,
4553 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
4555 !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
4556 !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4557 [(set VR128:$dst, (vt (OpNode VR128:$src1, VR128:$src2)))],
4558 IIC_SSE_UNPCK>, Sched<[WriteShuffle]>;
4559 def rm : PDI<opc, MRMSrcMem,
4560 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
4562 !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
4563 !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4564 [(set VR128:$dst, (OpNode VR128:$src1,
4565 (bc_frag (ld_frag addr:$src2))))],
4567 Sched<[WriteShuffleLd, ReadAfterLd]>;
4570 multiclass sse2_unpack_y<bits<8> opc, string OpcodeStr, ValueType vt,
4571 SDNode OpNode, PatFrag bc_frag> {
4572 def Yrr : PDI<opc, MRMSrcReg,
4573 (outs VR256:$dst), (ins VR256:$src1, VR256:$src2),
4574 !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4575 [(set VR256:$dst, (vt (OpNode VR256:$src1, VR256:$src2)))]>,
4576 Sched<[WriteShuffle]>;
4577 def Yrm : PDI<opc, MRMSrcMem,
4578 (outs VR256:$dst), (ins VR256:$src1, i256mem:$src2),
4579 !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4580 [(set VR256:$dst, (OpNode VR256:$src1,
4581 (bc_frag (loadv4i64 addr:$src2))))]>,
4582 Sched<[WriteShuffleLd, ReadAfterLd]>;
4585 let Predicates = [HasAVX] in {
4586 defm VPUNPCKLBW : sse2_unpack<0x60, "vpunpcklbw", v16i8, X86Unpckl,
4587 bc_v16i8, loadv2i64, 0>, VEX_4V;
4588 defm VPUNPCKLWD : sse2_unpack<0x61, "vpunpcklwd", v8i16, X86Unpckl,
4589 bc_v8i16, loadv2i64, 0>, VEX_4V;
4590 defm VPUNPCKLDQ : sse2_unpack<0x62, "vpunpckldq", v4i32, X86Unpckl,
4591 bc_v4i32, loadv2i64, 0>, VEX_4V;
4592 defm VPUNPCKLQDQ : sse2_unpack<0x6C, "vpunpcklqdq", v2i64, X86Unpckl,
4593 bc_v2i64, loadv2i64, 0>, VEX_4V;
4595 defm VPUNPCKHBW : sse2_unpack<0x68, "vpunpckhbw", v16i8, X86Unpckh,
4596 bc_v16i8, loadv2i64, 0>, VEX_4V;
4597 defm VPUNPCKHWD : sse2_unpack<0x69, "vpunpckhwd", v8i16, X86Unpckh,
4598 bc_v8i16, loadv2i64, 0>, VEX_4V;
4599 defm VPUNPCKHDQ : sse2_unpack<0x6A, "vpunpckhdq", v4i32, X86Unpckh,
4600 bc_v4i32, loadv2i64, 0>, VEX_4V;
4601 defm VPUNPCKHQDQ : sse2_unpack<0x6D, "vpunpckhqdq", v2i64, X86Unpckh,
4602 bc_v2i64, loadv2i64, 0>, VEX_4V;
4605 let Predicates = [HasAVX2] in {
4606 defm VPUNPCKLBW : sse2_unpack_y<0x60, "vpunpcklbw", v32i8, X86Unpckl,
4607 bc_v32i8>, VEX_4V, VEX_L;
4608 defm VPUNPCKLWD : sse2_unpack_y<0x61, "vpunpcklwd", v16i16, X86Unpckl,
4609 bc_v16i16>, VEX_4V, VEX_L;
4610 defm VPUNPCKLDQ : sse2_unpack_y<0x62, "vpunpckldq", v8i32, X86Unpckl,
4611 bc_v8i32>, VEX_4V, VEX_L;
4612 defm VPUNPCKLQDQ : sse2_unpack_y<0x6C, "vpunpcklqdq", v4i64, X86Unpckl,
4613 bc_v4i64>, VEX_4V, VEX_L;
4615 defm VPUNPCKHBW : sse2_unpack_y<0x68, "vpunpckhbw", v32i8, X86Unpckh,
4616 bc_v32i8>, VEX_4V, VEX_L;
4617 defm VPUNPCKHWD : sse2_unpack_y<0x69, "vpunpckhwd", v16i16, X86Unpckh,
4618 bc_v16i16>, VEX_4V, VEX_L;
4619 defm VPUNPCKHDQ : sse2_unpack_y<0x6A, "vpunpckhdq", v8i32, X86Unpckh,
4620 bc_v8i32>, VEX_4V, VEX_L;
4621 defm VPUNPCKHQDQ : sse2_unpack_y<0x6D, "vpunpckhqdq", v4i64, X86Unpckh,
4622 bc_v4i64>, VEX_4V, VEX_L;
4625 let Constraints = "$src1 = $dst" in {
4626 defm PUNPCKLBW : sse2_unpack<0x60, "punpcklbw", v16i8, X86Unpckl,
4627 bc_v16i8, memopv2i64>;
4628 defm PUNPCKLWD : sse2_unpack<0x61, "punpcklwd", v8i16, X86Unpckl,
4629 bc_v8i16, memopv2i64>;
4630 defm PUNPCKLDQ : sse2_unpack<0x62, "punpckldq", v4i32, X86Unpckl,
4631 bc_v4i32, memopv2i64>;
4632 defm PUNPCKLQDQ : sse2_unpack<0x6C, "punpcklqdq", v2i64, X86Unpckl,
4633 bc_v2i64, memopv2i64>;
4635 defm PUNPCKHBW : sse2_unpack<0x68, "punpckhbw", v16i8, X86Unpckh,
4636 bc_v16i8, memopv2i64>;
4637 defm PUNPCKHWD : sse2_unpack<0x69, "punpckhwd", v8i16, X86Unpckh,
4638 bc_v8i16, memopv2i64>;
4639 defm PUNPCKHDQ : sse2_unpack<0x6A, "punpckhdq", v4i32, X86Unpckh,
4640 bc_v4i32, memopv2i64>;
4641 defm PUNPCKHQDQ : sse2_unpack<0x6D, "punpckhqdq", v2i64, X86Unpckh,
4642 bc_v2i64, memopv2i64>;
4644 } // ExeDomain = SSEPackedInt
4646 //===---------------------------------------------------------------------===//
4647 // SSE2 - Packed Integer Extract and Insert
4648 //===---------------------------------------------------------------------===//
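// PINSRW replaces the word element selected by the immediate with the low
// 16 bits of a GPR (or a 16-bit load); PEXTRW zero-extends the selected word
// into a 32-bit or 64-bit GPR.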
4650 let ExeDomain = SSEPackedInt in {
4651 multiclass sse2_pinsrw<bit Is2Addr = 1> {
4652 def rri : Ii8<0xC4, MRMSrcReg,
4653 (outs VR128:$dst), (ins VR128:$src1,
4654 GR32orGR64:$src2, u8imm:$src3),
4656 "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
4657 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4659 (X86pinsrw VR128:$src1, GR32orGR64:$src2, imm:$src3))],
4660 IIC_SSE_PINSRW>, Sched<[WriteShuffle]>;
4661 def rmi : Ii8<0xC4, MRMSrcMem,
4662 (outs VR128:$dst), (ins VR128:$src1,
4663 i16mem:$src2, u8imm:$src3),
4665 "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
4666 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4668 (X86pinsrw VR128:$src1, (extloadi16 addr:$src2),
4669 imm:$src3))], IIC_SSE_PINSRW>,
4670 Sched<[WriteShuffleLd, ReadAfterLd]>;
4674 let Predicates = [HasAVX] in
4675 def VPEXTRWri : Ii8<0xC5, MRMSrcReg,
4676 (outs GR32orGR64:$dst), (ins VR128:$src1, u8imm:$src2),
4677 "vpextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
4678 [(set GR32orGR64:$dst, (X86pextrw (v8i16 VR128:$src1),
4679 imm:$src2))]>, PD, VEX,
4680 Sched<[WriteShuffle]>;
4681 def PEXTRWri : PDIi8<0xC5, MRMSrcReg,
4682 (outs GR32orGR64:$dst), (ins VR128:$src1, u8imm:$src2),
4683 "pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
4684 [(set GR32orGR64:$dst, (X86pextrw (v8i16 VR128:$src1),
4685 imm:$src2))], IIC_SSE_PEXTRW>,
4686 Sched<[WriteShuffleLd, ReadAfterLd]>;
4689 let Predicates = [HasAVX] in
4690 defm VPINSRW : sse2_pinsrw<0>, PD, VEX_4V;
4692 let Predicates = [UseSSE2], Constraints = "$src1 = $dst" in
4693 defm PINSRW : sse2_pinsrw, PD;
4695 } // ExeDomain = SSEPackedInt
4697 //===---------------------------------------------------------------------===//
4698 // SSE2 - Packed Mask Creation
4699 //===---------------------------------------------------------------------===//
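// PMOVMSKB gathers the most significant bit of every byte into the low bits
// of a GPR (16 bits for an XMM source, 32 bits for a YMM source) and zeroes
// the remaining destination bits.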
4701 let ExeDomain = SSEPackedInt, SchedRW = [WriteVecLogic] in {
4703 def VPMOVMSKBrr : VPDI<0xD7, MRMSrcReg, (outs GR32orGR64:$dst),
4705 "pmovmskb\t{$src, $dst|$dst, $src}",
4706 [(set GR32orGR64:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))],
4707 IIC_SSE_MOVMSK>, VEX;
4709 let Predicates = [HasAVX2] in {
4710 def VPMOVMSKBYrr : VPDI<0xD7, MRMSrcReg, (outs GR32orGR64:$dst),
4712 "pmovmskb\t{$src, $dst|$dst, $src}",
4713 [(set GR32orGR64:$dst, (int_x86_avx2_pmovmskb VR256:$src))]>,
4717 def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (outs GR32orGR64:$dst), (ins VR128:$src),
4718 "pmovmskb\t{$src, $dst|$dst, $src}",
4719 [(set GR32orGR64:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))],
4722 } // ExeDomain = SSEPackedInt
4724 //===---------------------------------------------------------------------===//
4725 // SSE2 - Conditional Store
4726 //===---------------------------------------------------------------------===//
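// MASKMOVDQU stores to [EDI]/[RDI] only those bytes of $src whose
// corresponding $mask byte has its most significant bit set, using a
// non-temporal hint; the remaining destination bytes are left untouched.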
4728 let ExeDomain = SSEPackedInt, SchedRW = [WriteStore] in {
4730 let Uses = [EDI], Predicates = [HasAVX,Not64BitMode] in
4731 def VMASKMOVDQU : VPDI<0xF7, MRMSrcReg, (outs),
4732 (ins VR128:$src, VR128:$mask),
4733 "maskmovdqu\t{$mask, $src|$src, $mask}",
4734 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)],
4735 IIC_SSE_MASKMOV>, VEX;
4736 let Uses = [RDI], Predicates = [HasAVX,In64BitMode] in
4737 def VMASKMOVDQU64 : VPDI<0xF7, MRMSrcReg, (outs),
4738 (ins VR128:$src, VR128:$mask),
4739 "maskmovdqu\t{$mask, $src|$src, $mask}",
4740 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)],
4741 IIC_SSE_MASKMOV>, VEX;
4743 let Uses = [EDI], Predicates = [UseSSE2,Not64BitMode] in
4744 def MASKMOVDQU : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
4745 "maskmovdqu\t{$mask, $src|$src, $mask}",
4746 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)],
4748 let Uses = [RDI], Predicates = [UseSSE2,In64BitMode] in
4749 def MASKMOVDQU64 : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
4750 "maskmovdqu\t{$mask, $src|$src, $mask}",
4751 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)],
4754 } // ExeDomain = SSEPackedInt
4756 //===---------------------------------------------------------------------===//
4757 // SSE2 - Move Doubleword
4758 //===---------------------------------------------------------------------===//
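// MOVD moves 32 bits between a GPR or memory and the low element of an XMM
// register; when the destination is an XMM register the hardware zeroes the
// upper elements (the explicit zero-extending patterns appear further below).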
4760 //===---------------------------------------------------------------------===//
4761 // Move Int Doubleword to Packed Doubleword Int
4763 def VMOVDI2PDIrr : VS2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
4764 "movd\t{$src, $dst|$dst, $src}",
4766 (v4i32 (scalar_to_vector GR32:$src)))], IIC_SSE_MOVDQ>,
4767 VEX, Sched<[WriteMove]>;
4768 def VMOVDI2PDIrm : VS2I<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
4769 "movd\t{$src, $dst|$dst, $src}",
4771 (v4i32 (scalar_to_vector (loadi32 addr:$src))))],
4773 VEX, Sched<[WriteLoad]>;
4774 def VMOV64toPQIrr : VRS2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
4775 "movq\t{$src, $dst|$dst, $src}",
4777 (v2i64 (scalar_to_vector GR64:$src)))],
4778 IIC_SSE_MOVDQ>, VEX, Sched<[WriteMove]>;
4779 let isCodeGenOnly = 1 in
4780 def VMOV64toSDrr : VRS2I<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
4781 "movq\t{$src, $dst|$dst, $src}",
4782 [(set FR64:$dst, (bitconvert GR64:$src))],
4783 IIC_SSE_MOVDQ>, VEX, Sched<[WriteMove]>;
4785 def MOVDI2PDIrr : S2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
4786 "movd\t{$src, $dst|$dst, $src}",
4788 (v4i32 (scalar_to_vector GR32:$src)))], IIC_SSE_MOVDQ>,
4790 def MOVDI2PDIrm : S2I<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
4791 "movd\t{$src, $dst|$dst, $src}",
4793 (v4i32 (scalar_to_vector (loadi32 addr:$src))))],
4794 IIC_SSE_MOVDQ>, Sched<[WriteLoad]>;
4795 def MOV64toPQIrr : RS2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
4796 "mov{d|q}\t{$src, $dst|$dst, $src}",
4798 (v2i64 (scalar_to_vector GR64:$src)))],
4799 IIC_SSE_MOVDQ>, Sched<[WriteMove]>;
4800 let isCodeGenOnly = 1 in
4801 def MOV64toSDrr : RS2I<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
4802 "mov{d|q}\t{$src, $dst|$dst, $src}",
4803 [(set FR64:$dst, (bitconvert GR64:$src))],
4804 IIC_SSE_MOVDQ>, Sched<[WriteMove]>;
4806 //===---------------------------------------------------------------------===//
4807 // Move Int Doubleword to Single Scalar
4809 let isCodeGenOnly = 1 in {
4810 def VMOVDI2SSrr : VS2I<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
4811 "movd\t{$src, $dst|$dst, $src}",
4812 [(set FR32:$dst, (bitconvert GR32:$src))],
4813 IIC_SSE_MOVDQ>, VEX, Sched<[WriteMove]>;
4815 def VMOVDI2SSrm : VS2I<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
4816 "movd\t{$src, $dst|$dst, $src}",
4817 [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))],
4819 VEX, Sched<[WriteLoad]>;
4820 def MOVDI2SSrr : S2I<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
4821 "movd\t{$src, $dst|$dst, $src}",
4822 [(set FR32:$dst, (bitconvert GR32:$src))],
4823 IIC_SSE_MOVDQ>, Sched<[WriteMove]>;
4825 def MOVDI2SSrm : S2I<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
4826 "movd\t{$src, $dst|$dst, $src}",
4827 [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))],
4828 IIC_SSE_MOVDQ>, Sched<[WriteLoad]>;
4831 //===---------------------------------------------------------------------===//
4832 // Move the first element of a Packed Doubleword Int to a Doubleword Int
4834 def VMOVPDI2DIrr : VS2I<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
4835 "movd\t{$src, $dst|$dst, $src}",
4836 [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
4837 (iPTR 0)))], IIC_SSE_MOVD_ToGP>, VEX,
4839 def VMOVPDI2DImr : VS2I<0x7E, MRMDestMem, (outs),
4840 (ins i32mem:$dst, VR128:$src),
4841 "movd\t{$src, $dst|$dst, $src}",
4842 [(store (i32 (vector_extract (v4i32 VR128:$src),
4843 (iPTR 0))), addr:$dst)], IIC_SSE_MOVDQ>,
4844 VEX, Sched<[WriteStore]>;
4845 def MOVPDI2DIrr : S2I<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
4846 "movd\t{$src, $dst|$dst, $src}",
4847 [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
4848 (iPTR 0)))], IIC_SSE_MOVD_ToGP>,
4850 def MOVPDI2DImr : S2I<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR128:$src),
4851 "movd\t{$src, $dst|$dst, $src}",
4852 [(store (i32 (vector_extract (v4i32 VR128:$src),
4853 (iPTR 0))), addr:$dst)],
4854 IIC_SSE_MOVDQ>, Sched<[WriteStore]>;
4856 def : Pat<(v8i32 (X86Vinsert (v8i32 immAllZerosV), GR32:$src2, (iPTR 0))),
4857 (SUBREG_TO_REG (i32 0), (VMOVDI2PDIrr GR32:$src2), sub_xmm)>;
4859 def : Pat<(v4i64 (X86Vinsert (bc_v4i64 (v8i32 immAllZerosV)), GR64:$src2, (iPTR 0))),
4860 (SUBREG_TO_REG (i32 0), (VMOV64toPQIrr GR64:$src2), sub_xmm)>;
4862 def : Pat<(v8i32 (X86Vinsert undef, GR32:$src2, (iPTR 0))),
4863 (SUBREG_TO_REG (i32 0), (VMOVDI2PDIrr GR32:$src2), sub_xmm)>;
4865 def : Pat<(v4i64 (X86Vinsert undef, GR64:$src2, (iPTR 0))),
4866 (SUBREG_TO_REG (i32 0), (VMOV64toPQIrr GR64:$src2), sub_xmm)>;
4868 //===---------------------------------------------------------------------===//
4869 // Move the first element of a Packed Quadword Int to a Quadword Int
4871 let SchedRW = [WriteMove] in {
4872 def VMOVPQIto64rr : VRS2I<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
4873 "movq\t{$src, $dst|$dst, $src}",
4874 [(set GR64:$dst, (vector_extract (v2i64 VR128:$src),
4879 def MOVPQIto64rr : RS2I<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
4880 "mov{d|q}\t{$src, $dst|$dst, $src}",
4881 [(set GR64:$dst, (vector_extract (v2i64 VR128:$src),
4886 //===---------------------------------------------------------------------===//
4887 // Bitcast FR64 <-> GR64
4889 let isCodeGenOnly = 1 in {
4890 let Predicates = [UseAVX] in
4891 def VMOV64toSDrm : VS2SI<0x7E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
4892 "movq\t{$src, $dst|$dst, $src}",
4893 [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))]>,
4894 VEX, Sched<[WriteLoad]>;
4895 def VMOVSDto64rr : VRS2I<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
4896 "movq\t{$src, $dst|$dst, $src}",
4897 [(set GR64:$dst, (bitconvert FR64:$src))],
4898 IIC_SSE_MOVDQ>, VEX, Sched<[WriteMove]>;
4899 def VMOVSDto64mr : VRS2I<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
4900 "movq\t{$src, $dst|$dst, $src}",
4901 [(store (i64 (bitconvert FR64:$src)), addr:$dst)],
4902 IIC_SSE_MOVDQ>, VEX, Sched<[WriteStore]>;
4904 def MOV64toSDrm : S2SI<0x7E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
4905 "movq\t{$src, $dst|$dst, $src}",
4906 [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))],
4907 IIC_SSE_MOVDQ>, Sched<[WriteLoad]>;
4908 def MOVSDto64rr : RS2I<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
4909 "mov{d|q}\t{$src, $dst|$dst, $src}",
4910 [(set GR64:$dst, (bitconvert FR64:$src))],
4911 IIC_SSE_MOVD_ToGP>, Sched<[WriteMove]>;
4912 def MOVSDto64mr : RS2I<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
4913 "movq\t{$src, $dst|$dst, $src}",
4914 [(store (i64 (bitconvert FR64:$src)), addr:$dst)],
4915 IIC_SSE_MOVDQ>, Sched<[WriteStore]>;
4918 //===---------------------------------------------------------------------===//
4919 // Move Scalar Single to Doubleword Int
4921 let isCodeGenOnly = 1 in {
4922 def VMOVSS2DIrr : VS2I<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
4923 "movd\t{$src, $dst|$dst, $src}",
4924 [(set GR32:$dst, (bitconvert FR32:$src))],
4925 IIC_SSE_MOVD_ToGP>, VEX, Sched<[WriteMove]>;
4926 def VMOVSS2DImr : VS2I<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
4927 "movd\t{$src, $dst|$dst, $src}",
4928 [(store (i32 (bitconvert FR32:$src)), addr:$dst)],
4929 IIC_SSE_MOVDQ>, VEX, Sched<[WriteStore]>;
4930 def MOVSS2DIrr : S2I<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
4931 "movd\t{$src, $dst|$dst, $src}",
4932 [(set GR32:$dst, (bitconvert FR32:$src))],
4933 IIC_SSE_MOVD_ToGP>, Sched<[WriteMove]>;
4934 def MOVSS2DImr : S2I<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
4935 "movd\t{$src, $dst|$dst, $src}",
4936 [(store (i32 (bitconvert FR32:$src)), addr:$dst)],
4937 IIC_SSE_MOVDQ>, Sched<[WriteStore]>;
4940 //===---------------------------------------------------------------------===//
4941 // Patterns and instructions describing zero-extending movd/movq into an XMM register
4943 let isCodeGenOnly = 1, SchedRW = [WriteMove] in {
4944 let AddedComplexity = 15 in {
4945 def VMOVZQI2PQIrr : VS2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
4946 "movq\t{$src, $dst|$dst, $src}", // X86-64 only
4947 [(set VR128:$dst, (v2i64 (X86vzmovl
4948 (v2i64 (scalar_to_vector GR64:$src)))))],
4951 def MOVZQI2PQIrr : RS2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
4952 "mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only
4953 [(set VR128:$dst, (v2i64 (X86vzmovl
4954 (v2i64 (scalar_to_vector GR64:$src)))))],
4957 } // isCodeGenOnly, SchedRW
4959 let Predicates = [UseAVX] in {
4960 let AddedComplexity = 15 in
4961 def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector GR32:$src)))),
4962 (VMOVDI2PDIrr GR32:$src)>;
4964 // AVX 128-bit movd/movq instructions write zeros in the high 128 bits.
4965 let AddedComplexity = 20 in {
4966 def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector (loadi32 addr:$src))))),
4967 (VMOVDI2PDIrm addr:$src)>;
4968 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
4969 (VMOVDI2PDIrm addr:$src)>;
4970 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
4971 (VMOVDI2PDIrm addr:$src)>;
4973 // Use regular 128-bit instructions to match 256-bit scalar_to_vec+zext.
4974 def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
4975 (v4i32 (scalar_to_vector GR32:$src)),(iPTR 0)))),
4976 (SUBREG_TO_REG (i32 0), (VMOVDI2PDIrr GR32:$src), sub_xmm)>;
4977 def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
4978 (v2i64 (scalar_to_vector GR64:$src)),(iPTR 0)))),
4979 (SUBREG_TO_REG (i64 0), (VMOVZQI2PQIrr GR64:$src), sub_xmm)>;
4982 let Predicates = [UseSSE2] in {
4983 let AddedComplexity = 15 in
4984 def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector GR32:$src)))),
4985 (MOVDI2PDIrr GR32:$src)>;
4987 let AddedComplexity = 20 in {
4988 def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector (loadi32 addr:$src))))),
4989 (MOVDI2PDIrm addr:$src)>;
4990 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
4991 (MOVDI2PDIrm addr:$src)>;
4992 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
4993 (MOVDI2PDIrm addr:$src)>;
4997 // These are the correct encodings of the instructions so that we know how to
4998 // read correct assembly, even though we continue to emit the wrong ones for
4999 // compatibility with Darwin's buggy assembler.
5000 def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
5001 (MOV64toPQIrr VR128:$dst, GR64:$src), 0>;
5002 def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
5003 (MOVPQIto64rr GR64:$dst, VR128:$src), 0>;
5004 // Allow "vmovd" but print "vmovq" since we don't need compatibility for AVX.
5005 def : InstAlias<"vmovd\t{$src, $dst|$dst, $src}",
5006 (VMOV64toPQIrr VR128:$dst, GR64:$src), 0>;
5007 def : InstAlias<"vmovd\t{$src, $dst|$dst, $src}",
5008 (VMOVPQIto64rr GR64:$dst, VR128:$src), 0>;
5010 //===---------------------------------------------------------------------===//
5011 // SSE2 - Move Quadword
5012 //===---------------------------------------------------------------------===//
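// The MOVQ forms below move 64 bits between memory (or another XMM register)
// and the low quadword of an XMM register; loads and register-to-register
// moves zero the upper 64 bits of the destination.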
5014 //===---------------------------------------------------------------------===//
5015 // Move Quadword Int to Packed Quadword Int
5018 let SchedRW = [WriteLoad] in {
5019 def VMOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
5020 "vmovq\t{$src, $dst|$dst, $src}",
5022 (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
5023 VEX, Requires<[UseAVX]>;
5024 def MOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
5025 "movq\t{$src, $dst|$dst, $src}",
5027 (v2i64 (scalar_to_vector (loadi64 addr:$src))))],
5029 Requires<[UseSSE2]>; // SSE2 instruction with XS Prefix
5032 //===---------------------------------------------------------------------===//
5033 // Move Packed Quadword Int to Quadword Int
5035 let SchedRW = [WriteStore] in {
5036 def VMOVPQI2QImr : VS2I<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
5037 "movq\t{$src, $dst|$dst, $src}",
5038 [(store (i64 (vector_extract (v2i64 VR128:$src),
5039 (iPTR 0))), addr:$dst)],
5040 IIC_SSE_MOVDQ>, VEX;
5041 def MOVPQI2QImr : S2I<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
5042 "movq\t{$src, $dst|$dst, $src}",
5043 [(store (i64 (vector_extract (v2i64 VR128:$src),
5044 (iPTR 0))), addr:$dst)],
5048 // For disassembler only
5049 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0,
5050 SchedRW = [WriteVecLogic] in {
5051 def VMOVPQI2QIrr : VS2I<0xD6, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
5052 "movq\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVQ_RR>, VEX;
5053 def MOVPQI2QIrr : S2I<0xD6, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
5054 "movq\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVQ_RR>;
5057 //===---------------------------------------------------------------------===//
5058 // Store / copy the lower 64 bits of an XMM register.
5060 let Predicates = [UseAVX] in
5061 def : Pat<(int_x86_sse2_storel_dq addr:$dst, VR128:$src),
5062 (VMOVPQI2QImr addr:$dst, VR128:$src)>;
5063 let Predicates = [UseSSE2] in
5064 def : Pat<(int_x86_sse2_storel_dq addr:$dst, VR128:$src),
5065 (MOVPQI2QImr addr:$dst, VR128:$src)>;
5067 let isCodeGenOnly = 1, AddedComplexity = 20 in {
5068 def VMOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
5069 "vmovq\t{$src, $dst|$dst, $src}",
5071 (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
5072 (loadi64 addr:$src))))))],
5074 XS, VEX, Requires<[UseAVX]>, Sched<[WriteLoad]>;
5076 def MOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
5077 "movq\t{$src, $dst|$dst, $src}",
5079 (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
5080 (loadi64 addr:$src))))))],
5082 XS, Requires<[UseSSE2]>, Sched<[WriteLoad]>;
5085 let Predicates = [UseAVX], AddedComplexity = 20 in {
5086 def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4f32 addr:$src)))),
5087 (VMOVZQI2PQIrm addr:$src)>;
5088 def : Pat<(v2i64 (X86vzload addr:$src)),
5089 (VMOVZQI2PQIrm addr:$src)>;
5092 let Predicates = [UseSSE2], AddedComplexity = 20 in {
5093 def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4f32 addr:$src)))),
5094 (MOVZQI2PQIrm addr:$src)>;
5095 def : Pat<(v2i64 (X86vzload addr:$src)), (MOVZQI2PQIrm addr:$src)>;
5098 let Predicates = [HasAVX] in {
5099 def : Pat<(v4i64 (alignedX86vzload addr:$src)),
5100 (SUBREG_TO_REG (i32 0), (VMOVAPSrm addr:$src), sub_xmm)>;
5101 def : Pat<(v4i64 (X86vzload addr:$src)),
5102 (SUBREG_TO_REG (i32 0), (VMOVUPSrm addr:$src), sub_xmm)>;
5105 //===---------------------------------------------------------------------===//
5106 // Move from XMM to XMM and clear the upper 64 bits. Note: there is a bug in the
5107 // IA-32 documentation; movq xmm1, xmm2 does clear the high bits.
5109 let SchedRW = [WriteVecLogic] in {
5110 let AddedComplexity = 15 in
5111 def VMOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
5112 "vmovq\t{$src, $dst|$dst, $src}",
5113 [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))],
5115 XS, VEX, Requires<[UseAVX]>;
5116 let AddedComplexity = 15 in
5117 def MOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
5118 "movq\t{$src, $dst|$dst, $src}",
5119 [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))],
5121 XS, Requires<[UseSSE2]>;
5124 let isCodeGenOnly = 1, SchedRW = [WriteVecLogicLd] in {
5125 let AddedComplexity = 20 in
5126 def VMOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
5127 "vmovq\t{$src, $dst|$dst, $src}",
5128 [(set VR128:$dst, (v2i64 (X86vzmovl
5129 (loadv2i64 addr:$src))))],
5131 XS, VEX, Requires<[UseAVX]>;
5132 let AddedComplexity = 20 in {
5133 def MOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
5134 "movq\t{$src, $dst|$dst, $src}",
5135 [(set VR128:$dst, (v2i64 (X86vzmovl
5136 (loadv2i64 addr:$src))))],
5138 XS, Requires<[UseSSE2]>;
5140 } // isCodeGenOnly, SchedRW
5142 let AddedComplexity = 20 in {
5143 let Predicates = [UseAVX] in {
5144 def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
5145 (VMOVZPQILo2PQIrr VR128:$src)>;
5147 let Predicates = [UseSSE2] in {
5148 def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
5149 (MOVZPQILo2PQIrr VR128:$src)>;
5153 //===---------------------------------------------------------------------===//
5154 // SSE3 - Replicate Single FP - MOVSHDUP and MOVSLDUP
5155 //===---------------------------------------------------------------------===//
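// MOVSHDUP duplicates the odd-indexed single-precision elements
// ({ s1, s1, s3, s3 } for the 128-bit form) and MOVSLDUP the even-indexed
// ones ({ s0, s0, s2, s2 }).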
5156 multiclass sse3_replicate_sfp<bits<8> op, SDNode OpNode, string OpcodeStr,
5157 ValueType vt, RegisterClass RC, PatFrag mem_frag,
5158 X86MemOperand x86memop> {
5159 def rr : S3SI<op, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
5160 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5161 [(set RC:$dst, (vt (OpNode RC:$src)))],
5162 IIC_SSE_MOV_LH>, Sched<[WriteFShuffle]>;
5163 def rm : S3SI<op, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
5164 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5165 [(set RC:$dst, (OpNode (mem_frag addr:$src)))],
5166 IIC_SSE_MOV_LH>, Sched<[WriteLoad]>;
5169 let Predicates = [HasAVX] in {
5170 defm VMOVSHDUP : sse3_replicate_sfp<0x16, X86Movshdup, "vmovshdup",
5171 v4f32, VR128, loadv4f32, f128mem>, VEX;
5172 defm VMOVSLDUP : sse3_replicate_sfp<0x12, X86Movsldup, "vmovsldup",
5173 v4f32, VR128, loadv4f32, f128mem>, VEX;
5174 defm VMOVSHDUPY : sse3_replicate_sfp<0x16, X86Movshdup, "vmovshdup",
5175 v8f32, VR256, loadv8f32, f256mem>, VEX, VEX_L;
5176 defm VMOVSLDUPY : sse3_replicate_sfp<0x12, X86Movsldup, "vmovsldup",
5177 v8f32, VR256, loadv8f32, f256mem>, VEX, VEX_L;
5179 defm MOVSHDUP : sse3_replicate_sfp<0x16, X86Movshdup, "movshdup", v4f32, VR128,
5180 memopv4f32, f128mem>;
5181 defm MOVSLDUP : sse3_replicate_sfp<0x12, X86Movsldup, "movsldup", v4f32, VR128,
5182 memopv4f32, f128mem>;
5184 let Predicates = [HasAVX] in {
5185 def : Pat<(v4i32 (X86Movshdup VR128:$src)),
5186 (VMOVSHDUPrr VR128:$src)>;
5187 def : Pat<(v4i32 (X86Movshdup (bc_v4i32 (loadv2i64 addr:$src)))),
5188 (VMOVSHDUPrm addr:$src)>;
5189 def : Pat<(v4i32 (X86Movsldup VR128:$src)),
5190 (VMOVSLDUPrr VR128:$src)>;
5191 def : Pat<(v4i32 (X86Movsldup (bc_v4i32 (loadv2i64 addr:$src)))),
5192 (VMOVSLDUPrm addr:$src)>;
5193 def : Pat<(v8i32 (X86Movshdup VR256:$src)),
5194 (VMOVSHDUPYrr VR256:$src)>;
5195 def : Pat<(v8i32 (X86Movshdup (bc_v8i32 (loadv4i64 addr:$src)))),
5196 (VMOVSHDUPYrm addr:$src)>;
5197 def : Pat<(v8i32 (X86Movsldup VR256:$src)),
5198 (VMOVSLDUPYrr VR256:$src)>;
5199 def : Pat<(v8i32 (X86Movsldup (bc_v8i32 (loadv4i64 addr:$src)))),
5200 (VMOVSLDUPYrm addr:$src)>;
5203 let Predicates = [UseSSE3] in {
5204 def : Pat<(v4i32 (X86Movshdup VR128:$src)),
5205 (MOVSHDUPrr VR128:$src)>;
5206 def : Pat<(v4i32 (X86Movshdup (bc_v4i32 (memopv2i64 addr:$src)))),
5207 (MOVSHDUPrm addr:$src)>;
5208 def : Pat<(v4i32 (X86Movsldup VR128:$src)),
5209 (MOVSLDUPrr VR128:$src)>;
5210 def : Pat<(v4i32 (X86Movsldup (bc_v4i32 (memopv2i64 addr:$src)))),
5211 (MOVSLDUPrm addr:$src)>;
5214 //===---------------------------------------------------------------------===//
5215 // SSE3 - Replicate Double FP - MOVDDUP
5216 //===---------------------------------------------------------------------===//
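// MOVDDUP broadcasts the low double-precision element ({ d0, d0 } for the
// 128-bit form). The memory form reads only 64 bits, which is also why it is
// used below to match scalar f64 broadcasts when optimizing for size.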
5218 multiclass sse3_replicate_dfp<string OpcodeStr> {
5219 def rr : S3DI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
5220 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5221 [(set VR128:$dst, (v2f64 (X86Movddup VR128:$src)))],
5222 IIC_SSE_MOV_LH>, Sched<[WriteFShuffle]>;
5223 def rm : S3DI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
5224 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5227 (scalar_to_vector (loadf64 addr:$src)))))],
5228 IIC_SSE_MOV_LH>, Sched<[WriteLoad]>;
5231 // FIXME: Merge with the above class when there are patterns for the ymm version.
5232 multiclass sse3_replicate_dfp_y<string OpcodeStr> {
5233 def rr : S3DI<0x12, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
5234 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5235 [(set VR256:$dst, (v4f64 (X86Movddup VR256:$src)))]>,
5236 Sched<[WriteFShuffle]>;
5237 def rm : S3DI<0x12, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
5238 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5241 (scalar_to_vector (loadf64 addr:$src)))))]>,
5245 let Predicates = [HasAVX] in {
5246 defm VMOVDDUP : sse3_replicate_dfp<"vmovddup">, VEX;
5247 defm VMOVDDUPY : sse3_replicate_dfp_y<"vmovddup">, VEX, VEX_L;
5250 defm MOVDDUP : sse3_replicate_dfp<"movddup">;
5252 let Predicates = [HasAVX] in {
5253 def : Pat<(X86Movddup (loadv2f64 addr:$src)),
5254 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
5255 def : Pat<(X86Movddup (bc_v2f64 (loadv4f32 addr:$src))),
5256 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
5257 def : Pat<(X86Movddup (bc_v2f64 (loadv2i64 addr:$src))),
5258 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
5259 def : Pat<(X86Movddup (bc_v2f64
5260 (v2i64 (scalar_to_vector (loadi64 addr:$src))))),
5261 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
5264 def : Pat<(X86Movddup (loadv4f64 addr:$src)),
5265 (VMOVDDUPYrm addr:$src)>;
5266 def : Pat<(X86Movddup (loadv4i64 addr:$src)),
5267 (VMOVDDUPYrm addr:$src)>;
5268 def : Pat<(X86Movddup (v4i64 (scalar_to_vector (loadi64 addr:$src)))),
5269 (VMOVDDUPYrm addr:$src)>;
5270 def : Pat<(X86Movddup (v4i64 VR256:$src)),
5271 (VMOVDDUPYrr VR256:$src)>;
5274 let Predicates = [UseAVX, OptForSize] in {
5275 def : Pat<(v2f64 (X86VBroadcast (loadf64 addr:$src))),
5276 (VMOVDDUPrm addr:$src)>;
5277 def : Pat<(v2i64 (X86VBroadcast (loadi64 addr:$src))),
5278 (VMOVDDUPrm addr:$src)>;
5281 let Predicates = [UseSSE3] in {
5282 def : Pat<(X86Movddup (memopv2f64 addr:$src)),
5283 (MOVDDUPrm addr:$src)>;
5284 def : Pat<(X86Movddup (bc_v2f64 (memopv4f32 addr:$src))),
5285 (MOVDDUPrm addr:$src)>;
5286 def : Pat<(X86Movddup (bc_v2f64 (memopv2i64 addr:$src))),
5287 (MOVDDUPrm addr:$src)>;
5288 def : Pat<(X86Movddup (bc_v2f64
5289 (v2i64 (scalar_to_vector (loadi64 addr:$src))))),
5290 (MOVDDUPrm addr:$src)>;
5293 //===---------------------------------------------------------------------===//
5294 // SSE3 - Move Unaligned Integer
5295 //===---------------------------------------------------------------------===//
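// LDDQU performs a 16-byte unaligned load (32 bytes for the 256-bit vlddqu).
// Some implementations may read a wider aligned region and shift, so it
// should not be used on memory-mapped I/O, but it can be cheaper than MOVDQU
// for loads that cross a cache-line boundary.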
5297 let SchedRW = [WriteLoad] in {
5298 let Predicates = [HasAVX] in {
5299 def VLDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
5300 "vlddqu\t{$src, $dst|$dst, $src}",
5301 [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>, VEX;
5302 def VLDDQUYrm : S3DI<0xF0, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
5303 "vlddqu\t{$src, $dst|$dst, $src}",
5304 [(set VR256:$dst, (int_x86_avx_ldu_dq_256 addr:$src))]>,
5307 def LDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
5308 "lddqu\t{$src, $dst|$dst, $src}",
5309 [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))],
5313 //===---------------------------------------------------------------------===//
5314 // SSE3 - Arithmetic
5315 //===---------------------------------------------------------------------===//
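// ADDSUBPS/ADDSUBPD subtract in the even-indexed lanes and add in the
// odd-indexed lanes, e.g. for v4f32: { a0-b0, a1+b1, a2-b2, a3+b3 }.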
5317 multiclass sse3_addsub<Intrinsic Int, string OpcodeStr, RegisterClass RC,
5318 X86MemOperand x86memop, OpndItins itins,
5319 PatFrag ld_frag, bit Is2Addr = 1> {
5320 def rr : I<0xD0, MRMSrcReg,
5321 (outs RC:$dst), (ins RC:$src1, RC:$src2),
5323 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5324 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5325 [(set RC:$dst, (Int RC:$src1, RC:$src2))], itins.rr>,
5326 Sched<[itins.Sched]>;
5327 def rm : I<0xD0, MRMSrcMem,
5328 (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
5330 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5331 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5332 [(set RC:$dst, (Int RC:$src1, (ld_frag addr:$src2)))], itins.rr>,
5333 Sched<[itins.Sched.Folded, ReadAfterLd]>;
5336 let Predicates = [HasAVX] in {
5337 let ExeDomain = SSEPackedSingle in {
5338 defm VADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "vaddsubps", VR128,
5339 f128mem, SSE_ALU_F32P, loadv4f32, 0>, XD, VEX_4V;
5340 defm VADDSUBPSY : sse3_addsub<int_x86_avx_addsub_ps_256, "vaddsubps", VR256,
5341 f256mem, SSE_ALU_F32P, loadv8f32, 0>, XD, VEX_4V, VEX_L;
5343 let ExeDomain = SSEPackedDouble in {
5344 defm VADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "vaddsubpd", VR128,
5345 f128mem, SSE_ALU_F64P, loadv2f64, 0>, PD, VEX_4V;
5346 defm VADDSUBPDY : sse3_addsub<int_x86_avx_addsub_pd_256, "vaddsubpd", VR256,
5347 f256mem, SSE_ALU_F64P, loadv4f64, 0>, PD, VEX_4V, VEX_L;
5350 let Constraints = "$src1 = $dst", Predicates = [UseSSE3] in {
5351 let ExeDomain = SSEPackedSingle in
5352 defm ADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "addsubps", VR128,
5353 f128mem, SSE_ALU_F32P, memopv4f32>, XD;
5354 let ExeDomain = SSEPackedDouble in
5355 defm ADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "addsubpd", VR128,
5356 f128mem, SSE_ALU_F64P, memopv2f64>, PD;
5359 // Patterns used to select 'addsub' instructions.
5360 let Predicates = [HasAVX] in {
5361 def : Pat<(v4f32 (X86Addsub (v4f32 VR128:$lhs), (v4f32 VR128:$rhs))),
5362 (VADDSUBPSrr VR128:$lhs, VR128:$rhs)>;
5363 def : Pat<(v4f32 (X86Addsub (v4f32 VR128:$lhs), (loadv4f32 addr:$rhs))),
5364 (VADDSUBPSrm VR128:$lhs, f128mem:$rhs)>;
5365 def : Pat<(v2f64 (X86Addsub (v2f64 VR128:$lhs), (v2f64 VR128:$rhs))),
5366 (VADDSUBPDrr VR128:$lhs, VR128:$rhs)>;
5367 def : Pat<(v2f64 (X86Addsub (v2f64 VR128:$lhs), (loadv2f64 addr:$rhs))),
5368 (VADDSUBPDrm VR128:$lhs, f128mem:$rhs)>;
5370 def : Pat<(v8f32 (X86Addsub (v8f32 VR256:$lhs), (v8f32 VR256:$rhs))),
5371 (VADDSUBPSYrr VR256:$lhs, VR256:$rhs)>;
5372 def : Pat<(v8f32 (X86Addsub (v8f32 VR256:$lhs), (loadv8f32 addr:$rhs))),
5373 (VADDSUBPSYrm VR256:$lhs, f256mem:$rhs)>;
5374 def : Pat<(v4f64 (X86Addsub (v4f64 VR256:$lhs), (v4f64 VR256:$rhs))),
5375 (VADDSUBPDYrr VR256:$lhs, VR256:$rhs)>;
5376 def : Pat<(v4f64 (X86Addsub (v4f64 VR256:$lhs), (loadv4f64 addr:$rhs))),
5377 (VADDSUBPDYrm VR256:$lhs, f256mem:$rhs)>;
5380 let Predicates = [UseSSE3] in {
5381 def : Pat<(v4f32 (X86Addsub (v4f32 VR128:$lhs), (v4f32 VR128:$rhs))),
5382 (ADDSUBPSrr VR128:$lhs, VR128:$rhs)>;
5383 def : Pat<(v4f32 (X86Addsub (v4f32 VR128:$lhs), (memopv4f32 addr:$rhs))),
5384 (ADDSUBPSrm VR128:$lhs, f128mem:$rhs)>;
5385 def : Pat<(v2f64 (X86Addsub (v2f64 VR128:$lhs), (v2f64 VR128:$rhs))),
5386 (ADDSUBPDrr VR128:$lhs, VR128:$rhs)>;
5387 def : Pat<(v2f64 (X86Addsub (v2f64 VR128:$lhs), (memopv2f64 addr:$rhs))),
5388 (ADDSUBPDrm VR128:$lhs, f128mem:$rhs)>;
5391 //===---------------------------------------------------------------------===//
5392 // SSE3 Instructions
5393 //===---------------------------------------------------------------------===//
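// The horizontal operations below add or subtract adjacent pairs of elements,
// e.g. HADDPS produces { a0+a1, a2+a3, b0+b1, b2+b3 }; the 256-bit forms
// operate on each 128-bit lane independently.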
5396 multiclass S3D_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
5397 X86MemOperand x86memop, SDNode OpNode, PatFrag ld_frag,
5399 def rr : S3DI<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
5401 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5402 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5403 [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], IIC_SSE_HADDSUB_RR>,
5406 def rm : S3DI<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
5408 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5409 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5410 [(set RC:$dst, (vt (OpNode RC:$src1, (ld_frag addr:$src2))))],
5411 IIC_SSE_HADDSUB_RM>, Sched<[WriteFAddLd, ReadAfterLd]>;
5413 multiclass S3_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
5414 X86MemOperand x86memop, SDNode OpNode, PatFrag ld_frag,
5416 def rr : S3I<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
5418 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5419 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5420 [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], IIC_SSE_HADDSUB_RR>,
5423 def rm : S3I<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
5425 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5426 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5427 [(set RC:$dst, (vt (OpNode RC:$src1, (ld_frag addr:$src2))))],
5428 IIC_SSE_HADDSUB_RM>, Sched<[WriteFAddLd, ReadAfterLd]>;
5431 let Predicates = [HasAVX] in {
5432 let ExeDomain = SSEPackedSingle in {
5433 defm VHADDPS : S3D_Int<0x7C, "vhaddps", v4f32, VR128, f128mem,
5434 X86fhadd, loadv4f32, 0>, VEX_4V;
5435 defm VHSUBPS : S3D_Int<0x7D, "vhsubps", v4f32, VR128, f128mem,
5436 X86fhsub, loadv4f32, 0>, VEX_4V;
5437 defm VHADDPSY : S3D_Int<0x7C, "vhaddps", v8f32, VR256, f256mem,
5438 X86fhadd, loadv8f32, 0>, VEX_4V, VEX_L;
5439 defm VHSUBPSY : S3D_Int<0x7D, "vhsubps", v8f32, VR256, f256mem,
5440 X86fhsub, loadv8f32, 0>, VEX_4V, VEX_L;
5442 let ExeDomain = SSEPackedDouble in {
5443 defm VHADDPD : S3_Int <0x7C, "vhaddpd", v2f64, VR128, f128mem,
5444 X86fhadd, loadv2f64, 0>, VEX_4V;
5445 defm VHSUBPD : S3_Int <0x7D, "vhsubpd", v2f64, VR128, f128mem,
5446 X86fhsub, loadv2f64, 0>, VEX_4V;
5447 defm VHADDPDY : S3_Int <0x7C, "vhaddpd", v4f64, VR256, f256mem,
5448 X86fhadd, loadv4f64, 0>, VEX_4V, VEX_L;
5449 defm VHSUBPDY : S3_Int <0x7D, "vhsubpd", v4f64, VR256, f256mem,
5450 X86fhsub, loadv4f64, 0>, VEX_4V, VEX_L;
5454 let Constraints = "$src1 = $dst" in {
5455 let ExeDomain = SSEPackedSingle in {
5456 defm HADDPS : S3D_Int<0x7C, "haddps", v4f32, VR128, f128mem, X86fhadd,
5458 defm HSUBPS : S3D_Int<0x7D, "hsubps", v4f32, VR128, f128mem, X86fhsub,
5461 let ExeDomain = SSEPackedDouble in {
5462 defm HADDPD : S3_Int<0x7C, "haddpd", v2f64, VR128, f128mem, X86fhadd,
5464 defm HSUBPD : S3_Int<0x7D, "hsubpd", v2f64, VR128, f128mem, X86fhsub,
5469 //===---------------------------------------------------------------------===//
5470 // SSSE3 - Packed Absolute Instructions
5471 //===---------------------------------------------------------------------===//
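// PABSB/PABSW/PABSD compute the per-element absolute value. Besides the
// intrinsics, the patterns below match the generic expansion
// (x + (x >>s (w-1))) ^ (x >>s (w-1)), so the generic DAG form of abs is also
// selected to these instructions.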
5474 /// SS3I_unop_rm_int - Simple SSSE3 unary op whose type can be v*{i8,i16,i32}.
5475 multiclass SS3I_unop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId128,
5477 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
5479 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5480 [(set VR128:$dst, (IntId128 VR128:$src))], IIC_SSE_PABS_RR>,
5481 Sched<[WriteVecALU]>;
5483 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
5485 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5488 (bitconvert (ld_frag addr:$src))))], IIC_SSE_PABS_RM>,
5489 Sched<[WriteVecALULd]>;
5492 /// SS3I_unop_rm_int_y - Simple SSSE3 unary op whose type can be v*{i8,i16,i32}.
5493 multiclass SS3I_unop_rm_int_y<bits<8> opc, string OpcodeStr,
5494 Intrinsic IntId256> {
5495 def rr256 : SS38I<opc, MRMSrcReg, (outs VR256:$dst),
5497 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5498 [(set VR256:$dst, (IntId256 VR256:$src))]>,
5499 Sched<[WriteVecALU]>;
5501 def rm256 : SS38I<opc, MRMSrcMem, (outs VR256:$dst),
5503 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5506 (bitconvert (loadv4i64 addr:$src))))]>,
5507 Sched<[WriteVecALULd]>;
5510 // Helper fragments to match sext vXi1 to vXiY.
5511 def v16i1sextv16i8 : PatLeaf<(v16i8 (X86pcmpgt (bc_v16i8 (v4i32 immAllZerosV)),
5513 def v8i1sextv8i16 : PatLeaf<(v8i16 (X86vsrai VR128:$src, (i8 15)))>;
5514 def v4i1sextv4i32 : PatLeaf<(v4i32 (X86vsrai VR128:$src, (i8 31)))>;
5515 def v32i1sextv32i8 : PatLeaf<(v32i8 (X86pcmpgt (bc_v32i8 (v8i32 immAllZerosV)),
5517 def v16i1sextv16i16: PatLeaf<(v16i16 (X86vsrai VR256:$src, (i8 15)))>;
5518 def v8i1sextv8i32 : PatLeaf<(v8i32 (X86vsrai VR256:$src, (i8 31)))>;
5520 let Predicates = [HasAVX] in {
5521 defm VPABSB : SS3I_unop_rm_int<0x1C, "vpabsb", int_x86_ssse3_pabs_b_128,
5523 defm VPABSW : SS3I_unop_rm_int<0x1D, "vpabsw", int_x86_ssse3_pabs_w_128,
5525 defm VPABSD : SS3I_unop_rm_int<0x1E, "vpabsd", int_x86_ssse3_pabs_d_128,
5529 (bc_v2i64 (v16i1sextv16i8)),
5530 (bc_v2i64 (add (v16i8 VR128:$src), (v16i1sextv16i8)))),
5531 (VPABSBrr128 VR128:$src)>;
5533 (bc_v2i64 (v8i1sextv8i16)),
5534 (bc_v2i64 (add (v8i16 VR128:$src), (v8i1sextv8i16)))),
5535 (VPABSWrr128 VR128:$src)>;
5537 (bc_v2i64 (v4i1sextv4i32)),
5538 (bc_v2i64 (add (v4i32 VR128:$src), (v4i1sextv4i32)))),
5539 (VPABSDrr128 VR128:$src)>;
5542 let Predicates = [HasAVX2] in {
5543 defm VPABSB : SS3I_unop_rm_int_y<0x1C, "vpabsb",
5544 int_x86_avx2_pabs_b>, VEX, VEX_L;
5545 defm VPABSW : SS3I_unop_rm_int_y<0x1D, "vpabsw",
5546 int_x86_avx2_pabs_w>, VEX, VEX_L;
5547 defm VPABSD : SS3I_unop_rm_int_y<0x1E, "vpabsd",
5548 int_x86_avx2_pabs_d>, VEX, VEX_L;
5551 (bc_v4i64 (v32i1sextv32i8)),
5552 (bc_v4i64 (add (v32i8 VR256:$src), (v32i1sextv32i8)))),
5553 (VPABSBrr256 VR256:$src)>;
5555 (bc_v4i64 (v16i1sextv16i16)),
5556 (bc_v4i64 (add (v16i16 VR256:$src), (v16i1sextv16i16)))),
5557 (VPABSWrr256 VR256:$src)>;
5559 (bc_v4i64 (v8i1sextv8i32)),
5560 (bc_v4i64 (add (v8i32 VR256:$src), (v8i1sextv8i32)))),
5561 (VPABSDrr256 VR256:$src)>;
5564 defm PABSB : SS3I_unop_rm_int<0x1C, "pabsb", int_x86_ssse3_pabs_b_128,
5566 defm PABSW : SS3I_unop_rm_int<0x1D, "pabsw", int_x86_ssse3_pabs_w_128,
5568 defm PABSD : SS3I_unop_rm_int<0x1E, "pabsd", int_x86_ssse3_pabs_d_128,
5571 let Predicates = [HasSSSE3] in {
5573 (bc_v2i64 (v16i1sextv16i8)),
5574 (bc_v2i64 (add (v16i8 VR128:$src), (v16i1sextv16i8)))),
5575 (PABSBrr128 VR128:$src)>;
5577 (bc_v2i64 (v8i1sextv8i16)),
5578 (bc_v2i64 (add (v8i16 VR128:$src), (v8i1sextv8i16)))),
5579 (PABSWrr128 VR128:$src)>;
5581 (bc_v2i64 (v4i1sextv4i32)),
5582 (bc_v2i64 (add (v4i32 VR128:$src), (v4i1sextv4i32)))),
5583 (PABSDrr128 VR128:$src)>;
5586 //===---------------------------------------------------------------------===//
5587 // SSSE3 - Packed Binary Operator Instructions
5588 //===---------------------------------------------------------------------===//
5590 let Sched = WriteVecALU in {
5591 def SSE_PHADDSUBD : OpndItins<
5592 IIC_SSE_PHADDSUBD_RR, IIC_SSE_PHADDSUBD_RM
5594 def SSE_PHADDSUBSW : OpndItins<
5595 IIC_SSE_PHADDSUBSW_RR, IIC_SSE_PHADDSUBSW_RM
5597 def SSE_PHADDSUBW : OpndItins<
5598 IIC_SSE_PHADDSUBW_RR, IIC_SSE_PHADDSUBW_RM
5601 let Sched = WriteShuffle in
5602 def SSE_PSHUFB : OpndItins<
5603 IIC_SSE_PSHUFB_RR, IIC_SSE_PSHUFB_RM
5605 let Sched = WriteVecALU in
5606 def SSE_PSIGN : OpndItins<
5607 IIC_SSE_PSIGN_RR, IIC_SSE_PSIGN_RM
5609 let Sched = WriteVecIMul in
5610 def SSE_PMULHRSW : OpndItins<
5611 IIC_SSE_PMULHRSW, IIC_SSE_PMULHRSW
/// SS3I_binop_rm - Simple SSSE3 binary op
5615 multiclass SS3I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
5616 ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
5617 X86MemOperand x86memop, OpndItins itins,
5619 let isCommutable = 1 in
5620 def rr : SS38I<opc, MRMSrcReg, (outs RC:$dst),
5621 (ins RC:$src1, RC:$src2),
5623 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5624 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5625 [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2)))], itins.rr>,
5626 Sched<[itins.Sched]>;
5627 def rm : SS38I<opc, MRMSrcMem, (outs RC:$dst),
5628 (ins RC:$src1, x86memop:$src2),
5630 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5631 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5633 (OpVT (OpNode RC:$src1,
5634 (bitconvert (memop_frag addr:$src2)))))], itins.rm>,
5635 Sched<[itins.Sched.Folded, ReadAfterLd]>;
5638 /// SS3I_binop_rm_int - Simple SSSE3 bin op whose type can be v*{i8,i16,i32}.
5639 multiclass SS3I_binop_rm_int<bits<8> opc, string OpcodeStr,
5640 Intrinsic IntId128, OpndItins itins,
5641 PatFrag ld_frag, bit Is2Addr = 1> {
5642 let isCommutable = 1 in
5643 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
5644 (ins VR128:$src1, VR128:$src2),
5646 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5647 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5648 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
5649 Sched<[itins.Sched]>;
5650 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
5651 (ins VR128:$src1, i128mem:$src2),
5653 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5654 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5656 (IntId128 VR128:$src1,
5657 (bitconvert (ld_frag addr:$src2))))]>,
5658 Sched<[itins.Sched.Folded, ReadAfterLd]>;
5661 multiclass SS3I_binop_rm_int_y<bits<8> opc, string OpcodeStr,
5663 X86FoldableSchedWrite Sched> {
5664 let isCommutable = 1 in
5665 def rr256 : SS38I<opc, MRMSrcReg, (outs VR256:$dst),
5666 (ins VR256:$src1, VR256:$src2),
5667 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5668 [(set VR256:$dst, (IntId256 VR256:$src1, VR256:$src2))]>,
5670 def rm256 : SS38I<opc, MRMSrcMem, (outs VR256:$dst),
5671 (ins VR256:$src1, i256mem:$src2),
5672 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5674 (IntId256 VR256:$src1, (bitconvert (loadv4i64 addr:$src2))))]>,
5675 Sched<[Sched.Folded, ReadAfterLd]>;
5678 let ImmT = NoImm, Predicates = [HasAVX] in {
5679 let isCommutable = 0 in {
5680 defm VPHADDW : SS3I_binop_rm<0x01, "vphaddw", X86hadd, v8i16, VR128,
5682 SSE_PHADDSUBW, 0>, VEX_4V;
5683 defm VPHADDD : SS3I_binop_rm<0x02, "vphaddd", X86hadd, v4i32, VR128,
5685 SSE_PHADDSUBD, 0>, VEX_4V;
5686 defm VPHSUBW : SS3I_binop_rm<0x05, "vphsubw", X86hsub, v8i16, VR128,
5688 SSE_PHADDSUBW, 0>, VEX_4V;
5689 defm VPHSUBD : SS3I_binop_rm<0x06, "vphsubd", X86hsub, v4i32, VR128,
5691 SSE_PHADDSUBD, 0>, VEX_4V;
5692 defm VPSIGNB : SS3I_binop_rm<0x08, "vpsignb", X86psign, v16i8, VR128,
5694 SSE_PSIGN, 0>, VEX_4V;
5695 defm VPSIGNW : SS3I_binop_rm<0x09, "vpsignw", X86psign, v8i16, VR128,
5697 SSE_PSIGN, 0>, VEX_4V;
5698 defm VPSIGND : SS3I_binop_rm<0x0A, "vpsignd", X86psign, v4i32, VR128,
5700 SSE_PSIGN, 0>, VEX_4V;
5701 defm VPSHUFB : SS3I_binop_rm<0x00, "vpshufb", X86pshufb, v16i8, VR128,
5703 SSE_PSHUFB, 0>, VEX_4V;
5704 defm VPHADDSW : SS3I_binop_rm_int<0x03, "vphaddsw",
5705 int_x86_ssse3_phadd_sw_128,
5706 SSE_PHADDSUBSW, loadv2i64, 0>, VEX_4V;
5707 defm VPHSUBSW : SS3I_binop_rm_int<0x07, "vphsubsw",
5708 int_x86_ssse3_phsub_sw_128,
5709 SSE_PHADDSUBSW, loadv2i64, 0>, VEX_4V;
5710 defm VPMADDUBSW : SS3I_binop_rm_int<0x04, "vpmaddubsw",
5711 int_x86_ssse3_pmadd_ub_sw_128,
5712 SSE_PMADD, loadv2i64, 0>, VEX_4V;
5714 defm VPMULHRSW : SS3I_binop_rm_int<0x0B, "vpmulhrsw",
5715 int_x86_ssse3_pmul_hr_sw_128,
5716 SSE_PMULHRSW, loadv2i64, 0>, VEX_4V;
5719 let ImmT = NoImm, Predicates = [HasAVX2] in {
5720 let isCommutable = 0 in {
5721 defm VPHADDWY : SS3I_binop_rm<0x01, "vphaddw", X86hadd, v16i16, VR256,
5723 SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
5724 defm VPHADDDY : SS3I_binop_rm<0x02, "vphaddd", X86hadd, v8i32, VR256,
5726 SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
5727 defm VPHSUBWY : SS3I_binop_rm<0x05, "vphsubw", X86hsub, v16i16, VR256,
5729 SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
5730 defm VPHSUBDY : SS3I_binop_rm<0x06, "vphsubd", X86hsub, v8i32, VR256,
5732 SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
5733 defm VPSIGNBY : SS3I_binop_rm<0x08, "vpsignb", X86psign, v32i8, VR256,
5735 SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
5736 defm VPSIGNWY : SS3I_binop_rm<0x09, "vpsignw", X86psign, v16i16, VR256,
5738 SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
5739 defm VPSIGNDY : SS3I_binop_rm<0x0A, "vpsignd", X86psign, v8i32, VR256,
5741 SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
5742 defm VPSHUFBY : SS3I_binop_rm<0x00, "vpshufb", X86pshufb, v32i8, VR256,
5744 SSE_PSHUFB, 0>, VEX_4V, VEX_L;
5745 defm VPHADDSW : SS3I_binop_rm_int_y<0x03, "vphaddsw",
5746 int_x86_avx2_phadd_sw,
5747 WriteVecALU>, VEX_4V, VEX_L;
5748 defm VPHSUBSW : SS3I_binop_rm_int_y<0x07, "vphsubsw",
5749 int_x86_avx2_phsub_sw,
5750 WriteVecALU>, VEX_4V, VEX_L;
5751 defm VPMADDUBSW : SS3I_binop_rm_int_y<0x04, "vpmaddubsw",
5752 int_x86_avx2_pmadd_ub_sw,
5753 WriteVecIMul>, VEX_4V, VEX_L;
5755 defm VPMULHRSW : SS3I_binop_rm_int_y<0x0B, "vpmulhrsw",
5756 int_x86_avx2_pmul_hr_sw,
5757 WriteVecIMul>, VEX_4V, VEX_L;
5760 // None of these have i8 immediate fields.
5761 let ImmT = NoImm, Constraints = "$src1 = $dst" in {
5762 let isCommutable = 0 in {
5763 defm PHADDW : SS3I_binop_rm<0x01, "phaddw", X86hadd, v8i16, VR128,
5764 memopv2i64, i128mem, SSE_PHADDSUBW>;
5765 defm PHADDD : SS3I_binop_rm<0x02, "phaddd", X86hadd, v4i32, VR128,
5766 memopv2i64, i128mem, SSE_PHADDSUBD>;
5767 defm PHSUBW : SS3I_binop_rm<0x05, "phsubw", X86hsub, v8i16, VR128,
5768 memopv2i64, i128mem, SSE_PHADDSUBW>;
5769 defm PHSUBD : SS3I_binop_rm<0x06, "phsubd", X86hsub, v4i32, VR128,
5770 memopv2i64, i128mem, SSE_PHADDSUBD>;
5771 defm PSIGNB : SS3I_binop_rm<0x08, "psignb", X86psign, v16i8, VR128,
5772 memopv2i64, i128mem, SSE_PSIGN>;
5773 defm PSIGNW : SS3I_binop_rm<0x09, "psignw", X86psign, v8i16, VR128,
5774 memopv2i64, i128mem, SSE_PSIGN>;
5775 defm PSIGND : SS3I_binop_rm<0x0A, "psignd", X86psign, v4i32, VR128,
5776 memopv2i64, i128mem, SSE_PSIGN>;
5777 defm PSHUFB : SS3I_binop_rm<0x00, "pshufb", X86pshufb, v16i8, VR128,
5778 memopv2i64, i128mem, SSE_PSHUFB>;
5779 defm PHADDSW : SS3I_binop_rm_int<0x03, "phaddsw",
5780 int_x86_ssse3_phadd_sw_128,
5781 SSE_PHADDSUBSW, memopv2i64>;
5782 defm PHSUBSW : SS3I_binop_rm_int<0x07, "phsubsw",
5783 int_x86_ssse3_phsub_sw_128,
5784 SSE_PHADDSUBSW, memopv2i64>;
5785 defm PMADDUBSW : SS3I_binop_rm_int<0x04, "pmaddubsw",
5786 int_x86_ssse3_pmadd_ub_sw_128,
5787 SSE_PMADD, memopv2i64>;
5789 defm PMULHRSW : SS3I_binop_rm_int<0x0B, "pmulhrsw",
5790 int_x86_ssse3_pmul_hr_sw_128,
5791 SSE_PMULHRSW, memopv2i64>;
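// Illustrative notes on two of the less obvious SSSE3 binary ops defined
// above (informal reader aid only; the Intel SDM is the normative source):
//  * PSHUFB selects each destination byte using the low 4 bits of the
//    corresponding control byte, and zeroes the byte when the control
//    byte's top bit is set.  Roughly:
//      dst[i] = (ctl[i] & 0x80) ? 0 : src[ctl[i] & 15];
//  * PMULHRSW computes a rounded, scaled i16 product:
//      dst[i] = (int16_t)(((int32_t)a[i] * b[i] + 0x4000) >> 15);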
5794 //===---------------------------------------------------------------------===//
5795 // SSSE3 - Packed Align Instruction Patterns
5796 //===---------------------------------------------------------------------===//
5798 multiclass ssse3_palignr<string asm, bit Is2Addr = 1> {
5799 let hasSideEffects = 0 in {
5800 def R128rr : SS3AI<0x0F, MRMSrcReg, (outs VR128:$dst),
5801 (ins VR128:$src1, VR128:$src2, u8imm:$src3),
5803 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
5805 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
5806 [], IIC_SSE_PALIGNRR>, Sched<[WriteShuffle]>;
5808 def R128rm : SS3AI<0x0F, MRMSrcMem, (outs VR128:$dst),
5809 (ins VR128:$src1, i128mem:$src2, u8imm:$src3),
5811 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
5813 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
5814 [], IIC_SSE_PALIGNRM>, Sched<[WriteShuffleLd, ReadAfterLd]>;
5818 multiclass ssse3_palignr_y<string asm, bit Is2Addr = 1> {
5819 let hasSideEffects = 0 in {
5820 def R256rr : SS3AI<0x0F, MRMSrcReg, (outs VR256:$dst),
5821 (ins VR256:$src1, VR256:$src2, u8imm:$src3),
5823 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
5824 []>, Sched<[WriteShuffle]>;
5826 def R256rm : SS3AI<0x0F, MRMSrcMem, (outs VR256:$dst),
5827 (ins VR256:$src1, i256mem:$src2, u8imm:$src3),
5829 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
5830 []>, Sched<[WriteShuffleLd, ReadAfterLd]>;
5834 let Predicates = [HasAVX] in
5835 defm VPALIGN : ssse3_palignr<"vpalignr", 0>, VEX_4V;
5836 let Predicates = [HasAVX2] in
5837 defm VPALIGN : ssse3_palignr_y<"vpalignr", 0>, VEX_4V, VEX_L;
5838 let Constraints = "$src1 = $dst", Predicates = [UseSSSE3] in
5839 defm PALIGN : ssse3_palignr<"palignr">;
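// PALIGNR concatenates the two sources (first operand in the high half) and
// extracts a byte-aligned 128-bit window starting $imm bytes from the least
// significant end.  A small C sketch with the SSSE3 intrinsic, purely for
// illustration:
//
//   __m128i r = _mm_alignr_epi8(hi, lo, 4);   // bytes 4..19 of {hi:lo}
//
// Note that the X86PAlignr DAG node takes its operands in the opposite
// order from the instruction, which is why every pattern below swaps
// $src1 and $src2 when selecting VPALIGNR/PALIGNR.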
5841 let Predicates = [HasAVX2] in {
5842 def : Pat<(v8i32 (X86PAlignr VR256:$src1, VR256:$src2, (i8 imm:$imm))),
5843 (VPALIGNR256rr VR256:$src2, VR256:$src1, imm:$imm)>;
5844 def : Pat<(v8f32 (X86PAlignr VR256:$src1, VR256:$src2, (i8 imm:$imm))),
5845 (VPALIGNR256rr VR256:$src2, VR256:$src1, imm:$imm)>;
5846 def : Pat<(v16i16 (X86PAlignr VR256:$src1, VR256:$src2, (i8 imm:$imm))),
5847 (VPALIGNR256rr VR256:$src2, VR256:$src1, imm:$imm)>;
5848 def : Pat<(v32i8 (X86PAlignr VR256:$src1, VR256:$src2, (i8 imm:$imm))),
5849 (VPALIGNR256rr VR256:$src2, VR256:$src1, imm:$imm)>;
5852 let Predicates = [HasAVX] in {
5853 def : Pat<(v4i32 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5854 (VPALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
5855 def : Pat<(v4f32 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5856 (VPALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
5857 def : Pat<(v8i16 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5858 (VPALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
5859 def : Pat<(v16i8 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5860 (VPALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
5863 let Predicates = [UseSSSE3] in {
5864 def : Pat<(v4i32 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5865 (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
5866 def : Pat<(v4f32 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5867 (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
5868 def : Pat<(v8i16 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5869 (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
5870 def : Pat<(v16i8 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5871 (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
5874 //===---------------------------------------------------------------------===//
// SSE3 - Thread synchronization
5876 //===---------------------------------------------------------------------===//
5878 let SchedRW = [WriteSystem] in {
5879 let usesCustomInserter = 1 in {
5880 def MONITOR : PseudoI<(outs), (ins i32mem:$src1, GR32:$src2, GR32:$src3),
5881 [(int_x86_sse3_monitor addr:$src1, GR32:$src2, GR32:$src3)]>,
5882 Requires<[HasSSE3]>;
5885 let Uses = [EAX, ECX, EDX] in
5886 def MONITORrrr : I<0x01, MRM_C8, (outs), (ins), "monitor", [], IIC_SSE_MONITOR>,
5887 TB, Requires<[HasSSE3]>;
5888 let Uses = [ECX, EAX] in
5889 def MWAITrr : I<0x01, MRM_C9, (outs), (ins), "mwait",
5890 [(int_x86_sse3_mwait ECX, EAX)], IIC_SSE_MWAIT>,
5891 TB, Requires<[HasSSE3]>;
5894 def : InstAlias<"mwait\t{%eax, %ecx|ecx, eax}", (MWAITrr)>, Requires<[Not64BitMode]>;
5895 def : InstAlias<"mwait\t{%rax, %rcx|rcx, rax}", (MWAITrr)>, Requires<[In64BitMode]>;
5897 def : InstAlias<"monitor\t{%eax, %ecx, %edx|edx, ecx, eax}", (MONITORrrr)>,
5898 Requires<[Not64BitMode]>;
5899 def : InstAlias<"monitor\t{%rax, %rcx, %rdx|rdx, rcx, rax}", (MONITORrrr)>,
5900 Requires<[In64BitMode]>;
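// A hedged usage sketch of the MONITOR/MWAIT pair via the intrinsics from
// <pmmintrin.h> (illustration only; 'flag' is a hypothetical memory
// location being watched):
//
//   _mm_monitor(&flag, 0 /*extensions*/, 0 /*hints*/);  // arm the monitor
//   if (!flag)
//     _mm_mwait(0 /*extensions*/, 0 /*hints*/);         // wait for a write
//
// The implicit-register forms above (EAX/ECX/EDX for MONITOR, ECX/EAX for
// MWAIT) are what the InstAliases map the explicit-operand spellings onto.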
5902 //===----------------------------------------------------------------------===//
5903 // SSE4.1 - Packed Move with Sign/Zero Extend
5904 //===----------------------------------------------------------------------===//
5906 multiclass SS41I_pmovx_rrrm<bits<8> opc, string OpcodeStr, X86MemOperand MemOp,
5907 RegisterClass OutRC, RegisterClass InRC,
5909 def rr : SS48I<opc, MRMSrcReg, (outs OutRC:$dst), (ins InRC:$src),
5910 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5912 Sched<[itins.Sched]>;
5914 def rm : SS48I<opc, MRMSrcMem, (outs OutRC:$dst), (ins MemOp:$src),
5915 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5917 itins.rm>, Sched<[itins.Sched.Folded]>;
5920 multiclass SS41I_pmovx_rm_all<bits<8> opc, string OpcodeStr,
5921 X86MemOperand MemOp, X86MemOperand MemYOp,
5922 OpndItins SSEItins, OpndItins AVXItins,
5923 OpndItins AVX2Itins> {
5924 defm NAME : SS41I_pmovx_rrrm<opc, OpcodeStr, MemOp, VR128, VR128, SSEItins>;
5925 let Predicates = [HasAVX] in
5926 defm V#NAME : SS41I_pmovx_rrrm<opc, !strconcat("v", OpcodeStr), MemOp,
5927 VR128, VR128, AVXItins>, VEX;
5928 let Predicates = [HasAVX2] in
5929 defm V#NAME#Y : SS41I_pmovx_rrrm<opc, !strconcat("v", OpcodeStr), MemYOp,
5930 VR256, VR128, AVX2Itins>, VEX, VEX_L;
5933 multiclass SS41I_pmovx_rm<bits<8> opc, string OpcodeStr,
5934 X86MemOperand MemOp, X86MemOperand MemYOp> {
5935 defm PMOVSX#NAME : SS41I_pmovx_rm_all<opc, !strconcat("pmovsx", OpcodeStr),
5937 SSE_INTALU_ITINS_SHUFF_P,
5938 DEFAULT_ITINS_SHUFFLESCHED,
5939 DEFAULT_ITINS_SHUFFLESCHED>;
5940 defm PMOVZX#NAME : SS41I_pmovx_rm_all<!add(opc, 0x10),
5941 !strconcat("pmovzx", OpcodeStr),
5943 SSE_INTALU_ITINS_SHUFF_P,
5944 DEFAULT_ITINS_SHUFFLESCHED,
5945 DEFAULT_ITINS_SHUFFLESCHED>;
5948 defm BW : SS41I_pmovx_rm<0x20, "bw", i64mem, i128mem>;
5949 defm WD : SS41I_pmovx_rm<0x23, "wd", i64mem, i128mem>;
5950 defm DQ : SS41I_pmovx_rm<0x25, "dq", i64mem, i128mem>;
5952 defm BD : SS41I_pmovx_rm<0x21, "bd", i32mem, i64mem>;
5953 defm WQ : SS41I_pmovx_rm<0x24, "wq", i32mem, i64mem>;
5955 defm BQ : SS41I_pmovx_rm<0x22, "bq", i16mem, i32mem>;
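// The PMOVSX/PMOVZX family widens the low elements of the source, either
// sign- or zero-extending.  An illustrative C snippet with the SSE4.1
// intrinsics from <smmintrin.h> (not part of this file):
//
//   __m128i w = _mm_cvtepi8_epi16(b);    // pmovsxbw: 8 x i8  -> 8 x i16
//   __m128i d = _mm_cvtepu16_epi32(h);   // pmovzxwd: 4 x u16 -> 4 x i32
//
// The memory forms load only as many source bytes as are consumed
// (e.g. 64 bits for pmovsxbw), which is why the patterns further down
// match scalar_to_vector / vzload shapes as well as full vector loads.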
5958 multiclass SS41I_pmovx_avx2_patterns<string OpcPrefix, string ExtTy, SDNode ExtOp> {
5959 // Register-Register patterns
5960 def : Pat<(v16i16 (ExtOp (v16i8 VR128:$src))),
5961 (!cast<I>(OpcPrefix#BWYrr) VR128:$src)>;
5962 def : Pat<(v8i32 (ExtOp (v16i8 VR128:$src))),
5963 (!cast<I>(OpcPrefix#BDYrr) VR128:$src)>;
5964 def : Pat<(v4i64 (ExtOp (v16i8 VR128:$src))),
5965 (!cast<I>(OpcPrefix#BQYrr) VR128:$src)>;
5967 def : Pat<(v8i32 (ExtOp (v8i16 VR128:$src))),
5968 (!cast<I>(OpcPrefix#WDYrr) VR128:$src)>;
5969 def : Pat<(v4i64 (ExtOp (v8i16 VR128:$src))),
5970 (!cast<I>(OpcPrefix#WQYrr) VR128:$src)>;
5972 def : Pat<(v4i64 (ExtOp (v4i32 VR128:$src))),
5973 (!cast<I>(OpcPrefix#DQYrr) VR128:$src)>;
// On AVX2, we also support 256-bit inputs.
5976 // FIXME: remove these patterns when the old shuffle lowering goes away.
5977 def : Pat<(v16i16 (ExtOp (v32i8 VR256:$src))),
5978 (!cast<I>(OpcPrefix#BWYrr) (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
5979 def : Pat<(v8i32 (ExtOp (v32i8 VR256:$src))),
5980 (!cast<I>(OpcPrefix#BDYrr) (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
5981 def : Pat<(v4i64 (ExtOp (v32i8 VR256:$src))),
5982 (!cast<I>(OpcPrefix#BQYrr) (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
5984 def : Pat<(v8i32 (ExtOp (v16i16 VR256:$src))),
5985 (!cast<I>(OpcPrefix#WDYrr) (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
5986 def : Pat<(v4i64 (ExtOp (v16i16 VR256:$src))),
5987 (!cast<I>(OpcPrefix#WQYrr) (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
5989 def : Pat<(v4i64 (ExtOp (v8i32 VR256:$src))),
5990 (!cast<I>(OpcPrefix#DQYrr) (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
5992 // Simple Register-Memory patterns
5993 def : Pat<(v16i16 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
5994 (!cast<I>(OpcPrefix#BWYrm) addr:$src)>;
5995 def : Pat<(v8i32 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
5996 (!cast<I>(OpcPrefix#BDYrm) addr:$src)>;
5997 def : Pat<(v4i64 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
5998 (!cast<I>(OpcPrefix#BQYrm) addr:$src)>;
6000 def : Pat<(v8i32 (!cast<PatFrag>(ExtTy#"extloadvi16") addr:$src)),
6001 (!cast<I>(OpcPrefix#WDYrm) addr:$src)>;
6002 def : Pat<(v4i64 (!cast<PatFrag>(ExtTy#"extloadvi16") addr:$src)),
6003 (!cast<I>(OpcPrefix#WQYrm) addr:$src)>;
6005 def : Pat<(v4i64 (!cast<PatFrag>(ExtTy#"extloadvi32") addr:$src)),
6006 (!cast<I>(OpcPrefix#DQYrm) addr:$src)>;
6008 // AVX2 Register-Memory patterns
6009 def : Pat<(v16i16 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
6010 (!cast<I>(OpcPrefix#BWYrm) addr:$src)>;
6011 def : Pat<(v16i16 (ExtOp (v16i8 (vzmovl_v2i64 addr:$src)))),
6012 (!cast<I>(OpcPrefix#BWYrm) addr:$src)>;
6013 def : Pat<(v16i16 (ExtOp (v16i8 (vzload_v2i64 addr:$src)))),
6014 (!cast<I>(OpcPrefix#BWYrm) addr:$src)>;
6015 def : Pat<(v16i16 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
6016 (!cast<I>(OpcPrefix#BWYrm) addr:$src)>;
6018 def : Pat<(v8i32 (ExtOp (bc_v16i8 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
6019 (!cast<I>(OpcPrefix#BDYrm) addr:$src)>;
6020 def : Pat<(v8i32 (ExtOp (v16i8 (vzmovl_v2i64 addr:$src)))),
6021 (!cast<I>(OpcPrefix#BDYrm) addr:$src)>;
6022 def : Pat<(v8i32 (ExtOp (v16i8 (vzload_v2i64 addr:$src)))),
6023 (!cast<I>(OpcPrefix#BDYrm) addr:$src)>;
6024 def : Pat<(v8i32 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
6025 (!cast<I>(OpcPrefix#BDYrm) addr:$src)>;
6027 def : Pat<(v4i64 (ExtOp (bc_v16i8 (v4i32 (scalar_to_vector (loadi32 addr:$src)))))),
6028 (!cast<I>(OpcPrefix#BQYrm) addr:$src)>;
6029 def : Pat<(v4i64 (ExtOp (v16i8 (vzmovl_v4i32 addr:$src)))),
6030 (!cast<I>(OpcPrefix#BQYrm) addr:$src)>;
6031 def : Pat<(v4i64 (ExtOp (v16i8 (vzload_v2i64 addr:$src)))),
6032 (!cast<I>(OpcPrefix#BQYrm) addr:$src)>;
6033 def : Pat<(v4i64 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
6034 (!cast<I>(OpcPrefix#BQYrm) addr:$src)>;
6036 def : Pat<(v8i32 (ExtOp (bc_v8i16 (loadv2i64 addr:$src)))),
6037 (!cast<I>(OpcPrefix#WDYrm) addr:$src)>;
6038 def : Pat<(v8i32 (ExtOp (v8i16 (vzmovl_v2i64 addr:$src)))),
6039 (!cast<I>(OpcPrefix#WDYrm) addr:$src)>;
6040 def : Pat<(v8i32 (ExtOp (v8i16 (vzload_v2i64 addr:$src)))),
6041 (!cast<I>(OpcPrefix#WDYrm) addr:$src)>;
6042 def : Pat<(v8i32 (ExtOp (bc_v8i16 (loadv2i64 addr:$src)))),
6043 (!cast<I>(OpcPrefix#WDYrm) addr:$src)>;
6045 def : Pat<(v4i64 (ExtOp (bc_v8i16 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
6046 (!cast<I>(OpcPrefix#WQYrm) addr:$src)>;
6047 def : Pat<(v4i64 (ExtOp (v8i16 (vzmovl_v2i64 addr:$src)))),
6048 (!cast<I>(OpcPrefix#WQYrm) addr:$src)>;
6049 def : Pat<(v4i64 (ExtOp (v8i16 (vzload_v2i64 addr:$src)))),
6050 (!cast<I>(OpcPrefix#WQYrm) addr:$src)>;
6051 def : Pat<(v4i64 (ExtOp (bc_v8i16 (loadv2i64 addr:$src)))),
6052 (!cast<I>(OpcPrefix#WQYrm) addr:$src)>;
6054 def : Pat<(v4i64 (ExtOp (bc_v4i32 (loadv2i64 addr:$src)))),
6055 (!cast<I>(OpcPrefix#DQYrm) addr:$src)>;
6056 def : Pat<(v4i64 (ExtOp (v4i32 (vzmovl_v2i64 addr:$src)))),
6057 (!cast<I>(OpcPrefix#DQYrm) addr:$src)>;
6058 def : Pat<(v4i64 (ExtOp (v4i32 (vzload_v2i64 addr:$src)))),
6059 (!cast<I>(OpcPrefix#DQYrm) addr:$src)>;
6060 def : Pat<(v4i64 (ExtOp (bc_v4i32 (loadv2i64 addr:$src)))),
6061 (!cast<I>(OpcPrefix#DQYrm) addr:$src)>;
6064 let Predicates = [HasAVX2] in {
6065 defm : SS41I_pmovx_avx2_patterns<"VPMOVSX", "s", X86vsext>;
6066 defm : SS41I_pmovx_avx2_patterns<"VPMOVZX", "z", X86vzext>;
6069 // SSE4.1/AVX patterns.
6070 multiclass SS41I_pmovx_patterns<string OpcPrefix, string ExtTy,
6071 SDNode ExtOp, PatFrag ExtLoad16> {
6072 def : Pat<(v8i16 (ExtOp (v16i8 VR128:$src))),
6073 (!cast<I>(OpcPrefix#BWrr) VR128:$src)>;
6074 def : Pat<(v4i32 (ExtOp (v16i8 VR128:$src))),
6075 (!cast<I>(OpcPrefix#BDrr) VR128:$src)>;
6076 def : Pat<(v2i64 (ExtOp (v16i8 VR128:$src))),
6077 (!cast<I>(OpcPrefix#BQrr) VR128:$src)>;
6079 def : Pat<(v4i32 (ExtOp (v8i16 VR128:$src))),
6080 (!cast<I>(OpcPrefix#WDrr) VR128:$src)>;
6081 def : Pat<(v2i64 (ExtOp (v8i16 VR128:$src))),
6082 (!cast<I>(OpcPrefix#WQrr) VR128:$src)>;
6084 def : Pat<(v2i64 (ExtOp (v4i32 VR128:$src))),
6085 (!cast<I>(OpcPrefix#DQrr) VR128:$src)>;
6087 def : Pat<(v8i16 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
6088 (!cast<I>(OpcPrefix#BWrm) addr:$src)>;
6089 def : Pat<(v4i32 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
6090 (!cast<I>(OpcPrefix#BDrm) addr:$src)>;
6091 def : Pat<(v2i64 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
6092 (!cast<I>(OpcPrefix#BQrm) addr:$src)>;
6094 def : Pat<(v4i32 (!cast<PatFrag>(ExtTy#"extloadvi16") addr:$src)),
6095 (!cast<I>(OpcPrefix#WDrm) addr:$src)>;
6096 def : Pat<(v2i64 (!cast<PatFrag>(ExtTy#"extloadvi16") addr:$src)),
6097 (!cast<I>(OpcPrefix#WQrm) addr:$src)>;
6099 def : Pat<(v2i64 (!cast<PatFrag>(ExtTy#"extloadvi32") addr:$src)),
6100 (!cast<I>(OpcPrefix#DQrm) addr:$src)>;
6102 def : Pat<(v8i16 (ExtOp (bc_v16i8 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
6103 (!cast<I>(OpcPrefix#BWrm) addr:$src)>;
6104 def : Pat<(v8i16 (ExtOp (bc_v16i8 (v2f64 (scalar_to_vector (loadf64 addr:$src)))))),
6105 (!cast<I>(OpcPrefix#BWrm) addr:$src)>;
6106 def : Pat<(v8i16 (ExtOp (v16i8 (vzmovl_v2i64 addr:$src)))),
6107 (!cast<I>(OpcPrefix#BWrm) addr:$src)>;
6108 def : Pat<(v8i16 (ExtOp (v16i8 (vzload_v2i64 addr:$src)))),
6109 (!cast<I>(OpcPrefix#BWrm) addr:$src)>;
6110 def : Pat<(v8i16 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
6111 (!cast<I>(OpcPrefix#BWrm) addr:$src)>;
6113 def : Pat<(v4i32 (ExtOp (bc_v16i8 (v4i32 (scalar_to_vector (loadi32 addr:$src)))))),
6114 (!cast<I>(OpcPrefix#BDrm) addr:$src)>;
6115 def : Pat<(v4i32 (ExtOp (v16i8 (vzmovl_v4i32 addr:$src)))),
6116 (!cast<I>(OpcPrefix#BDrm) addr:$src)>;
6117 def : Pat<(v4i32 (ExtOp (v16i8 (vzload_v2i64 addr:$src)))),
6118 (!cast<I>(OpcPrefix#BDrm) addr:$src)>;
6119 def : Pat<(v4i32 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
6120 (!cast<I>(OpcPrefix#BDrm) addr:$src)>;
6122 def : Pat<(v2i64 (ExtOp (bc_v16i8 (v4i32 (scalar_to_vector (ExtLoad16 addr:$src)))))),
6123 (!cast<I>(OpcPrefix#BQrm) addr:$src)>;
6124 def : Pat<(v2i64 (ExtOp (v16i8 (vzmovl_v4i32 addr:$src)))),
6125 (!cast<I>(OpcPrefix#BQrm) addr:$src)>;
6126 def : Pat<(v2i64 (ExtOp (v16i8 (vzload_v2i64 addr:$src)))),
6127 (!cast<I>(OpcPrefix#BQrm) addr:$src)>;
6128 def : Pat<(v2i64 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
6129 (!cast<I>(OpcPrefix#BQrm) addr:$src)>;
6131 def : Pat<(v4i32 (ExtOp (bc_v8i16 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
6132 (!cast<I>(OpcPrefix#WDrm) addr:$src)>;
6133 def : Pat<(v4i32 (ExtOp (bc_v8i16 (v2f64 (scalar_to_vector (loadf64 addr:$src)))))),
6134 (!cast<I>(OpcPrefix#WDrm) addr:$src)>;
6135 def : Pat<(v4i32 (ExtOp (v8i16 (vzmovl_v2i64 addr:$src)))),
6136 (!cast<I>(OpcPrefix#WDrm) addr:$src)>;
6137 def : Pat<(v4i32 (ExtOp (v8i16 (vzload_v2i64 addr:$src)))),
6138 (!cast<I>(OpcPrefix#WDrm) addr:$src)>;
6139 def : Pat<(v4i32 (ExtOp (bc_v8i16 (loadv2i64 addr:$src)))),
6140 (!cast<I>(OpcPrefix#WDrm) addr:$src)>;
6142 def : Pat<(v2i64 (ExtOp (bc_v8i16 (v4i32 (scalar_to_vector (loadi32 addr:$src)))))),
6143 (!cast<I>(OpcPrefix#WQrm) addr:$src)>;
6144 def : Pat<(v2i64 (ExtOp (v8i16 (vzmovl_v4i32 addr:$src)))),
6145 (!cast<I>(OpcPrefix#WQrm) addr:$src)>;
6146 def : Pat<(v2i64 (ExtOp (v8i16 (vzload_v2i64 addr:$src)))),
6147 (!cast<I>(OpcPrefix#WQrm) addr:$src)>;
6148 def : Pat<(v2i64 (ExtOp (bc_v8i16 (loadv2i64 addr:$src)))),
6149 (!cast<I>(OpcPrefix#WQrm) addr:$src)>;
6151 def : Pat<(v2i64 (ExtOp (bc_v4i32 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
6152 (!cast<I>(OpcPrefix#DQrm) addr:$src)>;
6153 def : Pat<(v2i64 (ExtOp (bc_v4i32 (v2f64 (scalar_to_vector (loadf64 addr:$src)))))),
6154 (!cast<I>(OpcPrefix#DQrm) addr:$src)>;
6155 def : Pat<(v2i64 (ExtOp (v4i32 (vzmovl_v2i64 addr:$src)))),
6156 (!cast<I>(OpcPrefix#DQrm) addr:$src)>;
6157 def : Pat<(v2i64 (ExtOp (v4i32 (vzload_v2i64 addr:$src)))),
6158 (!cast<I>(OpcPrefix#DQrm) addr:$src)>;
6159 def : Pat<(v2i64 (ExtOp (bc_v4i32 (loadv2i64 addr:$src)))),
6160 (!cast<I>(OpcPrefix#DQrm) addr:$src)>;
6163 let Predicates = [HasAVX] in {
6164 defm : SS41I_pmovx_patterns<"VPMOVSX", "s", X86vsext, extloadi32i16>;
6165 defm : SS41I_pmovx_patterns<"VPMOVZX", "z", X86vzext, loadi16_anyext>;
6168 let Predicates = [UseSSE41] in {
6169 defm : SS41I_pmovx_patterns<"PMOVSX", "s", X86vsext, extloadi32i16>;
6170 defm : SS41I_pmovx_patterns<"PMOVZX", "z", X86vzext, loadi16_anyext>;
6173 //===----------------------------------------------------------------------===//
6174 // SSE4.1 - Extract Instructions
6175 //===----------------------------------------------------------------------===//
/// SS41I_extract8 - SSE 4.1 extract 8 bits to 32-bit reg or 8-bit mem
6178 multiclass SS41I_extract8<bits<8> opc, string OpcodeStr> {
6179 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32orGR64:$dst),
6180 (ins VR128:$src1, u8imm:$src2),
6181 !strconcat(OpcodeStr,
6182 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6183 [(set GR32orGR64:$dst, (X86pextrb (v16i8 VR128:$src1),
6185 Sched<[WriteShuffle]>;
6186 let hasSideEffects = 0, mayStore = 1,
6187 SchedRW = [WriteShuffleLd, WriteRMW] in
6188 def mr : SS4AIi8<opc, MRMDestMem, (outs),
6189 (ins i8mem:$dst, VR128:$src1, u8imm:$src2),
6190 !strconcat(OpcodeStr,
6191 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6192 [(store (i8 (trunc (assertzext (X86pextrb (v16i8 VR128:$src1),
6193 imm:$src2)))), addr:$dst)]>;
6196 let Predicates = [HasAVX] in
6197 defm VPEXTRB : SS41I_extract8<0x14, "vpextrb">, VEX;
6199 defm PEXTRB : SS41I_extract8<0x14, "pextrb">;
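// Illustration only: the SSE4.1 extract forms correspond to the
// <smmintrin.h> intrinsics, e.g.
//
//   int v = _mm_extract_epi8(x, 3);   // pextrb: byte 3, zero-extended to int
//
// and the 'mr' variants store the selected element straight to memory.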
6202 /// SS41I_extract16 - SSE 4.1 extract 16 bits to memory destination
6203 multiclass SS41I_extract16<bits<8> opc, string OpcodeStr> {
6204 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in
6205 def rr_REV : SS4AIi8<opc, MRMDestReg, (outs GR32orGR64:$dst),
6206 (ins VR128:$src1, u8imm:$src2),
6207 !strconcat(OpcodeStr,
6208 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6209 []>, Sched<[WriteShuffle]>;
6211 let hasSideEffects = 0, mayStore = 1,
6212 SchedRW = [WriteShuffleLd, WriteRMW] in
6213 def mr : SS4AIi8<opc, MRMDestMem, (outs),
6214 (ins i16mem:$dst, VR128:$src1, u8imm:$src2),
6215 !strconcat(OpcodeStr,
6216 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6217 [(store (i16 (trunc (assertzext (X86pextrw (v8i16 VR128:$src1),
6218 imm:$src2)))), addr:$dst)]>;
6221 let Predicates = [HasAVX] in
6222 defm VPEXTRW : SS41I_extract16<0x15, "vpextrw">, VEX;
6224 defm PEXTRW : SS41I_extract16<0x15, "pextrw">;
6227 /// SS41I_extract32 - SSE 4.1 extract 32 bits to int reg or memory destination
6228 multiclass SS41I_extract32<bits<8> opc, string OpcodeStr> {
6229 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
6230 (ins VR128:$src1, u8imm:$src2),
6231 !strconcat(OpcodeStr,
6232 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6234 (extractelt (v4i32 VR128:$src1), imm:$src2))]>,
6235 Sched<[WriteShuffle]>;
6236 let SchedRW = [WriteShuffleLd, WriteRMW] in
6237 def mr : SS4AIi8<opc, MRMDestMem, (outs),
6238 (ins i32mem:$dst, VR128:$src1, u8imm:$src2),
6239 !strconcat(OpcodeStr,
6240 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6241 [(store (extractelt (v4i32 VR128:$src1), imm:$src2),
6245 let Predicates = [HasAVX] in
6246 defm VPEXTRD : SS41I_extract32<0x16, "vpextrd">, VEX;
6248 defm PEXTRD : SS41I_extract32<0x16, "pextrd">;
/// SS41I_extract64 - SSE 4.1 extract 64 bits to int reg or memory destination
6251 multiclass SS41I_extract64<bits<8> opc, string OpcodeStr> {
6252 def rr : SS4AIi8<opc, MRMDestReg, (outs GR64:$dst),
6253 (ins VR128:$src1, u8imm:$src2),
6254 !strconcat(OpcodeStr,
6255 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6257 (extractelt (v2i64 VR128:$src1), imm:$src2))]>,
6258 Sched<[WriteShuffle]>, REX_W;
6259 let SchedRW = [WriteShuffleLd, WriteRMW] in
6260 def mr : SS4AIi8<opc, MRMDestMem, (outs),
6261 (ins i64mem:$dst, VR128:$src1, u8imm:$src2),
6262 !strconcat(OpcodeStr,
6263 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6264 [(store (extractelt (v2i64 VR128:$src1), imm:$src2),
6265 addr:$dst)]>, REX_W;
6268 let Predicates = [HasAVX] in
6269 defm VPEXTRQ : SS41I_extract64<0x16, "vpextrq">, VEX, VEX_W;
6271 defm PEXTRQ : SS41I_extract64<0x16, "pextrq">;
/// SS41I_extractf32 - SSE 4.1 extract a 32-bit fp value to an int reg or memory
6275 multiclass SS41I_extractf32<bits<8> opc, string OpcodeStr,
6276 OpndItins itins = DEFAULT_ITINS> {
6277 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32orGR64:$dst),
6278 (ins VR128:$src1, u8imm:$src2),
6279 !strconcat(OpcodeStr,
6280 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6281 [(set GR32orGR64:$dst,
6282 (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2))],
6283 itins.rr>, Sched<[WriteFBlend]>;
6284 let SchedRW = [WriteFBlendLd, WriteRMW] in
6285 def mr : SS4AIi8<opc, MRMDestMem, (outs),
6286 (ins f32mem:$dst, VR128:$src1, u8imm:$src2),
6287 !strconcat(OpcodeStr,
6288 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6289 [(store (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2),
6290 addr:$dst)], itins.rm>;
6293 let ExeDomain = SSEPackedSingle in {
6294 let Predicates = [UseAVX] in
6295 defm VEXTRACTPS : SS41I_extractf32<0x17, "vextractps">, VEX;
6296 defm EXTRACTPS : SS41I_extractf32<0x17, "extractps", SSE_EXTRACT_ITINS>;
6299 // Also match an EXTRACTPS store when the store is done as f32 instead of i32.
6300 def : Pat<(store (f32 (bitconvert (extractelt (bc_v4i32 (v4f32 VR128:$src1)),
6303 (VEXTRACTPSmr addr:$dst, VR128:$src1, imm:$src2)>,
6305 def : Pat<(store (f32 (bitconvert (extractelt (bc_v4i32 (v4f32 VR128:$src1)),
6308 (EXTRACTPSmr addr:$dst, VR128:$src1, imm:$src2)>,
6309 Requires<[UseSSE41]>;
6311 //===----------------------------------------------------------------------===//
6312 // SSE4.1 - Insert Instructions
6313 //===----------------------------------------------------------------------===//
6315 multiclass SS41I_insert8<bits<8> opc, string asm, bit Is2Addr = 1> {
6316 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
6317 (ins VR128:$src1, GR32orGR64:$src2, u8imm:$src3),
6319 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6321 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6323 (X86pinsrb VR128:$src1, GR32orGR64:$src2, imm:$src3))]>,
6324 Sched<[WriteShuffle]>;
6325 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
6326 (ins VR128:$src1, i8mem:$src2, u8imm:$src3),
6328 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6330 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6332 (X86pinsrb VR128:$src1, (extloadi8 addr:$src2),
6333 imm:$src3))]>, Sched<[WriteShuffleLd, ReadAfterLd]>;
6336 let Predicates = [HasAVX] in
6337 defm VPINSRB : SS41I_insert8<0x20, "vpinsrb", 0>, VEX_4V;
6338 let Constraints = "$src1 = $dst" in
6339 defm PINSRB : SS41I_insert8<0x20, "pinsrb">;
6341 multiclass SS41I_insert32<bits<8> opc, string asm, bit Is2Addr = 1> {
6342 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
6343 (ins VR128:$src1, GR32:$src2, u8imm:$src3),
6345 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6347 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6349 (v4i32 (insertelt VR128:$src1, GR32:$src2, imm:$src3)))]>,
6350 Sched<[WriteShuffle]>;
6351 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
6352 (ins VR128:$src1, i32mem:$src2, u8imm:$src3),
6354 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6356 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6358 (v4i32 (insertelt VR128:$src1, (loadi32 addr:$src2),
6359 imm:$src3)))]>, Sched<[WriteShuffleLd, ReadAfterLd]>;
6362 let Predicates = [HasAVX] in
6363 defm VPINSRD : SS41I_insert32<0x22, "vpinsrd", 0>, VEX_4V;
6364 let Constraints = "$src1 = $dst" in
6365 defm PINSRD : SS41I_insert32<0x22, "pinsrd">;
6367 multiclass SS41I_insert64<bits<8> opc, string asm, bit Is2Addr = 1> {
6368 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
6369 (ins VR128:$src1, GR64:$src2, u8imm:$src3),
6371 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6373 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6375 (v2i64 (insertelt VR128:$src1, GR64:$src2, imm:$src3)))]>,
6376 Sched<[WriteShuffle]>;
6377 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
6378 (ins VR128:$src1, i64mem:$src2, u8imm:$src3),
6380 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6382 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6384 (v2i64 (insertelt VR128:$src1, (loadi64 addr:$src2),
6385 imm:$src3)))]>, Sched<[WriteShuffleLd, ReadAfterLd]>;
6388 let Predicates = [HasAVX] in
6389 defm VPINSRQ : SS41I_insert64<0x22, "vpinsrq", 0>, VEX_4V, VEX_W;
6390 let Constraints = "$src1 = $dst" in
6391 defm PINSRQ : SS41I_insert64<0x22, "pinsrq">, REX_W;
// insertps has a few different modes; the first two below are optimized
// inserts that won't zero arbitrary elements in the destination vector.
// The next one matches the intrinsic and may zero arbitrary elements in the
// target vector.
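// For the reader, the layout of the insertps immediate (per the Intel SDM):
// bits [7:6] select the source element (only when the source is a register),
// bits [5:4] select the destination element, and bits [3:0] are a zero mask
// applied after the insert.  An illustrative intrinsic call:
//
//   // insert src[2] into dst[1], then zero dst[3]:
//   __m128 r = _mm_insert_ps(dst, src, (2 << 6) | (1 << 4) | 0x8);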
6397 multiclass SS41I_insertf32<bits<8> opc, string asm, bit Is2Addr = 1,
6398 OpndItins itins = DEFAULT_ITINS> {
6399 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
6400 (ins VR128:$src1, VR128:$src2, u8imm:$src3),
6402 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6404 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6406 (X86insertps VR128:$src1, VR128:$src2, imm:$src3))], itins.rr>,
6407 Sched<[WriteFShuffle]>;
6408 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
6409 (ins VR128:$src1, f32mem:$src2, u8imm:$src3),
6411 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6413 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6415 (X86insertps VR128:$src1,
6416 (v4f32 (scalar_to_vector (loadf32 addr:$src2))),
6417 imm:$src3))], itins.rm>,
6418 Sched<[WriteFShuffleLd, ReadAfterLd]>;
6421 let ExeDomain = SSEPackedSingle in {
6422 let Predicates = [UseAVX] in
6423 defm VINSERTPS : SS41I_insertf32<0x21, "vinsertps", 0>, VEX_4V;
6424 let Constraints = "$src1 = $dst" in
6425 defm INSERTPS : SS41I_insertf32<0x21, "insertps", 1, SSE_INSERT_ITINS>;
6428 let Predicates = [UseSSE41] in {
6429 // If we're inserting an element from a load or a null pshuf of a load,
6430 // fold the load into the insertps instruction.
6431 def : Pat<(v4f32 (X86insertps (v4f32 VR128:$src1), (X86PShufd (v4f32
6432 (scalar_to_vector (loadf32 addr:$src2))), (i8 0)),
6434 (INSERTPSrm VR128:$src1, addr:$src2, imm:$src3)>;
6435 def : Pat<(v4f32 (X86insertps (v4f32 VR128:$src1), (X86PShufd
6436 (loadv4f32 addr:$src2), (i8 0)), imm:$src3)),
6437 (INSERTPSrm VR128:$src1, addr:$src2, imm:$src3)>;
6440 let Predicates = [UseAVX] in {
6441 // If we're inserting an element from a vbroadcast of a load, fold the
6442 // load into the X86insertps instruction.
6443 def : Pat<(v4f32 (X86insertps (v4f32 VR128:$src1),
6444 (X86VBroadcast (loadf32 addr:$src2)), imm:$src3)),
6445 (VINSERTPSrm VR128:$src1, addr:$src2, imm:$src3)>;
6446 def : Pat<(v4f32 (X86insertps (v4f32 VR128:$src1),
6447 (X86VBroadcast (loadv4f32 addr:$src2)), imm:$src3)),
6448 (VINSERTPSrm VR128:$src1, addr:$src2, imm:$src3)>;
6451 //===----------------------------------------------------------------------===//
6452 // SSE4.1 - Round Instructions
6453 //===----------------------------------------------------------------------===//
6455 multiclass sse41_fp_unop_rm<bits<8> opcps, bits<8> opcpd, string OpcodeStr,
6456 X86MemOperand x86memop, RegisterClass RC,
6457 PatFrag mem_frag32, PatFrag mem_frag64,
6458 Intrinsic V4F32Int, Intrinsic V2F64Int> {
6459 let ExeDomain = SSEPackedSingle in {
// Vector intrinsic operation, reg.
6462 def PSr : SS4AIi8<opcps, MRMSrcReg,
6463 (outs RC:$dst), (ins RC:$src1, i32u8imm:$src2),
6464 !strconcat(OpcodeStr,
6465 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6466 [(set RC:$dst, (V4F32Int RC:$src1, imm:$src2))],
6467 IIC_SSE_ROUNDPS_REG>, Sched<[WriteFAdd]>;
6469 // Vector intrinsic operation, mem
6470 def PSm : SS4AIi8<opcps, MRMSrcMem,
6471 (outs RC:$dst), (ins x86memop:$src1, i32u8imm:$src2),
6472 !strconcat(OpcodeStr,
6473 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6475 (V4F32Int (mem_frag32 addr:$src1),imm:$src2))],
6476 IIC_SSE_ROUNDPS_MEM>, Sched<[WriteFAddLd]>;
6477 } // ExeDomain = SSEPackedSingle
6479 let ExeDomain = SSEPackedDouble in {
6480 // Vector intrinsic operation, reg
6481 def PDr : SS4AIi8<opcpd, MRMSrcReg,
6482 (outs RC:$dst), (ins RC:$src1, i32u8imm:$src2),
6483 !strconcat(OpcodeStr,
6484 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6485 [(set RC:$dst, (V2F64Int RC:$src1, imm:$src2))],
6486 IIC_SSE_ROUNDPS_REG>, Sched<[WriteFAdd]>;
6488 // Vector intrinsic operation, mem
6489 def PDm : SS4AIi8<opcpd, MRMSrcMem,
6490 (outs RC:$dst), (ins x86memop:$src1, i32u8imm:$src2),
6491 !strconcat(OpcodeStr,
6492 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6494 (V2F64Int (mem_frag64 addr:$src1),imm:$src2))],
6495 IIC_SSE_ROUNDPS_REG>, Sched<[WriteFAddLd]>;
6496 } // ExeDomain = SSEPackedDouble
6499 multiclass sse41_fp_binop_rm<bits<8> opcss, bits<8> opcsd,
6502 Intrinsic F64Int, bit Is2Addr = 1> {
6503 let ExeDomain = GenericDomain in {
6505 let hasSideEffects = 0 in
6506 def SSr : SS4AIi8<opcss, MRMSrcReg,
6507 (outs FR32:$dst), (ins FR32:$src1, FR32:$src2, i32u8imm:$src3),
6509 !strconcat(OpcodeStr,
6510 "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6511 !strconcat(OpcodeStr,
6512 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6513 []>, Sched<[WriteFAdd]>;
6515 // Intrinsic operation, reg.
6516 let isCodeGenOnly = 1 in
6517 def SSr_Int : SS4AIi8<opcss, MRMSrcReg,
6518 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32u8imm:$src3),
6520 !strconcat(OpcodeStr,
6521 "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6522 !strconcat(OpcodeStr,
6523 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6524 [(set VR128:$dst, (F32Int VR128:$src1, VR128:$src2, imm:$src3))]>,
6527 // Intrinsic operation, mem.
6528 def SSm : SS4AIi8<opcss, MRMSrcMem,
6529 (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2, i32u8imm:$src3),
6531 !strconcat(OpcodeStr,
6532 "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6533 !strconcat(OpcodeStr,
6534 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6536 (F32Int VR128:$src1, sse_load_f32:$src2, imm:$src3))]>,
6537 Sched<[WriteFAddLd, ReadAfterLd]>;
6540 let hasSideEffects = 0 in
6541 def SDr : SS4AIi8<opcsd, MRMSrcReg,
6542 (outs FR64:$dst), (ins FR64:$src1, FR64:$src2, i32u8imm:$src3),
6544 !strconcat(OpcodeStr,
6545 "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6546 !strconcat(OpcodeStr,
6547 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6548 []>, Sched<[WriteFAdd]>;
6550 // Intrinsic operation, reg.
6551 let isCodeGenOnly = 1 in
6552 def SDr_Int : SS4AIi8<opcsd, MRMSrcReg,
6553 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32u8imm:$src3),
6555 !strconcat(OpcodeStr,
6556 "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6557 !strconcat(OpcodeStr,
6558 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6559 [(set VR128:$dst, (F64Int VR128:$src1, VR128:$src2, imm:$src3))]>,
6562 // Intrinsic operation, mem.
6563 def SDm : SS4AIi8<opcsd, MRMSrcMem,
6564 (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2, i32u8imm:$src3),
6566 !strconcat(OpcodeStr,
6567 "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6568 !strconcat(OpcodeStr,
6569 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6571 (F64Int VR128:$src1, sse_load_f64:$src2, imm:$src3))]>,
6572 Sched<[WriteFAddLd, ReadAfterLd]>;
6573 } // ExeDomain = GenericDomain
6576 // FP round - roundss, roundps, roundsd, roundpd
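// The rounding-control immediate used by the patterns below (per the SDM):
// bits [1:0] select the rounding mode (00 nearest, 01 floor, 10 ceil,
// 11 truncate), bit 2 set means "use MXCSR.RC instead of bits [1:0]", and
// bit 3 suppresses the precision (inexact) exception.  Hence the constants
// that follow: 0x1 = ffloor, 0x2 = fceil, 0x3 = ftrunc, 0x4 = frint
// (current mode), 0xC = fnearbyint (current mode, no precision exception).
// Illustrative intrinsic equivalent, for what it's worth:
//
//   __m128 f = _mm_floor_ps(x);   // == _mm_round_ps(x, 0x1)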
6577 let Predicates = [HasAVX] in {
6579 defm VROUND : sse41_fp_unop_rm<0x08, 0x09, "vround", f128mem, VR128,
6580 loadv4f32, loadv2f64,
6581 int_x86_sse41_round_ps,
6582 int_x86_sse41_round_pd>, VEX;
6583 defm VROUNDY : sse41_fp_unop_rm<0x08, 0x09, "vround", f256mem, VR256,
6584 loadv8f32, loadv4f64,
6585 int_x86_avx_round_ps_256,
6586 int_x86_avx_round_pd_256>, VEX, VEX_L;
6587 defm VROUND : sse41_fp_binop_rm<0x0A, 0x0B, "vround",
6588 int_x86_sse41_round_ss,
6589 int_x86_sse41_round_sd, 0>, VEX_4V, VEX_LIG;
6592 let Predicates = [UseAVX] in {
6593 def : Pat<(ffloor FR32:$src),
6594 (VROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x1))>;
6595 def : Pat<(f64 (ffloor FR64:$src)),
6596 (VROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x1))>;
6597 def : Pat<(f32 (fnearbyint FR32:$src)),
6598 (VROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0xC))>;
6599 def : Pat<(f64 (fnearbyint FR64:$src)),
6600 (VROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0xC))>;
6601 def : Pat<(f32 (fceil FR32:$src)),
6602 (VROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x2))>;
6603 def : Pat<(f64 (fceil FR64:$src)),
6604 (VROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x2))>;
6605 def : Pat<(f32 (frint FR32:$src)),
6606 (VROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x4))>;
6607 def : Pat<(f64 (frint FR64:$src)),
6608 (VROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x4))>;
6609 def : Pat<(f32 (ftrunc FR32:$src)),
6610 (VROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x3))>;
6611 def : Pat<(f64 (ftrunc FR64:$src)),
6612 (VROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x3))>;
6615 let Predicates = [HasAVX] in {
6616 def : Pat<(v4f32 (ffloor VR128:$src)),
6617 (VROUNDPSr VR128:$src, (i32 0x1))>;
6618 def : Pat<(v4f32 (fnearbyint VR128:$src)),
6619 (VROUNDPSr VR128:$src, (i32 0xC))>;
6620 def : Pat<(v4f32 (fceil VR128:$src)),
6621 (VROUNDPSr VR128:$src, (i32 0x2))>;
6622 def : Pat<(v4f32 (frint VR128:$src)),
6623 (VROUNDPSr VR128:$src, (i32 0x4))>;
6624 def : Pat<(v4f32 (ftrunc VR128:$src)),
6625 (VROUNDPSr VR128:$src, (i32 0x3))>;
6627 def : Pat<(v2f64 (ffloor VR128:$src)),
6628 (VROUNDPDr VR128:$src, (i32 0x1))>;
6629 def : Pat<(v2f64 (fnearbyint VR128:$src)),
6630 (VROUNDPDr VR128:$src, (i32 0xC))>;
6631 def : Pat<(v2f64 (fceil VR128:$src)),
6632 (VROUNDPDr VR128:$src, (i32 0x2))>;
6633 def : Pat<(v2f64 (frint VR128:$src)),
6634 (VROUNDPDr VR128:$src, (i32 0x4))>;
6635 def : Pat<(v2f64 (ftrunc VR128:$src)),
6636 (VROUNDPDr VR128:$src, (i32 0x3))>;
6638 def : Pat<(v8f32 (ffloor VR256:$src)),
6639 (VROUNDYPSr VR256:$src, (i32 0x1))>;
6640 def : Pat<(v8f32 (fnearbyint VR256:$src)),
6641 (VROUNDYPSr VR256:$src, (i32 0xC))>;
6642 def : Pat<(v8f32 (fceil VR256:$src)),
6643 (VROUNDYPSr VR256:$src, (i32 0x2))>;
6644 def : Pat<(v8f32 (frint VR256:$src)),
6645 (VROUNDYPSr VR256:$src, (i32 0x4))>;
6646 def : Pat<(v8f32 (ftrunc VR256:$src)),
6647 (VROUNDYPSr VR256:$src, (i32 0x3))>;
6649 def : Pat<(v4f64 (ffloor VR256:$src)),
6650 (VROUNDYPDr VR256:$src, (i32 0x1))>;
6651 def : Pat<(v4f64 (fnearbyint VR256:$src)),
6652 (VROUNDYPDr VR256:$src, (i32 0xC))>;
6653 def : Pat<(v4f64 (fceil VR256:$src)),
6654 (VROUNDYPDr VR256:$src, (i32 0x2))>;
6655 def : Pat<(v4f64 (frint VR256:$src)),
6656 (VROUNDYPDr VR256:$src, (i32 0x4))>;
6657 def : Pat<(v4f64 (ftrunc VR256:$src)),
6658 (VROUNDYPDr VR256:$src, (i32 0x3))>;
6661 defm ROUND : sse41_fp_unop_rm<0x08, 0x09, "round", f128mem, VR128,
6662 memopv4f32, memopv2f64,
6663 int_x86_sse41_round_ps, int_x86_sse41_round_pd>;
6664 let Constraints = "$src1 = $dst" in
6665 defm ROUND : sse41_fp_binop_rm<0x0A, 0x0B, "round",
6666 int_x86_sse41_round_ss, int_x86_sse41_round_sd>;
6668 let Predicates = [UseSSE41] in {
6669 def : Pat<(ffloor FR32:$src),
6670 (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x1))>;
6671 def : Pat<(f64 (ffloor FR64:$src)),
6672 (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x1))>;
6673 def : Pat<(f32 (fnearbyint FR32:$src)),
6674 (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0xC))>;
6675 def : Pat<(f64 (fnearbyint FR64:$src)),
6676 (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0xC))>;
6677 def : Pat<(f32 (fceil FR32:$src)),
6678 (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x2))>;
6679 def : Pat<(f64 (fceil FR64:$src)),
6680 (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x2))>;
6681 def : Pat<(f32 (frint FR32:$src)),
6682 (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x4))>;
6683 def : Pat<(f64 (frint FR64:$src)),
6684 (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x4))>;
6685 def : Pat<(f32 (ftrunc FR32:$src)),
6686 (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x3))>;
6687 def : Pat<(f64 (ftrunc FR64:$src)),
6688 (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x3))>;
6690 def : Pat<(v4f32 (ffloor VR128:$src)),
6691 (ROUNDPSr VR128:$src, (i32 0x1))>;
6692 def : Pat<(v4f32 (fnearbyint VR128:$src)),
6693 (ROUNDPSr VR128:$src, (i32 0xC))>;
6694 def : Pat<(v4f32 (fceil VR128:$src)),
6695 (ROUNDPSr VR128:$src, (i32 0x2))>;
6696 def : Pat<(v4f32 (frint VR128:$src)),
6697 (ROUNDPSr VR128:$src, (i32 0x4))>;
6698 def : Pat<(v4f32 (ftrunc VR128:$src)),
6699 (ROUNDPSr VR128:$src, (i32 0x3))>;
6701 def : Pat<(v2f64 (ffloor VR128:$src)),
6702 (ROUNDPDr VR128:$src, (i32 0x1))>;
6703 def : Pat<(v2f64 (fnearbyint VR128:$src)),
6704 (ROUNDPDr VR128:$src, (i32 0xC))>;
6705 def : Pat<(v2f64 (fceil VR128:$src)),
6706 (ROUNDPDr VR128:$src, (i32 0x2))>;
6707 def : Pat<(v2f64 (frint VR128:$src)),
6708 (ROUNDPDr VR128:$src, (i32 0x4))>;
6709 def : Pat<(v2f64 (ftrunc VR128:$src)),
6710 (ROUNDPDr VR128:$src, (i32 0x3))>;
6713 //===----------------------------------------------------------------------===//
6714 // SSE4.1 - Packed Bit Test
6715 //===----------------------------------------------------------------------===//
// PTEST: we lower to this in X86ISelLowering, primarily from the Intel
// intrinsic that corresponds to it.
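// Informally, for 128-bit PTEST: ZF is set when (src2 & src1) is all zeros
// and CF is set when (src2 & ~src1) is all zeros; neither source is
// modified.  An illustrative use through the <smmintrin.h> intrinsics:
//
//   if (_mm_testz_si128(mask, vec)) { /* no selected bits set in vec */ }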
6719 let Defs = [EFLAGS], Predicates = [HasAVX] in {
6720 def VPTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
6721 "vptest\t{$src2, $src1|$src1, $src2}",
6722 [(set EFLAGS, (X86ptest VR128:$src1, (v2i64 VR128:$src2)))]>,
6723 Sched<[WriteVecLogic]>, VEX;
6724 def VPTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
6725 "vptest\t{$src2, $src1|$src1, $src2}",
6726 [(set EFLAGS,(X86ptest VR128:$src1, (loadv2i64 addr:$src2)))]>,
6727 Sched<[WriteVecLogicLd, ReadAfterLd]>, VEX;
6729 def VPTESTYrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR256:$src1, VR256:$src2),
6730 "vptest\t{$src2, $src1|$src1, $src2}",
6731 [(set EFLAGS, (X86ptest VR256:$src1, (v4i64 VR256:$src2)))]>,
6732 Sched<[WriteVecLogic]>, VEX, VEX_L;
6733 def VPTESTYrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR256:$src1, i256mem:$src2),
6734 "vptest\t{$src2, $src1|$src1, $src2}",
6735 [(set EFLAGS,(X86ptest VR256:$src1, (loadv4i64 addr:$src2)))]>,
6736 Sched<[WriteVecLogicLd, ReadAfterLd]>, VEX, VEX_L;
6739 let Defs = [EFLAGS] in {
6740 def PTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
6741 "ptest\t{$src2, $src1|$src1, $src2}",
6742 [(set EFLAGS, (X86ptest VR128:$src1, (v2i64 VR128:$src2)))]>,
6743 Sched<[WriteVecLogic]>;
6744 def PTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
6745 "ptest\t{$src2, $src1|$src1, $src2}",
6746 [(set EFLAGS, (X86ptest VR128:$src1, (memopv2i64 addr:$src2)))]>,
6747 Sched<[WriteVecLogicLd, ReadAfterLd]>;
6750 // The bit test instructions below are AVX only
6751 multiclass avx_bittest<bits<8> opc, string OpcodeStr, RegisterClass RC,
6752 X86MemOperand x86memop, PatFrag mem_frag, ValueType vt> {
6753 def rr : SS48I<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
6754 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
6755 [(set EFLAGS, (X86testp RC:$src1, (vt RC:$src2)))]>,
6756 Sched<[WriteVecLogic]>, VEX;
6757 def rm : SS48I<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
6758 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
6759 [(set EFLAGS, (X86testp RC:$src1, (mem_frag addr:$src2)))]>,
6760 Sched<[WriteVecLogicLd, ReadAfterLd]>, VEX;
6763 let Defs = [EFLAGS], Predicates = [HasAVX] in {
6764 let ExeDomain = SSEPackedSingle in {
6765 defm VTESTPS : avx_bittest<0x0E, "vtestps", VR128, f128mem, loadv4f32, v4f32>;
6766 defm VTESTPSY : avx_bittest<0x0E, "vtestps", VR256, f256mem, loadv8f32, v8f32>,
6769 let ExeDomain = SSEPackedDouble in {
6770 defm VTESTPD : avx_bittest<0x0F, "vtestpd", VR128, f128mem, loadv2f64, v2f64>;
6771 defm VTESTPDY : avx_bittest<0x0F, "vtestpd", VR256, f256mem, loadv4f64, v4f64>,
6776 //===----------------------------------------------------------------------===//
6777 // SSE4.1 - Misc Instructions
6778 //===----------------------------------------------------------------------===//
6780 let Defs = [EFLAGS], Predicates = [HasPOPCNT] in {
6781 def POPCNT16rr : I<0xB8, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
6782 "popcnt{w}\t{$src, $dst|$dst, $src}",
6783 [(set GR16:$dst, (ctpop GR16:$src)), (implicit EFLAGS)],
6784 IIC_SSE_POPCNT_RR>, Sched<[WriteFAdd]>,
6786 def POPCNT16rm : I<0xB8, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
6787 "popcnt{w}\t{$src, $dst|$dst, $src}",
6788 [(set GR16:$dst, (ctpop (loadi16 addr:$src))),
6789 (implicit EFLAGS)], IIC_SSE_POPCNT_RM>,
6790 Sched<[WriteFAddLd]>, OpSize16, XS;
6792 def POPCNT32rr : I<0xB8, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
6793 "popcnt{l}\t{$src, $dst|$dst, $src}",
6794 [(set GR32:$dst, (ctpop GR32:$src)), (implicit EFLAGS)],
6795 IIC_SSE_POPCNT_RR>, Sched<[WriteFAdd]>,
6798 def POPCNT32rm : I<0xB8, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
6799 "popcnt{l}\t{$src, $dst|$dst, $src}",
6800 [(set GR32:$dst, (ctpop (loadi32 addr:$src))),
6801 (implicit EFLAGS)], IIC_SSE_POPCNT_RM>,
6802 Sched<[WriteFAddLd]>, OpSize32, XS;
6804 def POPCNT64rr : RI<0xB8, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
6805 "popcnt{q}\t{$src, $dst|$dst, $src}",
6806 [(set GR64:$dst, (ctpop GR64:$src)), (implicit EFLAGS)],
6807 IIC_SSE_POPCNT_RR>, Sched<[WriteFAdd]>, XS;
6808 def POPCNT64rm : RI<0xB8, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
6809 "popcnt{q}\t{$src, $dst|$dst, $src}",
6810 [(set GR64:$dst, (ctpop (loadi64 addr:$src))),
6811 (implicit EFLAGS)], IIC_SSE_POPCNT_RM>,
6812 Sched<[WriteFAddLd]>, XS;
6817 // SS41I_unop_rm_int_v16 - SSE 4.1 unary operator whose type is v8i16.
6818 multiclass SS41I_unop_rm_int_v16<bits<8> opc, string OpcodeStr,
6819 Intrinsic IntId128, PatFrag ld_frag,
6820 X86FoldableSchedWrite Sched> {
6821 def rr128 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
6823 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
6824 [(set VR128:$dst, (IntId128 VR128:$src))]>,
6826 def rm128 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
6828 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
6830 (IntId128 (bitconvert (ld_frag addr:$src))))]>,
6831 Sched<[Sched.Folded]>;
6834 // PHMIN has the same profile as PSAD, thus we use the same scheduling
6835 // model, although the naming is misleading.
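// For reference: PHMINPOSUW scans the eight unsigned i16 elements of the
// source, writes the minimum value to bits [15:0] of the destination, its
// index to bits [18:16], and zeroes the rest.  Illustrative intrinsic use
// (<smmintrin.h>):
//
//   __m128i r   = _mm_minpos_epu16(v);
//   uint16_t mn = (uint16_t)_mm_extract_epi16(r, 0);  // minimum
//   uint16_t ix = (uint16_t)_mm_extract_epi16(r, 1);  // its index (0..7)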
6836 let Predicates = [HasAVX] in
6837 defm VPHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "vphminposuw",
6838 int_x86_sse41_phminposuw, loadv2i64,
6840 defm PHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "phminposuw",
6841 int_x86_sse41_phminposuw, memopv2i64,
6844 /// SS48I_binop_rm - Simple SSE41 binary operator.
6845 multiclass SS48I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
6846 ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
6847 X86MemOperand x86memop, bit Is2Addr = 1,
6848 OpndItins itins = SSE_INTALU_ITINS_P> {
6849 let isCommutable = 1 in
6850 def rr : SS48I<opc, MRMSrcReg, (outs RC:$dst),
6851 (ins RC:$src1, RC:$src2),
6853 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
6854 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
6855 [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2)))]>,
6856 Sched<[itins.Sched]>;
6857 def rm : SS48I<opc, MRMSrcMem, (outs RC:$dst),
6858 (ins RC:$src1, x86memop:$src2),
6860 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
6861 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
6863 (OpVT (OpNode RC:$src1, (bitconvert (memop_frag addr:$src2)))))]>,
6864 Sched<[itins.Sched.Folded, ReadAfterLd]>;
6867 /// SS48I_binop_rm2 - Simple SSE41 binary operator with different src and dst
6869 multiclass SS48I_binop_rm2<bits<8> opc, string OpcodeStr, SDNode OpNode,
6870 ValueType DstVT, ValueType SrcVT, RegisterClass RC,
6871 PatFrag memop_frag, X86MemOperand x86memop,
6873 bit IsCommutable = 0, bit Is2Addr = 1> {
6874 let isCommutable = IsCommutable in
6875 def rr : SS48I<opc, MRMSrcReg, (outs RC:$dst),
6876 (ins RC:$src1, RC:$src2),
6878 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
6879 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
6880 [(set RC:$dst, (DstVT (OpNode (SrcVT RC:$src1), RC:$src2)))]>,
6881 Sched<[itins.Sched]>;
6882 def rm : SS48I<opc, MRMSrcMem, (outs RC:$dst),
6883 (ins RC:$src1, x86memop:$src2),
6885 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
6886 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
6887 [(set RC:$dst, (DstVT (OpNode (SrcVT RC:$src1),
6888 (bitconvert (memop_frag addr:$src2)))))]>,
6889 Sched<[itins.Sched.Folded, ReadAfterLd]>;
6892 let Predicates = [HasAVX, NoVLX] in {
6893 let isCommutable = 0 in
6894 defm VPMINSB : SS48I_binop_rm<0x38, "vpminsb", X86smin, v16i8, VR128,
6895 loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
6897 defm VPMINSD : SS48I_binop_rm<0x39, "vpminsd", X86smin, v4i32, VR128,
6898 loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
6900 defm VPMINUD : SS48I_binop_rm<0x3B, "vpminud", X86umin, v4i32, VR128,
6901 loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
6903 defm VPMINUW : SS48I_binop_rm<0x3A, "vpminuw", X86umin, v8i16, VR128,
6904 loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
6906 defm VPMAXSB : SS48I_binop_rm<0x3C, "vpmaxsb", X86smax, v16i8, VR128,
6907 loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
6909 defm VPMAXSD : SS48I_binop_rm<0x3D, "vpmaxsd", X86smax, v4i32, VR128,
6910 loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
6912 defm VPMAXUD : SS48I_binop_rm<0x3F, "vpmaxud", X86umax, v4i32, VR128,
6913 loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
6915 defm VPMAXUW : SS48I_binop_rm<0x3E, "vpmaxuw", X86umax, v8i16, VR128,
6916 loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
6918 defm VPMULDQ : SS48I_binop_rm2<0x28, "vpmuldq", X86pmuldq, v2i64, v4i32,
6919 VR128, loadv2i64, i128mem,
6920 SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V;
6923 let Predicates = [HasAVX2, NoVLX] in {
6924 let isCommutable = 0 in
6925 defm VPMINSBY : SS48I_binop_rm<0x38, "vpminsb", X86smin, v32i8, VR256,
6926 loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
6928 defm VPMINSDY : SS48I_binop_rm<0x39, "vpminsd", X86smin, v8i32, VR256,
6929 loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
6931 defm VPMINUDY : SS48I_binop_rm<0x3B, "vpminud", X86umin, v8i32, VR256,
6932 loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
6934 defm VPMINUWY : SS48I_binop_rm<0x3A, "vpminuw", X86umin, v16i16, VR256,
6935 loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
6937 defm VPMAXSBY : SS48I_binop_rm<0x3C, "vpmaxsb", X86smax, v32i8, VR256,
6938 loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
6940 defm VPMAXSDY : SS48I_binop_rm<0x3D, "vpmaxsd", X86smax, v8i32, VR256,
6941 loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
6943 defm VPMAXUDY : SS48I_binop_rm<0x3F, "vpmaxud", X86umax, v8i32, VR256,
6944 loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
6946 defm VPMAXUWY : SS48I_binop_rm<0x3E, "vpmaxuw", X86umax, v16i16, VR256,
6947 loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
6949 defm VPMULDQY : SS48I_binop_rm2<0x28, "vpmuldq", X86pmuldq, v4i64, v8i32,
6950 VR256, loadv4i64, i256mem,
6951 SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V, VEX_L;
6954 let Constraints = "$src1 = $dst" in {
6955 let isCommutable = 0 in
6956 defm PMINSB : SS48I_binop_rm<0x38, "pminsb", X86smin, v16i8, VR128,
6957 memopv2i64, i128mem, 1, SSE_INTALU_ITINS_P>;
6958 defm PMINSD : SS48I_binop_rm<0x39, "pminsd", X86smin, v4i32, VR128,
6959 memopv2i64, i128mem, 1, SSE_INTALU_ITINS_P>;
6960 defm PMINUD : SS48I_binop_rm<0x3B, "pminud", X86umin, v4i32, VR128,
6961 memopv2i64, i128mem, 1, SSE_INTALU_ITINS_P>;
6962 defm PMINUW : SS48I_binop_rm<0x3A, "pminuw", X86umin, v8i16, VR128,
6963 memopv2i64, i128mem, 1, SSE_INTALU_ITINS_P>;
6964 defm PMAXSB : SS48I_binop_rm<0x3C, "pmaxsb", X86smax, v16i8, VR128,
6965 memopv2i64, i128mem, 1, SSE_INTALU_ITINS_P>;
6966 defm PMAXSD : SS48I_binop_rm<0x3D, "pmaxsd", X86smax, v4i32, VR128,
6967 memopv2i64, i128mem, 1, SSE_INTALU_ITINS_P>;
6968 defm PMAXUD : SS48I_binop_rm<0x3F, "pmaxud", X86umax, v4i32, VR128,
6969 memopv2i64, i128mem, 1, SSE_INTALU_ITINS_P>;
6970 defm PMAXUW : SS48I_binop_rm<0x3E, "pmaxuw", X86umax, v8i16, VR128,
6971 memopv2i64, i128mem, 1, SSE_INTALU_ITINS_P>;
6972 defm PMULDQ : SS48I_binop_rm2<0x28, "pmuldq", X86pmuldq, v2i64, v4i32,
6973 VR128, memopv2i64, i128mem,
6974 SSE_INTMUL_ITINS_P, 1>;
6975 }
6977 let Predicates = [HasAVX, NoVLX] in {
6978 defm VPMULLD : SS48I_binop_rm<0x40, "vpmulld", mul, v4i32, VR128,
6979 memopv2i64, i128mem, 0, SSE_PMULLD_ITINS>,
6980 VEX_4V;
6981 defm VPCMPEQQ : SS48I_binop_rm<0x29, "vpcmpeqq", X86pcmpeq, v2i64, VR128,
6982 memopv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
6983 VEX_4V;
6984 }
6985 let Predicates = [HasAVX2] in {
6986 defm VPMULLDY : SS48I_binop_rm<0x40, "vpmulld", mul, v8i32, VR256,
6987 loadv4i64, i256mem, 0, SSE_PMULLD_ITINS>,
6988 VEX_4V, VEX_L;
6989 defm VPCMPEQQY : SS48I_binop_rm<0x29, "vpcmpeqq", X86pcmpeq, v4i64, VR256,
6990 loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
6991 VEX_4V, VEX_L;
6992 }
6994 let Constraints = "$src1 = $dst" in {
6995 defm PMULLD : SS48I_binop_rm<0x40, "pmulld", mul, v4i32, VR128,
6996 memopv2i64, i128mem, 1, SSE_PMULLD_ITINS>;
6997 defm PCMPEQQ : SS48I_binop_rm<0x29, "pcmpeqq", X86pcmpeq, v2i64, VR128,
6998 memopv2i64, i128mem, 1, SSE_INTALUQ_ITINS_P>;
6999 }
7001 /// SS41I_binop_rmi_int - SSE 4.1 binary operator with 8-bit immediate
7002 multiclass SS41I_binop_rmi_int<bits<8> opc, string OpcodeStr,
7003 Intrinsic IntId, RegisterClass RC, PatFrag memop_frag,
7004 X86MemOperand x86memop, bit Is2Addr = 1,
7005 OpndItins itins = DEFAULT_ITINS> {
7006 let isCommutable = 1 in
7007 def rri : SS4AIi8<opc, MRMSrcReg, (outs RC:$dst),
7008 (ins RC:$src1, RC:$src2, u8imm:$src3),
7009 !if(Is2Addr,
7010 !strconcat(OpcodeStr,
7011 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
7012 !strconcat(OpcodeStr,
7013 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
7014 [(set RC:$dst, (IntId RC:$src1, RC:$src2, imm:$src3))], itins.rr>,
7015 Sched<[itins.Sched]>;
7016 def rmi : SS4AIi8<opc, MRMSrcMem, (outs RC:$dst),
7017 (ins RC:$src1, x86memop:$src2, u8imm:$src3),
7018 !if(Is2Addr,
7019 !strconcat(OpcodeStr,
7020 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
7021 !strconcat(OpcodeStr,
7022 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
7023 [(set RC:$dst,
7024 (IntId RC:$src1,
7025 (bitconvert (memop_frag addr:$src2)), imm:$src3))], itins.rm>,
7026 Sched<[itins.Sched.Folded, ReadAfterLd]>;
7027 }
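// Note (editorial, not in the original source): instantiations of this helper
// produce an "rri" register form and a memory-folding "rmi" form whose last
// operand is an 8-bit control immediate; for example the VBLENDPS defm below
// selects between $src1 and $src2 per element according to that immediate.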
7029 let Predicates = [HasAVX] in {
7030 let isCommutable = 0 in {
7031 defm VMPSADBW : SS41I_binop_rmi_int<0x42, "vmpsadbw", int_x86_sse41_mpsadbw,
7032 VR128, loadv2i64, i128mem, 0,
7033 DEFAULT_ITINS_MPSADSCHED>, VEX_4V;
7036 let ExeDomain = SSEPackedSingle in {
7037 defm VBLENDPS : SS41I_binop_rmi_int<0x0C, "vblendps", int_x86_sse41_blendps,
7038 VR128, loadv4f32, f128mem, 0,
7039 DEFAULT_ITINS_FBLENDSCHED>, VEX_4V;
7040 defm VBLENDPSY : SS41I_binop_rmi_int<0x0C, "vblendps",
7041 int_x86_avx_blend_ps_256, VR256, loadv8f32,
7042 f256mem, 0, DEFAULT_ITINS_FBLENDSCHED>,
7043 VEX_4V, VEX_L;
7044 }
7045 let ExeDomain = SSEPackedDouble in {
7046 defm VBLENDPD : SS41I_binop_rmi_int<0x0D, "vblendpd", int_x86_sse41_blendpd,
7047 VR128, loadv2f64, f128mem, 0,
7048 DEFAULT_ITINS_FBLENDSCHED>, VEX_4V;
7049 defm VBLENDPDY : SS41I_binop_rmi_int<0x0D, "vblendpd",
7050 int_x86_avx_blend_pd_256,VR256, loadv4f64,
7051 f256mem, 0, DEFAULT_ITINS_FBLENDSCHED>,
7052 VEX_4V, VEX_L;
7053 }
7054 defm VPBLENDW : SS41I_binop_rmi_int<0x0E, "vpblendw", int_x86_sse41_pblendw,
7055 VR128, loadv2i64, i128mem, 0,
7056 DEFAULT_ITINS_BLENDSCHED>, VEX_4V;
7058 let ExeDomain = SSEPackedSingle in
7059 defm VDPPS : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_sse41_dpps,
7060 VR128, loadv4f32, f128mem, 0,
7061 SSE_DPPS_ITINS>, VEX_4V;
7062 let ExeDomain = SSEPackedDouble in
7063 defm VDPPD : SS41I_binop_rmi_int<0x41, "vdppd", int_x86_sse41_dppd,
7064 VR128, loadv2f64, f128mem, 0,
7065 SSE_DPPS_ITINS>, VEX_4V;
7066 let ExeDomain = SSEPackedSingle in
7067 defm VDPPSY : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_avx_dp_ps_256,
7068 VR256, loadv8f32, i256mem, 0,
7069 SSE_DPPS_ITINS>, VEX_4V, VEX_L;
7070 }
7072 let Predicates = [HasAVX2] in {
7073 let isCommutable = 0 in {
7074 defm VMPSADBWY : SS41I_binop_rmi_int<0x42, "vmpsadbw", int_x86_avx2_mpsadbw,
7075 VR256, loadv4i64, i256mem, 0,
7076 DEFAULT_ITINS_MPSADSCHED>, VEX_4V, VEX_L;
7078 defm VPBLENDWY : SS41I_binop_rmi_int<0x0E, "vpblendw", int_x86_avx2_pblendw,
7079 VR256, loadv4i64, i256mem, 0,
7080 DEFAULT_ITINS_BLENDSCHED>, VEX_4V, VEX_L;
7081 }
7083 let Constraints = "$src1 = $dst" in {
7084 let isCommutable = 0 in {
7085 defm MPSADBW : SS41I_binop_rmi_int<0x42, "mpsadbw", int_x86_sse41_mpsadbw,
7086 VR128, memopv2i64, i128mem,
7087 1, SSE_MPSADBW_ITINS>;
7089 let ExeDomain = SSEPackedSingle in
7090 defm BLENDPS : SS41I_binop_rmi_int<0x0C, "blendps", int_x86_sse41_blendps,
7091 VR128, memopv4f32, f128mem,
7092 1, SSE_INTALU_ITINS_FBLEND_P>;
7093 let ExeDomain = SSEPackedDouble in
7094 defm BLENDPD : SS41I_binop_rmi_int<0x0D, "blendpd", int_x86_sse41_blendpd,
7095 VR128, memopv2f64, f128mem,
7096 1, SSE_INTALU_ITINS_FBLEND_P>;
7097 defm PBLENDW : SS41I_binop_rmi_int<0x0E, "pblendw", int_x86_sse41_pblendw,
7098 VR128, memopv2i64, i128mem,
7099 1, SSE_INTALU_ITINS_BLEND_P>;
7100 let ExeDomain = SSEPackedSingle in
7101 defm DPPS : SS41I_binop_rmi_int<0x40, "dpps", int_x86_sse41_dpps,
7102 VR128, memopv4f32, f128mem, 1,
7103 SSE_DPPS_ITINS>;
7104 let ExeDomain = SSEPackedDouble in
7105 defm DPPD : SS41I_binop_rmi_int<0x41, "dppd", int_x86_sse41_dppd,
7106 VR128, memopv2f64, f128mem, 1,
7107 SSE_DPPD_ITINS>;
7108 }
7110 /// SS41I_quaternary_int_avx - AVX SSE 4.1 with 4 operands
7111 multiclass SS41I_quaternary_int_avx<bits<8> opc, string OpcodeStr,
7112 RegisterClass RC, X86MemOperand x86memop,
7113 PatFrag mem_frag, Intrinsic IntId,
7114 X86FoldableSchedWrite Sched> {
7115 def rr : Ii8<opc, MRMSrcReg, (outs RC:$dst),
7116 (ins RC:$src1, RC:$src2, RC:$src3),
7117 !strconcat(OpcodeStr,
7118 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
7119 [(set RC:$dst, (IntId RC:$src1, RC:$src2, RC:$src3))],
7120 NoItinerary, SSEPackedInt>, TAPD, VEX_4V, VEX_I8IMM,
7121 Sched<[Sched]>;
7123 def rm : Ii8<opc, MRMSrcMem, (outs RC:$dst),
7124 (ins RC:$src1, x86memop:$src2, RC:$src3),
7125 !strconcat(OpcodeStr,
7126 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
7127 [(set RC:$dst,
7128 (IntId RC:$src1, (bitconvert (mem_frag addr:$src2)),
7129 RC:$src3))],
7130 NoItinerary, SSEPackedInt>, TAPD, VEX_4V, VEX_I8IMM,
7131 Sched<[Sched.Folded, ReadAfterLd]>;
7132 }
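// Note (editorial, not in the original source): in these VEX-encoded blends the
// third register operand ($src3) supplies the per-element mask and is encoded
// in the trailing immediate byte (VEX_I8IMM); the VBLENDVPD/VBLENDVPS/VPBLENDVB
// defms below instantiate this helper for the 128- and 256-bit forms.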
7134 let Predicates = [HasAVX] in {
7135 let ExeDomain = SSEPackedDouble in {
7136 defm VBLENDVPD : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR128, f128mem,
7137 loadv2f64, int_x86_sse41_blendvpd,
7138 WriteFVarBlend>;
7139 defm VBLENDVPDY : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR256, f256mem,
7140 loadv4f64, int_x86_avx_blendv_pd_256,
7141 WriteFVarBlend>, VEX_L;
7142 } // ExeDomain = SSEPackedDouble
7143 let ExeDomain = SSEPackedSingle in {
7144 defm VBLENDVPS : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR128, f128mem,
7145 loadv4f32, int_x86_sse41_blendvps,
7146 WriteFVarBlend>;
7147 defm VBLENDVPSY : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR256, f256mem,
7148 loadv8f32, int_x86_avx_blendv_ps_256,
7149 WriteFVarBlend>, VEX_L;
7150 } // ExeDomain = SSEPackedSingle
7151 defm VPBLENDVB : SS41I_quaternary_int_avx<0x4C, "vpblendvb", VR128, i128mem,
7152 loadv2i64, int_x86_sse41_pblendvb,
7153 WriteVarBlend>;
7154 }
7156 let Predicates = [HasAVX2] in {
7157 defm VPBLENDVBY : SS41I_quaternary_int_avx<0x4C, "vpblendvb", VR256, i256mem,
7158 loadv4i64, int_x86_avx2_pblendvb,
7159 WriteVarBlend>, VEX_L;
7162 let Predicates = [HasAVX] in {
7163 def : Pat<(v16i8 (vselect (v16i8 VR128:$mask), (v16i8 VR128:$src1),
7164 (v16i8 VR128:$src2))),
7165 (VPBLENDVBrr VR128:$src2, VR128:$src1, VR128:$mask)>;
7166 def : Pat<(v4i32 (vselect (v4i32 VR128:$mask), (v4i32 VR128:$src1),
7167 (v4i32 VR128:$src2))),
7168 (VBLENDVPSrr VR128:$src2, VR128:$src1, VR128:$mask)>;
7169 def : Pat<(v4f32 (vselect (v4i32 VR128:$mask), (v4f32 VR128:$src1),
7170 (v4f32 VR128:$src2))),
7171 (VBLENDVPSrr VR128:$src2, VR128:$src1, VR128:$mask)>;
7172 def : Pat<(v2i64 (vselect (v2i64 VR128:$mask), (v2i64 VR128:$src1),
7173 (v2i64 VR128:$src2))),
7174 (VBLENDVPDrr VR128:$src2, VR128:$src1, VR128:$mask)>;
7175 def : Pat<(v2f64 (vselect (v2i64 VR128:$mask), (v2f64 VR128:$src1),
7176 (v2f64 VR128:$src2))),
7177 (VBLENDVPDrr VR128:$src2, VR128:$src1, VR128:$mask)>;
7178 def : Pat<(v8i32 (vselect (v8i32 VR256:$mask), (v8i32 VR256:$src1),
7179 (v8i32 VR256:$src2))),
7180 (VBLENDVPSYrr VR256:$src2, VR256:$src1, VR256:$mask)>;
7181 def : Pat<(v8f32 (vselect (v8i32 VR256:$mask), (v8f32 VR256:$src1),
7182 (v8f32 VR256:$src2))),
7183 (VBLENDVPSYrr VR256:$src2, VR256:$src1, VR256:$mask)>;
7184 def : Pat<(v4i64 (vselect (v4i64 VR256:$mask), (v4i64 VR256:$src1),
7185 (v4i64 VR256:$src2))),
7186 (VBLENDVPDYrr VR256:$src2, VR256:$src1, VR256:$mask)>;
7187 def : Pat<(v4f64 (vselect (v4i64 VR256:$mask), (v4f64 VR256:$src1),
7188 (v4f64 VR256:$src2))),
7189 (VBLENDVPDYrr VR256:$src2, VR256:$src1, VR256:$mask)>;
7191 def : Pat<(v8f32 (X86Blendi (v8f32 VR256:$src1), (v8f32 VR256:$src2),
7192 (imm:$mask))),
7193 (VBLENDPSYrri VR256:$src1, VR256:$src2, imm:$mask)>;
7194 def : Pat<(v4f64 (X86Blendi (v4f64 VR256:$src1), (v4f64 VR256:$src2),
7195 (imm:$mask))),
7196 (VBLENDPDYrri VR256:$src1, VR256:$src2, imm:$mask)>;
7198 def : Pat<(v8i16 (X86Blendi (v8i16 VR128:$src1), (v8i16 VR128:$src2),
7199 (imm:$mask))),
7200 (VPBLENDWrri VR128:$src1, VR128:$src2, imm:$mask)>;
7201 def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$src1), (v4f32 VR128:$src2),
7202 (imm:$mask))),
7203 (VBLENDPSrri VR128:$src1, VR128:$src2, imm:$mask)>;
7204 def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$src1), (v2f64 VR128:$src2),
7205 (imm:$mask))),
7206 (VBLENDPDrri VR128:$src1, VR128:$src2, imm:$mask)>;
7207 }
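// Note (editorial, not in the original source): in the vselect patterns above
// the operands are swapped on purpose. vselect picks from $src1 where a mask
// bit is set, while the BLENDV instructions pick from their second register
// operand where the mask sign bit is set, so $src2 and $src1 trade places in
// the emitted instruction.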
7209 let Predicates = [HasAVX2] in {
7210 def : Pat<(v32i8 (vselect (v32i8 VR256:$mask), (v32i8 VR256:$src1),
7211 (v32i8 VR256:$src2))),
7212 (VPBLENDVBYrr VR256:$src2, VR256:$src1, VR256:$mask)>;
7213 def : Pat<(v16i16 (X86Blendi (v16i16 VR256:$src1), (v16i16 VR256:$src2),
7214 (imm:$mask))),
7215 (VPBLENDWYrri VR256:$src1, VR256:$src2, imm:$mask)>;
7216 }
7219 let Predicates = [UseAVX] in {
7220 let AddedComplexity = 15 in {
7221 // Move scalar to XMM zero-extended, zeroing a VR128 then do a
7222 // MOVS{S,D} to the lower bits.
7223 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32:$src)))),
7224 (VMOVSSrr (v4f32 (V_SET0)), FR32:$src)>;
7225 def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
7226 (VBLENDPSrri (v4f32 (V_SET0)), VR128:$src, (i8 1))>;
7227 def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
7228 (VPBLENDWrri (v4i32 (V_SET0)), VR128:$src, (i8 3))>;
7229 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64:$src)))),
7230 (VMOVSDrr (v2f64 (V_SET0)), FR64:$src)>;
7232 // Move low f32 and clear high bits.
7233 def : Pat<(v8f32 (X86vzmovl (v8f32 VR256:$src))),
7234 (VBLENDPSYrri (v8f32 (AVX_SET0)), VR256:$src, (i8 1))>;
7235 def : Pat<(v8i32 (X86vzmovl (v8i32 VR256:$src))),
7236 (VBLENDPSYrri (v8i32 (AVX_SET0)), VR256:$src, (i8 1))>;
7239 def : Pat<(v8f32 (X86vzmovl (insert_subvector undef,
7240 (v4f32 (scalar_to_vector FR32:$src)), (iPTR 0)))),
7241 (SUBREG_TO_REG (i32 0),
7242 (v4f32 (VMOVSSrr (v4f32 (V_SET0)), FR32:$src)),
7243 sub_xmm)>;
7244 def : Pat<(v4f64 (X86vzmovl (insert_subvector undef,
7245 (v2f64 (scalar_to_vector FR64:$src)), (iPTR 0)))),
7246 (SUBREG_TO_REG (i64 0),
7247 (v2f64 (VMOVSDrr (v2f64 (V_SET0)), FR64:$src)),
7248 sub_xmm)>;
7250 // Move low f64 and clear high bits.
7251 def : Pat<(v4f64 (X86vzmovl (v4f64 VR256:$src))),
7252 (VBLENDPDYrri (v4f64 (AVX_SET0)), VR256:$src, (i8 1))>;
7254 def : Pat<(v4i64 (X86vzmovl (v4i64 VR256:$src))),
7255 (VBLENDPDYrri (v4i64 (AVX_SET0)), VR256:$src, (i8 1))>;
7258 let Predicates = [UseSSE41] in {
7259 // With SSE41 we can use blends for these patterns.
7260 def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
7261 (BLENDPSrri (v4f32 (V_SET0)), VR128:$src, (i8 1))>;
7262 def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
7263 (PBLENDWrri (v4i32 (V_SET0)), VR128:$src, (i8 3))>;
7264 def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
7265 (BLENDPDrri (v2f64 (V_SET0)), VR128:$src, (i8 1))>;
7266 }
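// Note (editorial, not in the original source): the X86vzmovl patterns above
// use an immediate blend against an all-zero register: blend mask 1 keeps only
// element 0 from $src (PBLENDW uses mask 3, i.e. the two low words of a dword),
// and all other elements are taken from the zero vector. This implements
// "move low element and zero the rest" with a single blend.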
7269 /// SS41I_ternary_int - SSE 4.1 ternary operator
7270 let Uses = [XMM0], Constraints = "$src1 = $dst" in {
7271 multiclass SS41I_ternary_int<bits<8> opc, string OpcodeStr, PatFrag mem_frag,
7272 X86MemOperand x86memop, Intrinsic IntId,
7273 OpndItins itins = DEFAULT_ITINS> {
7274 def rr0 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
7275 (ins VR128:$src1, VR128:$src2),
7276 !strconcat(OpcodeStr,
7277 "\t{$src2, $dst|$dst, $src2}"),
7278 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2, XMM0))],
7279 itins.rr>, Sched<[itins.Sched]>;
7281 def rm0 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
7282 (ins VR128:$src1, x86memop:$src2),
7283 !strconcat(OpcodeStr,
7284 "\t{$src2, $dst|$dst, $src2}"),
7285 [(set VR128:$dst,
7286 (IntId VR128:$src1,
7287 (bitconvert (mem_frag addr:$src2)), XMM0))],
7288 itins.rm>, Sched<[itins.Sched.Folded, ReadAfterLd]>;
7289 }
7290 }
7292 let ExeDomain = SSEPackedDouble in
7293 defm BLENDVPD : SS41I_ternary_int<0x15, "blendvpd", memopv2f64, f128mem,
7294 int_x86_sse41_blendvpd,
7295 DEFAULT_ITINS_FBLENDSCHED>;
7296 let ExeDomain = SSEPackedSingle in
7297 defm BLENDVPS : SS41I_ternary_int<0x14, "blendvps", memopv4f32, f128mem,
7298 int_x86_sse41_blendvps,
7299 DEFAULT_ITINS_FBLENDSCHED>;
7300 defm PBLENDVB : SS41I_ternary_int<0x10, "pblendvb", memopv2i64, i128mem,
7301 int_x86_sse41_pblendvb,
7302 DEFAULT_ITINS_VARBLENDSCHED>;
7304 // Aliases with the implicit xmm0 argument
7305 def : InstAlias<"blendvpd\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}",
7306 (BLENDVPDrr0 VR128:$dst, VR128:$src2)>;
7307 def : InstAlias<"blendvpd\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}",
7308 (BLENDVPDrm0 VR128:$dst, f128mem:$src2)>;
7309 def : InstAlias<"blendvps\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}",
7310 (BLENDVPSrr0 VR128:$dst, VR128:$src2)>;
7311 def : InstAlias<"blendvps\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}",
7312 (BLENDVPSrm0 VR128:$dst, f128mem:$src2)>;
7313 def : InstAlias<"pblendvb\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}",
7314 (PBLENDVBrr0 VR128:$dst, VR128:$src2)>;
7315 def : InstAlias<"pblendvb\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}",
7316 (PBLENDVBrm0 VR128:$dst, i128mem:$src2)>;
7318 let Predicates = [UseSSE41] in {
7319 def : Pat<(v16i8 (vselect (v16i8 XMM0), (v16i8 VR128:$src1),
7320 (v16i8 VR128:$src2))),
7321 (PBLENDVBrr0 VR128:$src2, VR128:$src1)>;
7322 def : Pat<(v4i32 (vselect (v4i32 XMM0), (v4i32 VR128:$src1),
7323 (v4i32 VR128:$src2))),
7324 (BLENDVPSrr0 VR128:$src2, VR128:$src1)>;
7325 def : Pat<(v4f32 (vselect (v4i32 XMM0), (v4f32 VR128:$src1),
7326 (v4f32 VR128:$src2))),
7327 (BLENDVPSrr0 VR128:$src2, VR128:$src1)>;
7328 def : Pat<(v2i64 (vselect (v2i64 XMM0), (v2i64 VR128:$src1),
7329 (v2i64 VR128:$src2))),
7330 (BLENDVPDrr0 VR128:$src2, VR128:$src1)>;
7331 def : Pat<(v2f64 (vselect (v2i64 XMM0), (v2f64 VR128:$src1),
7332 (v2f64 VR128:$src2))),
7333 (BLENDVPDrr0 VR128:$src2, VR128:$src1)>;
7335 def : Pat<(v8i16 (X86Blendi (v8i16 VR128:$src1), (v8i16 VR128:$src2),
7336 (imm:$mask))),
7337 (PBLENDWrri VR128:$src1, VR128:$src2, imm:$mask)>;
7338 def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$src1), (v4f32 VR128:$src2),
7339 (imm:$mask))),
7340 (BLENDPSrri VR128:$src1, VR128:$src2, imm:$mask)>;
7341 def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$src1), (v2f64 VR128:$src2),
7342 (imm:$mask))),
7343 (BLENDPDrri VR128:$src1, VR128:$src2, imm:$mask)>;
7344 }
7347 let SchedRW = [WriteLoad] in {
7348 let Predicates = [HasAVX] in
7349 def VMOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
7350 "vmovntdqa\t{$src, $dst|$dst, $src}",
7351 [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
7352 VEX;
7353 let Predicates = [HasAVX2] in
7354 def VMOVNTDQAYrm : SS48I<0x2A, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
7355 "vmovntdqa\t{$src, $dst|$dst, $src}",
7356 [(set VR256:$dst, (int_x86_avx2_movntdqa addr:$src))]>,
7357 VEX, VEX_L;
7358 def MOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
7359 "movntdqa\t{$src, $dst|$dst, $src}",
7360 [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>;
7361 }
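// Note (editorial, not in the original source): MOVNTDQA is a streaming
// (non-temporal hint) aligned 16-byte load; the intrinsic used above
// corresponds to the C-level _mm_stream_load_si128, and the VEX forms provide
// the AVX and AVX2 (256-bit) encodings.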
7363 //===----------------------------------------------------------------------===//
7364 // SSE4.2 - Compare Instructions
7365 //===----------------------------------------------------------------------===//
7367 /// SS42I_binop_rm - Simple SSE 4.2 binary operator
7368 multiclass SS42I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
7369 ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
7370 X86MemOperand x86memop, bit Is2Addr = 1> {
7371 def rr : SS428I<opc, MRMSrcReg, (outs RC:$dst),
7372 (ins RC:$src1, RC:$src2),
7373 !if(Is2Addr,
7374 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
7375 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
7376 [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2)))]>;
7377 def rm : SS428I<opc, MRMSrcMem, (outs RC:$dst),
7378 (ins RC:$src1, x86memop:$src2),
7379 !if(Is2Addr,
7380 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
7381 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
7382 [(set RC:$dst,
7383 (OpVT (OpNode RC:$src1, (memop_frag addr:$src2))))]>;
7384 }
7386 let Predicates = [HasAVX] in
7387 defm VPCMPGTQ : SS42I_binop_rm<0x37, "vpcmpgtq", X86pcmpgt, v2i64, VR128,
7388 loadv2i64, i128mem, 0>, VEX_4V;
7390 let Predicates = [HasAVX2] in
7391 defm VPCMPGTQY : SS42I_binop_rm<0x37, "vpcmpgtq", X86pcmpgt, v4i64, VR256,
7392 loadv4i64, i256mem, 0>, VEX_4V, VEX_L;
7394 let Constraints = "$src1 = $dst" in
7395 defm PCMPGTQ : SS42I_binop_rm<0x37, "pcmpgtq", X86pcmpgt, v2i64, VR128,
7396 memopv2i64, i128mem>;
7398 //===----------------------------------------------------------------------===//
7399 // SSE4.2 - String/text Processing Instructions
7400 //===----------------------------------------------------------------------===//
7402 // Packed Compare Implicit Length Strings, Return Mask
7403 multiclass pseudo_pcmpistrm<string asm, PatFrag ld_frag> {
7404 def REG : PseudoI<(outs VR128:$dst),
7405 (ins VR128:$src1, VR128:$src2, u8imm:$src3),
7406 [(set VR128:$dst, (int_x86_sse42_pcmpistrm128 VR128:$src1, VR128:$src2,
7407 imm:$src3))]>;
7408 def MEM : PseudoI<(outs VR128:$dst),
7409 (ins VR128:$src1, i128mem:$src2, u8imm:$src3),
7410 [(set VR128:$dst, (int_x86_sse42_pcmpistrm128 VR128:$src1,
7411 (bc_v16i8 (ld_frag addr:$src2)), imm:$src3))]>;
7414 let Defs = [EFLAGS], usesCustomInserter = 1 in {
7415 defm VPCMPISTRM128 : pseudo_pcmpistrm<"#VPCMPISTRM128", loadv2i64>,
7416 Requires<[HasAVX]>;
7417 defm PCMPISTRM128 : pseudo_pcmpistrm<"#PCMPISTRM128", memopv2i64>,
7418 Requires<[UseSSE42]>;
7419 }
7421 multiclass pcmpistrm_SS42AI<string asm> {
7422 def rr : SS42AI<0x62, MRMSrcReg, (outs),
7423 (ins VR128:$src1, VR128:$src2, u8imm:$src3),
7424 !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
7425 []>, Sched<[WritePCmpIStrM]>;
7427 def rm :SS42AI<0x62, MRMSrcMem, (outs),
7428 (ins VR128:$src1, i128mem:$src2, u8imm:$src3),
7429 !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
7430 []>, Sched<[WritePCmpIStrMLd, ReadAfterLd]>;
7433 let Defs = [XMM0, EFLAGS], hasSideEffects = 0 in {
7434 let Predicates = [HasAVX] in
7435 defm VPCMPISTRM128 : pcmpistrm_SS42AI<"vpcmpistrm">, VEX;
7436 defm PCMPISTRM128 : pcmpistrm_SS42AI<"pcmpistrm">;
7437 }
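// Note (editorial, not in the original source): the string-compare operations
// are modelled twice above: a PseudoI carries the selection pattern and is
// marked usesCustomInserter so the backend can expand it, while the real
// pcmpistrm encodings are defined with no pattern and implicitly define XMM0
// and EFLAGS. The same pseudo/real split is repeated for the pcmpestrm,
// pcmpistri and pcmpestri variants below.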
7439 // Packed Compare Explicit Length Strings, Return Mask
7440 multiclass pseudo_pcmpestrm<string asm, PatFrag ld_frag> {
7441 def REG : PseudoI<(outs VR128:$dst),
7442 (ins VR128:$src1, VR128:$src3, u8imm:$src5),
7443 [(set VR128:$dst, (int_x86_sse42_pcmpestrm128
7444 VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5))]>;
7445 def MEM : PseudoI<(outs VR128:$dst),
7446 (ins VR128:$src1, i128mem:$src3, u8imm:$src5),
7447 [(set VR128:$dst, (int_x86_sse42_pcmpestrm128 VR128:$src1, EAX,
7448 (bc_v16i8 (ld_frag addr:$src3)), EDX, imm:$src5))]>;
7451 let Defs = [EFLAGS], Uses = [EAX, EDX], usesCustomInserter = 1 in {
7452 defm VPCMPESTRM128 : pseudo_pcmpestrm<"#VPCMPESTRM128", loadv2i64>,
7453 Requires<[HasAVX]>;
7454 defm PCMPESTRM128 : pseudo_pcmpestrm<"#PCMPESTRM128", memopv2i64>,
7455 Requires<[UseSSE42]>;
7456 }
7458 multiclass SS42AI_pcmpestrm<string asm> {
7459 def rr : SS42AI<0x60, MRMSrcReg, (outs),
7460 (ins VR128:$src1, VR128:$src3, u8imm:$src5),
7461 !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
7462 []>, Sched<[WritePCmpEStrM]>;
7464 def rm : SS42AI<0x60, MRMSrcMem, (outs),
7465 (ins VR128:$src1, i128mem:$src3, u8imm:$src5),
7466 !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
7467 []>, Sched<[WritePCmpEStrMLd, ReadAfterLd]>;
7470 let Defs = [XMM0, EFLAGS], Uses = [EAX, EDX], hasSideEffects = 0 in {
7471 let Predicates = [HasAVX] in
7472 defm VPCMPESTRM128 : SS42AI_pcmpestrm<"vpcmpestrm">, VEX;
7473 defm PCMPESTRM128 : SS42AI_pcmpestrm<"pcmpestrm">;
7476 // Packed Compare Implicit Length Strings, Return Index
7477 multiclass pseudo_pcmpistri<string asm, PatFrag ld_frag> {
7478 def REG : PseudoI<(outs GR32:$dst),
7479 (ins VR128:$src1, VR128:$src2, u8imm:$src3),
7480 [(set GR32:$dst, EFLAGS,
7481 (X86pcmpistri VR128:$src1, VR128:$src2, imm:$src3))]>;
7482 def MEM : PseudoI<(outs GR32:$dst),
7483 (ins VR128:$src1, i128mem:$src2, u8imm:$src3),
7484 [(set GR32:$dst, EFLAGS, (X86pcmpistri VR128:$src1,
7485 (bc_v16i8 (ld_frag addr:$src2)), imm:$src3))]>;
7488 let Defs = [EFLAGS], usesCustomInserter = 1 in {
7489 defm VPCMPISTRI : pseudo_pcmpistri<"#VPCMPISTRI", loadv2i64>,
7490 Requires<[HasAVX]>;
7491 defm PCMPISTRI : pseudo_pcmpistri<"#PCMPISTRI", memopv2i64>,
7492 Requires<[UseSSE42]>;
7493 }
7495 multiclass SS42AI_pcmpistri<string asm> {
7496 def rr : SS42AI<0x63, MRMSrcReg, (outs),
7497 (ins VR128:$src1, VR128:$src2, u8imm:$src3),
7498 !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
7499 []>, Sched<[WritePCmpIStrI]>;
7501 def rm : SS42AI<0x63, MRMSrcMem, (outs),
7502 (ins VR128:$src1, i128mem:$src2, u8imm:$src3),
7503 !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
7504 []>, Sched<[WritePCmpIStrILd, ReadAfterLd]>;
7507 let Defs = [ECX, EFLAGS], hasSideEffects = 0 in {
7508 let Predicates = [HasAVX] in
7509 defm VPCMPISTRI : SS42AI_pcmpistri<"vpcmpistri">, VEX;
7510 defm PCMPISTRI : SS42AI_pcmpistri<"pcmpistri">;
7513 // Packed Compare Explicit Length Strings, Return Index
7514 multiclass pseudo_pcmpestri<string asm, PatFrag ld_frag> {
7515 def REG : PseudoI<(outs GR32:$dst),
7516 (ins VR128:$src1, VR128:$src3, u8imm:$src5),
7517 [(set GR32:$dst, EFLAGS,
7518 (X86pcmpestri VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5))]>;
7519 def MEM : PseudoI<(outs GR32:$dst),
7520 (ins VR128:$src1, i128mem:$src3, u8imm:$src5),
7521 [(set GR32:$dst, EFLAGS,
7522 (X86pcmpestri VR128:$src1, EAX, (bc_v16i8 (ld_frag addr:$src3)), EDX,
7523 imm:$src5))]>;
7524 }
7526 let Defs = [EFLAGS], Uses = [EAX, EDX], usesCustomInserter = 1 in {
7527 defm VPCMPESTRI : pseudo_pcmpestri<"#VPCMPESTRI", loadv2i64>,
7528 Requires<[HasAVX]>;
7529 defm PCMPESTRI : pseudo_pcmpestri<"#PCMPESTRI", memopv2i64>,
7530 Requires<[UseSSE42]>;
7533 multiclass SS42AI_pcmpestri<string asm> {
7534 def rr : SS42AI<0x61, MRMSrcReg, (outs),
7535 (ins VR128:$src1, VR128:$src3, u8imm:$src5),
7536 !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
7537 []>, Sched<[WritePCmpEStrI]>;
7539 def rm : SS42AI<0x61, MRMSrcMem, (outs),
7540 (ins VR128:$src1, i128mem:$src3, u8imm:$src5),
7541 !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
7542 []>, Sched<[WritePCmpEStrILd, ReadAfterLd]>;
7545 let Defs = [ECX, EFLAGS], Uses = [EAX, EDX], hasSideEffects = 0 in {
7546 let Predicates = [HasAVX] in
7547 defm VPCMPESTRI : SS42AI_pcmpestri<"vpcmpestri">, VEX;
7548 defm PCMPESTRI : SS42AI_pcmpestri<"pcmpestri">;
7551 //===----------------------------------------------------------------------===//
7552 // SSE4.2 - CRC Instructions
7553 //===----------------------------------------------------------------------===//
7555 // No CRC instructions have AVX equivalents
7557 // crc intrinsic instruction
7558 // This set of instructions is rm only; the only difference is the size
7559 // of r and m.
7560 class SS42I_crc32r<bits<8> opc, string asm, RegisterClass RCOut,
7561 RegisterClass RCIn, SDPatternOperator Int> :
7562 SS42FI<opc, MRMSrcReg, (outs RCOut:$dst), (ins RCOut:$src1, RCIn:$src2),
7563 !strconcat(asm, "\t{$src2, $src1|$src1, $src2}"),
7564 [(set RCOut:$dst, (Int RCOut:$src1, RCIn:$src2))], IIC_CRC32_REG>,
7565 Sched<[WriteFAdd]>;
7567 class SS42I_crc32m<bits<8> opc, string asm, RegisterClass RCOut,
7568 X86MemOperand x86memop, SDPatternOperator Int> :
7569 SS42FI<opc, MRMSrcMem, (outs RCOut:$dst), (ins RCOut:$src1, x86memop:$src2),
7570 !strconcat(asm, "\t{$src2, $src1|$src1, $src2}"),
7571 [(set RCOut:$dst, (Int RCOut:$src1, (load addr:$src2)))],
7572 IIC_CRC32_MEM>, Sched<[WriteFAddLd, ReadAfterLd]>;
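// Note (editorial, not in the original source): these wrap the CRC32 r/m
// instruction for 8/16/32/64-bit sources; at the C level they correspond to
// the _mm_crc32_u8/u16/u32/u64 intrinsics, which accumulate a CRC-32C
// (Castagnoli) checksum in the destination register.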
7574 let Constraints = "$src1 = $dst" in {
7575 def CRC32r32m8 : SS42I_crc32m<0xF0, "crc32{b}", GR32, i8mem,
7576 int_x86_sse42_crc32_32_8>;
7577 def CRC32r32r8 : SS42I_crc32r<0xF0, "crc32{b}", GR32, GR8,
7578 int_x86_sse42_crc32_32_8>;
7579 def CRC32r32m16 : SS42I_crc32m<0xF1, "crc32{w}", GR32, i16mem,
7580 int_x86_sse42_crc32_32_16>, OpSize16;
7581 def CRC32r32r16 : SS42I_crc32r<0xF1, "crc32{w}", GR32, GR16,
7582 int_x86_sse42_crc32_32_16>, OpSize16;
7583 def CRC32r32m32 : SS42I_crc32m<0xF1, "crc32{l}", GR32, i32mem,
7584 int_x86_sse42_crc32_32_32>, OpSize32;
7585 def CRC32r32r32 : SS42I_crc32r<0xF1, "crc32{l}", GR32, GR32,
7586 int_x86_sse42_crc32_32_32>, OpSize32;
7587 def CRC32r64m64 : SS42I_crc32m<0xF1, "crc32{q}", GR64, i64mem,
7588 int_x86_sse42_crc32_64_64>, REX_W;
7589 def CRC32r64r64 : SS42I_crc32r<0xF1, "crc32{q}", GR64, GR64,
7590 int_x86_sse42_crc32_64_64>, REX_W;
7591 let hasSideEffects = 0 in {
7593 def CRC32r64m8 : SS42I_crc32m<0xF0, "crc32{b}", GR64, i8mem,
7595 def CRC32r64r8 : SS42I_crc32r<0xF0, "crc32{b}", GR64, GR8,
7600 //===----------------------------------------------------------------------===//
7601 // SHA-NI Instructions
7602 //===----------------------------------------------------------------------===//
7604 multiclass SHAI_binop<bits<8> Opc, string OpcodeStr, Intrinsic IntId,
7605 bit UsesXMM0 = 0> {
7606 def rr : I<Opc, MRMSrcReg, (outs VR128:$dst),
7607 (ins VR128:$src1, VR128:$src2),
7608 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
7609 [!if(UsesXMM0,
7610 (set VR128:$dst, (IntId VR128:$src1, VR128:$src2, XMM0)),
7611 (set VR128:$dst, (IntId VR128:$src1, VR128:$src2)))]>, T8;
7613 def rm : I<Opc, MRMSrcMem, (outs VR128:$dst),
7614 (ins VR128:$src1, i128mem:$src2),
7615 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
7616 [!if(UsesXMM0,
7617 (set VR128:$dst, (IntId VR128:$src1,
7618 (bc_v4i32 (memopv2i64 addr:$src2)), XMM0)),
7619 (set VR128:$dst, (IntId VR128:$src1,
7620 (bc_v4i32 (memopv2i64 addr:$src2)))))]>, T8;
7621 }
7623 let Constraints = "$src1 = $dst", Predicates = [HasSHA] in {
7624 def SHA1RNDS4rri : Ii8<0xCC, MRMSrcReg, (outs VR128:$dst),
7625 (ins VR128:$src1, VR128:$src2, u8imm:$src3),
7626 "sha1rnds4\t{$src3, $src2, $dst|$dst, $src2, $src3}",
7628 (int_x86_sha1rnds4 VR128:$src1, VR128:$src2,
7629 (i8 imm:$src3)))]>, TA;
7630 def SHA1RNDS4rmi : Ii8<0xCC, MRMSrcMem, (outs VR128:$dst),
7631 (ins VR128:$src1, i128mem:$src2, u8imm:$src3),
7632 "sha1rnds4\t{$src3, $src2, $dst|$dst, $src2, $src3}",
7634 (int_x86_sha1rnds4 VR128:$src1,
7635 (bc_v4i32 (memopv2i64 addr:$src2)),
7636 (i8 imm:$src3)))]>, TA;
7638 defm SHA1NEXTE : SHAI_binop<0xC8, "sha1nexte", int_x86_sha1nexte>;
7639 defm SHA1MSG1 : SHAI_binop<0xC9, "sha1msg1", int_x86_sha1msg1>;
7640 defm SHA1MSG2 : SHAI_binop<0xCA, "sha1msg2", int_x86_sha1msg2>;
7642 let Uses=[XMM0] in
7643 defm SHA256RNDS2 : SHAI_binop<0xCB, "sha256rnds2", int_x86_sha256rnds2, 1>;
7645 defm SHA256MSG1 : SHAI_binop<0xCC, "sha256msg1", int_x86_sha256msg1>;
7646 defm SHA256MSG2 : SHAI_binop<0xCD, "sha256msg2", int_x86_sha256msg2>;
7647 }
7649 // Aliases with explicit %xmm0
7650 def : InstAlias<"sha256rnds2\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}",
7651 (SHA256RNDS2rr VR128:$dst, VR128:$src2)>;
7652 def : InstAlias<"sha256rnds2\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}",
7653 (SHA256RNDS2rm VR128:$dst, i128mem:$src2)>;
7655 //===----------------------------------------------------------------------===//
7656 // AES-NI Instructions
7657 //===----------------------------------------------------------------------===//
7659 multiclass AESI_binop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId128,
7660 PatFrag ld_frag, bit Is2Addr = 1> {
7661 def rr : AES8I<opc, MRMSrcReg, (outs VR128:$dst),
7662 (ins VR128:$src1, VR128:$src2),
7663 !if(Is2Addr,
7664 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
7665 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
7666 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
7667 Sched<[WriteAESDecEnc]>;
7668 def rm : AES8I<opc, MRMSrcMem, (outs VR128:$dst),
7669 (ins VR128:$src1, i128mem:$src2),
7670 !if(Is2Addr,
7671 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
7672 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
7673 [(set VR128:$dst,
7674 (IntId128 VR128:$src1, (ld_frag addr:$src2)))]>,
7675 Sched<[WriteAESDecEncLd, ReadAfterLd]>;
7676 }
7678 // Perform One Round of an AES Encryption/Decryption Flow
7679 let Predicates = [HasAVX, HasAES] in {
7680 defm VAESENC : AESI_binop_rm_int<0xDC, "vaesenc",
7681 int_x86_aesni_aesenc, loadv2i64, 0>, VEX_4V;
7682 defm VAESENCLAST : AESI_binop_rm_int<0xDD, "vaesenclast",
7683 int_x86_aesni_aesenclast, loadv2i64, 0>, VEX_4V;
7684 defm VAESDEC : AESI_binop_rm_int<0xDE, "vaesdec",
7685 int_x86_aesni_aesdec, loadv2i64, 0>, VEX_4V;
7686 defm VAESDECLAST : AESI_binop_rm_int<0xDF, "vaesdeclast",
7687 int_x86_aesni_aesdeclast, loadv2i64, 0>, VEX_4V;
7688 }
7690 let Constraints = "$src1 = $dst" in {
7691 defm AESENC : AESI_binop_rm_int<0xDC, "aesenc",
7692 int_x86_aesni_aesenc, memopv2i64>;
7693 defm AESENCLAST : AESI_binop_rm_int<0xDD, "aesenclast",
7694 int_x86_aesni_aesenclast, memopv2i64>;
7695 defm AESDEC : AESI_binop_rm_int<0xDE, "aesdec",
7696 int_x86_aesni_aesdec, memopv2i64>;
7697 defm AESDECLAST : AESI_binop_rm_int<0xDF, "aesdeclast",
7698 int_x86_aesni_aesdeclast, memopv2i64>;
7699 }
7701 // Perform the AES InvMixColumn Transformation
7702 let Predicates = [HasAVX, HasAES] in {
7703 def VAESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
7704 (ins VR128:$src1),
7705 "vaesimc\t{$src1, $dst|$dst, $src1}",
7706 [(set VR128:$dst,
7707 (int_x86_aesni_aesimc VR128:$src1))]>, Sched<[WriteAESIMC]>,
7708 VEX;
7709 def VAESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
7710 (ins i128mem:$src1),
7711 "vaesimc\t{$src1, $dst|$dst, $src1}",
7712 [(set VR128:$dst, (int_x86_aesni_aesimc (loadv2i64 addr:$src1)))]>,
7713 Sched<[WriteAESIMCLd]>, VEX;
7714 }
7715 def AESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
7716 (ins VR128:$src1),
7717 "aesimc\t{$src1, $dst|$dst, $src1}",
7718 [(set VR128:$dst,
7719 (int_x86_aesni_aesimc VR128:$src1))]>, Sched<[WriteAESIMC]>;
7720 def AESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
7721 (ins i128mem:$src1),
7722 "aesimc\t{$src1, $dst|$dst, $src1}",
7723 [(set VR128:$dst, (int_x86_aesni_aesimc (memopv2i64 addr:$src1)))]>,
7724 Sched<[WriteAESIMCLd]>;
7726 // AES Round Key Generation Assist
7727 let Predicates = [HasAVX, HasAES] in {
7728 def VAESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
7729 (ins VR128:$src1, u8imm:$src2),
7730 "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
7732 (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
7733 Sched<[WriteAESKeyGen]>, VEX;
7734 def VAESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
7735 (ins i128mem:$src1, u8imm:$src2),
7736 "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
7738 (int_x86_aesni_aeskeygenassist (loadv2i64 addr:$src1), imm:$src2))]>,
7739 Sched<[WriteAESKeyGenLd]>, VEX;
7741 def AESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
7742 (ins VR128:$src1, u8imm:$src2),
7743 "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
7745 (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
7746 Sched<[WriteAESKeyGen]>;
7747 def AESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
7748 (ins i128mem:$src1, u8imm:$src2),
7749 "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
7751 (int_x86_aesni_aeskeygenassist (memopv2i64 addr:$src1), imm:$src2))]>,
7752 Sched<[WriteAESKeyGenLd]>;
7754 //===----------------------------------------------------------------------===//
7755 // PCLMUL Instructions
7756 //===----------------------------------------------------------------------===//
7758 // AVX carry-less Multiplication instructions
7759 let isCommutable = 1 in
7760 def VPCLMULQDQrr : AVXPCLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
7761 (ins VR128:$src1, VR128:$src2, u8imm:$src3),
7762 "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
7764 (int_x86_pclmulqdq VR128:$src1, VR128:$src2, imm:$src3))]>,
7765 Sched<[WriteCLMul]>;
7767 def VPCLMULQDQrm : AVXPCLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
7768 (ins VR128:$src1, i128mem:$src2, u8imm:$src3),
7769 "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
7770 [(set VR128:$dst, (int_x86_pclmulqdq VR128:$src1,
7771 (loadv2i64 addr:$src2), imm:$src3))]>,
7772 Sched<[WriteCLMulLd, ReadAfterLd]>;
7774 // Carry-less Multiplication instructions
7775 let Constraints = "$src1 = $dst" in {
7776 let isCommutable = 1 in
7777 def PCLMULQDQrr : PCLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
7778 (ins VR128:$src1, VR128:$src2, u8imm:$src3),
7779 "pclmulqdq\t{$src3, $src2, $dst|$dst, $src2, $src3}",
7781 (int_x86_pclmulqdq VR128:$src1, VR128:$src2, imm:$src3))],
7782 IIC_SSE_PCLMULQDQ_RR>, Sched<[WriteCLMul]>;
7784 def PCLMULQDQrm : PCLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
7785 (ins VR128:$src1, i128mem:$src2, u8imm:$src3),
7786 "pclmulqdq\t{$src3, $src2, $dst|$dst, $src2, $src3}",
7787 [(set VR128:$dst, (int_x86_pclmulqdq VR128:$src1,
7788 (memopv2i64 addr:$src2), imm:$src3))],
7789 IIC_SSE_PCLMULQDQ_RM>,
7790 Sched<[WriteCLMulLd, ReadAfterLd]>;
7791 } // Constraints = "$src1 = $dst"
7794 multiclass pclmul_alias<string asm, int immop> {
7795 def : InstAlias<!strconcat("pclmul", asm, "dq {$src, $dst|$dst, $src}"),
7796 (PCLMULQDQrr VR128:$dst, VR128:$src, immop), 0>;
7798 def : InstAlias<!strconcat("pclmul", asm, "dq {$src, $dst|$dst, $src}"),
7799 (PCLMULQDQrm VR128:$dst, i128mem:$src, immop), 0>;
7801 def : InstAlias<!strconcat("vpclmul", asm,
7802 "dq {$src2, $src1, $dst|$dst, $src1, $src2}"),
7803 (VPCLMULQDQrr VR128:$dst, VR128:$src1, VR128:$src2, immop),
7804 0>;
7806 def : InstAlias<!strconcat("vpclmul", asm,
7807 "dq {$src2, $src1, $dst|$dst, $src1, $src2}"),
7808 (VPCLMULQDQrm VR128:$dst, VR128:$src1, i128mem:$src2, immop),
7809 0>;
7810 }
7811 defm : pclmul_alias<"hqhq", 0x11>;
7812 defm : pclmul_alias<"hqlq", 0x01>;
7813 defm : pclmul_alias<"lqhq", 0x10>;
7814 defm : pclmul_alias<"lqlq", 0x00>;
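// Note (editorial, not in the original source): the immediate passed to
// PCLMULQDQ selects which 64-bit half of each source enters the carry-less
// multiply; the aliases above name the four combinations (e.g. "lqlq" = 0x00
// multiplies the two low quadwords, "hqhq" = 0x11 the two high quadwords).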
7816 //===----------------------------------------------------------------------===//
7817 // SSE4A Instructions
7818 //===----------------------------------------------------------------------===//
7820 let Predicates = [HasSSE4A] in {
7822 let Constraints = "$src = $dst" in {
7823 def EXTRQI : Ii8<0x78, MRMXr, (outs VR128:$dst),
7824 (ins VR128:$src, u8imm:$len, u8imm:$idx),
7825 "extrq\t{$idx, $len, $src|$src, $len, $idx}",
7826 [(set VR128:$dst, (int_x86_sse4a_extrqi VR128:$src, imm:$len,
7827 imm:$idx))]>, PD;
7828 def EXTRQ : I<0x79, MRMSrcReg, (outs VR128:$dst),
7829 (ins VR128:$src, VR128:$mask),
7830 "extrq\t{$mask, $src|$src, $mask}",
7831 [(set VR128:$dst, (int_x86_sse4a_extrq VR128:$src,
7832 VR128:$mask))]>, PD;
7834 def INSERTQI : Ii8<0x78, MRMSrcReg, (outs VR128:$dst),
7835 (ins VR128:$src, VR128:$src2, u8imm:$len, u8imm:$idx),
7836 "insertq\t{$idx, $len, $src2, $src|$src, $src2, $len, $idx}",
7837 [(set VR128:$dst, (int_x86_sse4a_insertqi VR128:$src,
7838 VR128:$src2, imm:$len, imm:$idx))]>, XD;
7839 def INSERTQ : I<0x79, MRMSrcReg, (outs VR128:$dst),
7840 (ins VR128:$src, VR128:$mask),
7841 "insertq\t{$mask, $src|$src, $mask}",
7842 [(set VR128:$dst, (int_x86_sse4a_insertq VR128:$src,
7843 VR128:$mask))]>, XD;
7846 def MOVNTSS : I<0x2B, MRMDestMem, (outs), (ins f32mem:$dst, VR128:$src),
7847 "movntss\t{$src, $dst|$dst, $src}",
7848 [(int_x86_sse4a_movnt_ss addr:$dst, VR128:$src)]>, XS;
7850 def MOVNTSD : I<0x2B, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
7851 "movntsd\t{$src, $dst|$dst, $src}",
7852 [(int_x86_sse4a_movnt_sd addr:$dst, VR128:$src)]>, XD;
7853 }
7855 //===----------------------------------------------------------------------===//
7856 // AVX Instructions
7857 //===----------------------------------------------------------------------===//
7859 //===----------------------------------------------------------------------===//
7860 // VBROADCAST - Load from memory and broadcast to all elements of the
7861 // destination operand
7863 class avx_broadcast<bits<8> opc, string OpcodeStr, RegisterClass RC,
7864 X86MemOperand x86memop, Intrinsic Int, SchedWrite Sched> :
7865 AVX8I<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
7866 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
7867 [(set RC:$dst, (Int addr:$src))]>, Sched<[Sched]>, VEX;
7869 class avx_broadcast_no_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
7870 X86MemOperand x86memop, ValueType VT,
7871 PatFrag ld_frag, SchedWrite Sched> :
7872 AVX8I<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
7873 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
7874 [(set RC:$dst, (VT (X86VBroadcast (ld_frag addr:$src))))]>,
7875 Sched<[Sched]>, VEX {
7876 let mayLoad = 1;
7877 }
7879 // AVX2 adds register forms
7880 class avx2_broadcast_reg<bits<8> opc, string OpcodeStr, RegisterClass RC,
7881 Intrinsic Int, SchedWrite Sched> :
7882 AVX28I<opc, MRMSrcReg, (outs RC:$dst), (ins VR128:$src),
7883 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
7884 [(set RC:$dst, (Int VR128:$src))]>, Sched<[Sched]>, VEX;
7886 let ExeDomain = SSEPackedSingle in {
7887 def VBROADCASTSSrm : avx_broadcast_no_int<0x18, "vbroadcastss", VR128,
7888 f32mem, v4f32, loadf32, WriteLoad>;
7889 def VBROADCASTSSYrm : avx_broadcast_no_int<0x18, "vbroadcastss", VR256,
7890 f32mem, v8f32, loadf32,
7891 WriteFShuffleLd>, VEX_L;
7893 let ExeDomain = SSEPackedDouble in
7894 def VBROADCASTSDYrm : avx_broadcast_no_int<0x19, "vbroadcastsd", VR256, f64mem,
7895 v4f64, loadf64, WriteFShuffleLd>, VEX_L;
7896 def VBROADCASTF128 : avx_broadcast<0x1A, "vbroadcastf128", VR256, f128mem,
7897 int_x86_avx_vbroadcastf128_pd_256,
7898 WriteFShuffleLd>, VEX_L;
7900 let ExeDomain = SSEPackedSingle in {
7901 def VBROADCASTSSrr : avx2_broadcast_reg<0x18, "vbroadcastss", VR128,
7902 int_x86_avx2_vbroadcast_ss_ps,
7903 WriteFShuffle>;
7904 def VBROADCASTSSYrr : avx2_broadcast_reg<0x18, "vbroadcastss", VR256,
7905 int_x86_avx2_vbroadcast_ss_ps_256,
7906 WriteFShuffle256>, VEX_L;
7908 let ExeDomain = SSEPackedDouble in
7909 def VBROADCASTSDYrr : avx2_broadcast_reg<0x19, "vbroadcastsd", VR256,
7910 int_x86_avx2_vbroadcast_sd_pd_256,
7911 WriteFShuffle256>, VEX_L;
7913 let Predicates = [HasAVX2] in
7914 def VBROADCASTI128 : avx_broadcast<0x5A, "vbroadcasti128", VR256, i128mem,
7915 int_x86_avx2_vbroadcasti128, WriteLoad>,
7916 VEX_L;
7918 let Predicates = [HasAVX] in
7919 def : Pat<(int_x86_avx_vbroadcastf128_ps_256 addr:$src),
7920 (VBROADCASTF128 addr:$src)>;
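// Note (editorial, not in the original source): vbroadcastss/vbroadcastsd load
// one scalar and replicate it into every element of the destination (the C
// intrinsics _mm256_broadcast_ss/_mm256_broadcast_sd), while vbroadcastf128
// loads a full 128-bit block into both halves of a YMM register. The AVX2-only
// register-source forms are modelled by avx2_broadcast_reg above.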
7923 //===----------------------------------------------------------------------===//
7924 // VINSERTF128 - Insert packed floating-point values
7926 let hasSideEffects = 0, ExeDomain = SSEPackedSingle in {
7927 def VINSERTF128rr : AVXAIi8<0x18, MRMSrcReg, (outs VR256:$dst),
7928 (ins VR256:$src1, VR128:$src2, u8imm:$src3),
7929 "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
7930 []>, Sched<[WriteFShuffle]>, VEX_4V, VEX_L;
7932 def VINSERTF128rm : AVXAIi8<0x18, MRMSrcMem, (outs VR256:$dst),
7933 (ins VR256:$src1, f128mem:$src2, u8imm:$src3),
7934 "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
7935 []>, Sched<[WriteFShuffleLd, ReadAfterLd]>, VEX_4V, VEX_L;
7938 let Predicates = [HasAVX] in {
7939 def : Pat<(vinsert128_insert:$ins (v8f32 VR256:$src1), (v4f32 VR128:$src2),
7940 (iPTR imm)),
7941 (VINSERTF128rr VR256:$src1, VR128:$src2,
7942 (INSERT_get_vinsert128_imm VR256:$ins))>;
7943 def : Pat<(vinsert128_insert:$ins (v4f64 VR256:$src1), (v2f64 VR128:$src2),
7944 (iPTR imm)),
7945 (VINSERTF128rr VR256:$src1, VR128:$src2,
7946 (INSERT_get_vinsert128_imm VR256:$ins))>;
7948 def : Pat<(vinsert128_insert:$ins (v8f32 VR256:$src1), (loadv4f32 addr:$src2),
7949 (iPTR imm)),
7950 (VINSERTF128rm VR256:$src1, addr:$src2,
7951 (INSERT_get_vinsert128_imm VR256:$ins))>;
7952 def : Pat<(vinsert128_insert:$ins (v4f64 VR256:$src1), (loadv2f64 addr:$src2),
7953 (iPTR imm)),
7954 (VINSERTF128rm VR256:$src1, addr:$src2,
7955 (INSERT_get_vinsert128_imm VR256:$ins))>;
7956 }
7958 let Predicates = [HasAVX1Only] in {
7959 def : Pat<(vinsert128_insert:$ins (v4i64 VR256:$src1), (v2i64 VR128:$src2),
7961 (VINSERTF128rr VR256:$src1, VR128:$src2,
7962 (INSERT_get_vinsert128_imm VR256:$ins))>;
7963 def : Pat<(vinsert128_insert:$ins (v8i32 VR256:$src1), (v4i32 VR128:$src2),
7965 (VINSERTF128rr VR256:$src1, VR128:$src2,
7966 (INSERT_get_vinsert128_imm VR256:$ins))>;
7967 def : Pat<(vinsert128_insert:$ins (v32i8 VR256:$src1), (v16i8 VR128:$src2),
7969 (VINSERTF128rr VR256:$src1, VR128:$src2,
7970 (INSERT_get_vinsert128_imm VR256:$ins))>;
7971 def : Pat<(vinsert128_insert:$ins (v16i16 VR256:$src1), (v8i16 VR128:$src2),
7973 (VINSERTF128rr VR256:$src1, VR128:$src2,
7974 (INSERT_get_vinsert128_imm VR256:$ins))>;
7976 def : Pat<(vinsert128_insert:$ins (v4i64 VR256:$src1), (loadv2i64 addr:$src2),
7978 (VINSERTF128rm VR256:$src1, addr:$src2,
7979 (INSERT_get_vinsert128_imm VR256:$ins))>;
7980 def : Pat<(vinsert128_insert:$ins (v8i32 VR256:$src1),
7981 (bc_v4i32 (loadv2i64 addr:$src2)),
7983 (VINSERTF128rm VR256:$src1, addr:$src2,
7984 (INSERT_get_vinsert128_imm VR256:$ins))>;
7985 def : Pat<(vinsert128_insert:$ins (v32i8 VR256:$src1),
7986 (bc_v16i8 (loadv2i64 addr:$src2)),
7988 (VINSERTF128rm VR256:$src1, addr:$src2,
7989 (INSERT_get_vinsert128_imm VR256:$ins))>;
7990 def : Pat<(vinsert128_insert:$ins (v16i16 VR256:$src1),
7991 (bc_v8i16 (loadv2i64 addr:$src2)),
7993 (VINSERTF128rm VR256:$src1, addr:$src2,
7994 (INSERT_get_vinsert128_imm VR256:$ins))>;
7997 //===----------------------------------------------------------------------===//
7998 // VEXTRACTF128 - Extract packed floating-point values
8000 let hasSideEffects = 0, ExeDomain = SSEPackedSingle in {
8001 def VEXTRACTF128rr : AVXAIi8<0x19, MRMDestReg, (outs VR128:$dst),
8002 (ins VR256:$src1, u8imm:$src2),
8003 "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
8004 []>, Sched<[WriteFShuffle]>, VEX, VEX_L;
8006 def VEXTRACTF128mr : AVXAIi8<0x19, MRMDestMem, (outs),
8007 (ins f128mem:$dst, VR256:$src1, u8imm:$src2),
8008 "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
8009 []>, Sched<[WriteStore]>, VEX, VEX_L;
8013 let Predicates = [HasAVX] in {
8014 def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
8015 (v4f32 (VEXTRACTF128rr
8016 (v8f32 VR256:$src1),
8017 (EXTRACT_get_vextract128_imm VR128:$ext)))>;
8018 def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
8019 (v2f64 (VEXTRACTF128rr
8020 (v4f64 VR256:$src1),
8021 (EXTRACT_get_vextract128_imm VR128:$ext)))>;
8023 def : Pat<(store (v4f32 (vextract128_extract:$ext (v8f32 VR256:$src1),
8024 (iPTR imm))), addr:$dst),
8025 (VEXTRACTF128mr addr:$dst, VR256:$src1,
8026 (EXTRACT_get_vextract128_imm VR128:$ext))>;
8027 def : Pat<(store (v2f64 (vextract128_extract:$ext (v4f64 VR256:$src1),
8028 (iPTR imm))), addr:$dst),
8029 (VEXTRACTF128mr addr:$dst, VR256:$src1,
8030 (EXTRACT_get_vextract128_imm VR128:$ext))>;
8033 let Predicates = [HasAVX1Only] in {
8034 def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
8035 (v2i64 (VEXTRACTF128rr
8036 (v4i64 VR256:$src1),
8037 (EXTRACT_get_vextract128_imm VR128:$ext)))>;
8038 def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
8039 (v4i32 (VEXTRACTF128rr
8040 (v8i32 VR256:$src1),
8041 (EXTRACT_get_vextract128_imm VR128:$ext)))>;
8042 def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
8043 (v8i16 (VEXTRACTF128rr
8044 (v16i16 VR256:$src1),
8045 (EXTRACT_get_vextract128_imm VR128:$ext)))>;
8046 def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
8047 (v16i8 (VEXTRACTF128rr
8048 (v32i8 VR256:$src1),
8049 (EXTRACT_get_vextract128_imm VR128:$ext)))>;
8051 def : Pat<(alignedstore (v2i64 (vextract128_extract:$ext (v4i64 VR256:$src1),
8052 (iPTR imm))), addr:$dst),
8053 (VEXTRACTF128mr addr:$dst, VR256:$src1,
8054 (EXTRACT_get_vextract128_imm VR128:$ext))>;
8055 def : Pat<(alignedstore (v4i32 (vextract128_extract:$ext (v8i32 VR256:$src1),
8056 (iPTR imm))), addr:$dst),
8057 (VEXTRACTF128mr addr:$dst, VR256:$src1,
8058 (EXTRACT_get_vextract128_imm VR128:$ext))>;
8059 def : Pat<(alignedstore (v8i16 (vextract128_extract:$ext (v16i16 VR256:$src1),
8060 (iPTR imm))), addr:$dst),
8061 (VEXTRACTF128mr addr:$dst, VR256:$src1,
8062 (EXTRACT_get_vextract128_imm VR128:$ext))>;
8063 def : Pat<(alignedstore (v16i8 (vextract128_extract:$ext (v32i8 VR256:$src1),
8064 (iPTR imm))), addr:$dst),
8065 (VEXTRACTF128mr addr:$dst, VR256:$src1,
8066 (EXTRACT_get_vextract128_imm VR128:$ext))>;
8069 //===----------------------------------------------------------------------===//
8070 // VMASKMOV - Conditional SIMD Packed Loads and Stores
8072 multiclass avx_movmask_rm<bits<8> opc_rm, bits<8> opc_mr, string OpcodeStr,
8073 Intrinsic IntLd, Intrinsic IntLd256,
8074 Intrinsic IntSt, Intrinsic IntSt256> {
8075 def rm : AVX8I<opc_rm, MRMSrcMem, (outs VR128:$dst),
8076 (ins VR128:$src1, f128mem:$src2),
8077 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
8078 [(set VR128:$dst, (IntLd addr:$src2, VR128:$src1))]>,
8080 def Yrm : AVX8I<opc_rm, MRMSrcMem, (outs VR256:$dst),
8081 (ins VR256:$src1, f256mem:$src2),
8082 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
8083 [(set VR256:$dst, (IntLd256 addr:$src2, VR256:$src1))]>,
8085 def mr : AVX8I<opc_mr, MRMDestMem, (outs),
8086 (ins f128mem:$dst, VR128:$src1, VR128:$src2),
8087 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
8088 [(IntSt addr:$dst, VR128:$src1, VR128:$src2)]>, VEX_4V;
8089 def Ymr : AVX8I<opc_mr, MRMDestMem, (outs),
8090 (ins f256mem:$dst, VR256:$src1, VR256:$src2),
8091 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
8092 [(IntSt256 addr:$dst, VR256:$src1, VR256:$src2)]>, VEX_4V, VEX_L;
8095 let ExeDomain = SSEPackedSingle in
8096 defm VMASKMOVPS : avx_movmask_rm<0x2C, 0x2E, "vmaskmovps",
8097 int_x86_avx_maskload_ps,
8098 int_x86_avx_maskload_ps_256,
8099 int_x86_avx_maskstore_ps,
8100 int_x86_avx_maskstore_ps_256>;
8101 let ExeDomain = SSEPackedDouble in
8102 defm VMASKMOVPD : avx_movmask_rm<0x2D, 0x2F, "vmaskmovpd",
8103 int_x86_avx_maskload_pd,
8104 int_x86_avx_maskload_pd_256,
8105 int_x86_avx_maskstore_pd,
8106 int_x86_avx_maskstore_pd_256>;
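// Note (editorial, not in the original source): VMASKMOVPS/PD use the sign bit
// of each mask element to decide which elements are loaded or stored
// (masked-off loaded elements read as zero and masked-off stores are
// suppressed), matching the _mm256_maskload_ps/_mm256_maskstore_ps family of
// intrinsics.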
8108 //===----------------------------------------------------------------------===//
8109 // VPERMIL - Permute Single and Double Floating-Point Values
8111 multiclass avx_permil<bits<8> opc_rm, bits<8> opc_rmi, string OpcodeStr,
8112 RegisterClass RC, X86MemOperand x86memop_f,
8113 X86MemOperand x86memop_i, PatFrag i_frag,
8114 Intrinsic IntVar, ValueType vt> {
8115 def rr : AVX8I<opc_rm, MRMSrcReg, (outs RC:$dst),
8116 (ins RC:$src1, RC:$src2),
8117 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
8118 [(set RC:$dst, (IntVar RC:$src1, RC:$src2))]>, VEX_4V,
8119 Sched<[WriteFShuffle]>;
8120 def rm : AVX8I<opc_rm, MRMSrcMem, (outs RC:$dst),
8121 (ins RC:$src1, x86memop_i:$src2),
8122 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
8123 [(set RC:$dst, (IntVar RC:$src1,
8124 (bitconvert (i_frag addr:$src2))))]>, VEX_4V,
8125 Sched<[WriteFShuffleLd, ReadAfterLd]>;
8127 def ri : AVXAIi8<opc_rmi, MRMSrcReg, (outs RC:$dst),
8128 (ins RC:$src1, u8imm:$src2),
8129 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
8130 [(set RC:$dst, (vt (X86VPermilpi RC:$src1, (i8 imm:$src2))))]>, VEX,
8131 Sched<[WriteFShuffle]>;
8132 def mi : AVXAIi8<opc_rmi, MRMSrcMem, (outs RC:$dst),
8133 (ins x86memop_f:$src1, u8imm:$src2),
8134 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
8136 (vt (X86VPermilpi (load addr:$src1), (i8 imm:$src2))))]>, VEX,
8137 Sched<[WriteFShuffleLd]>;
8140 let ExeDomain = SSEPackedSingle in {
8141 defm VPERMILPS : avx_permil<0x0C, 0x04, "vpermilps", VR128, f128mem, i128mem,
8142 loadv2i64, int_x86_avx_vpermilvar_ps, v4f32>;
8143 defm VPERMILPSY : avx_permil<0x0C, 0x04, "vpermilps", VR256, f256mem, i256mem,
8144 loadv4i64, int_x86_avx_vpermilvar_ps_256, v8f32>, VEX_L;
8146 let ExeDomain = SSEPackedDouble in {
8147 defm VPERMILPD : avx_permil<0x0D, 0x05, "vpermilpd", VR128, f128mem, i128mem,
8148 loadv2i64, int_x86_avx_vpermilvar_pd, v2f64>;
8149 defm VPERMILPDY : avx_permil<0x0D, 0x05, "vpermilpd", VR256, f256mem, i256mem,
8150 loadv4i64, int_x86_avx_vpermilvar_pd_256, v4f64>, VEX_L;
8153 let Predicates = [HasAVX] in {
8154 def : Pat<(v8f32 (X86VPermilpv VR256:$src1, (v8i32 VR256:$src2))),
8155 (VPERMILPSYrr VR256:$src1, VR256:$src2)>;
8156 def : Pat<(v8f32 (X86VPermilpv VR256:$src1, (bc_v8i32 (loadv4i64 addr:$src2)))),
8157 (VPERMILPSYrm VR256:$src1, addr:$src2)>;
8158 def : Pat<(v4f64 (X86VPermilpv VR256:$src1, (v4i64 VR256:$src2))),
8159 (VPERMILPDYrr VR256:$src1, VR256:$src2)>;
8160 def : Pat<(v4f64 (X86VPermilpv VR256:$src1, (loadv4i64 addr:$src2))),
8161 (VPERMILPDYrm VR256:$src1, addr:$src2)>;
8163 def : Pat<(v8i32 (X86VPermilpi VR256:$src1, (i8 imm:$imm))),
8164 (VPERMILPSYri VR256:$src1, imm:$imm)>;
8165 def : Pat<(v4i64 (X86VPermilpi VR256:$src1, (i8 imm:$imm))),
8166 (VPERMILPDYri VR256:$src1, imm:$imm)>;
8167 def : Pat<(v8i32 (X86VPermilpi (bc_v8i32 (loadv4i64 addr:$src1)),
8169 (VPERMILPSYmi addr:$src1, imm:$imm)>;
8170 def : Pat<(v4i64 (X86VPermilpi (loadv4i64 addr:$src1), (i8 imm:$imm))),
8171 (VPERMILPDYmi addr:$src1, imm:$imm)>;
8173 def : Pat<(v4f32 (X86VPermilpv VR128:$src1, (v4i32 VR128:$src2))),
8174 (VPERMILPSrr VR128:$src1, VR128:$src2)>;
8175 def : Pat<(v4f32 (X86VPermilpv VR128:$src1, (bc_v4i32 (loadv2i64 addr:$src2)))),
8176 (VPERMILPSrm VR128:$src1, addr:$src2)>;
8177 def : Pat<(v2f64 (X86VPermilpv VR128:$src1, (v2i64 VR128:$src2))),
8178 (VPERMILPDrr VR128:$src1, VR128:$src2)>;
8179 def : Pat<(v2f64 (X86VPermilpv VR128:$src1, (loadv2i64 addr:$src2))),
8180 (VPERMILPDrm VR128:$src1, addr:$src2)>;
8182 def : Pat<(v2i64 (X86VPermilpi VR128:$src1, (i8 imm:$imm))),
8183 (VPERMILPDri VR128:$src1, imm:$imm)>;
8184 def : Pat<(v2i64 (X86VPermilpi (loadv2i64 addr:$src1), (i8 imm:$imm))),
8185 (VPERMILPDmi addr:$src1, imm:$imm)>;
8188 //===----------------------------------------------------------------------===//
8189 // VPERM2F128 - Permute Floating-Point Values in 128-bit chunks
8191 let ExeDomain = SSEPackedSingle in {
8192 def VPERM2F128rr : AVXAIi8<0x06, MRMSrcReg, (outs VR256:$dst),
8193 (ins VR256:$src1, VR256:$src2, u8imm:$src3),
8194 "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
8195 [(set VR256:$dst, (v8f32 (X86VPerm2x128 VR256:$src1, VR256:$src2,
8196 (i8 imm:$src3))))]>, VEX_4V, VEX_L,
8197 Sched<[WriteFShuffle]>;
8198 def VPERM2F128rm : AVXAIi8<0x06, MRMSrcMem, (outs VR256:$dst),
8199 (ins VR256:$src1, f256mem:$src2, u8imm:$src3),
8200 "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
8201 [(set VR256:$dst, (X86VPerm2x128 VR256:$src1, (loadv8f32 addr:$src2),
8202 (i8 imm:$src3)))]>, VEX_4V, VEX_L,
8203 Sched<[WriteFShuffleLd, ReadAfterLd]>;
8206 let Predicates = [HasAVX] in {
8207 def : Pat<(v4f64 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
8208 (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
8209 def : Pat<(v4f64 (X86VPerm2x128 VR256:$src1,
8210 (loadv4f64 addr:$src2), (i8 imm:$imm))),
8211 (VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>;
8214 let Predicates = [HasAVX1Only] in {
8215 def : Pat<(v8i32 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
8216 (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
8217 def : Pat<(v4i64 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
8218 (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
8219 def : Pat<(v32i8 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
8220 (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
8221 def : Pat<(v16i16 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
8222 (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
8224 def : Pat<(v8i32 (X86VPerm2x128 VR256:$src1,
8225 (bc_v8i32 (loadv4i64 addr:$src2)), (i8 imm:$imm))),
8226 (VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>;
8227 def : Pat<(v4i64 (X86VPerm2x128 VR256:$src1,
8228 (loadv4i64 addr:$src2), (i8 imm:$imm))),
8229 (VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>;
8230 def : Pat<(v32i8 (X86VPerm2x128 VR256:$src1,
8231 (bc_v32i8 (loadv4i64 addr:$src2)), (i8 imm:$imm))),
8232 (VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>;
8233 def : Pat<(v16i16 (X86VPerm2x128 VR256:$src1,
8234 (bc_v16i16 (loadv4i64 addr:$src2)), (i8 imm:$imm))),
8235 (VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>;
8238 //===----------------------------------------------------------------------===//
8239 // VZERO - Zero YMM registers
8241 let Defs = [YMM0, YMM1, YMM2, YMM3, YMM4, YMM5, YMM6, YMM7,
8242 YMM8, YMM9, YMM10, YMM11, YMM12, YMM13, YMM14, YMM15] in {
8243 // Zero All YMM registers
8244 def VZEROALL : I<0x77, RawFrm, (outs), (ins), "vzeroall",
8245 [(int_x86_avx_vzeroall)]>, PS, VEX, VEX_L, Requires<[HasAVX]>;
8247 // Zero Upper bits of YMM registers
8248 def VZEROUPPER : I<0x77, RawFrm, (outs), (ins), "vzeroupper",
8249 [(int_x86_avx_vzeroupper)]>, PS, VEX, Requires<[HasAVX]>;
8250 }
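// Note (editorial, not in the original source): vzeroupper clears only the
// upper 128 bits of every YMM register while vzeroall clears them entirely;
// the x86 backend inserts vzeroupper before transitions into legacy-SSE code to
// avoid the AVX/SSE transition penalty, which is why both are modelled as
// clobbering all YMM registers here.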
8252 //===----------------------------------------------------------------------===//
8253 // Half precision conversion instructions
8254 //===----------------------------------------------------------------------===//
8255 multiclass f16c_ph2ps<RegisterClass RC, X86MemOperand x86memop, Intrinsic Int> {
8256 def rr : I<0x13, MRMSrcReg, (outs RC:$dst), (ins VR128:$src),
8257 "vcvtph2ps\t{$src, $dst|$dst, $src}",
8258 [(set RC:$dst, (Int VR128:$src))]>,
8259 T8PD, VEX, Sched<[WriteCvtF2F]>;
8260 let hasSideEffects = 0, mayLoad = 1 in
8261 def rm : I<0x13, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
8262 "vcvtph2ps\t{$src, $dst|$dst, $src}", []>, T8PD, VEX,
8263 Sched<[WriteCvtF2FLd]>;
8266 multiclass f16c_ps2ph<RegisterClass RC, X86MemOperand x86memop, Intrinsic Int> {
8267 def rr : Ii8<0x1D, MRMDestReg, (outs VR128:$dst),
8268 (ins RC:$src1, i32u8imm:$src2),
8269 "vcvtps2ph\t{$src2, $src1, $dst|$dst, $src1, $src2}",
8270 [(set VR128:$dst, (Int RC:$src1, imm:$src2))]>,
8271 TAPD, VEX, Sched<[WriteCvtF2F]>;
8272 let hasSideEffects = 0, mayStore = 1,
8273 SchedRW = [WriteCvtF2FLd, WriteRMW] in
8274 def mr : Ii8<0x1D, MRMDestMem, (outs),
8275 (ins x86memop:$dst, RC:$src1, i32u8imm:$src2),
8276 "vcvtps2ph\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
8280 let Predicates = [HasF16C] in {
8281 defm VCVTPH2PS : f16c_ph2ps<VR128, f64mem, int_x86_vcvtph2ps_128>;
8282 defm VCVTPH2PSY : f16c_ph2ps<VR256, f128mem, int_x86_vcvtph2ps_256>, VEX_L;
8283 defm VCVTPS2PH : f16c_ps2ph<VR128, f64mem, int_x86_vcvtps2ph_128>;
8284 defm VCVTPS2PHY : f16c_ps2ph<VR256, f128mem, int_x86_vcvtps2ph_256>, VEX_L;
8286 // Pattern match vcvtph2ps of a scalar i64 load.
8287 def : Pat<(int_x86_vcvtph2ps_128 (vzmovl_v2i64 addr:$src)),
8288 (VCVTPH2PSrm addr:$src)>;
8289 def : Pat<(int_x86_vcvtph2ps_128 (vzload_v2i64 addr:$src)),
8290 (VCVTPH2PSrm addr:$src)>;
8293 // Patterns for matching conversions from float to half-float and vice versa.
8294 let Predicates = [HasF16C] in {
8295 def : Pat<(fp_to_f16 FR32:$src),
8296 (i16 (EXTRACT_SUBREG (VMOVPDI2DIrr (VCVTPS2PHrr
8297 (COPY_TO_REGCLASS FR32:$src, VR128), 0)), sub_16bit))>;
8299 def : Pat<(f16_to_fp GR16:$src),
8300 (f32 (COPY_TO_REGCLASS (VCVTPH2PSrr
8301 (COPY_TO_REGCLASS (MOVSX32rr16 GR16:$src), VR128)), FR32)) >;
8303 def : Pat<(f16_to_fp (i16 (fp_to_f16 FR32:$src))),
8304 (f32 (COPY_TO_REGCLASS (VCVTPH2PSrr
8305 (VCVTPS2PHrr (COPY_TO_REGCLASS FR32:$src, VR128), 0)), FR32)) >;
8308 //===----------------------------------------------------------------------===//
8309 // AVX2 Instructions
8310 //===----------------------------------------------------------------------===//
/// AVX2_binop_rmi_int - AVX2 binary operator with 8-bit immediate
multiclass AVX2_binop_rmi_int<bits<8> opc, string OpcodeStr,
                              Intrinsic IntId, RegisterClass RC, PatFrag memop_frag,
                              X86MemOperand x86memop> {
  let isCommutable = 1 in
  def rri : AVX2AIi8<opc, MRMSrcReg, (outs RC:$dst),
            (ins RC:$src1, RC:$src2, u8imm:$src3),
            !strconcat(OpcodeStr,
              "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
            [(set RC:$dst, (IntId RC:$src1, RC:$src2, imm:$src3))]>,
            Sched<[WriteBlend]>, VEX_4V;
  def rmi : AVX2AIi8<opc, MRMSrcMem, (outs RC:$dst),
            (ins RC:$src1, x86memop:$src2, u8imm:$src3),
            !strconcat(OpcodeStr,
              "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
            [(set RC:$dst,
              (IntId RC:$src1,
                (bitconvert (memop_frag addr:$src2)), imm:$src3))]>,
            Sched<[WriteBlendLd, ReadAfterLd]>, VEX_4V;
}
defm VPBLENDD : AVX2_binop_rmi_int<0x02, "vpblendd", int_x86_avx2_pblendd_128,
                                   VR128, loadv2i64, i128mem>;
defm VPBLENDDY : AVX2_binop_rmi_int<0x02, "vpblendd", int_x86_avx2_pblendd_256,
                                    VR256, loadv4i64, i256mem>, VEX_L;

def : Pat<(v4i32 (X86Blendi (v4i32 VR128:$src1), (v4i32 VR128:$src2),
                            (i8 imm:$mask))),
          (VPBLENDDrri VR128:$src1, VR128:$src2, imm:$mask)>;
def : Pat<(v8i32 (X86Blendi (v8i32 VR256:$src1), (v8i32 VR256:$src2),
                            (i8 imm:$mask))),
          (VPBLENDDYrri VR256:$src1, VR256:$src2, imm:$mask)>;
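// vpblendd selects dwords between the two sources based on the immediate, so
// the X86Blendi patterns above map i32 vector blends directly to it.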
//===----------------------------------------------------------------------===//
// VPBROADCAST - Load from memory and broadcast to all elements of the
// destination operand
//===----------------------------------------------------------------------===//
multiclass avx2_broadcast<bits<8> opc, string OpcodeStr,
                          X86MemOperand x86memop, PatFrag ld_frag,
                          Intrinsic Int128, Intrinsic Int256> {
  def rr : AVX28I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                  !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                  [(set VR128:$dst, (Int128 VR128:$src))]>,
                  Sched<[WriteShuffle]>, VEX;
  def rm : AVX28I<opc, MRMSrcMem, (outs VR128:$dst), (ins x86memop:$src),
                  !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                  [(set VR128:$dst,
                    (Int128 (scalar_to_vector (ld_frag addr:$src))))]>,
                  Sched<[WriteLoad]>, VEX;
  def Yrr : AVX28I<opc, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
                   !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                   [(set VR256:$dst, (Int256 VR128:$src))]>,
                   Sched<[WriteShuffle256]>, VEX, VEX_L;
  def Yrm : AVX28I<opc, MRMSrcMem, (outs VR256:$dst), (ins x86memop:$src),
                   !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                   [(set VR256:$dst,
                     (Int256 (scalar_to_vector (ld_frag addr:$src))))]>,
                   Sched<[WriteLoad]>, VEX, VEX_L;
}
defm VPBROADCASTB : avx2_broadcast<0x78, "vpbroadcastb", i8mem, loadi8,
                                   int_x86_avx2_pbroadcastb_128,
                                   int_x86_avx2_pbroadcastb_256>;
defm VPBROADCASTW : avx2_broadcast<0x79, "vpbroadcastw", i16mem, loadi16,
                                   int_x86_avx2_pbroadcastw_128,
                                   int_x86_avx2_pbroadcastw_256>;
defm VPBROADCASTD : avx2_broadcast<0x58, "vpbroadcastd", i32mem, loadi32,
                                   int_x86_avx2_pbroadcastd_128,
                                   int_x86_avx2_pbroadcastd_256>;
defm VPBROADCASTQ : avx2_broadcast<0x59, "vpbroadcastq", i64mem, loadi64,
                                   int_x86_avx2_pbroadcastq_128,
                                   int_x86_avx2_pbroadcastq_256>;
let Predicates = [HasAVX2] in {
  def : Pat<(v16i8 (X86VBroadcast (loadi8 addr:$src))),
            (VPBROADCASTBrm addr:$src)>;
  def : Pat<(v32i8 (X86VBroadcast (loadi8 addr:$src))),
            (VPBROADCASTBYrm addr:$src)>;
  def : Pat<(v8i16 (X86VBroadcast (loadi16 addr:$src))),
            (VPBROADCASTWrm addr:$src)>;
  def : Pat<(v16i16 (X86VBroadcast (loadi16 addr:$src))),
            (VPBROADCASTWYrm addr:$src)>;
  def : Pat<(v4i32 (X86VBroadcast (loadi32 addr:$src))),
            (VPBROADCASTDrm addr:$src)>;
  def : Pat<(v8i32 (X86VBroadcast (loadi32 addr:$src))),
            (VPBROADCASTDYrm addr:$src)>;
  def : Pat<(v2i64 (X86VBroadcast (loadi64 addr:$src))),
            (VPBROADCASTQrm addr:$src)>;
  def : Pat<(v4i64 (X86VBroadcast (loadi64 addr:$src))),
            (VPBROADCASTQYrm addr:$src)>;
  def : Pat<(v16i8 (X86VBroadcast (v16i8 VR128:$src))),
            (VPBROADCASTBrr VR128:$src)>;
  def : Pat<(v32i8 (X86VBroadcast (v16i8 VR128:$src))),
            (VPBROADCASTBYrr VR128:$src)>;
  def : Pat<(v8i16 (X86VBroadcast (v8i16 VR128:$src))),
            (VPBROADCASTWrr VR128:$src)>;
  def : Pat<(v16i16 (X86VBroadcast (v8i16 VR128:$src))),
            (VPBROADCASTWYrr VR128:$src)>;
  def : Pat<(v4i32 (X86VBroadcast (v4i32 VR128:$src))),
            (VPBROADCASTDrr VR128:$src)>;
  def : Pat<(v8i32 (X86VBroadcast (v4i32 VR128:$src))),
            (VPBROADCASTDYrr VR128:$src)>;
  def : Pat<(v2i64 (X86VBroadcast (v2i64 VR128:$src))),
            (VPBROADCASTQrr VR128:$src)>;
  def : Pat<(v4i64 (X86VBroadcast (v2i64 VR128:$src))),
            (VPBROADCASTQYrr VR128:$src)>;
  def : Pat<(v4f32 (X86VBroadcast (v4f32 VR128:$src))),
            (VBROADCASTSSrr VR128:$src)>;
  def : Pat<(v8f32 (X86VBroadcast (v4f32 VR128:$src))),
            (VBROADCASTSSYrr VR128:$src)>;
  def : Pat<(v2f64 (X86VBroadcast (v2f64 VR128:$src))),
            (VPBROADCASTQrr VR128:$src)>;
  def : Pat<(v4f64 (X86VBroadcast (v2f64 VR128:$src))),
            (VBROADCASTSDYrr VR128:$src)>;
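  // The v2f64 register-broadcast case reuses VPBROADCASTQrr because AVX2 has
  // no XMM-destination vbroadcastsd form; broadcasting the 64-bit pattern is
  // bit-identical for doubles.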
  // Provide aliases for broadcast from the same register class that
  // automatically does the extract.
  def : Pat<(v32i8 (X86VBroadcast (v32i8 VR256:$src))),
            (VPBROADCASTBYrr (v16i8 (EXTRACT_SUBREG (v32i8 VR256:$src),
                                                    sub_xmm)))>;
  def : Pat<(v16i16 (X86VBroadcast (v16i16 VR256:$src))),
            (VPBROADCASTWYrr (v8i16 (EXTRACT_SUBREG (v16i16 VR256:$src),
                                                    sub_xmm)))>;
  def : Pat<(v8i32 (X86VBroadcast (v8i32 VR256:$src))),
            (VPBROADCASTDYrr (v4i32 (EXTRACT_SUBREG (v8i32 VR256:$src),
                                                    sub_xmm)))>;
  def : Pat<(v4i64 (X86VBroadcast (v4i64 VR256:$src))),
            (VPBROADCASTQYrr (v2i64 (EXTRACT_SUBREG (v4i64 VR256:$src),
                                                    sub_xmm)))>;
  def : Pat<(v8f32 (X86VBroadcast (v8f32 VR256:$src))),
            (VBROADCASTSSYrr (v4f32 (EXTRACT_SUBREG (v8f32 VR256:$src),
                                                    sub_xmm)))>;
  def : Pat<(v4f64 (X86VBroadcast (v4f64 VR256:$src))),
            (VBROADCASTSDYrr (v2f64 (EXTRACT_SUBREG (v4f64 VR256:$src),
                                                    sub_xmm)))>;
  // Provide fallback in case the load node that is used in the patterns above
  // is used by additional users, which prevents the pattern selection.
  let AddedComplexity = 20 in {
    def : Pat<(v4f32 (X86VBroadcast FR32:$src)),
              (VBROADCASTSSrr (COPY_TO_REGCLASS FR32:$src, VR128))>;
    def : Pat<(v8f32 (X86VBroadcast FR32:$src)),
              (VBROADCASTSSYrr (COPY_TO_REGCLASS FR32:$src, VR128))>;
    def : Pat<(v4f64 (X86VBroadcast FR64:$src)),
              (VBROADCASTSDYrr (COPY_TO_REGCLASS FR64:$src, VR128))>;

    def : Pat<(v4i32 (X86VBroadcast GR32:$src)),
              (VBROADCASTSSrr (COPY_TO_REGCLASS GR32:$src, VR128))>;
    def : Pat<(v8i32 (X86VBroadcast GR32:$src)),
              (VBROADCASTSSYrr (COPY_TO_REGCLASS GR32:$src, VR128))>;
    def : Pat<(v4i64 (X86VBroadcast GR64:$src)),
              (VBROADCASTSDYrr (COPY_TO_REGCLASS GR64:$src, VR128))>;

    def : Pat<(v16i8 (X86VBroadcast GR8:$src)),
              (VPBROADCASTBrr (COPY_TO_REGCLASS
                (i32 (SUBREG_TO_REG (i32 0), GR8:$src, sub_8bit)),
                VR128))>;
    def : Pat<(v32i8 (X86VBroadcast GR8:$src)),
              (VPBROADCASTBYrr (COPY_TO_REGCLASS
                (i32 (SUBREG_TO_REG (i32 0), GR8:$src, sub_8bit)),
                VR128))>;
    def : Pat<(v8i16 (X86VBroadcast GR16:$src)),
              (VPBROADCASTWrr (COPY_TO_REGCLASS
                (i32 (SUBREG_TO_REG (i32 0), GR16:$src, sub_16bit)),
                VR128))>;
    def : Pat<(v16i16 (X86VBroadcast GR16:$src)),
              (VPBROADCASTWYrr (COPY_TO_REGCLASS
                (i32 (SUBREG_TO_REG (i32 0), GR16:$src, sub_16bit)),
                VR128))>;

    // The patterns for VPBROADCASTD are not needed because they would match
    // the exact same thing as VBROADCASTSS patterns.
    def : Pat<(v2i64 (X86VBroadcast GR64:$src)),
              (VPBROADCASTQrr (COPY_TO_REGCLASS GR64:$src, VR128))>;
    // The v4i64 pattern is not needed because VBROADCASTSDYrr already matches it.
  }
}
// AVX1 broadcast patterns
let Predicates = [HasAVX1Only] in {
  def : Pat<(v8i32 (X86VBroadcast (loadi32 addr:$src))),
            (VBROADCASTSSYrm addr:$src)>;
  def : Pat<(v4i64 (X86VBroadcast (loadi64 addr:$src))),
            (VBROADCASTSDYrm addr:$src)>;
  def : Pat<(v4i32 (X86VBroadcast (loadi32 addr:$src))),
            (VBROADCASTSSrm addr:$src)>;
}
let Predicates = [HasAVX] in {
  // Provide fallback in case the load node that is used in the patterns above
  // is used by additional users, which prevents the pattern selection.
  let AddedComplexity = 20 in {
    // 128-bit broadcasts:
    def : Pat<(v4f32 (X86VBroadcast FR32:$src)),
              (VPSHUFDri (COPY_TO_REGCLASS FR32:$src, VR128), 0)>;
    def : Pat<(v8f32 (X86VBroadcast FR32:$src)),
              (VINSERTF128rr (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)),
                (VPSHUFDri (COPY_TO_REGCLASS FR32:$src, VR128), 0), sub_xmm),
                (VPSHUFDri (COPY_TO_REGCLASS FR32:$src, VR128), 0), 1)>;
    def : Pat<(v4f64 (X86VBroadcast FR64:$src)),
              (VINSERTF128rr (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)),
                (VPSHUFDri (COPY_TO_REGCLASS FR64:$src, VR128), 0x44), sub_xmm),
                (VPSHUFDri (COPY_TO_REGCLASS FR64:$src, VR128), 0x44), 1)>;

    def : Pat<(v4i32 (X86VBroadcast GR32:$src)),
              (VPSHUFDri (COPY_TO_REGCLASS GR32:$src, VR128), 0)>;
    def : Pat<(v8i32 (X86VBroadcast GR32:$src)),
              (VINSERTF128rr (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)),
                (VPSHUFDri (COPY_TO_REGCLASS GR32:$src, VR128), 0), sub_xmm),
                (VPSHUFDri (COPY_TO_REGCLASS GR32:$src, VR128), 0), 1)>;
    def : Pat<(v4i64 (X86VBroadcast GR64:$src)),
              (VINSERTF128rr (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)),
                (VPSHUFDri (COPY_TO_REGCLASS GR64:$src, VR128), 0x44), sub_xmm),
                (VPSHUFDri (COPY_TO_REGCLASS GR64:$src, VR128), 0x44), 1)>;
  }

  def : Pat<(v2f64 (X86VBroadcast f64:$src)),
            (VMOVDDUPrr (COPY_TO_REGCLASS FR64:$src, VR128))>;
}
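// Without AVX2 register broadcasts, the patterns above synthesize a splat with
// vpshufd (or movddup for doubles) and use vinsertf128 to replicate the low
// 128 bits into both lanes of a 256-bit result.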
//===----------------------------------------------------------------------===//
// VPERM - Permute instructions
//===----------------------------------------------------------------------===//
multiclass avx2_perm<bits<8> opc, string OpcodeStr, PatFrag mem_frag,
                     ValueType OpVT, X86FoldableSchedWrite Sched> {
  def Yrr : AVX28I<opc, MRMSrcReg, (outs VR256:$dst),
                   (ins VR256:$src1, VR256:$src2),
                   !strconcat(OpcodeStr,
                              "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   [(set VR256:$dst,
                     (OpVT (X86VPermv VR256:$src1, VR256:$src2)))]>,
                   Sched<[Sched]>, VEX_4V, VEX_L;
  def Yrm : AVX28I<opc, MRMSrcMem, (outs VR256:$dst),
                   (ins VR256:$src1, i256mem:$src2),
                   !strconcat(OpcodeStr,
                              "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   [(set VR256:$dst,
                     (OpVT (X86VPermv VR256:$src1,
                            (bitconvert (mem_frag addr:$src2)))))]>,
                   Sched<[Sched.Folded, ReadAfterLd]>, VEX_4V, VEX_L;
}
defm VPERMD : avx2_perm<0x36, "vpermd", loadv4i64, v8i32, WriteShuffle256>;
let ExeDomain = SSEPackedSingle in
defm VPERMPS : avx2_perm<0x16, "vpermps", loadv8f32, v8f32, WriteFShuffle256>;
multiclass avx2_perm_imm<bits<8> opc, string OpcodeStr, PatFrag mem_frag,
                         ValueType OpVT, X86FoldableSchedWrite Sched> {
  def Yri : AVX2AIi8<opc, MRMSrcReg, (outs VR256:$dst),
                     (ins VR256:$src1, u8imm:$src2),
                     !strconcat(OpcodeStr,
                                "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                     [(set VR256:$dst,
                       (OpVT (X86VPermi VR256:$src1, (i8 imm:$src2))))]>,
                     Sched<[Sched]>, VEX, VEX_L;
  def Ymi : AVX2AIi8<opc, MRMSrcMem, (outs VR256:$dst),
                     (ins i256mem:$src1, u8imm:$src2),
                     !strconcat(OpcodeStr,
                                "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                     [(set VR256:$dst,
                       (OpVT (X86VPermi (mem_frag addr:$src1),
                              (i8 imm:$src2))))]>,
                     Sched<[Sched.Folded, ReadAfterLd]>, VEX, VEX_L;
}
defm VPERMQ : avx2_perm_imm<0x00, "vpermq", loadv4i64, v4i64,
                            WriteShuffle256>, VEX_W;
let ExeDomain = SSEPackedDouble in
defm VPERMPD : avx2_perm_imm<0x01, "vpermpd", loadv4f64, v4f64,
                             WriteFShuffle256>, VEX_W;
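// vpermd/vpermps take their lane indices from a second vector register, while
// vpermq/vpermpd encode four 2-bit selectors in the immediate; all of them
// permute elements across the full 256-bit register.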
//===----------------------------------------------------------------------===//
// VPERM2I128 - Permute Integer Values in 128-bit chunks
//===----------------------------------------------------------------------===//
def VPERM2I128rr : AVX2AIi8<0x46, MRMSrcReg, (outs VR256:$dst),
          (ins VR256:$src1, VR256:$src2, u8imm:$src3),
          "vperm2i128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          [(set VR256:$dst, (v4i64 (X86VPerm2x128 VR256:$src1, VR256:$src2,
                            (i8 imm:$src3))))]>, Sched<[WriteShuffle256]>,
          VEX_4V, VEX_L;
def VPERM2I128rm : AVX2AIi8<0x46, MRMSrcMem, (outs VR256:$dst),
          (ins VR256:$src1, f256mem:$src2, u8imm:$src3),
          "vperm2i128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          [(set VR256:$dst, (X86VPerm2x128 VR256:$src1, (loadv4i64 addr:$src2),
                            (i8 imm:$src3)))]>,
          Sched<[WriteShuffle256Ld, ReadAfterLd]>, VEX_4V, VEX_L;

let Predicates = [HasAVX2] in {
  def : Pat<(v8i32 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
            (VPERM2I128rr VR256:$src1, VR256:$src2, imm:$imm)>;
  def : Pat<(v32i8 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
            (VPERM2I128rr VR256:$src1, VR256:$src2, imm:$imm)>;
  def : Pat<(v16i16 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
            (VPERM2I128rr VR256:$src1, VR256:$src2, imm:$imm)>;

  def : Pat<(v32i8 (X86VPerm2x128 VR256:$src1, (bc_v32i8 (loadv4i64 addr:$src2)),
                    (i8 imm:$imm))),
            (VPERM2I128rm VR256:$src1, addr:$src2, imm:$imm)>;
  def : Pat<(v16i16 (X86VPerm2x128 VR256:$src1,
                     (bc_v16i16 (loadv4i64 addr:$src2)), (i8 imm:$imm))),
            (VPERM2I128rm VR256:$src1, addr:$src2, imm:$imm)>;
  def : Pat<(v8i32 (X86VPerm2x128 VR256:$src1, (bc_v8i32 (loadv4i64 addr:$src2)),
                    (i8 imm:$imm))),
            (VPERM2I128rm VR256:$src1, addr:$src2, imm:$imm)>;
}
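// VPERM2I128 is defined with a v4i64 result type; the patterns above map the
// remaining 256-bit integer element types onto the same instruction.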
//===----------------------------------------------------------------------===//
// VINSERTI128 - Insert packed integer values
//===----------------------------------------------------------------------===//
let hasSideEffects = 0 in {
def VINSERTI128rr : AVX2AIi8<0x38, MRMSrcReg, (outs VR256:$dst),
          (ins VR256:$src1, VR128:$src2, u8imm:$src3),
          "vinserti128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, Sched<[WriteShuffle256]>, VEX_4V, VEX_L;
let mayLoad = 1 in
def VINSERTI128rm : AVX2AIi8<0x38, MRMSrcMem, (outs VR256:$dst),
          (ins VR256:$src1, i128mem:$src2, u8imm:$src3),
          "vinserti128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, Sched<[WriteShuffle256Ld, ReadAfterLd]>, VEX_4V, VEX_L;
}
let Predicates = [HasAVX2] in {
  def : Pat<(vinsert128_insert:$ins (v4i64 VR256:$src1), (v2i64 VR128:$src2),
                                    (iPTR imm)),
            (VINSERTI128rr VR256:$src1, VR128:$src2,
                           (INSERT_get_vinsert128_imm VR256:$ins))>;
  def : Pat<(vinsert128_insert:$ins (v8i32 VR256:$src1), (v4i32 VR128:$src2),
                                    (iPTR imm)),
            (VINSERTI128rr VR256:$src1, VR128:$src2,
                           (INSERT_get_vinsert128_imm VR256:$ins))>;
  def : Pat<(vinsert128_insert:$ins (v32i8 VR256:$src1), (v16i8 VR128:$src2),
                                    (iPTR imm)),
            (VINSERTI128rr VR256:$src1, VR128:$src2,
                           (INSERT_get_vinsert128_imm VR256:$ins))>;
  def : Pat<(vinsert128_insert:$ins (v16i16 VR256:$src1), (v8i16 VR128:$src2),
                                    (iPTR imm)),
            (VINSERTI128rr VR256:$src1, VR128:$src2,
                           (INSERT_get_vinsert128_imm VR256:$ins))>;

  def : Pat<(vinsert128_insert:$ins (v4i64 VR256:$src1), (loadv2i64 addr:$src2),
                                    (iPTR imm)),
            (VINSERTI128rm VR256:$src1, addr:$src2,
                           (INSERT_get_vinsert128_imm VR256:$ins))>;
  def : Pat<(vinsert128_insert:$ins (v8i32 VR256:$src1),
                                    (bc_v4i32 (loadv2i64 addr:$src2)),
                                    (iPTR imm)),
            (VINSERTI128rm VR256:$src1, addr:$src2,
                           (INSERT_get_vinsert128_imm VR256:$ins))>;
  def : Pat<(vinsert128_insert:$ins (v32i8 VR256:$src1),
                                    (bc_v16i8 (loadv2i64 addr:$src2)),
                                    (iPTR imm)),
            (VINSERTI128rm VR256:$src1, addr:$src2,
                           (INSERT_get_vinsert128_imm VR256:$ins))>;
  def : Pat<(vinsert128_insert:$ins (v16i16 VR256:$src1),
                                    (bc_v8i16 (loadv2i64 addr:$src2)),
                                    (iPTR imm)),
            (VINSERTI128rm VR256:$src1, addr:$src2,
                           (INSERT_get_vinsert128_imm VR256:$ins))>;
}
//===----------------------------------------------------------------------===//
// VEXTRACTI128 - Extract packed integer values
//===----------------------------------------------------------------------===//
def VEXTRACTI128rr : AVX2AIi8<0x39, MRMDestReg, (outs VR128:$dst),
          (ins VR256:$src1, u8imm:$src2),
          "vextracti128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
          [(set VR128:$dst,
            (int_x86_avx2_vextracti128 VR256:$src1, imm:$src2))]>,
          Sched<[WriteShuffle256]>, VEX, VEX_L;
let hasSideEffects = 0, mayStore = 1 in
def VEXTRACTI128mr : AVX2AIi8<0x39, MRMDestMem, (outs),
          (ins i128mem:$dst, VR256:$src1, u8imm:$src2),
          "vextracti128\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
          Sched<[WriteStore]>, VEX, VEX_L;
let Predicates = [HasAVX2] in {
  def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
            (v2i64 (VEXTRACTI128rr
                    (v4i64 VR256:$src1),
                    (EXTRACT_get_vextract128_imm VR128:$ext)))>;
  def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
            (v4i32 (VEXTRACTI128rr
                    (v8i32 VR256:$src1),
                    (EXTRACT_get_vextract128_imm VR128:$ext)))>;
  def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
            (v8i16 (VEXTRACTI128rr
                    (v16i16 VR256:$src1),
                    (EXTRACT_get_vextract128_imm VR128:$ext)))>;
  def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
            (v16i8 (VEXTRACTI128rr
                    (v32i8 VR256:$src1),
                    (EXTRACT_get_vextract128_imm VR128:$ext)))>;

  def : Pat<(store (v2i64 (vextract128_extract:$ext (v4i64 VR256:$src1),
                                                    (iPTR imm))), addr:$dst),
            (VEXTRACTI128mr addr:$dst, VR256:$src1,
                            (EXTRACT_get_vextract128_imm VR128:$ext))>;
  def : Pat<(store (v4i32 (vextract128_extract:$ext (v8i32 VR256:$src1),
                                                    (iPTR imm))), addr:$dst),
            (VEXTRACTI128mr addr:$dst, VR256:$src1,
                            (EXTRACT_get_vextract128_imm VR128:$ext))>;
  def : Pat<(store (v8i16 (vextract128_extract:$ext (v16i16 VR256:$src1),
                                                    (iPTR imm))), addr:$dst),
            (VEXTRACTI128mr addr:$dst, VR256:$src1,
                            (EXTRACT_get_vextract128_imm VR128:$ext))>;
  def : Pat<(store (v16i8 (vextract128_extract:$ext (v32i8 VR256:$src1),
                                                    (iPTR imm))), addr:$dst),
            (VEXTRACTI128mr addr:$dst, VR256:$src1,
                            (EXTRACT_get_vextract128_imm VR128:$ext))>;
}
//===----------------------------------------------------------------------===//
// VPMASKMOV - Conditional SIMD Integer Packed Loads and Stores
//===----------------------------------------------------------------------===//
multiclass avx2_pmovmask<string OpcodeStr,
                         Intrinsic IntLd128, Intrinsic IntLd256,
                         Intrinsic IntSt128, Intrinsic IntSt256> {
  def rm  : AVX28I<0x8c, MRMSrcMem, (outs VR128:$dst),
             (ins VR128:$src1, i128mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR128:$dst, (IntLd128 addr:$src2, VR128:$src1))]>, VEX_4V;
  def Yrm : AVX28I<0x8c, MRMSrcMem, (outs VR256:$dst),
             (ins VR256:$src1, i256mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR256:$dst, (IntLd256 addr:$src2, VR256:$src1))]>,
             VEX_4V, VEX_L;
  def mr  : AVX28I<0x8e, MRMDestMem, (outs),
             (ins i128mem:$dst, VR128:$src1, VR128:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(IntSt128 addr:$dst, VR128:$src1, VR128:$src2)]>, VEX_4V;
  def Ymr : AVX28I<0x8e, MRMDestMem, (outs),
             (ins i256mem:$dst, VR256:$src1, VR256:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(IntSt256 addr:$dst, VR256:$src1, VR256:$src2)]>, VEX_4V, VEX_L;
}
defm VPMASKMOVD : avx2_pmovmask<"vpmaskmovd",
                                int_x86_avx2_maskload_d,
                                int_x86_avx2_maskload_d_256,
                                int_x86_avx2_maskstore_d,
                                int_x86_avx2_maskstore_d_256>;
defm VPMASKMOVQ : avx2_pmovmask<"vpmaskmovq",
                                int_x86_avx2_maskload_q,
                                int_x86_avx2_maskload_q_256,
                                int_x86_avx2_maskstore_q,
                                int_x86_avx2_maskstore_q_256>, VEX_W;
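// The patterns below lower generic masked_store/masked_load nodes: stores, and
// loads whose pass-through value is undef or zero, map directly to the
// vmaskmov/vpmaskmov forms, while a load with a real pass-through value needs
// an extra vblendv to merge $src0 into the masked-off lanes.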
def: Pat<(masked_store addr:$ptr, (v8i32 VR256:$mask), (v8f32 VR256:$src)),
         (VMASKMOVPSYmr addr:$ptr, VR256:$mask, VR256:$src)>;

def: Pat<(masked_store addr:$ptr, (v8i32 VR256:$mask), (v8i32 VR256:$src)),
         (VPMASKMOVDYmr addr:$ptr, VR256:$mask, VR256:$src)>;

def: Pat<(masked_store addr:$ptr, (v4i32 VR128:$mask), (v4f32 VR128:$src)),
         (VMASKMOVPSmr addr:$ptr, VR128:$mask, VR128:$src)>;

def: Pat<(masked_store addr:$ptr, (v4i32 VR128:$mask), (v4i32 VR128:$src)),
         (VPMASKMOVDmr addr:$ptr, VR128:$mask, VR128:$src)>;

def: Pat<(v8f32 (masked_load addr:$ptr, (v8i32 VR256:$mask), undef)),
         (VMASKMOVPSYrm VR256:$mask, addr:$ptr)>;

def: Pat<(v8f32 (masked_load addr:$ptr, (v8i32 VR256:$mask),
                 (bc_v8f32 (v8i32 immAllZerosV)))),
         (VMASKMOVPSYrm VR256:$mask, addr:$ptr)>;

def: Pat<(v8f32 (masked_load addr:$ptr, (v8i32 VR256:$mask), (v8f32 VR256:$src0))),
         (VBLENDVPSYrr VR256:$src0, (VMASKMOVPSYrm VR256:$mask, addr:$ptr),
                       VR256:$mask)>;

def: Pat<(v8i32 (masked_load addr:$ptr, (v8i32 VR256:$mask), undef)),
         (VPMASKMOVDYrm VR256:$mask, addr:$ptr)>;

def: Pat<(v8i32 (masked_load addr:$ptr, (v8i32 VR256:$mask), (v8i32 immAllZerosV))),
         (VPMASKMOVDYrm VR256:$mask, addr:$ptr)>;

def: Pat<(v8i32 (masked_load addr:$ptr, (v8i32 VR256:$mask), (v8i32 VR256:$src0))),
         (VBLENDVPSYrr VR256:$src0, (VPMASKMOVDYrm VR256:$mask, addr:$ptr),
                       VR256:$mask)>;

def: Pat<(v4f32 (masked_load addr:$ptr, (v4i32 VR128:$mask), undef)),
         (VMASKMOVPSrm VR128:$mask, addr:$ptr)>;

def: Pat<(v4f32 (masked_load addr:$ptr, (v4i32 VR128:$mask),
                 (bc_v4f32 (v4i32 immAllZerosV)))),
         (VMASKMOVPSrm VR128:$mask, addr:$ptr)>;

def: Pat<(v4f32 (masked_load addr:$ptr, (v4i32 VR128:$mask), (v4f32 VR128:$src0))),
         (VBLENDVPSrr VR128:$src0, (VMASKMOVPSrm VR128:$mask, addr:$ptr),
                      VR128:$mask)>;

def: Pat<(v4i32 (masked_load addr:$ptr, (v4i32 VR128:$mask), undef)),
         (VPMASKMOVDrm VR128:$mask, addr:$ptr)>;

def: Pat<(v4i32 (masked_load addr:$ptr, (v4i32 VR128:$mask), (v4i32 immAllZerosV))),
         (VPMASKMOVDrm VR128:$mask, addr:$ptr)>;

def: Pat<(v4i32 (masked_load addr:$ptr, (v4i32 VR128:$mask), (v4i32 VR128:$src0))),
         (VBLENDVPSrr VR128:$src0, (VPMASKMOVDrm VR128:$mask, addr:$ptr),
                      VR128:$mask)>;
def: Pat<(masked_store addr:$ptr, (v4i64 VR256:$mask), (v4f64 VR256:$src)),
         (VMASKMOVPDYmr addr:$ptr, VR256:$mask, VR256:$src)>;

def: Pat<(masked_store addr:$ptr, (v4i64 VR256:$mask), (v4i64 VR256:$src)),
         (VPMASKMOVQYmr addr:$ptr, VR256:$mask, VR256:$src)>;

def: Pat<(v4f64 (masked_load addr:$ptr, (v4i64 VR256:$mask), undef)),
         (VMASKMOVPDYrm VR256:$mask, addr:$ptr)>;

def: Pat<(v4f64 (masked_load addr:$ptr, (v4i64 VR256:$mask),
                 (v4f64 immAllZerosV))),
         (VMASKMOVPDYrm VR256:$mask, addr:$ptr)>;

def: Pat<(v4f64 (masked_load addr:$ptr, (v4i64 VR256:$mask), (v4f64 VR256:$src0))),
         (VBLENDVPDYrr VR256:$src0, (VMASKMOVPDYrm VR256:$mask, addr:$ptr),
                       VR256:$mask)>;

def: Pat<(v4i64 (masked_load addr:$ptr, (v4i64 VR256:$mask), undef)),
         (VPMASKMOVQYrm VR256:$mask, addr:$ptr)>;

def: Pat<(v4i64 (masked_load addr:$ptr, (v4i64 VR256:$mask),
                 (bc_v4i64 (v8i32 immAllZerosV)))),
         (VPMASKMOVQYrm VR256:$mask, addr:$ptr)>;

def: Pat<(v4i64 (masked_load addr:$ptr, (v4i64 VR256:$mask), (v4i64 VR256:$src0))),
         (VBLENDVPDYrr VR256:$src0, (VPMASKMOVQYrm VR256:$mask, addr:$ptr),
                       VR256:$mask)>;

def: Pat<(masked_store addr:$ptr, (v2i64 VR128:$mask), (v2f64 VR128:$src)),
         (VMASKMOVPDmr addr:$ptr, VR128:$mask, VR128:$src)>;

def: Pat<(masked_store addr:$ptr, (v2i64 VR128:$mask), (v2i64 VR128:$src)),
         (VPMASKMOVQmr addr:$ptr, VR128:$mask, VR128:$src)>;

def: Pat<(v2f64 (masked_load addr:$ptr, (v2i64 VR128:$mask), undef)),
         (VMASKMOVPDrm VR128:$mask, addr:$ptr)>;

def: Pat<(v2f64 (masked_load addr:$ptr, (v2i64 VR128:$mask),
                 (v2f64 immAllZerosV))),
         (VMASKMOVPDrm VR128:$mask, addr:$ptr)>;

def: Pat<(v2f64 (masked_load addr:$ptr, (v2i64 VR128:$mask), (v2f64 VR128:$src0))),
         (VBLENDVPDrr VR128:$src0, (VMASKMOVPDrm VR128:$mask, addr:$ptr),
                      VR128:$mask)>;

def: Pat<(v2i64 (masked_load addr:$ptr, (v2i64 VR128:$mask), undef)),
         (VPMASKMOVQrm VR128:$mask, addr:$ptr)>;

def: Pat<(v2i64 (masked_load addr:$ptr, (v2i64 VR128:$mask),
                 (bc_v2i64 (v4i32 immAllZerosV)))),
         (VPMASKMOVQrm VR128:$mask, addr:$ptr)>;

def: Pat<(v2i64 (masked_load addr:$ptr, (v2i64 VR128:$mask), (v2i64 VR128:$src0))),
         (VBLENDVPDrr VR128:$src0, (VPMASKMOVQrm VR128:$mask, addr:$ptr),
                      VR128:$mask)>;
//===----------------------------------------------------------------------===//
// Variable Bit Shifts
//===----------------------------------------------------------------------===//
multiclass avx2_var_shift<bits<8> opc, string OpcodeStr, SDNode OpNode,
                          ValueType vt128, ValueType vt256> {
  def rr : AVX28I<opc, MRMSrcReg, (outs VR128:$dst),
             (ins VR128:$src1, VR128:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR128:$dst,
               (vt128 (OpNode VR128:$src1, (vt128 VR128:$src2))))]>,
             VEX_4V, Sched<[WriteVarVecShift]>;
  def rm : AVX28I<opc, MRMSrcMem, (outs VR128:$dst),
             (ins VR128:$src1, i128mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR128:$dst,
               (vt128 (OpNode VR128:$src1,
                       (vt128 (bitconvert (loadv2i64 addr:$src2))))))]>,
             VEX_4V, Sched<[WriteVarVecShiftLd, ReadAfterLd]>;
  def Yrr : AVX28I<opc, MRMSrcReg, (outs VR256:$dst),
             (ins VR256:$src1, VR256:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR256:$dst,
               (vt256 (OpNode VR256:$src1, (vt256 VR256:$src2))))]>,
             VEX_4V, VEX_L, Sched<[WriteVarVecShift]>;
  def Yrm : AVX28I<opc, MRMSrcMem, (outs VR256:$dst),
             (ins VR256:$src1, i256mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR256:$dst,
               (vt256 (OpNode VR256:$src1,
                       (vt256 (bitconvert (loadv4i64 addr:$src2))))))]>,
             VEX_4V, VEX_L, Sched<[WriteVarVecShiftLd, ReadAfterLd]>;
}
defm VPSLLVD : avx2_var_shift<0x47, "vpsllvd", shl, v4i32, v8i32>;
defm VPSLLVQ : avx2_var_shift<0x47, "vpsllvq", shl, v2i64, v4i64>, VEX_W;
defm VPSRLVD : avx2_var_shift<0x45, "vpsrlvd", srl, v4i32, v8i32>;
defm VPSRLVQ : avx2_var_shift<0x45, "vpsrlvq", srl, v2i64, v4i64>, VEX_W;
defm VPSRAVD : avx2_var_shift<0x46, "vpsravd", sra, v4i32, v8i32>;
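// Note: AVX2 provides no vpsravq; variable 64-bit arithmetic right shifts do
// not appear until AVX-512, so only the 32-bit VPSRAVD forms are defined here.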
//===----------------------------------------------------------------------===//
// VGATHER - GATHER Operations
multiclass avx2_gather<bits<8> opc, string OpcodeStr, RegisterClass RC256,
                       X86MemOperand memop128, X86MemOperand memop256> {
  def rm  : AVX28I<opc, MRMSrcMem, (outs VR128:$dst, VR128:$mask_wb),
            (ins VR128:$src1, memop128:$src2, VR128:$mask),
            !strconcat(OpcodeStr,
                       "\t{$mask, $src2, $dst|$dst, $src2, $mask}"),
            []>, VEX_4VOp3;
  def Yrm : AVX28I<opc, MRMSrcMem, (outs RC256:$dst, RC256:$mask_wb),
            (ins RC256:$src1, memop256:$src2, RC256:$mask),
            !strconcat(OpcodeStr,
                       "\t{$mask, $src2, $dst|$dst, $src2, $mask}"),
            []>, VEX_4VOp3, VEX_L;
}
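// Each gather produces two results: the gathered data and a write-back of the
// mask register (the hardware clears mask elements as they complete), which is
// why the block below ties $src1/$mask to the outputs and marks the outputs
// earlyclobber.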
let mayLoad = 1, Constraints
  = "@earlyclobber $dst,@earlyclobber $mask_wb, $src1 = $dst, $mask = $mask_wb"
  in {
  defm VPGATHERDQ : avx2_gather<0x90, "vpgatherdq", VR256, vx64mem, vx64mem>, VEX_W;
  defm VPGATHERQQ : avx2_gather<0x91, "vpgatherqq", VR256, vx64mem, vy64mem>, VEX_W;
  defm VPGATHERDD : avx2_gather<0x90, "vpgatherdd", VR256, vx32mem, vy32mem>;
  defm VPGATHERQD : avx2_gather<0x91, "vpgatherqd", VR128, vx32mem, vy32mem>;

  let ExeDomain = SSEPackedDouble in {
    defm VGATHERDPD : avx2_gather<0x92, "vgatherdpd", VR256, vx64mem, vx64mem>, VEX_W;
    defm VGATHERQPD : avx2_gather<0x93, "vgatherqpd", VR256, vx64mem, vy64mem>, VEX_W;
  }
  let ExeDomain = SSEPackedSingle in {
    defm VGATHERDPS : avx2_gather<0x92, "vgatherdps", VR256, vx32mem, vy32mem>;
    defm VGATHERQPS : avx2_gather<0x93, "vgatherqps", VR128, vx32mem, vy32mem>;