//===-- X86InstrSSE.td - SSE Instruction Set ---------------*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the X86 SSE instruction set, defining the instructions,
// and properties of the instructions which are needed for code generation,
// machine code emission, and analysis.
//
//===----------------------------------------------------------------------===//

class OpndItins<InstrItinClass arg_rr, InstrItinClass arg_rm> {
  InstrItinClass rr = arg_rr;
  InstrItinClass rm = arg_rm;
}

class SizeItins<OpndItins arg_s, OpndItins arg_d> {
  OpndItins s = arg_s;
  OpndItins d = arg_d;
}

class ShiftOpndItins<InstrItinClass arg_rr, InstrItinClass arg_rm,
                     InstrItinClass arg_ri> {
  InstrItinClass rr = arg_rr;
  InstrItinClass rm = arg_rm;
  InstrItinClass ri = arg_ri;
}

def SSE_ALU_F32S : OpndItins<
  IIC_SSE_ALU_F32S_RR, IIC_SSE_ALU_F32S_RM
>;

def SSE_ALU_F64S : OpndItins<
  IIC_SSE_ALU_F64S_RR, IIC_SSE_ALU_F64S_RM
>;

def SSE_ALU_ITINS_S : SizeItins<
  SSE_ALU_F32S, SSE_ALU_F64S
>;

def SSE_MUL_F32S : OpndItins<
  IIC_SSE_MUL_F32S_RR, IIC_SSE_MUL_F32S_RM
>;

def SSE_MUL_F64S : OpndItins<
  IIC_SSE_MUL_F64S_RR, IIC_SSE_MUL_F64S_RM
>;

def SSE_MUL_ITINS_S : SizeItins<
  SSE_MUL_F32S, SSE_MUL_F64S
>;

def SSE_DIV_F32S : OpndItins<
  IIC_SSE_DIV_F32S_RR, IIC_SSE_DIV_F32S_RM
>;

def SSE_DIV_F64S : OpndItins<
  IIC_SSE_DIV_F64S_RR, IIC_SSE_DIV_F64S_RM
>;

def SSE_DIV_ITINS_S : SizeItins<
  SSE_DIV_F32S, SSE_DIV_F64S
>;

def SSE_ALU_F32P : OpndItins<
  IIC_SSE_ALU_F32P_RR, IIC_SSE_ALU_F32P_RM
>;

def SSE_ALU_F64P : OpndItins<
  IIC_SSE_ALU_F64P_RR, IIC_SSE_ALU_F64P_RM
>;

def SSE_ALU_ITINS_P : SizeItins<
  SSE_ALU_F32P, SSE_ALU_F64P
>;

def SSE_MUL_F32P : OpndItins<
  IIC_SSE_MUL_F32P_RR, IIC_SSE_MUL_F32P_RM
>;

def SSE_MUL_F64P : OpndItins<
  IIC_SSE_MUL_F64P_RR, IIC_SSE_MUL_F64P_RM
>;

def SSE_MUL_ITINS_P : SizeItins<
  SSE_MUL_F32P, SSE_MUL_F64P
>;

def SSE_DIV_F32P : OpndItins<
  IIC_SSE_DIV_F32P_RR, IIC_SSE_DIV_F32P_RM
>;

def SSE_DIV_F64P : OpndItins<
  IIC_SSE_DIV_F64P_RR, IIC_SSE_DIV_F64P_RM
>;

def SSE_DIV_ITINS_P : SizeItins<
  SSE_DIV_F32P, SSE_DIV_F64P
>;

def SSE_BIT_ITINS_P : OpndItins<
  IIC_SSE_BIT_P_RR, IIC_SSE_BIT_P_RM
>;

def SSE_INTALU_ITINS_P : OpndItins<
  IIC_SSE_INTALU_P_RR, IIC_SSE_INTALU_P_RM
>;

def SSE_INTALUQ_ITINS_P : OpndItins<
  IIC_SSE_INTALUQ_P_RR, IIC_SSE_INTALUQ_P_RM
>;

def SSE_INTMUL_ITINS_P : OpndItins<
  IIC_SSE_INTMUL_P_RR, IIC_SSE_INTMUL_P_RM
>;

def SSE_INTSHIFT_ITINS_P : ShiftOpndItins<
  IIC_SSE_INTSH_P_RR, IIC_SSE_INTSH_P_RM, IIC_SSE_INTSH_P_RI
>;

def SSE_MOVA_ITINS : OpndItins<
  IIC_SSE_MOVA_P_RR, IIC_SSE_MOVA_P_RM
>;

def SSE_MOVU_ITINS : OpndItins<
  IIC_SSE_MOVU_P_RR, IIC_SSE_MOVU_P_RM
>;

//===----------------------------------------------------------------------===//
// SSE 1 & 2 Instructions Classes
//===----------------------------------------------------------------------===//

/// sse12_fp_scalar - SSE 1 & 2 scalar instructions class
multiclass sse12_fp_scalar<bits<8> opc, string OpcodeStr, SDNode OpNode,
                           RegisterClass RC, X86MemOperand x86memop,
                           OpndItins itins,
                           bit Is2Addr = 1> {
  let isCommutable = 1 in {
    def rr : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
                !if(Is2Addr,
                    !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                    !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
                [(set RC:$dst, (OpNode RC:$src1, RC:$src2))], itins.rr>;
  }
  def rm : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
              !if(Is2Addr,
                  !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                  !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
              [(set RC:$dst, (OpNode RC:$src1, (load addr:$src2)))], itins.rm>;
}
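
// Illustration only (not an instantiation from this file): a hypothetical
//   defm FOO : sse12_fp_scalar<0x58, "addss", fadd, FR32, f32mem, SSE_ALU_F32S>;
// would expand to FOOrr (reg-reg) and FOOrm (reg-mem). With the default
// Is2Addr = 1 the two-operand SSE asm string is chosen; passing Is2Addr = 0
// selects the three-operand AVX form instead.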

/// sse12_fp_scalar_int - SSE 1 & 2 scalar instructions intrinsics class
multiclass sse12_fp_scalar_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
                               string asm, string SSEVer, string FPSizeStr,
                               Operand memopr, ComplexPattern mem_cpat,
                               OpndItins itins,
                               bit Is2Addr = 1> {
  def rr_Int : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (!cast<Intrinsic>(
                 !strconcat("int_x86_sse", SSEVer, "_", OpcodeStr, FPSizeStr))
             RC:$src1, RC:$src2))], itins.rr>;
  def rm_Int : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, memopr:$src2),
       !if(Is2Addr,
           !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (!cast<Intrinsic>(!strconcat("int_x86_sse",
                                          SSEVer, "_", OpcodeStr, FPSizeStr))
             RC:$src1, mem_cpat:$src2))], itins.rm>;
}
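
// Illustration only: the intrinsic is looked up by name via !cast/!strconcat,
// so hypothetical arguments SSEVer = "", OpcodeStr = "add", FPSizeStr = "_ss"
// would bind these patterns to int_x86_sse_add_ss, while SSEVer = "2" with
// FPSizeStr = "_sd" would bind them to int_x86_sse2_add_sd.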

/// sse12_fp_packed - SSE 1 & 2 packed instructions class
multiclass sse12_fp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
                           RegisterClass RC, ValueType vt,
                           X86MemOperand x86memop, PatFrag mem_frag,
                           Domain d, OpndItins itins, bit Is2Addr = 1> {
  let isCommutable = 1 in
    def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], itins.rr, d>;
  def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (OpNode RC:$src1, (mem_frag addr:$src2)))],
       itins.rm, d>;
}
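
// Illustration only: unlike the scalar class, this one carries a ValueType
// and an execution Domain, so a hypothetical
//   defm FOO : sse12_fp_packed<0x58, "addps", fadd, VR128, v4f32, f128mem,
//                              memopv4f32, SSEPackedSingle, SSE_ALU_F32P>;
// would produce packed-single-domain instructions that the domain-fixing
// pass can reason about.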

/// sse12_fp_packed_logical_rm - SSE 1 & 2 packed instructions class
multiclass sse12_fp_packed_logical_rm<bits<8> opc, RegisterClass RC, Domain d,
                                      string OpcodeStr, X86MemOperand x86memop,
                                      list<dag> pat_rr, list<dag> pat_rm,
                                      bit Is2Addr = 1,
                                      bit rr_hasSideEffects = 0> {
  let isCommutable = 1, neverHasSideEffects = rr_hasSideEffects in
    def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       pat_rr, IIC_DEFAULT, d>;
  def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       pat_rm, IIC_DEFAULT, d>;
}
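
// Illustration only: this variant takes the rr/rm selection patterns as
// explicit dag lists rather than deriving them from an SDNode, which lets
// the logical-op instantiations (and/or/xor/andn) supply patterns that view
// the same registers as either FP or integer vectors.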

/// sse12_fp_packed_int - SSE 1 & 2 packed instructions intrinsics class
multiclass sse12_fp_packed_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
                               string asm, string SSEVer, string FPSizeStr,
                               X86MemOperand x86memop, PatFrag mem_frag,
                               Domain d, OpndItins itins, bit Is2Addr = 1> {
  def rr_Int : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (!cast<Intrinsic>(
                 !strconcat("int_x86_", SSEVer, "_", OpcodeStr, FPSizeStr))
             RC:$src1, RC:$src2))], IIC_DEFAULT, d>;
  def rm_Int : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1,x86memop:$src2),
       !if(Is2Addr,
           !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (!cast<Intrinsic>(
                 !strconcat("int_x86_", SSEVer, "_", OpcodeStr, FPSizeStr))
             RC:$src1, (mem_frag addr:$src2)))], IIC_DEFAULT, d>;
}
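
// Illustration only: here the prefix is "int_x86_" with the SSE level spelled
// out in SSEVer, so hypothetical arguments SSEVer = "sse2", OpcodeStr = "min",
// FPSizeStr = "_pd" would bind the patterns to int_x86_sse2_min_pd; contrast
// the scalar class above, which hardcodes the "int_x86_sse" prefix.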

//===----------------------------------------------------------------------===//
// Non-instruction patterns
//===----------------------------------------------------------------------===//

// A vector extract of the first f32/f64 position is a subregister copy
def : Pat<(f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
          (COPY_TO_REGCLASS (v4f32 VR128:$src), FR32)>;
def : Pat<(f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
          (COPY_TO_REGCLASS (v2f64 VR128:$src), FR64)>;

// A 128-bit subvector extract from the first 256-bit vector position
// is a subregister copy that needs no instruction.
def : Pat<(v4i32 (extract_subvector (v8i32 VR256:$src), (i32 0))),
          (v4i32 (EXTRACT_SUBREG (v8i32 VR256:$src), sub_xmm))>;
def : Pat<(v4f32 (extract_subvector (v8f32 VR256:$src), (i32 0))),
          (v4f32 (EXTRACT_SUBREG (v8f32 VR256:$src), sub_xmm))>;

def : Pat<(v2i64 (extract_subvector (v4i64 VR256:$src), (i32 0))),
          (v2i64 (EXTRACT_SUBREG (v4i64 VR256:$src), sub_xmm))>;
def : Pat<(v2f64 (extract_subvector (v4f64 VR256:$src), (i32 0))),
          (v2f64 (EXTRACT_SUBREG (v4f64 VR256:$src), sub_xmm))>;

def : Pat<(v8i16 (extract_subvector (v16i16 VR256:$src), (i32 0))),
          (v8i16 (EXTRACT_SUBREG (v16i16 VR256:$src), sub_xmm))>;
def : Pat<(v16i8 (extract_subvector (v32i8 VR256:$src), (i32 0))),
          (v16i8 (EXTRACT_SUBREG (v32i8 VR256:$src), sub_xmm))>;

// A 128-bit subvector insert to the first 256-bit vector position
// is a subregister copy that needs no instruction.
def : Pat<(insert_subvector undef, (v2i64 VR128:$src), (i32 0)),
          (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
def : Pat<(insert_subvector undef, (v2f64 VR128:$src), (i32 0)),
          (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
def : Pat<(insert_subvector undef, (v4i32 VR128:$src), (i32 0)),
          (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
def : Pat<(insert_subvector undef, (v4f32 VR128:$src), (i32 0)),
          (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
def : Pat<(insert_subvector undef, (v8i16 VR128:$src), (i32 0)),
          (INSERT_SUBREG (v16i16 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
def : Pat<(insert_subvector undef, (v16i8 VR128:$src), (i32 0)),
          (INSERT_SUBREG (v32i8 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;

// Implicitly promote a 32-bit scalar to a vector.
def : Pat<(v4f32 (scalar_to_vector FR32:$src)),
          (COPY_TO_REGCLASS FR32:$src, VR128)>;
def : Pat<(v8f32 (scalar_to_vector FR32:$src)),
          (COPY_TO_REGCLASS FR32:$src, VR128)>;
// Implicitly promote a 64-bit scalar to a vector.
def : Pat<(v2f64 (scalar_to_vector FR64:$src)),
          (COPY_TO_REGCLASS FR64:$src, VR128)>;
def : Pat<(v4f64 (scalar_to_vector FR64:$src)),
          (COPY_TO_REGCLASS FR64:$src, VR128)>;

// Bitcasts between 128-bit vector types. Return the original type since
// no instruction is needed for the conversion.
let Predicates = [HasSSE2] in {
  def : Pat<(v2i64 (bitconvert (v4i32 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v2i64 (bitconvert (v8i16 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v2i64 (bitconvert (v16i8 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v2i64 (bitconvert (v2f64 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v2i64 (bitconvert (v4f32 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v2i64 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v8i16 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v16i8 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v2f64 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v4f32 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v2i64 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v4i32 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v16i8 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v2f64 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v4f32 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v2i64 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v4i32 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v8i16 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v2f64 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v4f32 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v2i64 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v4i32 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v8i16 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v16i8 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v2f64 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v2i64 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v4i32 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v8i16 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v16i8 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v4f32 VR128:$src))), (v2f64 VR128:$src)>;
}

// Bitcasts between 256-bit vector types. Return the original type since
// no instruction is needed for the conversion.
let Predicates = [HasAVX] in {
  def : Pat<(v4f64  (bitconvert (v8f32 VR256:$src))),  (v4f64 VR256:$src)>;
  def : Pat<(v4f64  (bitconvert (v8i32 VR256:$src))),  (v4f64 VR256:$src)>;
  def : Pat<(v4f64  (bitconvert (v4i64 VR256:$src))),  (v4f64 VR256:$src)>;
  def : Pat<(v4f64  (bitconvert (v16i16 VR256:$src))), (v4f64 VR256:$src)>;
  def : Pat<(v4f64  (bitconvert (v32i8 VR256:$src))),  (v4f64 VR256:$src)>;
  def : Pat<(v8f32  (bitconvert (v8i32 VR256:$src))),  (v8f32 VR256:$src)>;
  def : Pat<(v8f32  (bitconvert (v4i64 VR256:$src))),  (v8f32 VR256:$src)>;
  def : Pat<(v8f32  (bitconvert (v4f64 VR256:$src))),  (v8f32 VR256:$src)>;
  def : Pat<(v8f32  (bitconvert (v32i8 VR256:$src))),  (v8f32 VR256:$src)>;
  def : Pat<(v8f32  (bitconvert (v16i16 VR256:$src))), (v8f32 VR256:$src)>;
  def : Pat<(v4i64  (bitconvert (v8f32 VR256:$src))),  (v4i64 VR256:$src)>;
  def : Pat<(v4i64  (bitconvert (v8i32 VR256:$src))),  (v4i64 VR256:$src)>;
  def : Pat<(v4i64  (bitconvert (v4f64 VR256:$src))),  (v4i64 VR256:$src)>;
  def : Pat<(v4i64  (bitconvert (v32i8 VR256:$src))),  (v4i64 VR256:$src)>;
  def : Pat<(v4i64  (bitconvert (v16i16 VR256:$src))), (v4i64 VR256:$src)>;
  def : Pat<(v32i8  (bitconvert (v4f64 VR256:$src))),  (v32i8 VR256:$src)>;
  def : Pat<(v32i8  (bitconvert (v4i64 VR256:$src))),  (v32i8 VR256:$src)>;
  def : Pat<(v32i8  (bitconvert (v8f32 VR256:$src))),  (v32i8 VR256:$src)>;
  def : Pat<(v32i8  (bitconvert (v8i32 VR256:$src))),  (v32i8 VR256:$src)>;
  def : Pat<(v32i8  (bitconvert (v16i16 VR256:$src))), (v32i8 VR256:$src)>;
  def : Pat<(v8i32  (bitconvert (v32i8 VR256:$src))),  (v8i32 VR256:$src)>;
  def : Pat<(v8i32  (bitconvert (v16i16 VR256:$src))), (v8i32 VR256:$src)>;
  def : Pat<(v8i32  (bitconvert (v8f32 VR256:$src))),  (v8i32 VR256:$src)>;
  def : Pat<(v8i32  (bitconvert (v4i64 VR256:$src))),  (v8i32 VR256:$src)>;
  def : Pat<(v8i32  (bitconvert (v4f64 VR256:$src))),  (v8i32 VR256:$src)>;
  def : Pat<(v16i16 (bitconvert (v8f32 VR256:$src))),  (v16i16 VR256:$src)>;
  def : Pat<(v16i16 (bitconvert (v8i32 VR256:$src))),  (v16i16 VR256:$src)>;
  def : Pat<(v16i16 (bitconvert (v4i64 VR256:$src))),  (v16i16 VR256:$src)>;
  def : Pat<(v16i16 (bitconvert (v4f64 VR256:$src))),  (v16i16 VR256:$src)>;
  def : Pat<(v16i16 (bitconvert (v32i8 VR256:$src))),  (v16i16 VR256:$src)>;
}

// Alias instructions that map fld0 to pxor for sse.
// This is expanded by ExpandPostRAPseudos.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
    isPseudo = 1 in {
  def FsFLD0SS : I<0, Pseudo, (outs FR32:$dst), (ins), "",
                   [(set FR32:$dst, fp32imm0)]>, Requires<[HasSSE1]>;
  def FsFLD0SD : I<0, Pseudo, (outs FR64:$dst), (ins), "",
                   [(set FR64:$dst, fpimm0)]>, Requires<[HasSSE2]>;
}

//===----------------------------------------------------------------------===//
// AVX & SSE - Zero/One Vectors
//===----------------------------------------------------------------------===//

// Alias instruction that maps zero vector to pxor / xorp* for sse.
// This is expanded by ExpandPostRAPseudos to an xorps / vxorps, and then
// swizzled by ExecutionDepsFix to pxor.
// We set canFoldAsLoad because this can be converted to a constant-pool
// load of an all-zeros value if folding it would be beneficial.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
    isPseudo = 1, neverHasSideEffects = 1 in {
def V_SET0 : I<0, Pseudo, (outs VR128:$dst), (ins), "",
               [(set VR128:$dst, (v4f32 immAllZerosV))]>;
}

def : Pat<(v4f32 immAllZerosV), (V_SET0)>;
def : Pat<(v2f64 immAllZerosV), (V_SET0)>;
def : Pat<(v4i32 immAllZerosV), (V_SET0)>;
def : Pat<(v2i64 immAllZerosV), (V_SET0)>;
def : Pat<(v8i16 immAllZerosV), (V_SET0)>;
def : Pat<(v16i8 immAllZerosV), (V_SET0)>;
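
// Illustration only: once instruction selection picks V_SET0 for a zeroed
// 128-bit vector, ExpandPostRAPseudos rewrites the pseudo to an xorps (or
// vxorps) of the register with itself, and ExecutionDepsFix may then swizzle
// it to pxor when the surrounding code runs in the integer domain.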

// The same as done above but for AVX. The 256-bit ISA does not support PI,
// and doesn't need it because on Sandy Bridge the register is set to zero
// at the rename stage without using any execution unit, so SET0PSY
// and SET0PDY can be used for vector int instructions without penalty.
// FIXME: Change encoding to pseudo! This is blocked right now by the x86
// JIT implementation, it does not expand the instructions below like
// X86MCInstLower does.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
    isCodeGenOnly = 1 in {
  let Predicates = [HasAVX] in {
    def AVX_SET0PSY : PSI<0x57, MRMInitReg, (outs VR256:$dst), (ins), "",
                          [(set VR256:$dst, (v8f32 immAllZerosV))]>, VEX_4V;
    def AVX_SET0PDY : PDI<0x57, MRMInitReg, (outs VR256:$dst), (ins), "",
                          [(set VR256:$dst, (v4f64 immAllZerosV))]>, VEX_4V;
  }
  let Predicates = [HasAVX2] in
    def AVX2_SET0 : PDI<0xef, MRMInitReg, (outs VR256:$dst), (ins), "",
                        [(set VR256:$dst, (v4i64 immAllZerosV))]>, VEX_4V;
}

let Predicates = [HasAVX2] in {
  def : Pat<(v8i32 immAllZerosV), (AVX2_SET0)>;
  def : Pat<(v16i16 immAllZerosV), (AVX2_SET0)>;
  def : Pat<(v32i8 immAllZerosV), (AVX2_SET0)>;
}

// AVX1 has no support for 256-bit integer instructions, but since the 128-bit
// VPXOR instruction writes zero to its upper part, it's safe to build zeros.
let Predicates = [HasAVX1Only] in {
def : Pat<(v32i8 immAllZerosV), (SUBREG_TO_REG (i8 0), (V_SET0), sub_xmm)>;
def : Pat<(bc_v32i8 (v8f32 immAllZerosV)),
          (SUBREG_TO_REG (i8 0), (V_SET0), sub_xmm)>;

def : Pat<(v16i16 immAllZerosV), (SUBREG_TO_REG (i16 0), (V_SET0), sub_xmm)>;
def : Pat<(bc_v16i16 (v8f32 immAllZerosV)),
          (SUBREG_TO_REG (i16 0), (V_SET0), sub_xmm)>;

def : Pat<(v8i32 immAllZerosV), (SUBREG_TO_REG (i32 0), (V_SET0), sub_xmm)>;
def : Pat<(bc_v8i32 (v8f32 immAllZerosV)),
          (SUBREG_TO_REG (i32 0), (V_SET0), sub_xmm)>;

def : Pat<(v4i64 immAllZerosV), (SUBREG_TO_REG (i64 0), (V_SET0), sub_xmm)>;
def : Pat<(bc_v4i64 (v8f32 immAllZerosV)),
          (SUBREG_TO_REG (i64 0), (V_SET0), sub_xmm)>;
}

// We set canFoldAsLoad because this can be converted to a constant-pool
// load of an all-ones value if folding it would be beneficial.
// FIXME: Change encoding to pseudo! This is blocked right now by the x86
// JIT implementation, it does not expand the instructions below like
// X86MCInstLower does.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
    isCodeGenOnly = 1, ExeDomain = SSEPackedInt in {
  let Predicates = [HasAVX] in
  def AVX_SETALLONES : PDI<0x76, MRMInitReg, (outs VR128:$dst), (ins), "",
                           [(set VR128:$dst, (v4i32 immAllOnesV))]>, VEX_4V;
  def V_SETALLONES : PDI<0x76, MRMInitReg, (outs VR128:$dst), (ins), "",
                         [(set VR128:$dst, (v4i32 immAllOnesV))]>;
  let Predicates = [HasAVX2] in
  def AVX2_SETALLONES : PDI<0x76, MRMInitReg, (outs VR256:$dst), (ins), "",
                            [(set VR256:$dst, (v8i32 immAllOnesV))]>, VEX_4V;
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move FP Scalar Instructions
//
// Move Instructions. Register-to-register movss/movsd is not used for FR32/64
// register copies because it's a partial register update; FsMOVAPSrr/FsMOVAPDrr
// is used instead. Register-to-register movss/movsd is not modeled as an
// INSERT_SUBREG because INSERT_SUBREG requires that the insert be implementable
// in terms of a copy, and, as just mentioned, we don't use movss/movsd for
// copies.
//===----------------------------------------------------------------------===//

class sse12_move_rr<RegisterClass RC, SDNode OpNode, ValueType vt, string asm> :
      SI<0x10, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, RC:$src2), asm,
      [(set VR128:$dst, (vt (OpNode VR128:$src1,
                             (scalar_to_vector RC:$src2))))],
      IIC_SSE_MOV_S_RR>;

// Loading from memory automatically zeroes the upper bits.
class sse12_move_rm<RegisterClass RC, X86MemOperand x86memop,
                    PatFrag mem_pat, string OpcodeStr> :
      SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
         !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
         [(set RC:$dst, (mem_pat addr:$src))],
         IIC_SSE_MOV_S_RM>;

def VMOVSSrr : sse12_move_rr<FR32, X86Movss, v4f32,
                "movss\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XS, VEX_4V,
                VEX_LIG;
def VMOVSDrr : sse12_move_rr<FR64, X86Movsd, v2f64,
                "movsd\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XD, VEX_4V,
                VEX_LIG;

// For the disassembler
let isCodeGenOnly = 1 in {
  def VMOVSSrr_REV : SI<0x11, MRMDestReg, (outs VR128:$dst),
                        (ins VR128:$src1, FR32:$src2),
                        "movss\t{$src2, $src1, $dst|$dst, $src1, $src2}", [],
                        IIC_SSE_MOV_S_RR>,
                        XS, VEX_4V, VEX_LIG;
  def VMOVSDrr_REV : SI<0x11, MRMDestReg, (outs VR128:$dst),
                        (ins VR128:$src1, FR64:$src2),
                        "movsd\t{$src2, $src1, $dst|$dst, $src1, $src2}", [],
                        IIC_SSE_MOV_S_RR>,
                        XD, VEX_4V, VEX_LIG;
}

let canFoldAsLoad = 1, isReMaterializable = 1 in {
  def VMOVSSrm : sse12_move_rm<FR32, f32mem, loadf32, "movss">, XS, VEX,
                 VEX_LIG;
  let AddedComplexity = 20 in
    def VMOVSDrm : sse12_move_rm<FR64, f64mem, loadf64, "movsd">, XD, VEX,
                   VEX_LIG;
}

def VMOVSSmr : SI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
                  "movss\t{$src, $dst|$dst, $src}",
                  [(store FR32:$src, addr:$dst)], IIC_SSE_MOV_S_MR>,
                  XS, VEX, VEX_LIG;
def VMOVSDmr : SI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
                  "movsd\t{$src, $dst|$dst, $src}",
                  [(store FR64:$src, addr:$dst)], IIC_SSE_MOV_S_MR>,
                  XD, VEX, VEX_LIG;

let Constraints = "$src1 = $dst" in {
  def MOVSSrr : sse12_move_rr<FR32, X86Movss, v4f32,
                          "movss\t{$src2, $dst|$dst, $src2}">, XS;
  def MOVSDrr : sse12_move_rr<FR64, X86Movsd, v2f64,
                          "movsd\t{$src2, $dst|$dst, $src2}">, XD;

  // For the disassembler
  let isCodeGenOnly = 1 in {
    def MOVSSrr_REV : SI<0x11, MRMDestReg, (outs VR128:$dst),
                         (ins VR128:$src1, FR32:$src2),
                         "movss\t{$src2, $dst|$dst, $src2}", [],
                         IIC_SSE_MOV_S_RR>, XS;
    def MOVSDrr_REV : SI<0x11, MRMDestReg, (outs VR128:$dst),
                         (ins VR128:$src1, FR64:$src2),
                         "movsd\t{$src2, $dst|$dst, $src2}", [],
                         IIC_SSE_MOV_S_RR>, XD;
  }
}

let canFoldAsLoad = 1, isReMaterializable = 1 in {
  def MOVSSrm : sse12_move_rm<FR32, f32mem, loadf32, "movss">, XS;

  let AddedComplexity = 20 in
    def MOVSDrm : sse12_move_rm<FR64, f64mem, loadf64, "movsd">, XD;
}

def MOVSSmr : SSI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
                  "movss\t{$src, $dst|$dst, $src}",
                  [(store FR32:$src, addr:$dst)], IIC_SSE_MOV_S_MR>;
def MOVSDmr : SDI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
                  "movsd\t{$src, $dst|$dst, $src}",
                  [(store FR64:$src, addr:$dst)], IIC_SSE_MOV_S_MR>;

let Predicates = [HasAVX] in {
  let AddedComplexity = 15 in {
  // Move scalar to XMM zero-extended, zeroing a VR128 then do a
  // MOVS{S,D} to the lower bits.
  def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32:$src)))),
            (VMOVSSrr (v4f32 (V_SET0)), FR32:$src)>;
  def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
            (VMOVSSrr (v4f32 (V_SET0)), (COPY_TO_REGCLASS VR128:$src, FR32))>;
  def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
            (VMOVSSrr (v4i32 (V_SET0)), (COPY_TO_REGCLASS VR128:$src, FR32))>;
  def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64:$src)))),
            (VMOVSDrr (v2f64 (V_SET0)), FR64:$src)>;

  // Move low f32 and clear high bits.
  def : Pat<(v8f32 (X86vzmovl (v8f32 VR256:$src))),
            (SUBREG_TO_REG (i32 0),
             (VMOVSSrr (v4f32 (V_SET0)),
                       (EXTRACT_SUBREG (v8f32 VR256:$src), sub_xmm)), sub_xmm)>;
  def : Pat<(v8i32 (X86vzmovl (v8i32 VR256:$src))),
            (SUBREG_TO_REG (i32 0),
             (VMOVSSrr (v4i32 (V_SET0)),
                       (EXTRACT_SUBREG (v8i32 VR256:$src), sub_xmm)), sub_xmm)>;
  }

  let AddedComplexity = 20 in {
  // MOVSSrm zeros the high parts of the register; represent this
  // with SUBREG_TO_REG. The AVX versions also write: DST[255:128] <- 0
  def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
            (COPY_TO_REGCLASS (VMOVSSrm addr:$src), VR128)>;
  def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
            (COPY_TO_REGCLASS (VMOVSSrm addr:$src), VR128)>;
  def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
            (COPY_TO_REGCLASS (VMOVSSrm addr:$src), VR128)>;
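
  // Illustration only: the three patterns above fold the zero-extension into
  // the load itself, e.g. (v4f32 (X86vzmovl (v4f32 (scalar_to_vector
  // (loadf32 x))))) selects to a single VMOVSSrm re-classed into VR128,
  // because movss from memory already clears the upper lanes.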

  // MOVSDrm zeros the high parts of the register; represent this
  // with SUBREG_TO_REG. The AVX versions also write: DST[255:128] <- 0
  def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
            (COPY_TO_REGCLASS (VMOVSDrm addr:$src), VR128)>;
  def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
            (COPY_TO_REGCLASS (VMOVSDrm addr:$src), VR128)>;
  def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
            (COPY_TO_REGCLASS (VMOVSDrm addr:$src), VR128)>;
  def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
            (COPY_TO_REGCLASS (VMOVSDrm addr:$src), VR128)>;
  def : Pat<(v2f64 (X86vzload addr:$src)),
            (COPY_TO_REGCLASS (VMOVSDrm addr:$src), VR128)>;

  // Represent the same patterns above but in the form they appear for
  // 256-bit types.
  def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
                   (v4i32 (scalar_to_vector (loadi32 addr:$src))), (i32 0)))),
            (SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_xmm)>;
  def : Pat<(v8f32 (X86vzmovl (insert_subvector undef,
                   (v4f32 (scalar_to_vector (loadf32 addr:$src))), (i32 0)))),
            (SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_xmm)>;
  def : Pat<(v4f64 (X86vzmovl (insert_subvector undef,
                   (v2f64 (scalar_to_vector (loadf64 addr:$src))), (i32 0)))),
            (SUBREG_TO_REG (i32 0), (VMOVSDrm addr:$src), sub_xmm)>;

  def : Pat<(v8f32 (X86vzmovl (insert_subvector undef,
                   (v4f32 (scalar_to_vector FR32:$src)), (i32 0)))),
            (SUBREG_TO_REG (i32 0),
                           (v4f32 (VMOVSSrr (v4f32 (V_SET0)), FR32:$src)),
                           sub_xmm)>;
  def : Pat<(v4f64 (X86vzmovl (insert_subvector undef,
                   (v2f64 (scalar_to_vector FR64:$src)), (i32 0)))),
            (SUBREG_TO_REG (i64 0),
                           (v2f64 (VMOVSDrr (v2f64 (V_SET0)), FR64:$src)),
                           sub_xmm)>;
  def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
                   (v2i64 (scalar_to_vector (loadi64 addr:$src))), (i32 0)))),
            (SUBREG_TO_REG (i64 0), (VMOVSDrm addr:$src), sub_xmm)>;

  // Move low f64 and clear high bits.
  def : Pat<(v4f64 (X86vzmovl (v4f64 VR256:$src))),
            (SUBREG_TO_REG (i32 0),
             (VMOVSDrr (v2f64 (V_SET0)),
                       (EXTRACT_SUBREG (v4f64 VR256:$src), sub_xmm)), sub_xmm)>;

  def : Pat<(v4i64 (X86vzmovl (v4i64 VR256:$src))),
            (SUBREG_TO_REG (i32 0),
             (VMOVSDrr (v2i64 (V_SET0)),
                       (EXTRACT_SUBREG (v4i64 VR256:$src), sub_xmm)), sub_xmm)>;
  }

  // Extract and store.
  def : Pat<(store (f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
                   addr:$dst),
            (VMOVSSmr addr:$dst, (COPY_TO_REGCLASS (v4f32 VR128:$src), FR32))>;
  def : Pat<(store (f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
                   addr:$dst),
            (VMOVSDmr addr:$dst, (COPY_TO_REGCLASS (v2f64 VR128:$src), FR64))>;

  // Shuffle with VMOVSS
  def : Pat<(v4i32 (X86Movss VR128:$src1, VR128:$src2)),
            (VMOVSSrr (v4i32 VR128:$src1),
                      (COPY_TO_REGCLASS (v4i32 VR128:$src2), FR32))>;
  def : Pat<(v4f32 (X86Movss VR128:$src1, VR128:$src2)),
            (VMOVSSrr (v4f32 VR128:$src1),
                      (COPY_TO_REGCLASS (v4f32 VR128:$src2), FR32))>;

  // 256-bit variants
  def : Pat<(v8i32 (X86Movss VR256:$src1, VR256:$src2)),
            (SUBREG_TO_REG (i32 0),
              (VMOVSSrr (EXTRACT_SUBREG (v8i32 VR256:$src1), sub_xmm),
                        (EXTRACT_SUBREG (v8i32 VR256:$src2), sub_xmm)),
              sub_xmm)>;
  def : Pat<(v8f32 (X86Movss VR256:$src1, VR256:$src2)),
            (SUBREG_TO_REG (i32 0),
              (VMOVSSrr (EXTRACT_SUBREG (v8f32 VR256:$src1), sub_xmm),
                        (EXTRACT_SUBREG (v8f32 VR256:$src2), sub_xmm)),
              sub_xmm)>;

  // Shuffle with VMOVSD
  def : Pat<(v2i64 (X86Movsd VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v2f64 (X86Movsd VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v4f32 (X86Movsd VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v4i32 (X86Movsd VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;

  // 256-bit variants
  def : Pat<(v4i64 (X86Movsd VR256:$src1, VR256:$src2)),
            (SUBREG_TO_REG (i32 0),
              (VMOVSDrr (EXTRACT_SUBREG (v4i64 VR256:$src1), sub_xmm),
                        (EXTRACT_SUBREG (v4i64 VR256:$src2), sub_xmm)),
              sub_xmm)>;
  def : Pat<(v4f64 (X86Movsd VR256:$src1, VR256:$src2)),
            (SUBREG_TO_REG (i32 0),
              (VMOVSDrr (EXTRACT_SUBREG (v4f64 VR256:$src1), sub_xmm),
                        (EXTRACT_SUBREG (v4f64 VR256:$src2), sub_xmm)),
              sub_xmm)>;

  // FIXME: Instead of a X86Movlps there should be a X86Movsd here, the problem
  // is during lowering, where it's not possible to recognize the fold because
  // it has two uses through a bitcast. One use disappears at isel time and the
  // fold opportunity reappears.
  def : Pat<(v2f64 (X86Movlpd VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v2i64 (X86Movlpd VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v4f32 (X86Movlps VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v4i32 (X86Movlps VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
}

let Predicates = [HasSSE1] in {
  let AddedComplexity = 15 in {
  // Move scalar to XMM zero-extended, zeroing a VR128 then do a
  // MOVSS to the lower bits.
  def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32:$src)))),
            (MOVSSrr (v4f32 (V_SET0)), FR32:$src)>;
  def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
            (MOVSSrr (v4f32 (V_SET0)), (COPY_TO_REGCLASS VR128:$src, FR32))>;
  def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
            (MOVSSrr (v4i32 (V_SET0)), (COPY_TO_REGCLASS VR128:$src, FR32))>;
  }

  let AddedComplexity = 20 in {
  // MOVSSrm already zeros the high parts of the register.
  def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
            (COPY_TO_REGCLASS (MOVSSrm addr:$src), VR128)>;
  def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
            (COPY_TO_REGCLASS (MOVSSrm addr:$src), VR128)>;
  def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
            (COPY_TO_REGCLASS (MOVSSrm addr:$src), VR128)>;
  }

  // Extract and store.
  def : Pat<(store (f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
                   addr:$dst),
            (MOVSSmr addr:$dst, (COPY_TO_REGCLASS VR128:$src, FR32))>;

  // Shuffle with MOVSS
  def : Pat<(v4i32 (X86Movss VR128:$src1, VR128:$src2)),
            (MOVSSrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR32))>;
  def : Pat<(v4f32 (X86Movss VR128:$src1, VR128:$src2)),
            (MOVSSrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR32))>;
}

let Predicates = [HasSSE2] in {
  let AddedComplexity = 15 in {
  // Move scalar to XMM zero-extended, zeroing a VR128 then do a
  // MOVSD to the lower bits.
  def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64:$src)))),
            (MOVSDrr (v2f64 (V_SET0)), FR64:$src)>;
  }

  let AddedComplexity = 20 in {
  // MOVSDrm already zeros the high parts of the register.
  def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
            (COPY_TO_REGCLASS (MOVSDrm addr:$src), VR128)>;
  def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
            (COPY_TO_REGCLASS (MOVSDrm addr:$src), VR128)>;
  def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
            (COPY_TO_REGCLASS (MOVSDrm addr:$src), VR128)>;
  def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
            (COPY_TO_REGCLASS (MOVSDrm addr:$src), VR128)>;
  def : Pat<(v2f64 (X86vzload addr:$src)),
            (COPY_TO_REGCLASS (MOVSDrm addr:$src), VR128)>;
  }

  // Extract and store.
  def : Pat<(store (f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
                   addr:$dst),
            (MOVSDmr addr:$dst, (COPY_TO_REGCLASS VR128:$src, FR64))>;

  // Shuffle with MOVSD
  def : Pat<(v2i64 (X86Movsd VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v2f64 (X86Movsd VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v4f32 (X86Movsd VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v4i32 (X86Movsd VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;

  // FIXME: Instead of a X86Movlps there should be a X86Movsd here, the problem
  // is during lowering, where it's not possible to recognize the fold because
  // it has two uses through a bitcast. One use disappears at isel time and the
  // fold opportunity reappears.
  def : Pat<(v2f64 (X86Movlpd VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v2i64 (X86Movlpd VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v4f32 (X86Movlps VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v4i32 (X86Movlps VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move Aligned/Unaligned FP Instructions
//===----------------------------------------------------------------------===//

multiclass sse12_mov_packed<bits<8> opc, RegisterClass RC,
                            X86MemOperand x86memop, PatFrag ld_frag,
                            string asm, Domain d,
                            OpndItins itins,
                            bit IsReMaterializable = 1> {
let neverHasSideEffects = 1 in
  def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
              !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [], itins.rr, d>;
let canFoldAsLoad = 1, isReMaterializable = IsReMaterializable in
  def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
              !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
              [(set RC:$dst, (ld_frag addr:$src))], itins.rm, d>;
}

defm VMOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
                                "movaps", SSEPackedSingle, SSE_MOVA_ITINS>,
                                TB, VEX;
defm VMOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
                                "movapd", SSEPackedDouble, SSE_MOVA_ITINS>,
                                TB, OpSize, VEX;
defm VMOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
                                "movups", SSEPackedSingle, SSE_MOVU_ITINS>,
                                TB, VEX;
defm VMOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
                                "movupd", SSEPackedDouble, SSE_MOVU_ITINS, 0>,
                                TB, OpSize, VEX;

defm VMOVAPSY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv8f32,
                                 "movaps", SSEPackedSingle, SSE_MOVA_ITINS>,
                                 TB, VEX;
defm VMOVAPDY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv4f64,
                                 "movapd", SSEPackedDouble, SSE_MOVA_ITINS>,
                                 TB, OpSize, VEX;
defm VMOVUPSY : sse12_mov_packed<0x10, VR256, f256mem, loadv8f32,
                                 "movups", SSEPackedSingle, SSE_MOVU_ITINS>,
                                 TB, VEX;
defm VMOVUPDY : sse12_mov_packed<0x10, VR256, f256mem, loadv4f64,
                                 "movupd", SSEPackedDouble, SSE_MOVU_ITINS, 0>,
                                 TB, OpSize, VEX;
defm MOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
                               "movaps", SSEPackedSingle, SSE_MOVA_ITINS>,
                               TB;
defm MOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
                               "movapd", SSEPackedDouble, SSE_MOVA_ITINS>,
                               TB, OpSize;
defm MOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
                               "movups", SSEPackedSingle, SSE_MOVU_ITINS>,
                               TB;
defm MOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
                               "movupd", SSEPackedDouble, SSE_MOVU_ITINS, 0>,
                               TB, OpSize;

def VMOVAPSmr : VPSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movaps\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v4f32 VR128:$src), addr:$dst)],
                   IIC_SSE_MOVA_P_MR>, VEX;
def VMOVAPDmr : VPDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movapd\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v2f64 VR128:$src), addr:$dst)],
                   IIC_SSE_MOVA_P_MR>, VEX;
def VMOVUPSmr : VPSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movups\t{$src, $dst|$dst, $src}",
                   [(store (v4f32 VR128:$src), addr:$dst)],
                   IIC_SSE_MOVU_P_MR>, VEX;
def VMOVUPDmr : VPDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movupd\t{$src, $dst|$dst, $src}",
                   [(store (v2f64 VR128:$src), addr:$dst)],
                   IIC_SSE_MOVU_P_MR>, VEX;
def VMOVAPSYmr : VPSI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                   "movaps\t{$src, $dst|$dst, $src}",
                   [(alignedstore256 (v8f32 VR256:$src), addr:$dst)],
                   IIC_SSE_MOVA_P_MR>, VEX;
def VMOVAPDYmr : VPDI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                   "movapd\t{$src, $dst|$dst, $src}",
                   [(alignedstore256 (v4f64 VR256:$src), addr:$dst)],
                   IIC_SSE_MOVA_P_MR>, VEX;
def VMOVUPSYmr : VPSI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                   "movups\t{$src, $dst|$dst, $src}",
                   [(store (v8f32 VR256:$src), addr:$dst)],
                   IIC_SSE_MOVU_P_MR>, VEX;
def VMOVUPDYmr : VPDI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                   "movupd\t{$src, $dst|$dst, $src}",
                   [(store (v4f64 VR256:$src), addr:$dst)],
                   IIC_SSE_MOVU_P_MR>, VEX;

// For the disassembler
let isCodeGenOnly = 1 in {
  def VMOVAPSrr_REV : VPSI<0x29, MRMDestReg, (outs VR128:$dst),
                           (ins VR128:$src),
                           "movaps\t{$src, $dst|$dst, $src}", [],
                           IIC_SSE_MOVA_P_RR>, VEX;
  def VMOVAPDrr_REV : VPDI<0x29, MRMDestReg, (outs VR128:$dst),
                           (ins VR128:$src),
                           "movapd\t{$src, $dst|$dst, $src}", [],
                           IIC_SSE_MOVA_P_RR>, VEX;
  def VMOVUPSrr_REV : VPSI<0x11, MRMDestReg, (outs VR128:$dst),
                           (ins VR128:$src),
                           "movups\t{$src, $dst|$dst, $src}", [],
                           IIC_SSE_MOVU_P_RR>, VEX;
  def VMOVUPDrr_REV : VPDI<0x11, MRMDestReg, (outs VR128:$dst),
                           (ins VR128:$src),
                           "movupd\t{$src, $dst|$dst, $src}", [],
                           IIC_SSE_MOVU_P_RR>, VEX;
  def VMOVAPSYrr_REV : VPSI<0x29, MRMDestReg, (outs VR256:$dst),
                            (ins VR256:$src),
                            "movaps\t{$src, $dst|$dst, $src}", [],
                            IIC_SSE_MOVA_P_RR>, VEX;
  def VMOVAPDYrr_REV : VPDI<0x29, MRMDestReg, (outs VR256:$dst),
                            (ins VR256:$src),
                            "movapd\t{$src, $dst|$dst, $src}", [],
                            IIC_SSE_MOVA_P_RR>, VEX;
  def VMOVUPSYrr_REV : VPSI<0x11, MRMDestReg, (outs VR256:$dst),
                            (ins VR256:$src),
                            "movups\t{$src, $dst|$dst, $src}", [],
                            IIC_SSE_MOVU_P_RR>, VEX;
  def VMOVUPDYrr_REV : VPDI<0x11, MRMDestReg, (outs VR256:$dst),
                            (ins VR256:$src),
                            "movupd\t{$src, $dst|$dst, $src}", [],
                            IIC_SSE_MOVU_P_RR>, VEX;
}

let Predicates = [HasAVX] in {
def : Pat<(v8i32 (X86vzmovl
                  (insert_subvector undef, (v4i32 VR128:$src), (i32 0)))),
          (SUBREG_TO_REG (i32 0), (VMOVAPSrr VR128:$src), sub_xmm)>;
def : Pat<(v4i64 (X86vzmovl
                  (insert_subvector undef, (v2i64 VR128:$src), (i32 0)))),
          (SUBREG_TO_REG (i32 0), (VMOVAPSrr VR128:$src), sub_xmm)>;
def : Pat<(v8f32 (X86vzmovl
                  (insert_subvector undef, (v4f32 VR128:$src), (i32 0)))),
          (SUBREG_TO_REG (i32 0), (VMOVAPSrr VR128:$src), sub_xmm)>;
def : Pat<(v4f64 (X86vzmovl
                  (insert_subvector undef, (v2f64 VR128:$src), (i32 0)))),
          (SUBREG_TO_REG (i32 0), (VMOVAPSrr VR128:$src), sub_xmm)>;
}

def : Pat<(int_x86_avx_storeu_ps_256 addr:$dst, VR256:$src),
          (VMOVUPSYmr addr:$dst, VR256:$src)>;
def : Pat<(int_x86_avx_storeu_pd_256 addr:$dst, VR256:$src),
          (VMOVUPDYmr addr:$dst, VR256:$src)>;

def MOVAPSmr : PSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movaps\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v4f32 VR128:$src), addr:$dst)],
                   IIC_SSE_MOVA_P_MR>;
def MOVAPDmr : PDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movapd\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v2f64 VR128:$src), addr:$dst)],
                   IIC_SSE_MOVA_P_MR>;
def MOVUPSmr : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movups\t{$src, $dst|$dst, $src}",
                   [(store (v4f32 VR128:$src), addr:$dst)],
                   IIC_SSE_MOVU_P_MR>;
def MOVUPDmr : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movupd\t{$src, $dst|$dst, $src}",
                   [(store (v2f64 VR128:$src), addr:$dst)],
                   IIC_SSE_MOVU_P_MR>;

// For the disassembler
let isCodeGenOnly = 1 in {
  def MOVAPSrr_REV : PSI<0x29, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
                         "movaps\t{$src, $dst|$dst, $src}", [],
                         IIC_SSE_MOVA_P_RR>;
  def MOVAPDrr_REV : PDI<0x29, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
                         "movapd\t{$src, $dst|$dst, $src}", [],
                         IIC_SSE_MOVA_P_RR>;
  def MOVUPSrr_REV : PSI<0x11, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
                         "movups\t{$src, $dst|$dst, $src}", [],
                         IIC_SSE_MOVU_P_RR>;
  def MOVUPDrr_REV : PDI<0x11, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
                         "movupd\t{$src, $dst|$dst, $src}", [],
                         IIC_SSE_MOVU_P_RR>;
}

let Predicates = [HasAVX] in {
  def : Pat<(int_x86_sse_storeu_ps addr:$dst, VR128:$src),
            (VMOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(int_x86_sse2_storeu_pd addr:$dst, VR128:$src),
            (VMOVUPDmr addr:$dst, VR128:$src)>;
}

let Predicates = [HasSSE1] in
  def : Pat<(int_x86_sse_storeu_ps addr:$dst, VR128:$src),
            (MOVUPSmr addr:$dst, VR128:$src)>;
let Predicates = [HasSSE2] in
  def : Pat<(int_x86_sse2_storeu_pd addr:$dst, VR128:$src),
            (MOVUPDmr addr:$dst, VR128:$src)>;

// Use vmovaps/vmovups for AVX integer load/store.
let Predicates = [HasAVX] in {
  // 128-bit load/store
  def : Pat<(alignedloadv2i64 addr:$src),
            (VMOVAPSrm addr:$src)>;
  def : Pat<(loadv2i64 addr:$src),
            (VMOVUPSrm addr:$src)>;

  def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
            (VMOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
            (VMOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
            (VMOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
            (VMOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v2i64 VR128:$src), addr:$dst),
            (VMOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v4i32 VR128:$src), addr:$dst),
            (VMOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v8i16 VR128:$src), addr:$dst),
            (VMOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v16i8 VR128:$src), addr:$dst),
            (VMOVUPSmr addr:$dst, VR128:$src)>;

  // 256-bit load/store
  def : Pat<(alignedloadv4i64 addr:$src),
            (VMOVAPSYrm addr:$src)>;
  def : Pat<(loadv4i64 addr:$src),
            (VMOVUPSYrm addr:$src)>;
  def : Pat<(alignedstore256 (v4i64 VR256:$src), addr:$dst),
            (VMOVAPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(alignedstore256 (v8i32 VR256:$src), addr:$dst),
            (VMOVAPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(alignedstore256 (v16i16 VR256:$src), addr:$dst),
            (VMOVAPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(alignedstore256 (v32i8 VR256:$src), addr:$dst),
            (VMOVAPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(store (v4i64 VR256:$src), addr:$dst),
            (VMOVUPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(store (v8i32 VR256:$src), addr:$dst),
            (VMOVUPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(store (v16i16 VR256:$src), addr:$dst),
            (VMOVUPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(store (v32i8 VR256:$src), addr:$dst),
            (VMOVUPSYmr addr:$dst, VR256:$src)>;
}

// Use movaps / movups for SSE integer load / store (one byte shorter).
// The instructions selected below are then converted to MOVDQA/MOVDQU
// during the SSE domain pass.
let Predicates = [HasSSE1] in {
  def : Pat<(alignedloadv2i64 addr:$src),
            (MOVAPSrm addr:$src)>;
  def : Pat<(loadv2i64 addr:$src),
            (MOVUPSrm addr:$src)>;

  def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
            (MOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
            (MOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
            (MOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
            (MOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v2i64 VR128:$src), addr:$dst),
            (MOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v4i32 VR128:$src), addr:$dst),
            (MOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v8i16 VR128:$src), addr:$dst),
            (MOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v16i8 VR128:$src), addr:$dst),
            (MOVUPSmr addr:$dst, VR128:$src)>;
}
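
// Illustration only: via the patterns above an aligned v4i32 store is first
// selected as MOVAPSmr, saving a byte over MOVDQAmr; if the stored value is
// computed in the integer domain, the ExecutionDepsFix domain pass may later
// rewrite the instruction to MOVDQA/MOVDQU as the comment above describes.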

// Alias instruction to do FR32 or FR64 reg-to-reg copy using movaps. Upper
// bits are disregarded. FIXME: Set encoding to pseudo!
let neverHasSideEffects = 1 in {
def FsVMOVAPSrr : VPSI<0x28, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
                       "movaps\t{$src, $dst|$dst, $src}", [],
                       IIC_SSE_MOVA_P_RR>, VEX;
def FsVMOVAPDrr : VPDI<0x28, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
                       "movapd\t{$src, $dst|$dst, $src}", [],
                       IIC_SSE_MOVA_P_RR>, VEX;
def FsMOVAPSrr : PSI<0x28, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
                     "movaps\t{$src, $dst|$dst, $src}", [],
                     IIC_SSE_MOVA_P_RR>;
def FsMOVAPDrr : PDI<0x28, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
                     "movapd\t{$src, $dst|$dst, $src}", [],
                     IIC_SSE_MOVA_P_RR>;
}

// Alias instruction to load FR32 or FR64 from f128mem using movaps. Upper
// bits are disregarded. FIXME: Set encoding to pseudo!
let canFoldAsLoad = 1, isReMaterializable = 1 in {
let isCodeGenOnly = 1 in {
  def FsVMOVAPSrm : VPSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
                         "movaps\t{$src, $dst|$dst, $src}",
                         [(set FR32:$dst, (alignedloadfsf32 addr:$src))],
                         IIC_SSE_MOVA_P_RM>, VEX;
  def FsVMOVAPDrm : VPDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
                         "movapd\t{$src, $dst|$dst, $src}",
                         [(set FR64:$dst, (alignedloadfsf64 addr:$src))],
                         IIC_SSE_MOVA_P_RM>, VEX;
}
def FsMOVAPSrm : PSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
                     "movaps\t{$src, $dst|$dst, $src}",
                     [(set FR32:$dst, (alignedloadfsf32 addr:$src))],
                     IIC_SSE_MOVA_P_RM>;
def FsMOVAPDrm : PDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
                     "movapd\t{$src, $dst|$dst, $src}",
                     [(set FR64:$dst, (alignedloadfsf64 addr:$src))],
                     IIC_SSE_MOVA_P_RM>;
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move Low packed FP Instructions
//===----------------------------------------------------------------------===//

multiclass sse12_mov_hilo_packed<bits<8>opc, RegisterClass RC,
                                 SDNode psnode, SDNode pdnode, string base_opc,
                                 string asm_opr, InstrItinClass itin> {
  def PSrm : PI<opc, MRMSrcMem,
         (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
         !strconcat(base_opc, "s", asm_opr),
     [(set VR128:$dst,
       (psnode VR128:$src1,
          (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))))],
          itin, SSEPackedSingle>, TB;

  def PDrm : PI<opc, MRMSrcMem,
         (outs RC:$dst), (ins RC:$src1, f64mem:$src2),
         !strconcat(base_opc, "d", asm_opr),
     [(set RC:$dst, (v2f64 (pdnode RC:$src1,
                            (scalar_to_vector (loadf64 addr:$src2)))))],
          itin, SSEPackedDouble>, TB, OpSize;
}

let AddedComplexity = 20 in {
  defm VMOVL : sse12_mov_hilo_packed<0x12, VR128, X86Movlps, X86Movlpd, "movlp",
                     "\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                     IIC_SSE_MOV_LH>, VEX_4V;
}
let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
  defm MOVL : sse12_mov_hilo_packed<0x12, VR128, X86Movlps, X86Movlpd, "movlp",
                                    "\t{$src2, $dst|$dst, $src2}",
                                    IIC_SSE_MOV_LH>;
}

def VMOVLPSmr : VPSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
                                 (iPTR 0))), addr:$dst)],
                   IIC_SSE_MOV_LH>, VEX;
def VMOVLPDmr : VPDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlpd\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract (v2f64 VR128:$src),
                                 (iPTR 0))), addr:$dst)],
                   IIC_SSE_MOV_LH>, VEX;
def MOVLPSmr : PSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
                                 (iPTR 0))), addr:$dst)],
                   IIC_SSE_MOV_LH>;
def MOVLPDmr : PDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlpd\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract (v2f64 VR128:$src),
                                 (iPTR 0))), addr:$dst)],
                   IIC_SSE_MOV_LH>;

let Predicates = [HasAVX] in {
  // Shuffle with VMOVLPS
  def : Pat<(v4f32 (X86Movlps VR128:$src1, (load addr:$src2))),
            (VMOVLPSrm VR128:$src1, addr:$src2)>;
  def : Pat<(v4i32 (X86Movlps VR128:$src1, (load addr:$src2))),
            (VMOVLPSrm VR128:$src1, addr:$src2)>;

  // Shuffle with VMOVLPD
  def : Pat<(v2f64 (X86Movlpd VR128:$src1, (load addr:$src2))),
            (VMOVLPDrm VR128:$src1, addr:$src2)>;
  def : Pat<(v2i64 (X86Movlpd VR128:$src1, (load addr:$src2))),
            (VMOVLPDrm VR128:$src1, addr:$src2)>;

  // Store patterns
  def : Pat<(store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)),
                   addr:$src1),
            (VMOVLPSmr addr:$src1, VR128:$src2)>;
  def : Pat<(store (v4i32 (X86Movlps
                   (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2)), addr:$src1),
            (VMOVLPSmr addr:$src1, VR128:$src2)>;
  def : Pat<(store (v2f64 (X86Movlpd (load addr:$src1), VR128:$src2)),
                   addr:$src1),
            (VMOVLPDmr addr:$src1, VR128:$src2)>;
  def : Pat<(store (v2i64 (X86Movlpd (load addr:$src1), VR128:$src2)),
                   addr:$src1),
            (VMOVLPDmr addr:$src1, VR128:$src2)>;
}

let Predicates = [HasSSE1] in {
  // (store (vector_shuffle (load addr), v2, <4, 5, 2, 3>), addr) using MOVLPS
  def : Pat<(store (i64 (vector_extract (bc_v2i64 (v4f32 VR128:$src2)),
                                 (iPTR 0))), addr:$src1),
            (MOVLPSmr addr:$src1, VR128:$src2)>;

  // Shuffle with MOVLPS
  def : Pat<(v4f32 (X86Movlps VR128:$src1, (load addr:$src2))),
            (MOVLPSrm VR128:$src1, addr:$src2)>;
  def : Pat<(v4i32 (X86Movlps VR128:$src1, (load addr:$src2))),
            (MOVLPSrm VR128:$src1, addr:$src2)>;
  def : Pat<(X86Movlps VR128:$src1,
                 (bc_v4f32 (v2i64 (scalar_to_vector (loadi64 addr:$src2))))),
            (MOVLPSrm VR128:$src1, addr:$src2)>;

  // Store patterns
  def : Pat<(store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)),
                   addr:$src1),
            (MOVLPSmr addr:$src1, VR128:$src2)>;
  def : Pat<(store (v4i32 (X86Movlps
                   (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2)),
                   addr:$src1),
            (MOVLPSmr addr:$src1, VR128:$src2)>;
}

let Predicates = [HasSSE2] in {
  // Shuffle with MOVLPD
  def : Pat<(v2f64 (X86Movlpd VR128:$src1, (load addr:$src2))),
            (MOVLPDrm VR128:$src1, addr:$src2)>;
  def : Pat<(v2i64 (X86Movlpd VR128:$src1, (load addr:$src2))),
            (MOVLPDrm VR128:$src1, addr:$src2)>;

  // Store patterns
  def : Pat<(store (v2f64 (X86Movlpd (load addr:$src1), VR128:$src2)),
                   addr:$src1),
            (MOVLPDmr addr:$src1, VR128:$src2)>;
  def : Pat<(store (v2i64 (X86Movlpd (load addr:$src1), VR128:$src2)),
                   addr:$src1),
            (MOVLPDmr addr:$src1, VR128:$src2)>;
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move Hi packed FP Instructions
//===----------------------------------------------------------------------===//

let AddedComplexity = 20 in {
  defm VMOVH : sse12_mov_hilo_packed<0x16, VR128, X86Movlhps, X86Movlhpd, "movhp",
                     "\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                     IIC_SSE_MOV_LH>, VEX_4V;
}
let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
  defm MOVH : sse12_mov_hilo_packed<0x16, VR128, X86Movlhps, X86Movlhpd, "movhp",
                                    "\t{$src2, $dst|$dst, $src2}",
                                    IIC_SSE_MOV_LH>;
}

// v2f64 extract element 1 is always custom lowered to unpack high to low
// and extract element 0 so the non-store version isn't too horrible.
def VMOVHPSmr : VPSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract
                                 (X86Unpckh (bc_v2f64 (v4f32 VR128:$src)),
                                            (bc_v2f64 (v4f32 VR128:$src))),
                                 (iPTR 0))), addr:$dst)], IIC_SSE_MOV_LH>, VEX;
def VMOVHPDmr : VPDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhpd\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract
                                 (v2f64 (X86Unpckh VR128:$src, VR128:$src)),
                                 (iPTR 0))), addr:$dst)], IIC_SSE_MOV_LH>, VEX;
def MOVHPSmr : PSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract
                                 (X86Unpckh (bc_v2f64 (v4f32 VR128:$src)),
                                            (bc_v2f64 (v4f32 VR128:$src))),
                                 (iPTR 0))), addr:$dst)], IIC_SSE_MOV_LH>;
def MOVHPDmr : PDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhpd\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract
                                 (v2f64 (X86Unpckh VR128:$src, VR128:$src)),
                                 (iPTR 0))), addr:$dst)], IIC_SSE_MOV_LH>;

let Predicates = [HasAVX] in {
  // VMOVHPS patterns
  def : Pat<(X86Movlhps VR128:$src1,
                 (bc_v4f32 (v2i64 (scalar_to_vector (loadi64 addr:$src2))))),
            (VMOVHPSrm VR128:$src1, addr:$src2)>;
  def : Pat<(X86Movlhps VR128:$src1,
                 (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
            (VMOVHPSrm VR128:$src1, addr:$src2)>;

  // FIXME: Instead of X86Unpckl, there should be a X86Movlhpd here, the problem
  // is during lowering, where it's not possible to recognize the load fold
  // because it has two uses through a bitcast. One use disappears at isel time
  // and the fold opportunity reappears.
  def : Pat<(v2f64 (X86Unpckl VR128:$src1,
                     (scalar_to_vector (loadf64 addr:$src2)))),
            (VMOVHPDrm VR128:$src1, addr:$src2)>;
}

let Predicates = [HasSSE1] in {
  // MOVHPS patterns
  def : Pat<(X86Movlhps VR128:$src1,
                 (bc_v4f32 (v2i64 (scalar_to_vector (loadi64 addr:$src2))))),
            (MOVHPSrm VR128:$src1, addr:$src2)>;
  def : Pat<(X86Movlhps VR128:$src1,
                 (bc_v4f32 (v2i64 (X86vzload addr:$src2)))),
            (MOVHPSrm VR128:$src1, addr:$src2)>;
}

let Predicates = [HasSSE2] in {
  // FIXME: Instead of X86Unpckl, there should be a X86Movlhpd here, the problem
  // is during lowering, where it's not possible to recognize the load fold
  // because it has two uses through a bitcast. One use disappears at isel time
  // and the fold opportunity reappears.
  def : Pat<(v2f64 (X86Unpckl VR128:$src1,
                     (scalar_to_vector (loadf64 addr:$src2)))),
            (MOVHPDrm VR128:$src1, addr:$src2)>;
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move Low to High and High to Low packed FP Instructions
//===----------------------------------------------------------------------===//

let AddedComplexity = 20 in {
  def VMOVLHPSrr : VPSI<0x16, MRMSrcReg, (outs VR128:$dst),
                                       (ins VR128:$src1, VR128:$src2),
                      "movlhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                      [(set VR128:$dst,
                        (v4f32 (X86Movlhps VR128:$src1, VR128:$src2)))],
                        IIC_SSE_MOV_LH>,
                      VEX_4V;
  def VMOVHLPSrr : VPSI<0x12, MRMSrcReg, (outs VR128:$dst),
                                       (ins VR128:$src1, VR128:$src2),
                      "movhlps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                      [(set VR128:$dst,
                        (v4f32 (X86Movhlps VR128:$src1, VR128:$src2)))],
                        IIC_SSE_MOV_LH>,
                      VEX_4V;
}
let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
  def MOVLHPSrr : PSI<0x16, MRMSrcReg, (outs VR128:$dst),
                                       (ins VR128:$src1, VR128:$src2),
                      "movlhps\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                        (v4f32 (X86Movlhps VR128:$src1, VR128:$src2)))],
                        IIC_SSE_MOV_LH>;
  def MOVHLPSrr : PSI<0x12, MRMSrcReg, (outs VR128:$dst),
                                       (ins VR128:$src1, VR128:$src2),
                      "movhlps\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                        (v4f32 (X86Movhlps VR128:$src1, VR128:$src2)))],
                        IIC_SSE_MOV_LH>;
}

let Predicates = [HasAVX] in {
  // MOVLHPS patterns
  def : Pat<(v4i32 (X86Movlhps VR128:$src1, VR128:$src2)),
            (VMOVLHPSrr VR128:$src1, VR128:$src2)>;
  def : Pat<(v2i64 (X86Movlhps VR128:$src1, VR128:$src2)),
            (VMOVLHPSrr (v2i64 VR128:$src1), VR128:$src2)>;

  // MOVHLPS patterns
  def : Pat<(v4i32 (X86Movhlps VR128:$src1, VR128:$src2)),
            (VMOVHLPSrr VR128:$src1, VR128:$src2)>;
}

let Predicates = [HasSSE1] in {
  // MOVLHPS patterns
  def : Pat<(v4i32 (X86Movlhps VR128:$src1, VR128:$src2)),
            (MOVLHPSrr VR128:$src1, VR128:$src2)>;
  def : Pat<(v2i64 (X86Movlhps VR128:$src1, VR128:$src2)),
            (MOVLHPSrr (v2i64 VR128:$src1), VR128:$src2)>;

  // MOVHLPS patterns
  def : Pat<(v4i32 (X86Movhlps VR128:$src1, VR128:$src2)),
            (MOVHLPSrr VR128:$src1, VR128:$src2)>;
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Conversion Instructions
//===----------------------------------------------------------------------===//

def SSE_CVT_PD : OpndItins<
  IIC_SSE_CVT_PD_RR, IIC_SSE_CVT_PD_RM
>;

def SSE_CVT_PS : OpndItins<
  IIC_SSE_CVT_PS_RR, IIC_SSE_CVT_PS_RM
>;

def SSE_CVT_Scalar : OpndItins<
  IIC_SSE_CVT_Scalar_RR, IIC_SSE_CVT_Scalar_RM
>;

def SSE_CVT_SS2SI_32 : OpndItins<
  IIC_SSE_CVT_SS2SI32_RR, IIC_SSE_CVT_SS2SI32_RM
>;

def SSE_CVT_SS2SI_64 : OpndItins<
  IIC_SSE_CVT_SS2SI64_RR, IIC_SSE_CVT_SS2SI64_RM
>;

def SSE_CVT_SD2SI : OpndItins<
  IIC_SSE_CVT_SD2SI_RR, IIC_SSE_CVT_SD2SI_RM
>;
1391 multiclass sse12_cvt_s<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
1392 SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
1393 string asm, OpndItins itins> {
1394 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
1395 [(set DstRC:$dst, (OpNode SrcRC:$src))],
1397 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
1398 [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))],
1402 multiclass sse12_cvt_p<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
1403 X86MemOperand x86memop, string asm, Domain d,
1405 let neverHasSideEffects = 1 in {
1406 def rr : I<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
1409 def rm : I<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
1414 multiclass sse12_vcvt_avx<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
1415 X86MemOperand x86memop, string asm> {
1416 let neverHasSideEffects = 1 in {
1417 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src),
1418 !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>;
1420 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
1421 (ins DstRC:$src1, x86memop:$src),
1422 !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>;
1423 } // neverHasSideEffects = 1
1426 defm VCVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
1427 "cvttss2si\t{$src, $dst|$dst, $src}",
1430 defm VCVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
1431 "cvttss2si{q}\t{$src, $dst|$dst, $src}",
1433 XS, VEX, VEX_W, VEX_LIG;
1434 defm VCVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
1435 "cvttsd2si\t{$src, $dst|$dst, $src}",
1438 defm VCVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
1439 "cvttsd2si{q}\t{$src, $dst|$dst, $src}",
1441 XD, VEX, VEX_W, VEX_LIG;
1443 // The assembler can recognize rr 64-bit instructions by seeing an rxx
1444 // register, but the same isn't true when only memory operands are used, so
1445 // we provide explicit "l" and "q" assembly forms to disambiguate
1446 // where appropriate.
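// For example (illustrative operands): "vcvtsi2ss (%rax), %xmm1, %xmm0" does
// not say whether the memory operand holds 32 or 64 bits, whereas
// "vcvtsi2ssl (%rax), %xmm1, %xmm0" and "vcvtsi2ssq (%rax), %xmm1, %xmm0"
// make the source size explicit.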
1447 defm VCVTSI2SS : sse12_vcvt_avx<0x2A, GR32, FR32, i32mem, "cvtsi2ss">,
1448 XS, VEX_4V, VEX_LIG;
1449 defm VCVTSI2SS64 : sse12_vcvt_avx<0x2A, GR64, FR32, i64mem, "cvtsi2ss{q}">,
1450 XS, VEX_4V, VEX_W, VEX_LIG;
1451 defm VCVTSI2SD : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sd">,
1452 XD, VEX_4V, VEX_LIG;
1453 defm VCVTSI2SD64 : sse12_vcvt_avx<0x2A, GR64, FR64, i64mem, "cvtsi2sd{q}">,
1454 XD, VEX_4V, VEX_W, VEX_LIG;
1456 def : InstAlias<"vcvtsi2sd{l}\t{$src, $src1, $dst|$dst, $src1, $src}",
1457 (VCVTSI2SDrr FR64:$dst, FR64:$src1, GR32:$src)>;
1458 def : InstAlias<"vcvtsi2sd{l}\t{$src, $src1, $dst|$dst, $src1, $src}",
1459 (VCVTSI2SDrm FR64:$dst, FR64:$src1, i32mem:$src)>;
1461 let Predicates = [HasAVX], AddedComplexity = 1 in {
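// Note: in the patterns below the VEX forms tie $src1 to the destination's
// upper elements; for a plain scalar sint_to_fp those bits are don't-care,
// so an IMPLICIT_DEF placeholder is passed instead of a real register.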
1462 def : Pat<(f32 (sint_to_fp (loadi32 addr:$src))),
1463 (VCVTSI2SSrm (f32 (IMPLICIT_DEF)), addr:$src)>;
1464 def : Pat<(f32 (sint_to_fp (loadi64 addr:$src))),
1465 (VCVTSI2SS64rm (f32 (IMPLICIT_DEF)), addr:$src)>;
1466 def : Pat<(f64 (sint_to_fp (loadi32 addr:$src))),
1467 (VCVTSI2SDrm (f64 (IMPLICIT_DEF)), addr:$src)>;
1468 def : Pat<(f64 (sint_to_fp (loadi64 addr:$src))),
1469 (VCVTSI2SD64rm (f64 (IMPLICIT_DEF)), addr:$src)>;
1471 def : Pat<(f32 (sint_to_fp GR32:$src)),
1472 (VCVTSI2SSrr (f32 (IMPLICIT_DEF)), GR32:$src)>;
1473 def : Pat<(f32 (sint_to_fp GR64:$src)),
1474 (VCVTSI2SS64rr (f32 (IMPLICIT_DEF)), GR64:$src)>;
1475 def : Pat<(f64 (sint_to_fp GR32:$src)),
1476 (VCVTSI2SDrr (f64 (IMPLICIT_DEF)), GR32:$src)>;
1477 def : Pat<(f64 (sint_to_fp GR64:$src)),
1478 (VCVTSI2SD64rr (f64 (IMPLICIT_DEF)), GR64:$src)>;
1481 defm CVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
1482 "cvttss2si\t{$src, $dst|$dst, $src}",
1483 SSE_CVT_SS2SI_32>, XS;
1484 defm CVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
1485 "cvttss2si{q}\t{$src, $dst|$dst, $src}",
1486 SSE_CVT_SS2SI_64>, XS, REX_W;
1487 defm CVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
1488 "cvttsd2si\t{$src, $dst|$dst, $src}",
1490 defm CVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
1491 "cvttsd2si{q}\t{$src, $dst|$dst, $src}",
1492 SSE_CVT_SD2SI>, XD, REX_W;
1493 defm CVTSI2SS : sse12_cvt_s<0x2A, GR32, FR32, sint_to_fp, i32mem, loadi32,
1494 "cvtsi2ss\t{$src, $dst|$dst, $src}",
1495 SSE_CVT_Scalar>, XS;
1496 defm CVTSI2SS64 : sse12_cvt_s<0x2A, GR64, FR32, sint_to_fp, i64mem, loadi64,
1497 "cvtsi2ss{q}\t{$src, $dst|$dst, $src}",
1498 SSE_CVT_Scalar>, XS, REX_W;
1499 defm CVTSI2SD : sse12_cvt_s<0x2A, GR32, FR64, sint_to_fp, i32mem, loadi32,
1500 "cvtsi2sd\t{$src, $dst|$dst, $src}",
1501 SSE_CVT_Scalar>, XD;
1502 defm CVTSI2SD64 : sse12_cvt_s<0x2A, GR64, FR64, sint_to_fp, i64mem, loadi64,
1503 "cvtsi2sd{q}\t{$src, $dst|$dst, $src}",
1504 SSE_CVT_Scalar>, XD, REX_W;
1506 // Conversion Instruction Intrinsics - Match intrinsics that expect MM
1507 // and/or XMM operand(s).
1509 multiclass sse12_cvt_sint<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
1510 Intrinsic Int, Operand memop, ComplexPattern mem_cpat,
1511 string asm, OpndItins itins> {
1512 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
1513 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
1514 [(set DstRC:$dst, (Int SrcRC:$src))], itins.rr>;
1515 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins memop:$src),
1516 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
1517 [(set DstRC:$dst, (Int mem_cpat:$src))], itins.rm>;
1520 multiclass sse12_cvt_sint_3addr<bits<8> opc, RegisterClass SrcRC,
1521 RegisterClass DstRC, Intrinsic Int, X86MemOperand x86memop,
1522 PatFrag ld_frag, string asm, OpndItins itins,
1524 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src2),
1526 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
1527 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
1528 [(set DstRC:$dst, (Int DstRC:$src1, SrcRC:$src2))],
1530 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
1531 (ins DstRC:$src1, x86memop:$src2),
1533 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
1534 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
1535 [(set DstRC:$dst, (Int DstRC:$src1, (ld_frag addr:$src2)))],
1539 defm VCVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32,
1540 int_x86_sse2_cvtsd2si, sdmem, sse_load_f64, "cvtsd2si{l}",
1541 SSE_CVT_SD2SI>, XD, VEX, VEX_LIG;
1542 defm VCVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64,
1543 int_x86_sse2_cvtsd2si64, sdmem, sse_load_f64, "cvtsd2si{q}",
1544 SSE_CVT_SD2SI>, XD, VEX, VEX_W, VEX_LIG;
1546 defm CVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
1547 sdmem, sse_load_f64, "cvtsd2si{l}", SSE_CVT_SD2SI>, XD;
1548 defm CVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse2_cvtsd2si64,
1549 sdmem, sse_load_f64, "cvtsd2si{q}", SSE_CVT_SD2SI>, XD, REX_W;
1552 defm Int_VCVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
1553 int_x86_sse_cvtsi2ss, i32mem, loadi32, "cvtsi2ss",
1554 SSE_CVT_Scalar, 0>, XS, VEX_4V;
1555 defm Int_VCVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
1556 int_x86_sse_cvtsi642ss, i64mem, loadi64, "cvtsi2ss{q}",
1557 SSE_CVT_Scalar, 0>, XS, VEX_4V,
1559 defm Int_VCVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
1560 int_x86_sse2_cvtsi2sd, i32mem, loadi32, "cvtsi2sd",
1561 SSE_CVT_Scalar, 0>, XD, VEX_4V;
1562 defm Int_VCVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
1563 int_x86_sse2_cvtsi642sd, i64mem, loadi64, "cvtsi2sd{q}",
1564 SSE_CVT_Scalar, 0>, XD,
1567 let Constraints = "$src1 = $dst" in {
1568 defm Int_CVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
1569 int_x86_sse_cvtsi2ss, i32mem, loadi32,
1570 "cvtsi2ss", SSE_CVT_Scalar>, XS;
1571 defm Int_CVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
1572 int_x86_sse_cvtsi642ss, i64mem, loadi64,
1573 "cvtsi2ss{q}", SSE_CVT_Scalar>, XS, REX_W;
1574 defm Int_CVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
1575 int_x86_sse2_cvtsi2sd, i32mem, loadi32,
1576 "cvtsi2sd", SSE_CVT_Scalar>, XD;
1577 defm Int_CVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
1578 int_x86_sse2_cvtsi642sd, i64mem, loadi64,
1579 "cvtsi2sd{q}", SSE_CVT_Scalar>, XD, REX_W;
1584 // Aliases for intrinsics
1585 defm Int_VCVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
1586 ssmem, sse_load_f32, "cvttss2si",
1587 SSE_CVT_SS2SI_32>, XS, VEX;
1588 defm Int_VCVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
1589 int_x86_sse_cvttss2si64, ssmem, sse_load_f32,
1590 "cvttss2si{q}", SSE_CVT_SS2SI_64>,
1592 defm Int_VCVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
1593 sdmem, sse_load_f64, "cvttsd2si",
1594 SSE_CVT_SD2SI>, XD, VEX;
1595 defm Int_VCVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
1596 int_x86_sse2_cvttsd2si64, sdmem, sse_load_f64,
1597 "cvttsd2si{q}", SSE_CVT_SD2SI>,
1599 defm Int_CVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
1600 ssmem, sse_load_f32, "cvttss2si",
1601 SSE_CVT_SS2SI_32>, XS;
1602 defm Int_CVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
1603 int_x86_sse_cvttss2si64, ssmem, sse_load_f32,
1604 "cvttss2si{q}", SSE_CVT_SS2SI_64>, XS, REX_W;
1605 defm Int_CVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
1606 sdmem, sse_load_f64, "cvttsd2si",
1608 defm Int_CVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
1609 int_x86_sse2_cvttsd2si64, sdmem, sse_load_f64,
1610 "cvttsd2si{q}", SSE_CVT_SD2SI>, XD, REX_W;
1612 defm VCVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si,
1613 ssmem, sse_load_f32, "cvtss2si{l}",
1614 SSE_CVT_SS2SI_32>, XS, VEX, VEX_LIG;
1615 defm VCVTSS2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse_cvtss2si64,
1616 ssmem, sse_load_f32, "cvtss2si{q}",
1617 SSE_CVT_SS2SI_64>, XS, VEX, VEX_W, VEX_LIG;
1619 defm CVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si,
1620 ssmem, sse_load_f32, "cvtss2si{l}",
1621 SSE_CVT_SS2SI_32>, XS;
1622 defm CVTSS2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse_cvtss2si64,
1623 ssmem, sse_load_f32, "cvtss2si{q}",
1624 SSE_CVT_SS2SI_64>, XS, REX_W;
1626 defm VCVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, i128mem,
1627 "vcvtdq2ps\t{$src, $dst|$dst, $src}",
1628 SSEPackedSingle, SSE_CVT_PS>,
1629 TB, VEX, Requires<[HasAVX]>;
1630 defm VCVTDQ2PSY : sse12_cvt_p<0x5B, VR256, VR256, i256mem,
1631 "vcvtdq2ps\t{$src, $dst|$dst, $src}",
1632 SSEPackedSingle, SSE_CVT_PS>,
1633 TB, VEX, Requires<[HasAVX]>;
1635 defm CVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, i128mem,
1636 "cvtdq2ps\t{$src, $dst|$dst, $src}",
1637 SSEPackedSingle, SSE_CVT_PS>,
1638 TB, Requires<[HasSSE2]>;
1642 // Convert scalar double to scalar single
1643 let neverHasSideEffects = 1 in {
1644 def VCVTSD2SSrr : VSDI<0x5A, MRMSrcReg, (outs FR32:$dst),
1645 (ins FR64:$src1, FR64:$src2),
1646 "cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}", [],
1647 IIC_SSE_CVT_Scalar_RR>, VEX_4V, VEX_LIG;
1649 def VCVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst),
1650 (ins FR64:$src1, f64mem:$src2),
1651 "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1652 [], IIC_SSE_CVT_Scalar_RM>,
1653 XD, Requires<[HasAVX, OptForSize]>, VEX_4V, VEX_LIG;
1656 def : Pat<(f32 (fround FR64:$src)), (VCVTSD2SSrr FR64:$src, FR64:$src)>,
1659 def CVTSD2SSrr : SDI<0x5A, MRMSrcReg, (outs FR32:$dst), (ins FR64:$src),
1660 "cvtsd2ss\t{$src, $dst|$dst, $src}",
1661 [(set FR32:$dst, (fround FR64:$src))],
1662 IIC_SSE_CVT_Scalar_RR>;
1663 def CVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst), (ins f64mem:$src),
1664 "cvtsd2ss\t{$src, $dst|$dst, $src}",
1665 [(set FR32:$dst, (fround (loadf64 addr:$src)))],
1666 IIC_SSE_CVT_Scalar_RM>,
1668 Requires<[HasSSE2, OptForSize]>;
1670 def Int_VCVTSD2SSrr: I<0x5A, MRMSrcReg,
1671 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1672 "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1674 (int_x86_sse2_cvtsd2ss VR128:$src1, VR128:$src2))],
1675 IIC_SSE_CVT_Scalar_RR>, XD, VEX_4V, Requires<[HasAVX]>;
1676 def Int_VCVTSD2SSrm: I<0x5A, MRMSrcMem,
1677 (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2),
1678 "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1679 [(set VR128:$dst, (int_x86_sse2_cvtsd2ss
1680 VR128:$src1, sse_load_f64:$src2))],
1681 IIC_SSE_CVT_Scalar_RM>, XD, VEX_4V, Requires<[HasAVX]>;
1683 let Constraints = "$src1 = $dst" in {
1684 def Int_CVTSD2SSrr: I<0x5A, MRMSrcReg,
1685 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1686 "cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1688 (int_x86_sse2_cvtsd2ss VR128:$src1, VR128:$src2))],
1689 IIC_SSE_CVT_Scalar_RR>, XD, Requires<[HasSSE2]>;
1690 def Int_CVTSD2SSrm: I<0x5A, MRMSrcMem,
1691 (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2),
1692 "cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1693 [(set VR128:$dst, (int_x86_sse2_cvtsd2ss
1694 VR128:$src1, sse_load_f64:$src2))],
1695 IIC_SSE_CVT_Scalar_RM>, XD, Requires<[HasSSE2]>;
1698 // Convert scalar single to scalar double
1699 // SSE2 instructions with XS prefix
1700 let neverHasSideEffects = 1 in {
1701 def VCVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst),
1702 (ins FR32:$src1, FR32:$src2),
1703 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1704 [], IIC_SSE_CVT_Scalar_RR>,
1705 XS, Requires<[HasAVX]>, VEX_4V, VEX_LIG;
1707 def VCVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst),
1708 (ins FR32:$src1, f32mem:$src2),
1709 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1710 [], IIC_SSE_CVT_Scalar_RM>,
1711 XS, VEX_4V, VEX_LIG, Requires<[HasAVX, OptForSize]>;
1714 let AddedComplexity = 1 in { // give AVX priority
1715 def : Pat<(f64 (fextend FR32:$src)),
1716 (VCVTSS2SDrr FR32:$src, FR32:$src)>, Requires<[HasAVX]>;
1717 def : Pat<(fextend (loadf32 addr:$src)),
1718 (VCVTSS2SDrm (f32 (IMPLICIT_DEF)), addr:$src)>, Requires<[HasAVX]>;
1720 def : Pat<(extloadf32 addr:$src),
1721 (VCVTSS2SDrm (f32 (IMPLICIT_DEF)), addr:$src)>,
1722 Requires<[HasAVX, OptForSize]>;
1723 def : Pat<(extloadf32 addr:$src),
1724 (VCVTSS2SDrr (f32 (IMPLICIT_DEF)), (VMOVSSrm addr:$src))>,
1725 Requires<[HasAVX, OptForSpeed]>;
1726 } // AddedComplexity = 1
1728 def CVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst), (ins FR32:$src),
1729 "cvtss2sd\t{$src, $dst|$dst, $src}",
1730 [(set FR64:$dst, (fextend FR32:$src))],
1731 IIC_SSE_CVT_Scalar_RR>, XS,
1732 Requires<[HasSSE2]>;
1733 def CVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst), (ins f32mem:$src),
1734 "cvtss2sd\t{$src, $dst|$dst, $src}",
1735 [(set FR64:$dst, (extloadf32 addr:$src))],
1736 IIC_SSE_CVT_Scalar_RM>, XS,
1737 Requires<[HasSSE2, OptForSize]>;
1739 // extload f32 -> f64. This matches load+fextend because we have a hack in
1740 // the isel (PreprocessForFPConvert) that can introduce loads after dag
1741 // combine.
1742 // Since these loads aren't folded into the fextend, we have to match it
1743 // explicitly here.
1744 def : Pat<(fextend (loadf32 addr:$src)),
1745 (CVTSS2SDrm addr:$src)>, Requires<[HasSSE2]>;
1746 def : Pat<(extloadf32 addr:$src),
1747 (CVTSS2SDrr (MOVSSrm addr:$src))>, Requires<[HasSSE2, OptForSpeed]>;
1749 def Int_VCVTSS2SDrr: I<0x5A, MRMSrcReg,
1750 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1751 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1753 (int_x86_sse2_cvtss2sd VR128:$src1, VR128:$src2))],
1754 IIC_SSE_CVT_Scalar_RR>, XS, VEX_4V, Requires<[HasAVX]>;
1755 def Int_VCVTSS2SDrm: I<0x5A, MRMSrcMem,
1756 (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2),
1757 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1759 (int_x86_sse2_cvtss2sd VR128:$src1, sse_load_f32:$src2))],
1760 IIC_SSE_CVT_Scalar_RM>, XS, VEX_4V, Requires<[HasAVX]>;
1761 let Constraints = "$src1 = $dst" in { // SSE2 instructions with XS prefix
1762 def Int_CVTSS2SDrr: I<0x5A, MRMSrcReg,
1763 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1764 "cvtss2sd\t{$src2, $dst|$dst, $src2}",
1766 (int_x86_sse2_cvtss2sd VR128:$src1, VR128:$src2))],
1767 IIC_SSE_CVT_Scalar_RR>, XS, Requires<[HasSSE2]>;
1768 def Int_CVTSS2SDrm: I<0x5A, MRMSrcMem,
1769 (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2),
1770 "cvtss2sd\t{$src2, $dst|$dst, $src2}",
1772 (int_x86_sse2_cvtss2sd VR128:$src1, sse_load_f32:$src2))],
1773 IIC_SSE_CVT_Scalar_RM>, XS, Requires<[HasSSE2]>;
1776 // Convert packed single/double fp to doubleword
1777 def VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1778 "cvtps2dq\t{$src, $dst|$dst, $src}",
1779 [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))],
1780 IIC_SSE_CVT_PS_RR>, VEX;
1781 def VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1782 "cvtps2dq\t{$src, $dst|$dst, $src}",
1784 (int_x86_sse2_cvtps2dq (memopv4f32 addr:$src)))],
1785 IIC_SSE_CVT_PS_RM>, VEX;
1786 def VCVTPS2DQYrr : VPDI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
1787 "cvtps2dq\t{$src, $dst|$dst, $src}",
1789 (int_x86_avx_cvt_ps2dq_256 VR256:$src))],
1790 IIC_SSE_CVT_PS_RR>, VEX;
1791 def VCVTPS2DQYrm : VPDI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
1792 "cvtps2dq\t{$src, $dst|$dst, $src}",
1794 (int_x86_avx_cvt_ps2dq_256 (memopv8f32 addr:$src)))],
1795 IIC_SSE_CVT_PS_RM>, VEX;
1796 def CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1797 "cvtps2dq\t{$src, $dst|$dst, $src}",
1798 [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))],
1800 def CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1801 "cvtps2dq\t{$src, $dst|$dst, $src}",
1803 (int_x86_sse2_cvtps2dq (memopv4f32 addr:$src)))],
1807 // Convert Packed Double FP to Packed DW Integers
1808 let Predicates = [HasAVX] in {
1809 // The assembler can recognize rr 256-bit instructions by seeing a ymm
1810 // register, but the same isn't true when using memory operands instead.
1811 // Provide other assembly rr and rm forms to address this explicitly.
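// For example (illustrative): "vcvtpd2dq (%rax), %xmm0" leaves the width of
// the packed-double source ambiguous, so the suffixed spellings
// "vcvtpd2dqx" (128-bit source) and "vcvtpd2dqy" (256-bit source) name it
// explicitly.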
1812 def VCVTPD2DQrr : SDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1813 "vcvtpd2dq\t{$src, $dst|$dst, $src}",
1814 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
1818 def : InstAlias<"vcvtpd2dqx\t{$src, $dst|$dst, $src}",
1819 (VCVTPD2DQrr VR128:$dst, VR128:$src)>;
1820 def VCVTPD2DQXrm : SDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1821 "vcvtpd2dqx\t{$src, $dst|$dst, $src}",
1823 (int_x86_sse2_cvtpd2dq (memopv2f64 addr:$src)))]>, VEX;
1826 def VCVTPD2DQYrr : SDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
1827 "vcvtpd2dq{y}\t{$src, $dst|$dst, $src}",
1829 (int_x86_avx_cvt_pd2dq_256 VR256:$src))]>, VEX;
1830 def VCVTPD2DQYrm : SDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
1831 "vcvtpd2dq{y}\t{$src, $dst|$dst, $src}",
1833 (int_x86_avx_cvt_pd2dq_256 (memopv4f64 addr:$src)))]>,
1835 def : InstAlias<"vcvtpd2dq\t{$src, $dst|$dst, $src}",
1836 (VCVTPD2DQYrr VR128:$dst, VR256:$src)>;
1839 def CVTPD2DQrm : SDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1840 "cvtpd2dq\t{$src, $dst|$dst, $src}",
1842 (int_x86_sse2_cvtpd2dq (memopv2f64 addr:$src)))],
1844 def CVTPD2DQrr : SDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1845 "cvtpd2dq\t{$src, $dst|$dst, $src}",
1846 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))],
1849 // Convert packed single/double fp to doubleword with truncation
1850 // SSE2 packed instructions with XS prefix
1851 def VCVTTPS2DQrr : VS2SI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1852 "cvttps2dq\t{$src, $dst|$dst, $src}",
1854 (int_x86_sse2_cvttps2dq VR128:$src))],
1855 IIC_SSE_CVT_PS_RR>, VEX;
1856 def VCVTTPS2DQrm : VS2SI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1857 "cvttps2dq\t{$src, $dst|$dst, $src}",
1858 [(set VR128:$dst, (int_x86_sse2_cvttps2dq
1859 (memopv4f32 addr:$src)))],
1860 IIC_SSE_CVT_PS_RM>, VEX;
1861 def VCVTTPS2DQYrr : VS2SI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
1862 "cvttps2dq\t{$src, $dst|$dst, $src}",
1864 (int_x86_avx_cvtt_ps2dq_256 VR256:$src))],
1865 IIC_SSE_CVT_PS_RR>, VEX;
1866 def VCVTTPS2DQYrm : VS2SI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
1867 "cvttps2dq\t{$src, $dst|$dst, $src}",
1868 [(set VR256:$dst, (int_x86_avx_cvtt_ps2dq_256
1869 (memopv8f32 addr:$src)))],
1870 IIC_SSE_CVT_PS_RM>, VEX;
1872 def CVTTPS2DQrr : S2SI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1873 "cvttps2dq\t{$src, $dst|$dst, $src}",
1874 [(set VR128:$dst, (int_x86_sse2_cvttps2dq VR128:$src))],
1876 def CVTTPS2DQrm : S2SI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1877 "cvttps2dq\t{$src, $dst|$dst, $src}",
1879 (int_x86_sse2_cvttps2dq (memopv4f32 addr:$src)))],
1882 let Predicates = [HasAVX] in {
1883 def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
1884 (VCVTDQ2PSrr VR128:$src)>;
1885 def : Pat<(v4f32 (sint_to_fp (bc_v4i32 (memopv2i64 addr:$src)))),
1886 (VCVTDQ2PSrm addr:$src)>;
1888 def : Pat<(int_x86_sse2_cvtdq2ps VR128:$src),
1889 (VCVTDQ2PSrr VR128:$src)>;
1890 def : Pat<(int_x86_sse2_cvtdq2ps (bc_v4i32 (memopv2i64 addr:$src))),
1891 (VCVTDQ2PSrm addr:$src)>;
1893 def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
1894 (VCVTTPS2DQrr VR128:$src)>;
1895 def : Pat<(v4i32 (fp_to_sint (memopv4f32 addr:$src))),
1896 (VCVTTPS2DQrm addr:$src)>;
1898 def : Pat<(v8f32 (sint_to_fp (v8i32 VR256:$src))),
1899 (VCVTDQ2PSYrr VR256:$src)>;
1900 def : Pat<(v8f32 (sint_to_fp (bc_v8i32 (memopv4i64 addr:$src)))),
1901 (VCVTDQ2PSYrm addr:$src)>;
1903 def : Pat<(v8i32 (fp_to_sint (v8f32 VR256:$src))),
1904 (VCVTTPS2DQYrr VR256:$src)>;
1905 def : Pat<(v8i32 (fp_to_sint (memopv8f32 addr:$src))),
1906 (VCVTTPS2DQYrm addr:$src)>;
1909 let Predicates = [HasSSE2] in {
1910 def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
1911 (CVTDQ2PSrr VR128:$src)>;
1912 def : Pat<(v4f32 (sint_to_fp (bc_v4i32 (memopv2i64 addr:$src)))),
1913 (CVTDQ2PSrm addr:$src)>;
1915 def : Pat<(int_x86_sse2_cvtdq2ps VR128:$src),
1916 (CVTDQ2PSrr VR128:$src)>;
1917 def : Pat<(int_x86_sse2_cvtdq2ps (bc_v4i32 (memopv2i64 addr:$src))),
1918 (CVTDQ2PSrm addr:$src)>;
1920 def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
1921 (CVTTPS2DQrr VR128:$src)>;
1922 def : Pat<(v4i32 (fp_to_sint (memopv4f32 addr:$src))),
1923 (CVTTPS2DQrm addr:$src)>;
1926 def VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1927 "cvttpd2dq\t{$src, $dst|$dst, $src}",
1929 (int_x86_sse2_cvttpd2dq VR128:$src))],
1930 IIC_SSE_CVT_PD_RR>, VEX;
1932 // The assembler can recognize rr 256-bit instructions by seeing a ymm
1933 // register, but the same isn't true when using memory operands instead.
1934 // Provide other assembly rr and rm forms to address this explicitly.
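// The same ambiguity applies here; e.g. (Intel syntax, illustrative)
// "vcvttpd2dq xmm0, [rax]" does not fix the source width, while
// "vcvttpd2dqx"/"vcvttpd2dqy" do.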
1937 def : InstAlias<"vcvttpd2dqx\t{$src, $dst|$dst, $src}",
1938 (VCVTTPD2DQrr VR128:$dst, VR128:$src)>;
1939 def VCVTTPD2DQXrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1940 "cvttpd2dqx\t{$src, $dst|$dst, $src}",
1941 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
1942 (memopv2f64 addr:$src)))],
1943 IIC_SSE_CVT_PD_RM>, VEX;
1946 def VCVTTPD2DQYrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
1947 "cvttpd2dq{y}\t{$src, $dst|$dst, $src}",
1949 (int_x86_avx_cvtt_pd2dq_256 VR256:$src))],
1950 IIC_SSE_CVT_PD_RR>, VEX;
1951 def VCVTTPD2DQYrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
1952 "cvttpd2dq{y}\t{$src, $dst|$dst, $src}",
1954 (int_x86_avx_cvtt_pd2dq_256 (memopv4f64 addr:$src)))],
1955 IIC_SSE_CVT_PD_RM>, VEX, VEX_L;
1956 def : InstAlias<"vcvttpd2dq\t{$src, $dst|$dst, $src}",
1957 (VCVTTPD2DQYrr VR128:$dst, VR256:$src)>;
1959 let Predicates = [HasAVX] in {
1960 def : Pat<(v4i32 (fp_to_sint (v4f64 VR256:$src))),
1961 (VCVTTPD2DQYrr VR256:$src)>;
1962 def : Pat<(v4i32 (fp_to_sint (memopv4f64 addr:$src))),
1963 (VCVTTPD2DQYrm addr:$src)>;
1964 } // Predicates = [HasAVX]
1966 def CVTTPD2DQrr : PDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1967 "cvttpd2dq\t{$src, $dst|$dst, $src}",
1968 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))],
1970 def CVTTPD2DQrm : PDI<0xE6, MRMSrcMem, (outs VR128:$dst),(ins f128mem:$src),
1971 "cvttpd2dq\t{$src, $dst|$dst, $src}",
1972 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
1973 (memopv2f64 addr:$src)))],
1976 // Convert packed single to packed double
1977 let Predicates = [HasAVX] in {
1978 // SSE2 instructions without OpSize prefix
1979 def VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1980 "vcvtps2pd\t{$src, $dst|$dst, $src}",
1981 [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))],
1982 IIC_SSE_CVT_PD_RR>, TB, VEX;
1983 let neverHasSideEffects = 1, mayLoad = 1 in
1984 def VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
1985 "vcvtps2pd\t{$src, $dst|$dst, $src}", [],
1986 IIC_SSE_CVT_PD_RM>, TB, VEX;
1987 def VCVTPS2PDYrr : I<0x5A, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
1988 "vcvtps2pd\t{$src, $dst|$dst, $src}",
1990 (int_x86_avx_cvt_ps2_pd_256 VR128:$src))],
1991 IIC_SSE_CVT_PD_RR>, TB, VEX;
1992 def VCVTPS2PDYrm : I<0x5A, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
1993 "vcvtps2pd\t{$src, $dst|$dst, $src}",
1995 (int_x86_avx_cvt_ps2_pd_256 (memopv4f32 addr:$src)))],
1996 IIC_SSE_CVT_PD_RM>, TB, VEX;
1999 let Predicates = [HasSSE2] in {
2000 def CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2001 "cvtps2pd\t{$src, $dst|$dst, $src}",
2002 [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))],
2003 IIC_SSE_CVT_PD_RR>, TB;
2004 let neverHasSideEffects = 1, mayLoad = 1 in
2005 def CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
2006 "cvtps2pd\t{$src, $dst|$dst, $src}", [],
2007 IIC_SSE_CVT_PD_RM>, TB;
2010 // Convert Packed DW Integers to Packed Double FP
2011 let Predicates = [HasAVX] in {
2012 let neverHasSideEffects = 1, mayLoad = 1 in
2013 def VCVTDQ2PDrm : S2SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
2014 "vcvtdq2pd\t{$src, $dst|$dst, $src}",
2016 def VCVTDQ2PDrr : S2SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2017 "vcvtdq2pd\t{$src, $dst|$dst, $src}",
2019 (int_x86_sse2_cvtdq2pd VR128:$src))]>, VEX;
2020 def VCVTDQ2PDYrm : S2SI<0xE6, MRMSrcMem, (outs VR256:$dst), (ins i128mem:$src),
2021 "vcvtdq2pd\t{$src, $dst|$dst, $src}",
2023 (int_x86_avx_cvtdq2_pd_256
2024 (bitconvert (memopv2i64 addr:$src))))]>, VEX;
2025 def VCVTDQ2PDYrr : S2SI<0xE6, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
2026 "vcvtdq2pd\t{$src, $dst|$dst, $src}",
2028 (int_x86_avx_cvtdq2_pd_256 VR128:$src))]>, VEX;
2031 let neverHasSideEffects = 1, mayLoad = 1 in
2032 def CVTDQ2PDrm : S2SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
2033 "cvtdq2pd\t{$src, $dst|$dst, $src}", [],
2035 def CVTDQ2PDrr : S2SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2036 "cvtdq2pd\t{$src, $dst|$dst, $src}",
2037 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))],
2040 // AVX 256-bit sint_to_fp conversion patterns
2041 let Predicates = [HasAVX] in {
2042 def : Pat<(v4f64 (sint_to_fp (v4i32 VR128:$src))),
2043 (VCVTDQ2PDYrr VR128:$src)>;
2044 def : Pat<(v4f64 (sint_to_fp (bc_v4i32 (memopv2i64 addr:$src)))),
2045 (VCVTDQ2PDYrm addr:$src)>;
2046 } // Predicates = [HasAVX]
2048 // Convert packed double to packed single
2049 // The assembler can recognize rr 256-bit instructions by seeing a ymm
2050 // register, but the same isn't true when using memory operands instead.
2051 // Provide other assembly rr and rm forms to address this explicitly.
2052 def VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2053 "cvtpd2ps\t{$src, $dst|$dst, $src}",
2054 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))],
2055 IIC_SSE_CVT_PD_RR>, VEX;
2058 def : InstAlias<"vcvtpd2psx\t{$src, $dst|$dst, $src}",
2059 (VCVTPD2PSrr VR128:$dst, VR128:$src)>;
2060 def VCVTPD2PSXrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2061 "cvtpd2psx\t{$src, $dst|$dst, $src}",
2063 (int_x86_sse2_cvtpd2ps (memopv2f64 addr:$src)))],
2064 IIC_SSE_CVT_PD_RM>, VEX;
2067 def VCVTPD2PSYrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
2068 "cvtpd2ps{y}\t{$src, $dst|$dst, $src}",
2070 (int_x86_avx_cvt_pd2_ps_256 VR256:$src))],
2071 IIC_SSE_CVT_PD_RR>, VEX;
2072 def VCVTPD2PSYrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
2073 "cvtpd2ps{y}\t{$src, $dst|$dst, $src}",
2075 (int_x86_avx_cvt_pd2_ps_256 (memopv4f64 addr:$src)))],
2076 IIC_SSE_CVT_PD_RM>, VEX, VEX_L;
2077 def : InstAlias<"vcvtpd2ps\t{$src, $dst|$dst, $src}",
2078 (VCVTPD2PSYrr VR128:$dst, VR256:$src)>;
2080 def CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2081 "cvtpd2ps\t{$src, $dst|$dst, $src}",
2082 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))],
2084 def CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2085 "cvtpd2ps\t{$src, $dst|$dst, $src}",
2087 (int_x86_sse2_cvtpd2ps (memopv2f64 addr:$src)))],
2091 // AVX 256-bit register conversion intrinsics
2092 // FIXME: Migrate SSE conversion-intrinsic matching to use patterns like the
2093 // ones below whenever possible, to avoid declaring two versions of each one.
2094 let Predicates = [HasAVX] in {
2095 def : Pat<(int_x86_avx_cvtdq2_ps_256 VR256:$src),
2096 (VCVTDQ2PSYrr VR256:$src)>;
2097 def : Pat<(int_x86_avx_cvtdq2_ps_256 (bitconvert (memopv4i64 addr:$src))),
2098 (VCVTDQ2PSYrm addr:$src)>;
2100 // Match fround and fextend for 128/256-bit conversions
2101 def : Pat<(v4f32 (fround (v4f64 VR256:$src))),
2102 (VCVTPD2PSYrr VR256:$src)>;
2103 def : Pat<(v4f32 (fround (loadv4f64 addr:$src))),
2104 (VCVTPD2PSYrm addr:$src)>;
2106 def : Pat<(v2f64 (X86vfpext (v4f32 VR128:$src))),
2107 (VCVTPS2PDrr VR128:$src)>;
2108 def : Pat<(v4f64 (fextend (v4f32 VR128:$src))),
2109 (VCVTPS2PDYrr VR128:$src)>;
2110 def : Pat<(v4f64 (fextend (loadv4f32 addr:$src))),
2111 (VCVTPS2PDYrm addr:$src)>;
2114 let Predicates = [HasSSE2] in {
2115 // Match fextend for 128 conversions
2116 def : Pat<(v2f64 (X86vfpext (v4f32 VR128:$src))),
2117 (CVTPS2PDrr VR128:$src)>;
2120 //===----------------------------------------------------------------------===//
2121 // SSE 1 & 2 - Compare Instructions
2122 //===----------------------------------------------------------------------===//
2124 // sse12_cmp_scalar - sse 1 & 2 compare scalar instructions
2125 multiclass sse12_cmp_scalar<RegisterClass RC, X86MemOperand x86memop,
2126 Operand CC, SDNode OpNode, ValueType VT,
2127 PatFrag ld_frag, string asm, string asm_alt,
2129 def rr : SIi8<0xC2, MRMSrcReg,
2130 (outs RC:$dst), (ins RC:$src1, RC:$src2, CC:$cc), asm,
2131 [(set RC:$dst, (OpNode (VT RC:$src1), RC:$src2, imm:$cc))],
2133 def rm : SIi8<0xC2, MRMSrcMem,
2134 (outs RC:$dst), (ins RC:$src1, x86memop:$src2, CC:$cc), asm,
2135 [(set RC:$dst, (OpNode (VT RC:$src1),
2136 (ld_frag addr:$src2), imm:$cc))],
2139 // Accept explicit immediate argument form instead of comparison code.
2140 let neverHasSideEffects = 1 in {
2141 def rr_alt : SIi8<0xC2, MRMSrcReg, (outs RC:$dst),
2142 (ins RC:$src1, RC:$src2, i8imm:$cc), asm_alt, [],
2143 IIC_SSE_ALU_F32S_RR>;
2145 def rm_alt : SIi8<0xC2, MRMSrcMem, (outs RC:$dst),
2146 (ins RC:$src1, x86memop:$src2, i8imm:$cc), asm_alt, [],
2147 IIC_SSE_ALU_F32S_RM>;
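// For example (illustrative): "cmpeqss %xmm1, %xmm0" and
// "cmpss $0, %xmm1, %xmm0" denote the same encoding; the *_alt defs above
// exist so the assembler also accepts the raw-immediate spelling.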
2151 defm VCMPSS : sse12_cmp_scalar<FR32, f32mem, AVXCC, X86cmpss, f32, loadf32,
2152 "cmp${cc}ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2153 "cmpss\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
2155 XS, VEX_4V, VEX_LIG;
2156 defm VCMPSD : sse12_cmp_scalar<FR64, f64mem, AVXCC, X86cmpsd, f64, loadf64,
2157 "cmp${cc}sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2158 "cmpsd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
2159 SSE_ALU_F32S>, // same latency as 32 bit compare
2160 XD, VEX_4V, VEX_LIG;
2162 let Constraints = "$src1 = $dst" in {
2163 defm CMPSS : sse12_cmp_scalar<FR32, f32mem, SSECC, X86cmpss, f32, loadf32,
2164 "cmp${cc}ss\t{$src2, $dst|$dst, $src2}",
2165 "cmpss\t{$cc, $src2, $dst|$dst, $src2, $cc}", SSE_ALU_F32S>,
2167 defm CMPSD : sse12_cmp_scalar<FR64, f64mem, SSECC, X86cmpsd, f64, loadf64,
2168 "cmp${cc}sd\t{$src2, $dst|$dst, $src2}",
2169 "cmpsd\t{$cc, $src2, $dst|$dst, $src2, $cc}",
2170 SSE_ALU_F32S>, // same latency as 32 bit compare
2174 multiclass sse12_cmp_scalar_int<X86MemOperand x86memop, Operand CC,
2175 Intrinsic Int, string asm, OpndItins itins> {
2176 def rr : SIi8<0xC2, MRMSrcReg, (outs VR128:$dst),
2177 (ins VR128:$src1, VR128:$src, CC:$cc), asm,
2178 [(set VR128:$dst, (Int VR128:$src1,
2179 VR128:$src, imm:$cc))],
2181 def rm : SIi8<0xC2, MRMSrcMem, (outs VR128:$dst),
2182 (ins VR128:$src1, x86memop:$src, CC:$cc), asm,
2183 [(set VR128:$dst, (Int VR128:$src1,
2184 (load addr:$src), imm:$cc))],
2188 // Aliases to match intrinsics which expect XMM operand(s).
2189 defm Int_VCMPSS : sse12_cmp_scalar_int<f32mem, AVXCC, int_x86_sse_cmp_ss,
2190 "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}",
2193 defm Int_VCMPSD : sse12_cmp_scalar_int<f64mem, AVXCC, int_x86_sse2_cmp_sd,
2194 "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}",
2195 SSE_ALU_F32S>, // same latency as f32
2197 let Constraints = "$src1 = $dst" in {
2198 defm Int_CMPSS : sse12_cmp_scalar_int<f32mem, SSECC, int_x86_sse_cmp_ss,
2199 "cmp${cc}ss\t{$src, $dst|$dst, $src}",
2201 defm Int_CMPSD : sse12_cmp_scalar_int<f64mem, SSECC, int_x86_sse2_cmp_sd,
2202 "cmp${cc}sd\t{$src, $dst|$dst, $src}",
2203 SSE_ALU_F32S>, // same latency as f32
2208 // sse12_ord_cmp - Unordered/Ordered scalar fp compare and set EFLAGS
2209 multiclass sse12_ord_cmp<bits<8> opc, RegisterClass RC, SDNode OpNode,
2210 ValueType vt, X86MemOperand x86memop,
2211 PatFrag ld_frag, string OpcodeStr, Domain d> {
2212 def rr: PI<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
2213 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
2214 [(set EFLAGS, (OpNode (vt RC:$src1), RC:$src2))],
2215 IIC_SSE_COMIS_RR, d>;
2216 def rm: PI<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
2217 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
2218 [(set EFLAGS, (OpNode (vt RC:$src1),
2219 (ld_frag addr:$src2)))],
2220 IIC_SSE_COMIS_RM, d>;
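// As a reminder (per the Intel SDM), the (u)comis* instructions modeled here
// report the comparison through ZF/PF/CF, with PF=1 signaling an unordered
// result, and clear OF, SF and AF.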
2223 let Defs = [EFLAGS] in {
2224 defm VUCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
2225 "ucomiss", SSEPackedSingle>, TB, VEX, VEX_LIG;
2226 defm VUCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
2227 "ucomisd", SSEPackedDouble>, TB, OpSize, VEX,
2229 let Pattern = []<dag> in {
2230 defm VCOMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
2231 "comiss", SSEPackedSingle>, TB, VEX,
2233 defm VCOMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
2234 "comisd", SSEPackedDouble>, TB, OpSize, VEX,
2238 defm Int_VUCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
2239 load, "ucomiss", SSEPackedSingle>, TB, VEX;
2240 defm Int_VUCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
2241 load, "ucomisd", SSEPackedDouble>, TB, OpSize, VEX;
2243 defm Int_VCOMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem,
2244 load, "comiss", SSEPackedSingle>, TB, VEX;
2245 defm Int_VCOMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem,
2246 load, "comisd", SSEPackedDouble>, TB, OpSize, VEX;
2247 defm UCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
2248 "ucomiss", SSEPackedSingle>, TB;
2249 defm UCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
2250 "ucomisd", SSEPackedDouble>, TB, OpSize;
2252 let Pattern = []<dag> in {
2253 defm COMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
2254 "comiss", SSEPackedSingle>, TB;
2255 defm COMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
2256 "comisd", SSEPackedDouble>, TB, OpSize;
2259 defm Int_UCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
2260 load, "ucomiss", SSEPackedSingle>, TB;
2261 defm Int_UCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
2262 load, "ucomisd", SSEPackedDouble>, TB, OpSize;
2264 defm Int_COMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem, load,
2265 "comiss", SSEPackedSingle>, TB;
2266 defm Int_COMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem, load,
2267 "comisd", SSEPackedDouble>, TB, OpSize;
2268 } // Defs = [EFLAGS]
2270 // sse12_cmp_packed - sse 1 & 2 compare packed instructions
2271 multiclass sse12_cmp_packed<RegisterClass RC, X86MemOperand x86memop,
2272 Operand CC, Intrinsic Int, string asm,
2273 string asm_alt, Domain d> {
2274 def rri : PIi8<0xC2, MRMSrcReg,
2275 (outs RC:$dst), (ins RC:$src1, RC:$src2, CC:$cc), asm,
2276 [(set RC:$dst, (Int RC:$src1, RC:$src2, imm:$cc))],
2277 IIC_SSE_CMPP_RR, d>;
2278 def rmi : PIi8<0xC2, MRMSrcMem,
2279 (outs RC:$dst), (ins RC:$src1, x86memop:$src2, CC:$cc), asm,
2280 [(set RC:$dst, (Int RC:$src1, (memop addr:$src2), imm:$cc))],
2281 IIC_SSE_CMPP_RM, d>;
2283 // Accept explicit immediate argument form instead of comparison code.
2284 let neverHasSideEffects = 1 in {
2285 def rri_alt : PIi8<0xC2, MRMSrcReg,
2286 (outs RC:$dst), (ins RC:$src1, RC:$src2, i8imm:$cc),
2287 asm_alt, [], IIC_SSE_CMPP_RR, d>;
2288 def rmi_alt : PIi8<0xC2, MRMSrcMem,
2289 (outs RC:$dst), (ins RC:$src1, x86memop:$src2, i8imm:$cc),
2290 asm_alt, [], IIC_SSE_CMPP_RM, d>;
2294 defm VCMPPS : sse12_cmp_packed<VR128, f128mem, AVXCC, int_x86_sse_cmp_ps,
2295 "cmp${cc}ps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2296 "cmpps\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
2297 SSEPackedSingle>, TB, VEX_4V;
2298 defm VCMPPD : sse12_cmp_packed<VR128, f128mem, AVXCC, int_x86_sse2_cmp_pd,
2299 "cmp${cc}pd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2300 "cmppd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
2301 SSEPackedDouble>, TB, OpSize, VEX_4V;
2302 defm VCMPPSY : sse12_cmp_packed<VR256, f256mem, AVXCC, int_x86_avx_cmp_ps_256,
2303 "cmp${cc}ps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2304 "cmpps\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
2305 SSEPackedSingle>, TB, VEX_4V;
2306 defm VCMPPDY : sse12_cmp_packed<VR256, f256mem, AVXCC, int_x86_avx_cmp_pd_256,
2307 "cmp${cc}pd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2308 "cmppd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
2309 SSEPackedDouble>, TB, OpSize, VEX_4V;
2310 let Constraints = "$src1 = $dst" in {
2311 defm CMPPS : sse12_cmp_packed<VR128, f128mem, SSECC, int_x86_sse_cmp_ps,
2312 "cmp${cc}ps\t{$src2, $dst|$dst, $src2}",
2313 "cmpps\t{$cc, $src2, $dst|$dst, $src2, $cc}",
2314 SSEPackedSingle>, TB;
2315 defm CMPPD : sse12_cmp_packed<VR128, f128mem, SSECC, int_x86_sse2_cmp_pd,
2316 "cmp${cc}pd\t{$src2, $dst|$dst, $src2}",
2317 "cmppd\t{$cc, $src2, $dst|$dst, $src2, $cc}",
2318 SSEPackedDouble>, TB, OpSize;
2321 let Predicates = [HasAVX] in {
2322 def : Pat<(v4i32 (X86cmpp (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
2323 (VCMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>;
2324 def : Pat<(v4i32 (X86cmpp (v4f32 VR128:$src1), (memop addr:$src2), imm:$cc)),
2325 (VCMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>;
2326 def : Pat<(v2i64 (X86cmpp (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
2327 (VCMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
2328 def : Pat<(v2i64 (X86cmpp (v2f64 VR128:$src1), (memop addr:$src2), imm:$cc)),
2329 (VCMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;
2331 def : Pat<(v8i32 (X86cmpp (v8f32 VR256:$src1), VR256:$src2, imm:$cc)),
2332 (VCMPPSYrri (v8f32 VR256:$src1), (v8f32 VR256:$src2), imm:$cc)>;
2333 def : Pat<(v8i32 (X86cmpp (v8f32 VR256:$src1), (memop addr:$src2), imm:$cc)),
2334 (VCMPPSYrmi (v8f32 VR256:$src1), addr:$src2, imm:$cc)>;
2335 def : Pat<(v4i64 (X86cmpp (v4f64 VR256:$src1), VR256:$src2, imm:$cc)),
2336 (VCMPPDYrri VR256:$src1, VR256:$src2, imm:$cc)>;
2337 def : Pat<(v4i64 (X86cmpp (v4f64 VR256:$src1), (memop addr:$src2), imm:$cc)),
2338 (VCMPPDYrmi VR256:$src1, addr:$src2, imm:$cc)>;
2341 let Predicates = [HasSSE1] in {
2342 def : Pat<(v4i32 (X86cmpp (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
2343 (CMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>;
2344 def : Pat<(v4i32 (X86cmpp (v4f32 VR128:$src1), (memop addr:$src2), imm:$cc)),
2345 (CMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>;
2348 let Predicates = [HasSSE2] in {
2349 def : Pat<(v2i64 (X86cmpp (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
2350 (CMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
2351 def : Pat<(v2i64 (X86cmpp (v2f64 VR128:$src1), (memop addr:$src2), imm:$cc)),
2352 (CMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;
2355 //===----------------------------------------------------------------------===//
2356 // SSE 1 & 2 - Shuffle Instructions
2357 //===----------------------------------------------------------------------===//
2359 /// sse12_shuffle - sse 1 & 2 shuffle instructions
2360 multiclass sse12_shuffle<RegisterClass RC, X86MemOperand x86memop,
2361 ValueType vt, string asm, PatFrag mem_frag,
2362 Domain d, bit IsConvertibleToThreeAddress = 0> {
2363 def rmi : PIi8<0xC6, MRMSrcMem, (outs RC:$dst),
2364 (ins RC:$src1, x86memop:$src2, i8imm:$src3), asm,
2365 [(set RC:$dst, (vt (X86Shufp RC:$src1, (mem_frag addr:$src2),
2366 (i8 imm:$src3))))], IIC_SSE_SHUFP, d>;
2367 let isConvertibleToThreeAddress = IsConvertibleToThreeAddress in
2368 def rri : PIi8<0xC6, MRMSrcReg, (outs RC:$dst),
2369 (ins RC:$src1, RC:$src2, i8imm:$src3), asm,
2370 [(set RC:$dst, (vt (X86Shufp RC:$src1, RC:$src2,
2371 (i8 imm:$src3))))], IIC_SSE_SHUFP, d>;
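// For reference (shufps case, illustrative): the $src3 immediate selects
// elements as dst[0] = src1[imm[1:0]], dst[1] = src1[imm[3:2]],
// dst[2] = src2[imm[5:4]], dst[3] = src2[imm[7:6]].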
2374 defm VSHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
2375 "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
2376 memopv4f32, SSEPackedSingle>, TB, VEX_4V;
2377 defm VSHUFPSY : sse12_shuffle<VR256, f256mem, v8f32,
2378 "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
2379 memopv8f32, SSEPackedSingle>, TB, VEX_4V;
2380 defm VSHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
2381 "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src2, $src2, $src3}",
2382 memopv2f64, SSEPackedDouble>, TB, OpSize, VEX_4V;
2383 defm VSHUFPDY : sse12_shuffle<VR256, f256mem, v4f64,
2384 "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src2, $src2, $src3}",
2385 memopv4f64, SSEPackedDouble>, TB, OpSize, VEX_4V;
2387 let Constraints = "$src1 = $dst" in {
2388 defm SHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
2389 "shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}",
2390 memopv4f32, SSEPackedSingle, 1 /* cvt to pshufd */>,
2392 defm SHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
2393 "shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}",
2394 memopv2f64, SSEPackedDouble, 1 /* cvt to pshufd */>,
2398 let Predicates = [HasAVX] in {
2399 def : Pat<(v4i32 (X86Shufp VR128:$src1,
2400 (bc_v4i32 (memopv2i64 addr:$src2)), (i8 imm:$imm))),
2401 (VSHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
2402 def : Pat<(v4i32 (X86Shufp VR128:$src1, VR128:$src2, (i8 imm:$imm))),
2403 (VSHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
2405 def : Pat<(v2i64 (X86Shufp VR128:$src1,
2406 (memopv2i64 addr:$src2), (i8 imm:$imm))),
2407 (VSHUFPDrmi VR128:$src1, addr:$src2, imm:$imm)>;
2408 def : Pat<(v2i64 (X86Shufp VR128:$src1, VR128:$src2, (i8 imm:$imm))),
2409 (VSHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;
2412 def : Pat<(v8i32 (X86Shufp VR256:$src1, VR256:$src2, (i8 imm:$imm))),
2413 (VSHUFPSYrri VR256:$src1, VR256:$src2, imm:$imm)>;
2414 def : Pat<(v8i32 (X86Shufp VR256:$src1,
2415 (bc_v8i32 (memopv4i64 addr:$src2)), (i8 imm:$imm))),
2416 (VSHUFPSYrmi VR256:$src1, addr:$src2, imm:$imm)>;
2418 def : Pat<(v4i64 (X86Shufp VR256:$src1, VR256:$src2, (i8 imm:$imm))),
2419 (VSHUFPDYrri VR256:$src1, VR256:$src2, imm:$imm)>;
2420 def : Pat<(v4i64 (X86Shufp VR256:$src1,
2421 (memopv4i64 addr:$src2), (i8 imm:$imm))),
2422 (VSHUFPDYrmi VR256:$src1, addr:$src2, imm:$imm)>;
2425 let Predicates = [HasSSE1] in {
2426 def : Pat<(v4i32 (X86Shufp VR128:$src1,
2427 (bc_v4i32 (memopv2i64 addr:$src2)), (i8 imm:$imm))),
2428 (SHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
2429 def : Pat<(v4i32 (X86Shufp VR128:$src1, VR128:$src2, (i8 imm:$imm))),
2430 (SHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
2433 let Predicates = [HasSSE2] in {
2434 // Generic SHUFPD patterns
2435 def : Pat<(v2i64 (X86Shufp VR128:$src1,
2436 (memopv2i64 addr:$src2), (i8 imm:$imm))),
2437 (SHUFPDrmi VR128:$src1, addr:$src2, imm:$imm)>;
2438 def : Pat<(v2i64 (X86Shufp VR128:$src1, VR128:$src2, (i8 imm:$imm))),
2439 (SHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;
2442 //===----------------------------------------------------------------------===//
2443 // SSE 1 & 2 - Unpack Instructions
2444 //===----------------------------------------------------------------------===//
2446 /// sse12_unpack_interleave - sse 1 & 2 unpack and interleave
2447 multiclass sse12_unpack_interleave<bits<8> opc, SDNode OpNode, ValueType vt,
2448 PatFrag mem_frag, RegisterClass RC,
2449 X86MemOperand x86memop, string asm,
2451 def rr : PI<opc, MRMSrcReg,
2452 (outs RC:$dst), (ins RC:$src1, RC:$src2),
2454 (vt (OpNode RC:$src1, RC:$src2)))],
2456 def rm : PI<opc, MRMSrcMem,
2457 (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
2459 (vt (OpNode RC:$src1,
2460 (mem_frag addr:$src2))))],
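// For reference (illustrative): with $src1 = {a0,a1,a2,a3} and
// $src2 = {b0,b1,b2,b3}, unpcklps produces {a0,b0,a1,b1} and unpckhps
// produces {a2,b2,a3,b3}.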
2464 defm VUNPCKHPS: sse12_unpack_interleave<0x15, X86Unpckh, v4f32, memopv4f32,
2465 VR128, f128mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2466 SSEPackedSingle>, TB, VEX_4V;
2467 defm VUNPCKHPD: sse12_unpack_interleave<0x15, X86Unpckh, v2f64, memopv2f64,
2468 VR128, f128mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2469 SSEPackedDouble>, TB, OpSize, VEX_4V;
2470 defm VUNPCKLPS: sse12_unpack_interleave<0x14, X86Unpckl, v4f32, memopv4f32,
2471 VR128, f128mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2472 SSEPackedSingle>, TB, VEX_4V;
2473 defm VUNPCKLPD: sse12_unpack_interleave<0x14, X86Unpckl, v2f64, memopv2f64,
2474 VR128, f128mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2475 SSEPackedDouble>, TB, OpSize, VEX_4V;
2477 defm VUNPCKHPSY: sse12_unpack_interleave<0x15, X86Unpckh, v8f32, memopv8f32,
2478 VR256, f256mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2479 SSEPackedSingle>, TB, VEX_4V;
2480 defm VUNPCKHPDY: sse12_unpack_interleave<0x15, X86Unpckh, v4f64, memopv4f64,
2481 VR256, f256mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2482 SSEPackedDouble>, TB, OpSize, VEX_4V;
2483 defm VUNPCKLPSY: sse12_unpack_interleave<0x14, X86Unpckl, v8f32, memopv8f32,
2484 VR256, f256mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2485 SSEPackedSingle>, TB, VEX_4V;
2486 defm VUNPCKLPDY: sse12_unpack_interleave<0x14, X86Unpckl, v4f64, memopv4f64,
2487 VR256, f256mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2488 SSEPackedDouble>, TB, OpSize, VEX_4V;
2490 let Constraints = "$src1 = $dst" in {
2491 defm UNPCKHPS: sse12_unpack_interleave<0x15, X86Unpckh, v4f32, memopv4f32,
2492 VR128, f128mem, "unpckhps\t{$src2, $dst|$dst, $src2}",
2493 SSEPackedSingle>, TB;
2494 defm UNPCKHPD: sse12_unpack_interleave<0x15, X86Unpckh, v2f64, memopv2f64,
2495 VR128, f128mem, "unpckhpd\t{$src2, $dst|$dst, $src2}",
2496 SSEPackedDouble>, TB, OpSize;
2497 defm UNPCKLPS: sse12_unpack_interleave<0x14, X86Unpckl, v4f32, memopv4f32,
2498 VR128, f128mem, "unpcklps\t{$src2, $dst|$dst, $src2}",
2499 SSEPackedSingle>, TB;
2500 defm UNPCKLPD: sse12_unpack_interleave<0x14, X86Unpckl, v2f64, memopv2f64,
2501 VR128, f128mem, "unpcklpd\t{$src2, $dst|$dst, $src2}",
2502 SSEPackedDouble>, TB, OpSize;
2503 } // Constraints = "$src1 = $dst"
2505 let Predicates = [HasAVX1Only] in {
2506 def : Pat<(v8i32 (X86Unpckl VR256:$src1, (bc_v8i32 (memopv4i64 addr:$src2)))),
2507 (VUNPCKLPSYrm VR256:$src1, addr:$src2)>;
2508 def : Pat<(v8i32 (X86Unpckl VR256:$src1, VR256:$src2)),
2509 (VUNPCKLPSYrr VR256:$src1, VR256:$src2)>;
2510 def : Pat<(v8i32 (X86Unpckh VR256:$src1, (bc_v8i32 (memopv4i64 addr:$src2)))),
2511 (VUNPCKHPSYrm VR256:$src1, addr:$src2)>;
2512 def : Pat<(v8i32 (X86Unpckh VR256:$src1, VR256:$src2)),
2513 (VUNPCKHPSYrr VR256:$src1, VR256:$src2)>;
2515 def : Pat<(v4i64 (X86Unpckl VR256:$src1, (memopv4i64 addr:$src2))),
2516 (VUNPCKLPDYrm VR256:$src1, addr:$src2)>;
2517 def : Pat<(v4i64 (X86Unpckl VR256:$src1, VR256:$src2)),
2518 (VUNPCKLPDYrr VR256:$src1, VR256:$src2)>;
2519 def : Pat<(v4i64 (X86Unpckh VR256:$src1, (memopv4i64 addr:$src2))),
2520 (VUNPCKHPDYrm VR256:$src1, addr:$src2)>;
2521 def : Pat<(v4i64 (X86Unpckh VR256:$src1, VR256:$src2)),
2522 (VUNPCKHPDYrr VR256:$src1, VR256:$src2)>;
2525 let Predicates = [HasAVX], AddedComplexity = 1 in {
2526 // FIXME: Instead of X86Movddup, there should be an X86Unpckl here. The
2527 // problem is during lowering, where the load fold cannot be recognized
2528 // because the load has two uses through a bitcast. One use disappears at
2529 // isel time and the fold opportunity reappears.
2530 def : Pat<(v2f64 (X86Movddup VR128:$src)),
2531 (VUNPCKLPDrr VR128:$src, VR128:$src)>;
2534 let Predicates = [HasSSE2] in {
2535 // FIXME: Instead of X86Movddup, there should be an X86Unpckl here. The
2536 // problem is during lowering, where the load fold cannot be recognized
2537 // because the load has two uses through a bitcast. One use disappears at
2538 // isel time and the fold opportunity reappears.
2539 def : Pat<(v2f64 (X86Movddup VR128:$src)),
2540 (UNPCKLPDrr VR128:$src, VR128:$src)>;
2543 //===----------------------------------------------------------------------===//
2544 // SSE 1 & 2 - Extract Floating-Point Sign mask
2545 //===----------------------------------------------------------------------===//
2547 /// sse12_extr_sign_mask - sse 1 & 2 packed FP sign-mask extraction
2548 multiclass sse12_extr_sign_mask<RegisterClass RC, Intrinsic Int, string asm,
2550 def rr32 : PI<0x50, MRMSrcReg, (outs GR32:$dst), (ins RC:$src),
2551 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
2552 [(set GR32:$dst, (Int RC:$src))], IIC_SSE_MOVMSK, d>;
2553 def rr64 : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins RC:$src),
2554 !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [],
2555 IIC_SSE_MOVMSK, d>, REX_W;
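// For reference (illustrative): movmskps packs the sign bit of each packed
// single into the low bits of the destination GPR (4 bits for an xmm source,
// 8 for a ymm source); movmskpd does the same for packed doubles.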
2558 let Predicates = [HasAVX] in {
2559 defm VMOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps,
2560 "movmskps", SSEPackedSingle>, TB, VEX;
2561 defm VMOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd,
2562 "movmskpd", SSEPackedDouble>, TB,
2564 defm VMOVMSKPSY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_ps_256,
2565 "movmskps", SSEPackedSingle>, TB, VEX;
2566 defm VMOVMSKPDY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_pd_256,
2567 "movmskpd", SSEPackedDouble>, TB,
2570 def : Pat<(i32 (X86fgetsign FR32:$src)),
2571 (VMOVMSKPSrr32 (COPY_TO_REGCLASS FR32:$src, VR128))>;
2572 def : Pat<(i64 (X86fgetsign FR32:$src)),
2573 (VMOVMSKPSrr64 (COPY_TO_REGCLASS FR32:$src, VR128))>;
2574 def : Pat<(i32 (X86fgetsign FR64:$src)),
2575 (VMOVMSKPDrr32 (COPY_TO_REGCLASS FR64:$src, VR128))>;
2576 def : Pat<(i64 (X86fgetsign FR64:$src)),
2577 (VMOVMSKPDrr64 (COPY_TO_REGCLASS FR64:$src, VR128))>;
2580 def VMOVMSKPSr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
2581 "movmskps\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVMSK,
2582 SSEPackedSingle>, TB, VEX;
2583 def VMOVMSKPDr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
2584 "movmskpd\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVMSK,
2585 SSEPackedDouble>, TB,
2587 def VMOVMSKPSYr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
2588 "movmskps\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVMSK,
2589 SSEPackedSingle>, TB, VEX;
2590 def VMOVMSKPDYr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
2591 "movmskpd\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVMSK,
2592 SSEPackedDouble>, TB,
2596 defm MOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps, "movmskps",
2597 SSEPackedSingle>, TB;
2598 defm MOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd, "movmskpd",
2599 SSEPackedDouble>, TB, OpSize;
2601 def : Pat<(i32 (X86fgetsign FR32:$src)),
2602 (MOVMSKPSrr32 (COPY_TO_REGCLASS FR32:$src, VR128))>,
2603 Requires<[HasSSE1]>;
2604 def : Pat<(i64 (X86fgetsign FR32:$src)),
2605 (MOVMSKPSrr64 (COPY_TO_REGCLASS FR32:$src, VR128))>,
2606 Requires<[HasSSE1]>;
2607 def : Pat<(i32 (X86fgetsign FR64:$src)),
2608 (MOVMSKPDrr32 (COPY_TO_REGCLASS FR64:$src, VR128))>,
2609 Requires<[HasSSE2]>;
2610 def : Pat<(i64 (X86fgetsign FR64:$src)),
2611 (MOVMSKPDrr64 (COPY_TO_REGCLASS FR64:$src, VR128))>,
2612 Requires<[HasSSE2]>;
2614 //===---------------------------------------------------------------------===//
2615 // SSE2 - Packed Integer Logical Instructions
2616 //===---------------------------------------------------------------------===//
2618 let ExeDomain = SSEPackedInt in { // SSE integer instructions
2620 /// PDI_binop_rm - Simple SSE2 binary operator.
2621 multiclass PDI_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
2622 ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
2623 X86MemOperand x86memop,
2625 bit IsCommutable = 0,
2627 let isCommutable = IsCommutable in
2628 def rr : PDI<opc, MRMSrcReg, (outs RC:$dst),
2629 (ins RC:$src1, RC:$src2),
2631 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2632 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2633 [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2)))], itins.rr>;
2634 def rm : PDI<opc, MRMSrcMem, (outs RC:$dst),
2635 (ins RC:$src1, x86memop:$src2),
2637 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2638 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2639 [(set RC:$dst, (OpVT (OpNode RC:$src1,
2640 (bitconvert (memop_frag addr:$src2)))))],
2643 } // ExeDomain = SSEPackedInt
2645 // These are defined here because of pattern-ordering requirements with respect
2645 // to the fp versions below.
2647 let Predicates = [HasAVX] in {
2648 defm VPAND : PDI_binop_rm<0xDB, "vpand", and, v2i64, VR128, memopv2i64,
2649 i128mem, SSE_BIT_ITINS_P, 1, 0>, VEX_4V;
2650 defm VPOR : PDI_binop_rm<0xEB, "vpor" , or, v2i64, VR128, memopv2i64,
2651 i128mem, SSE_BIT_ITINS_P, 1, 0>, VEX_4V;
2652 defm VPXOR : PDI_binop_rm<0xEF, "vpxor", xor, v2i64, VR128, memopv2i64,
2653 i128mem, SSE_BIT_ITINS_P, 1, 0>, VEX_4V;
2654 defm VPANDN : PDI_binop_rm<0xDF, "vpandn", X86andnp, v2i64, VR128, memopv2i64,
2655 i128mem, SSE_BIT_ITINS_P, 0, 0>, VEX_4V;
2658 let Constraints = "$src1 = $dst" in {
2659 defm PAND : PDI_binop_rm<0xDB, "pand", and, v2i64, VR128, memopv2i64,
2660 i128mem, SSE_BIT_ITINS_P, 1>;
2661 defm POR : PDI_binop_rm<0xEB, "por" , or, v2i64, VR128, memopv2i64,
2662 i128mem, SSE_BIT_ITINS_P, 1>;
2663 defm PXOR : PDI_binop_rm<0xEF, "pxor", xor, v2i64, VR128, memopv2i64,
2664 i128mem, SSE_BIT_ITINS_P, 1>;
2665 defm PANDN : PDI_binop_rm<0xDF, "pandn", X86andnp, v2i64, VR128, memopv2i64,
2666 i128mem, SSE_BIT_ITINS_P, 0>;
2667 } // Constraints = "$src1 = $dst"
2669 let Predicates = [HasAVX2] in {
2670 defm VPANDY : PDI_binop_rm<0xDB, "vpand", and, v4i64, VR256, memopv4i64,
2671 i256mem, SSE_BIT_ITINS_P, 1, 0>, VEX_4V;
2672 defm VPORY : PDI_binop_rm<0xEB, "vpor", or, v4i64, VR256, memopv4i64,
2673 i256mem, SSE_BIT_ITINS_P, 1, 0>, VEX_4V;
2674 defm VPXORY : PDI_binop_rm<0xEF, "vpxor", xor, v4i64, VR256, memopv4i64,
2675 i256mem, SSE_BIT_ITINS_P, 1, 0>, VEX_4V;
2676 defm VPANDNY : PDI_binop_rm<0xDF, "vpandn", X86andnp, v4i64, VR256, memopv4i64,
2677 i256mem, SSE_BIT_ITINS_P, 0, 0>, VEX_4V;
2680 //===----------------------------------------------------------------------===//
2681 // SSE 1 & 2 - Logical Instructions
2682 //===----------------------------------------------------------------------===//
2684 /// sse12_fp_alias_pack_logical - SSE 1 & 2 aliased packed FP logical ops
2686 multiclass sse12_fp_alias_pack_logical<bits<8> opc, string OpcodeStr,
2687 SDNode OpNode, OpndItins itins> {
2688 defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
2689 FR32, f32, f128mem, memopfsf32, SSEPackedSingle, itins, 0>,
2692 defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
2693 FR64, f64, f128mem, memopfsf64, SSEPackedDouble, itins, 0>,
2696 let Constraints = "$src1 = $dst" in {
2697 defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, FR32,
2698 f32, f128mem, memopfsf32, SSEPackedSingle, itins>,
2701 defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, FR64,
2702 f64, f128mem, memopfsf64, SSEPackedDouble, itins>,
2707 // Alias bitwise logical operations using SSE logical ops on packed FP values.
2708 defm FsAND : sse12_fp_alias_pack_logical<0x54, "and", X86fand,
2710 defm FsOR : sse12_fp_alias_pack_logical<0x56, "or", X86for,
2712 defm FsXOR : sse12_fp_alias_pack_logical<0x57, "xor", X86fxor,
2715 let neverHasSideEffects = 1, Pattern = []<dag>, isCommutable = 0 in
2716 defm FsANDN : sse12_fp_alias_pack_logical<0x55, "andn", undef,
2719 /// sse12_fp_packed_logical - SSE 1 & 2 packed FP logical ops
2721 multiclass sse12_fp_packed_logical<bits<8> opc, string OpcodeStr,
2723 // On AVX there is no need to add a pattern for the 128-bit logical rr ps form,
2724 // because such operations are all promoted to v2i64 and the patterns are
2725 // covered by the int version. The pattern is needed only for SSE, because
2726 // v2i64 isn't supported on SSE1, only on SSE2.
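// Concretely (sketch): by the time isel runs, an (and (v4f32 ...), ...) node
// has been promoted to v2i64, so it is matched by the integer VPAND defs
// earlier in this file rather than by a ps-specific pattern.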
2727 defm V#NAME#PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
2728 !strconcat(OpcodeStr, "ps"), f128mem, [],
2729 [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
2730 (memopv2i64 addr:$src2)))], 0, 1>, TB, VEX_4V;
2732 defm V#NAME#PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
2733 !strconcat(OpcodeStr, "pd"), f128mem,
2734 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
2735 (bc_v2i64 (v2f64 VR128:$src2))))],
2736 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
(memopv2i64 addr:$src2)))], 0>,
TB, OpSize, VEX_4V;
2739 let Constraints = "$src1 = $dst" in {
2740 defm PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
2741 !strconcat(OpcodeStr, "ps"), f128mem,
2742 [(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))],
2743 [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
2744 (memopv2i64 addr:$src2)))]>, TB;
2746 defm PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
2747 !strconcat(OpcodeStr, "pd"), f128mem,
2748 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
2749 (bc_v2i64 (v2f64 VR128:$src2))))],
2750 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
(memopv2i64 addr:$src2)))]>, TB, OpSize;
}
}
2755 /// sse12_fp_packed_logical_y - AVX 256-bit SSE 1 & 2 logical ops forms
multiclass sse12_fp_packed_logical_y<bits<8> opc, string OpcodeStr,
SDNode OpNode> {
2759 defm PSY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedSingle,
2760 !strconcat(OpcodeStr, "ps"), f256mem,
2761 [(set VR256:$dst, (v4i64 (OpNode VR256:$src1, VR256:$src2)))],
2762 [(set VR256:$dst, (OpNode (bc_v4i64 (v8f32 VR256:$src1)),
2763 (memopv4i64 addr:$src2)))], 0>, TB, VEX_4V;
2765 defm PDY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedDouble,
2766 !strconcat(OpcodeStr, "pd"), f256mem,
2767 [(set VR256:$dst, (OpNode (bc_v4i64 (v4f64 VR256:$src1)),
2768 (bc_v4i64 (v4f64 VR256:$src2))))],
2769 [(set VR256:$dst, (OpNode (bc_v4i64 (v4f64 VR256:$src1)),
(memopv4i64 addr:$src2)))], 0>,
TB, OpSize, VEX_4V;
}
2774 // AVX 256-bit packed logical ops forms
2775 defm VAND : sse12_fp_packed_logical_y<0x54, "and", and>;
2776 defm VOR : sse12_fp_packed_logical_y<0x56, "or", or>;
2777 defm VXOR : sse12_fp_packed_logical_y<0x57, "xor", xor>;
2778 defm VANDN : sse12_fp_packed_logical_y<0x55, "andn", X86andnp>;
2780 defm AND : sse12_fp_packed_logical<0x54, "and", and>;
2781 defm OR : sse12_fp_packed_logical<0x56, "or", or>;
2782 defm XOR : sse12_fp_packed_logical<0x57, "xor", xor>;
2783 let isCommutable = 0 in
2784 defm ANDN : sse12_fp_packed_logical<0x55, "andn", X86andnp>;
2786 //===----------------------------------------------------------------------===//
2787 // SSE 1 & 2 - Arithmetic Instructions
2788 //===----------------------------------------------------------------------===//
/// basic_sse12_fp_binop_xxx - SSE 1 & 2 binops come in both scalar and
/// vector forms.
2793 /// In addition, we also have a special variant of the scalar form here to
2794 /// represent the associated intrinsic operation. This form is unlike the
2795 /// plain scalar form, in that it takes an entire vector (instead of a scalar)
2796 /// and leaves the top elements unmodified (therefore these cannot be commuted).
2798 /// These three forms can each be reg+reg or reg+mem.
/// FIXME: once all 256-bit intrinsics are matched, clean up and refactor
/// those classes below.
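// As an illustrative example of the naming scheme: instantiating
// basic_sse12_fp_binop_s<0x58, "add", fadd, SSE_ALU_ITINS_S> yields the
// ADDSSrr/ADDSSrm and ADDSDrr/ADDSDrm forms, and the matching
// basic_sse12_fp_binop_s_int instantiation yields the corresponding
// vector-operand ADDSS*_Int variants described above.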
multiclass basic_sse12_fp_binop_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
SizeItins itins, bit Is2Addr = 1> {
2806 defm SS : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "ss"),
2807 OpNode, FR32, f32mem,
2808 itins.s, Is2Addr>, XS;
2809 defm SD : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "sd"),
2810 OpNode, FR64, f64mem,
itins.d, Is2Addr>, XD;
}
multiclass basic_sse12_fp_binop_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
SizeItins itins, bit Is2Addr = 1> {
2817 defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR128,
v4f32, f128mem, memopv4f32, SSEPackedSingle, itins.s, Is2Addr>,
TB;
2820 defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR128,
v2f64, f128mem, memopv2f64, SSEPackedDouble, itins.d, Is2Addr>,
TB, OpSize;
}
multiclass basic_sse12_fp_binop_p_y<bits<8> opc, string OpcodeStr,
SDNode OpNode, SizeItins itins> {
2828 defm PSY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR256,
v8f32, f256mem, memopv8f32, SSEPackedSingle, itins.s, 0>,
TB;
2831 defm PDY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR256,
v4f64, f256mem, memopv4f64, SSEPackedDouble, itins.d, 0>,
TB, OpSize;
}
multiclass basic_sse12_fp_binop_s_int<bits<8> opc, string OpcodeStr,
SizeItins itins, bit Is2Addr = 1> {
2839 defm SS : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
2840 !strconcat(OpcodeStr, "ss"), "", "_ss", ssmem, sse_load_f32,
2841 itins.s, Is2Addr>, XS;
2842 defm SD : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
2843 !strconcat(OpcodeStr, "sd"), "2", "_sd", sdmem, sse_load_f64,
itins.d, Is2Addr>, XD;
}
multiclass basic_sse12_fp_binop_p_int<bits<8> opc, string OpcodeStr,
SizeItins itins, bit Is2Addr = 1> {
2850 defm PS : sse12_fp_packed_int<opc, OpcodeStr, VR128,
2851 !strconcat(OpcodeStr, "ps"), "sse", "_ps", f128mem, memopv4f32,
SSEPackedSingle, itins.s, Is2Addr>,
TB;
2855 defm PD : sse12_fp_packed_int<opc, OpcodeStr, VR128,
2856 !strconcat(OpcodeStr, "pd"), "sse2", "_pd", f128mem, memopv2f64,
SSEPackedDouble, itins.d, Is2Addr>,
TB, OpSize;
}
multiclass basic_sse12_fp_binop_p_y_int<bits<8> opc, string OpcodeStr,
SizeItins itins> {
2863 defm PSY : sse12_fp_packed_int<opc, OpcodeStr, VR256,
2864 !strconcat(OpcodeStr, "ps"), "avx", "_ps_256", f256mem, memopv8f32,
2865 SSEPackedSingle, itins.s, 0>, TB;
2867 defm PDY : sse12_fp_packed_int<opc, OpcodeStr, VR256,
2868 !strconcat(OpcodeStr, "pd"), "avx", "_pd_256", f256mem, memopv4f64,
SSEPackedDouble, itins.d, 0>, TB, OpSize;
}
2872 // Binary Arithmetic instructions
2873 defm VADD : basic_sse12_fp_binop_s<0x58, "add", fadd, SSE_ALU_ITINS_S, 0>,
basic_sse12_fp_binop_s_int<0x58, "add", SSE_ALU_ITINS_S, 0>,
VEX_4V, VEX_LIG;
2876 defm VADD : basic_sse12_fp_binop_p<0x58, "add", fadd, SSE_ALU_ITINS_P, 0>,
basic_sse12_fp_binop_p_y<0x58, "add", fadd, SSE_ALU_ITINS_P>,
VEX_4V;
2879 defm VMUL : basic_sse12_fp_binop_s<0x59, "mul", fmul, SSE_MUL_ITINS_S, 0>,
basic_sse12_fp_binop_s_int<0x59, "mul", SSE_MUL_ITINS_S, 0>,
VEX_4V, VEX_LIG;
2882 defm VMUL : basic_sse12_fp_binop_p<0x59, "mul", fmul, SSE_MUL_ITINS_P, 0>,
basic_sse12_fp_binop_p_y<0x59, "mul", fmul, SSE_MUL_ITINS_P>,
VEX_4V;
2886 let isCommutable = 0 in {
2887 defm VSUB : basic_sse12_fp_binop_s<0x5C, "sub", fsub, SSE_ALU_ITINS_S, 0>,
basic_sse12_fp_binop_s_int<0x5C, "sub", SSE_ALU_ITINS_S, 0>,
VEX_4V, VEX_LIG;
2890 defm VSUB : basic_sse12_fp_binop_p<0x5C, "sub", fsub, SSE_ALU_ITINS_P, 0>,
basic_sse12_fp_binop_p_y<0x5C, "sub", fsub, SSE_ALU_ITINS_P>,
VEX_4V;
2893 defm VDIV : basic_sse12_fp_binop_s<0x5E, "div", fdiv, SSE_DIV_ITINS_S, 0>,
basic_sse12_fp_binop_s_int<0x5E, "div", SSE_DIV_ITINS_S, 0>,
VEX_4V, VEX_LIG;
defm VDIV : basic_sse12_fp_binop_p<0x5E, "div", fdiv, SSE_DIV_ITINS_P, 0>,
basic_sse12_fp_binop_p_y<0x5E, "div", fdiv, SSE_DIV_ITINS_P>,
VEX_4V;
2899 defm VMAX : basic_sse12_fp_binop_s<0x5F, "max", X86fmax, SSE_ALU_ITINS_S, 0>,
basic_sse12_fp_binop_s_int<0x5F, "max", SSE_ALU_ITINS_S, 0>,
VEX_4V, VEX_LIG;
2902 defm VMAX : basic_sse12_fp_binop_p<0x5F, "max", X86fmax, SSE_ALU_ITINS_P, 0>,
2903 basic_sse12_fp_binop_p_int<0x5F, "max", SSE_ALU_ITINS_P, 0>,
2904 basic_sse12_fp_binop_p_y<0x5F, "max", X86fmax, SSE_ALU_ITINS_P>,
basic_sse12_fp_binop_p_y_int<0x5F, "max", SSE_ALU_ITINS_P>,
VEX_4V;
2907 defm VMIN : basic_sse12_fp_binop_s<0x5D, "min", X86fmin, SSE_ALU_ITINS_S, 0>,
basic_sse12_fp_binop_s_int<0x5D, "min", SSE_ALU_ITINS_S, 0>,
VEX_4V, VEX_LIG;
2910 defm VMIN : basic_sse12_fp_binop_p<0x5D, "min", X86fmin, SSE_ALU_ITINS_P, 0>,
2911 basic_sse12_fp_binop_p_int<0x5D, "min", SSE_ALU_ITINS_P, 0>,
2912 basic_sse12_fp_binop_p_y_int<0x5D, "min", SSE_ALU_ITINS_P>,
basic_sse12_fp_binop_p_y<0x5D, "min", X86fmin, SSE_ALU_ITINS_P>,
VEX_4V;
}
2917 let Constraints = "$src1 = $dst" in {
2918 defm ADD : basic_sse12_fp_binop_s<0x58, "add", fadd, SSE_ALU_ITINS_S>,
2919 basic_sse12_fp_binop_p<0x58, "add", fadd, SSE_ALU_ITINS_P>,
2920 basic_sse12_fp_binop_s_int<0x58, "add", SSE_ALU_ITINS_S>;
2921 defm MUL : basic_sse12_fp_binop_s<0x59, "mul", fmul, SSE_MUL_ITINS_S>,
2922 basic_sse12_fp_binop_p<0x59, "mul", fmul, SSE_MUL_ITINS_P>,
2923 basic_sse12_fp_binop_s_int<0x59, "mul", SSE_MUL_ITINS_S>;
2925 let isCommutable = 0 in {
2926 defm SUB : basic_sse12_fp_binop_s<0x5C, "sub", fsub, SSE_ALU_ITINS_S>,
2927 basic_sse12_fp_binop_p<0x5C, "sub", fsub, SSE_ALU_ITINS_P>,
2928 basic_sse12_fp_binop_s_int<0x5C, "sub", SSE_ALU_ITINS_S>;
2929 defm DIV : basic_sse12_fp_binop_s<0x5E, "div", fdiv, SSE_DIV_ITINS_S>,
2930 basic_sse12_fp_binop_p<0x5E, "div", fdiv, SSE_DIV_ITINS_P>,
2931 basic_sse12_fp_binop_s_int<0x5E, "div", SSE_DIV_ITINS_S>;
2932 defm MAX : basic_sse12_fp_binop_s<0x5F, "max", X86fmax, SSE_ALU_ITINS_S>,
2933 basic_sse12_fp_binop_p<0x5F, "max", X86fmax, SSE_ALU_ITINS_P>,
2934 basic_sse12_fp_binop_s_int<0x5F, "max", SSE_ALU_ITINS_S>,
2935 basic_sse12_fp_binop_p_int<0x5F, "max", SSE_ALU_ITINS_P>;
2936 defm MIN : basic_sse12_fp_binop_s<0x5D, "min", X86fmin, SSE_ALU_ITINS_S>,
2937 basic_sse12_fp_binop_p<0x5D, "min", X86fmin, SSE_ALU_ITINS_P>,
2938 basic_sse12_fp_binop_s_int<0x5D, "min", SSE_ALU_ITINS_S>,
basic_sse12_fp_binop_p_int<0x5D, "min", SSE_ALU_ITINS_P>;
}
} // Constraints = "$src1 = $dst"
2943 let isCommutable = 1, isCodeGenOnly = 1 in {
defm VMAXC: basic_sse12_fp_binop_s<0x5F, "max", X86fmaxc, SSE_ALU_ITINS_S, 0>,
VEX_4V, VEX_LIG;
2946 defm VMAXC: basic_sse12_fp_binop_p<0x5F, "max", X86fmaxc, SSE_ALU_ITINS_P, 0>,
2947 basic_sse12_fp_binop_p_y<0x5F, "max", X86fmaxc, SSE_ALU_ITINS_P>, VEX_4V;
defm VMINC: basic_sse12_fp_binop_s<0x5D, "min", X86fminc, SSE_ALU_ITINS_S, 0>,
VEX_4V, VEX_LIG;
2950 defm VMINC: basic_sse12_fp_binop_p<0x5D, "min", X86fminc, SSE_ALU_ITINS_P, 0>,
2951 basic_sse12_fp_binop_p_y<0x5D, "min", X86fminc, SSE_ALU_ITINS_P>, VEX_4V;
2952 let Constraints = "$src1 = $dst" in {
2953 defm MAXC: basic_sse12_fp_binop_s<0x5F, "max", X86fmaxc, SSE_ALU_ITINS_S>,
2954 basic_sse12_fp_binop_p<0x5F, "max", X86fmaxc, SSE_ALU_ITINS_P>;
2955 defm MINC: basic_sse12_fp_binop_s<0x5D, "min", X86fminc, SSE_ALU_ITINS_S>,
2956 basic_sse12_fp_binop_p<0x5D, "min", X86fminc, SSE_ALU_ITINS_P>;
/// Unop Arithmetic
/// In addition, we also have a special variant of the scalar form here to
2962 /// represent the associated intrinsic operation. This form is unlike the
2963 /// plain scalar form, in that it takes an entire vector (instead of a
2964 /// scalar) and leaves the top elements undefined.
/// And, we have a special variant for the full-vector intrinsic form.
2968 def SSE_SQRTP : OpndItins<
IIC_SSE_SQRTP_RR, IIC_SSE_SQRTP_RM
>;
2972 def SSE_SQRTS : OpndItins<
IIC_SSE_SQRTS_RR, IIC_SSE_SQRTS_RM
>;
2976 def SSE_RCPP : OpndItins<
IIC_SSE_RCPP_RR, IIC_SSE_RCPP_RM
>;
2980 def SSE_RCPS : OpndItins<
IIC_SSE_RCPS_RR, IIC_SSE_RCPS_RM
>;
2984 /// sse1_fp_unop_s - SSE1 unops in scalar form.
2985 multiclass sse1_fp_unop_s<bits<8> opc, string OpcodeStr,
2986 SDNode OpNode, Intrinsic F32Int, OpndItins itins> {
2987 def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
2988 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
2989 [(set FR32:$dst, (OpNode FR32:$src))]>;
// For scalar unary operations, fold a load into the operation only in
// OptForSize mode: folding eliminates an instruction, but it also
// eliminates the whole-register clobber that a separate load would
// provide, so it introduces a partial-register-update hazard.
2994 def SSm : I<opc, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
2995 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
2996 [(set FR32:$dst, (OpNode (load addr:$src)))], itins.rm>, XS,
2997 Requires<[HasSSE1, OptForSize]>;
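// For example, with OptForSize "sqrtss (%rax), %xmm0" saves the separate
// load, but it only writes the low lane of %xmm0, so the result depends on
// the register's previous contents; a separate MOVSSrm load zeroes the
// upper lanes and therefore re-defines the whole register.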
2998 def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2999 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
3000 [(set VR128:$dst, (F32Int VR128:$src))], itins.rr>;
3001 def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins ssmem:$src),
3002 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
[(set VR128:$dst, (F32Int sse_load_f32:$src))], itins.rm>;
}
3006 /// sse1_fp_unop_s_avx - AVX SSE1 unops in scalar form.
3007 multiclass sse1_fp_unop_s_avx<bits<8> opc, string OpcodeStr> {
3008 def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
3009 !strconcat(OpcodeStr,
3010 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
3011 let mayLoad = 1 in {
3012 def SSm : SSI<opc, MRMSrcMem, (outs FR32:$dst), (ins FR32:$src1,f32mem:$src2),
3013 !strconcat(OpcodeStr,
3014 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
3015 def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst),
3016 (ins VR128:$src1, ssmem:$src2),
3017 !strconcat(OpcodeStr,
3018 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
3022 /// sse1_fp_unop_p - SSE1 unops in packed form.
multiclass sse1_fp_unop_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
OpndItins itins> {
3025 def PSr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3026 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
3027 [(set VR128:$dst, (v4f32 (OpNode VR128:$src)))], itins.rr>;
3028 def PSm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3029 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
[(set VR128:$dst, (OpNode (memopv4f32 addr:$src)))], itins.rm>;
}
3033 /// sse1_fp_unop_p_y - AVX 256-bit SSE1 unops in packed form.
multiclass sse1_fp_unop_p_y<bits<8> opc, string OpcodeStr, SDNode OpNode,
OpndItins itins> {
3036 def PSYr : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
3037 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
[(set VR256:$dst, (v8f32 (OpNode VR256:$src)))],
itins.rr>;
3040 def PSYm : PSI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
3041 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
[(set VR256:$dst, (OpNode (memopv8f32 addr:$src)))],
itins.rm>;
}
3046 /// sse1_fp_unop_p_int - SSE1 intrinsics unops in packed forms.
3047 multiclass sse1_fp_unop_p_int<bits<8> opc, string OpcodeStr,
3048 Intrinsic V4F32Int, OpndItins itins> {
3049 def PSr_Int : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3050 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
[(set VR128:$dst, (V4F32Int VR128:$src))],
itins.rr>;
3053 def PSm_Int : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3054 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
[(set VR128:$dst, (V4F32Int (memopv4f32 addr:$src)))],
itins.rm>;
}
3059 /// sse1_fp_unop_p_y_int - AVX 256-bit intrinsics unops in packed forms.
3060 multiclass sse1_fp_unop_p_y_int<bits<8> opc, string OpcodeStr,
3061 Intrinsic V4F32Int, OpndItins itins> {
3062 def PSYr_Int : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
3063 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
[(set VR256:$dst, (V4F32Int VR256:$src))],
itins.rr>;
3066 def PSYm_Int : PSI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
3067 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
[(set VR256:$dst, (V4F32Int (memopv8f32 addr:$src)))],
itins.rm>;
}
3072 /// sse2_fp_unop_s - SSE2 unops in scalar form.
3073 multiclass sse2_fp_unop_s<bits<8> opc, string OpcodeStr,
3074 SDNode OpNode, Intrinsic F64Int, OpndItins itins> {
3075 def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
3076 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
3077 [(set FR64:$dst, (OpNode FR64:$src))], itins.rr>;
3078 // See the comments in sse1_fp_unop_s for why this is OptForSize.
3079 def SDm : I<opc, MRMSrcMem, (outs FR64:$dst), (ins f64mem:$src),
3080 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
3081 [(set FR64:$dst, (OpNode (load addr:$src)))], itins.rm>, XD,
3082 Requires<[HasSSE2, OptForSize]>;
3083 def SDr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3084 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
3085 [(set VR128:$dst, (F64Int VR128:$src))], itins.rr>;
3086 def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins sdmem:$src),
3087 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
[(set VR128:$dst, (F64Int sse_load_f64:$src))], itins.rm>;
}
3091 /// sse2_fp_unop_s_avx - AVX SSE2 unops in scalar form.
3092 let hasSideEffects = 0 in
3093 multiclass sse2_fp_unop_s_avx<bits<8> opc, string OpcodeStr> {
3094 def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
3095 !strconcat(OpcodeStr,
3096 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
3097 let mayLoad = 1 in {
3098 def SDm : SDI<opc, MRMSrcMem, (outs FR64:$dst), (ins FR64:$src1,f64mem:$src2),
3099 !strconcat(OpcodeStr,
3100 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
3101 def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst),
3102 (ins VR128:$src1, sdmem:$src2),
3103 !strconcat(OpcodeStr,
3104 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
3108 /// sse2_fp_unop_p - SSE2 unops in vector forms.
3109 multiclass sse2_fp_unop_p<bits<8> opc, string OpcodeStr,
3110 SDNode OpNode, OpndItins itins> {
3111 def PDr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3112 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
3113 [(set VR128:$dst, (v2f64 (OpNode VR128:$src)))], itins.rr>;
3114 def PDm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3115 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
[(set VR128:$dst, (OpNode (memopv2f64 addr:$src)))], itins.rm>;
}
3119 /// sse2_fp_unop_p_y - AVX SSE2 256-bit unops in vector forms.
multiclass sse2_fp_unop_p_y<bits<8> opc, string OpcodeStr, SDNode OpNode,
OpndItins itins> {
3122 def PDYr : PDI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
3123 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
[(set VR256:$dst, (v4f64 (OpNode VR256:$src)))],
itins.rr>;
3126 def PDYm : PDI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
3127 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
[(set VR256:$dst, (OpNode (memopv4f64 addr:$src)))],
itins.rm>;
}
3132 /// sse2_fp_unop_p_int - SSE2 intrinsic unops in vector forms.
3133 multiclass sse2_fp_unop_p_int<bits<8> opc, string OpcodeStr,
3134 Intrinsic V2F64Int, OpndItins itins> {
3135 def PDr_Int : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3136 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
[(set VR128:$dst, (V2F64Int VR128:$src))],
itins.rr>;
3139 def PDm_Int : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3140 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
[(set VR128:$dst, (V2F64Int (memopv2f64 addr:$src)))],
itins.rm>;
}
3145 /// sse2_fp_unop_p_y_int - AVX 256-bit intrinsic unops in vector forms.
3146 multiclass sse2_fp_unop_p_y_int<bits<8> opc, string OpcodeStr,
3147 Intrinsic V2F64Int, OpndItins itins> {
3148 def PDYr_Int : PDI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
3149 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
[(set VR256:$dst, (V2F64Int VR256:$src))],
itins.rr>;
3152 def PDYm_Int : PDI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
3153 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
[(set VR256:$dst, (V2F64Int (memopv4f64 addr:$src)))],
itins.rm>;
}
3158 let Predicates = [HasAVX] in {
3160 defm VSQRT : sse1_fp_unop_s_avx<0x51, "vsqrt">,
3161 sse2_fp_unop_s_avx<0x51, "vsqrt">, VEX_4V, VEX_LIG;
3163 defm VSQRT : sse1_fp_unop_p<0x51, "vsqrt", fsqrt, SSE_SQRTP>,
3164 sse2_fp_unop_p<0x51, "vsqrt", fsqrt, SSE_SQRTP>,
3165 sse1_fp_unop_p_y<0x51, "vsqrt", fsqrt, SSE_SQRTP>,
3166 sse2_fp_unop_p_y<0x51, "vsqrt", fsqrt, SSE_SQRTP>,
sse1_fp_unop_p_int<0x51, "vsqrt", int_x86_sse_sqrt_ps,
SSE_SQRTP>,
sse2_fp_unop_p_int<0x51, "vsqrt", int_x86_sse2_sqrt_pd,
SSE_SQRTP>,
sse1_fp_unop_p_y_int<0x51, "vsqrt", int_x86_avx_sqrt_ps_256,
SSE_SQRTP>,
sse2_fp_unop_p_y_int<0x51, "vsqrt", int_x86_avx_sqrt_pd_256,
SSE_SQRTP>, VEX;
3177 // Reciprocal approximations. Note that these typically require refinement
3178 // in order to obtain suitable precision.
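// The hardware estimates are only good to roughly 12 bits; one
// Newton-Raphson step, e.g. x1 = x0 * (1.5 - 0.5 * a * x0 * x0) for
// rsqrt(a), approximately doubles that precision when callers need it.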
3179 defm VRSQRT : sse1_fp_unop_s_avx<0x52, "vrsqrt">, VEX_4V, VEX_LIG;
3180 defm VRSQRT : sse1_fp_unop_p<0x52, "vrsqrt", X86frsqrt, SSE_SQRTP>,
3181 sse1_fp_unop_p_y<0x52, "vrsqrt", X86frsqrt, SSE_SQRTP>,
sse1_fp_unop_p_y_int<0x52, "vrsqrt", int_x86_avx_rsqrt_ps_256,
SSE_SQRTP>,
sse1_fp_unop_p_int<0x52, "vrsqrt", int_x86_sse_rsqrt_ps,
SSE_SQRTP>, VEX;
3187 defm VRCP : sse1_fp_unop_s_avx<0x53, "vrcp">, VEX_4V, VEX_LIG;
3188 defm VRCP : sse1_fp_unop_p<0x53, "vrcp", X86frcp, SSE_RCPP>,
3189 sse1_fp_unop_p_y<0x53, "vrcp", X86frcp, SSE_RCPP>,
sse1_fp_unop_p_y_int<0x53, "vrcp", int_x86_avx_rcp_ps_256,
SSE_RCPP>,
sse1_fp_unop_p_int<0x53, "vrcp", int_x86_sse_rcp_ps,
SSE_RCPP>, VEX;
}
3196 let AddedComplexity = 1 in {
3197 def : Pat<(f32 (fsqrt FR32:$src)),
3198 (VSQRTSSr (f32 (IMPLICIT_DEF)), FR32:$src)>, Requires<[HasAVX]>;
3199 def : Pat<(f32 (fsqrt (load addr:$src))),
3200 (VSQRTSSm (f32 (IMPLICIT_DEF)), addr:$src)>,
3201 Requires<[HasAVX, OptForSize]>;
3202 def : Pat<(f64 (fsqrt FR64:$src)),
3203 (VSQRTSDr (f64 (IMPLICIT_DEF)), FR64:$src)>, Requires<[HasAVX]>;
3204 def : Pat<(f64 (fsqrt (load addr:$src))),
3205 (VSQRTSDm (f64 (IMPLICIT_DEF)), addr:$src)>,
3206 Requires<[HasAVX, OptForSize]>;
3208 def : Pat<(f32 (X86frsqrt FR32:$src)),
3209 (VRSQRTSSr (f32 (IMPLICIT_DEF)), FR32:$src)>, Requires<[HasAVX]>;
3210 def : Pat<(f32 (X86frsqrt (load addr:$src))),
3211 (VRSQRTSSm (f32 (IMPLICIT_DEF)), addr:$src)>,
3212 Requires<[HasAVX, OptForSize]>;
3214 def : Pat<(f32 (X86frcp FR32:$src)),
3215 (VRCPSSr (f32 (IMPLICIT_DEF)), FR32:$src)>, Requires<[HasAVX]>;
3216 def : Pat<(f32 (X86frcp (load addr:$src))),
3217 (VRCPSSm (f32 (IMPLICIT_DEF)), addr:$src)>,
Requires<[HasAVX, OptForSize]>;
}
3221 let Predicates = [HasAVX], AddedComplexity = 1 in {
3222 def : Pat<(int_x86_sse_sqrt_ss VR128:$src),
3223 (COPY_TO_REGCLASS (VSQRTSSr (f32 (IMPLICIT_DEF)),
(COPY_TO_REGCLASS VR128:$src, FR32)),
VR128)>;
3226 def : Pat<(int_x86_sse_sqrt_ss sse_load_f32:$src),
3227 (VSQRTSSm_Int (v4f32 (IMPLICIT_DEF)), sse_load_f32:$src)>;
3229 def : Pat<(int_x86_sse2_sqrt_sd VR128:$src),
3230 (COPY_TO_REGCLASS (VSQRTSDr (f64 (IMPLICIT_DEF)),
(COPY_TO_REGCLASS VR128:$src, FR64)),
VR128)>;
3233 def : Pat<(int_x86_sse2_sqrt_sd sse_load_f64:$src),
3234 (VSQRTSDm_Int (v2f64 (IMPLICIT_DEF)), sse_load_f64:$src)>;
3236 def : Pat<(int_x86_sse_rsqrt_ss VR128:$src),
3237 (COPY_TO_REGCLASS (VRSQRTSSr (f32 (IMPLICIT_DEF)),
(COPY_TO_REGCLASS VR128:$src, FR32)),
VR128)>;
3240 def : Pat<(int_x86_sse_rsqrt_ss sse_load_f32:$src),
3241 (VRSQRTSSm_Int (v4f32 (IMPLICIT_DEF)), sse_load_f32:$src)>;
3243 def : Pat<(int_x86_sse_rcp_ss VR128:$src),
3244 (COPY_TO_REGCLASS (VRCPSSr (f32 (IMPLICIT_DEF)),
(COPY_TO_REGCLASS VR128:$src, FR32)),
VR128)>;
3247 def : Pat<(int_x86_sse_rcp_ss sse_load_f32:$src),
(VRCPSSm_Int (v4f32 (IMPLICIT_DEF)), sse_load_f32:$src)>;
}
defm SQRT : sse1_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse_sqrt_ss,
SSE_SQRTS>,
3254 sse1_fp_unop_p<0x51, "sqrt", fsqrt, SSE_SQRTS>,
3255 sse1_fp_unop_p_int<0x51, "sqrt", int_x86_sse_sqrt_ps, SSE_SQRTS>,
sse2_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse2_sqrt_sd,
SSE_SQRTS>,
3258 sse2_fp_unop_p<0x51, "sqrt", fsqrt, SSE_SQRTS>,
3259 sse2_fp_unop_p_int<0x51, "sqrt", int_x86_sse2_sqrt_pd, SSE_SQRTS>;
3261 // Reciprocal approximations. Note that these typically require refinement
3262 // in order to obtain suitable precision.
defm RSQRT : sse1_fp_unop_s<0x52, "rsqrt", X86frsqrt, int_x86_sse_rsqrt_ss,
SSE_SQRTS>,
3265 sse1_fp_unop_p<0x52, "rsqrt", X86frsqrt, SSE_SQRTS>,
sse1_fp_unop_p_int<0x52, "rsqrt", int_x86_sse_rsqrt_ps,
SSE_SQRTS>;
defm RCP : sse1_fp_unop_s<0x53, "rcp", X86frcp, int_x86_sse_rcp_ss,
SSE_RCPS>,
3270 sse1_fp_unop_p<0x53, "rcp", X86frcp, SSE_RCPS>,
3271 sse1_fp_unop_p_int<0x53, "rcp", int_x86_sse_rcp_ps, SSE_RCPS>;
3273 // There is no f64 version of the reciprocal approximation instructions.
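// (The estimate hardware is single-precision only; f64 code falls back to
// the exact DIVSD / SQRTSD instructions.)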
3275 //===----------------------------------------------------------------------===//
3276 // SSE 1 & 2 - Non-temporal stores
3277 //===----------------------------------------------------------------------===//
3279 let AddedComplexity = 400 in { // Prefer non-temporal versions
3280 def VMOVNTPSmr : VPSI<0x2B, MRMDestMem, (outs),
3281 (ins f128mem:$dst, VR128:$src),
3282 "movntps\t{$src, $dst|$dst, $src}",
[(alignednontemporalstore (v4f32 VR128:$src),
addr:$dst)],
IIC_SSE_MOVNT>, VEX;
3286 def VMOVNTPDmr : VPDI<0x2B, MRMDestMem, (outs),
3287 (ins f128mem:$dst, VR128:$src),
3288 "movntpd\t{$src, $dst|$dst, $src}",
[(alignednontemporalstore (v2f64 VR128:$src),
addr:$dst)],
IIC_SSE_MOVNT>, VEX;
3293 let ExeDomain = SSEPackedInt in
3294 def VMOVNTDQmr : VPDI<0xE7, MRMDestMem, (outs),
3295 (ins f128mem:$dst, VR128:$src),
3296 "movntdq\t{$src, $dst|$dst, $src}",
[(alignednontemporalstore (v2i64 VR128:$src),
addr:$dst)],
IIC_SSE_MOVNT>, VEX;
3301 def : Pat<(alignednontemporalstore (v2i64 VR128:$src), addr:$dst),
3302 (VMOVNTDQmr addr:$dst, VR128:$src)>, Requires<[HasAVX]>;
3304 def VMOVNTPSYmr : VPSI<0x2B, MRMDestMem, (outs),
3305 (ins f256mem:$dst, VR256:$src),
3306 "movntps\t{$src, $dst|$dst, $src}",
[(alignednontemporalstore (v8f32 VR256:$src),
addr:$dst)],
IIC_SSE_MOVNT>, VEX;
3310 def VMOVNTPDYmr : VPDI<0x2B, MRMDestMem, (outs),
3311 (ins f256mem:$dst, VR256:$src),
3312 "movntpd\t{$src, $dst|$dst, $src}",
[(alignednontemporalstore (v4f64 VR256:$src),
addr:$dst)],
IIC_SSE_MOVNT>, VEX;
3316 let ExeDomain = SSEPackedInt in
3317 def VMOVNTDQYmr : VPDI<0xE7, MRMDestMem, (outs),
3318 (ins f256mem:$dst, VR256:$src),
3319 "movntdq\t{$src, $dst|$dst, $src}",
[(alignednontemporalstore (v4i64 VR256:$src),
addr:$dst)],
IIC_SSE_MOVNT>, VEX;
}
3325 let AddedComplexity = 400 in { // Prefer non-temporal versions
3326 def MOVNTPSmr : PSI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
3327 "movntps\t{$src, $dst|$dst, $src}",
[(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)],
IIC_SSE_MOVNT>;
3330 def MOVNTPDmr : PDI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
3331 "movntpd\t{$src, $dst|$dst, $src}",
[(alignednontemporalstore (v2f64 VR128:$src), addr:$dst)],
IIC_SSE_MOVNT>;
3335 let ExeDomain = SSEPackedInt in
3336 def MOVNTDQmr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
3337 "movntdq\t{$src, $dst|$dst, $src}",
[(alignednontemporalstore (v2i64 VR128:$src), addr:$dst)],
IIC_SSE_MOVNT>;
3341 def : Pat<(alignednontemporalstore (v2i64 VR128:$src), addr:$dst),
3342 (MOVNTDQmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
// There is no AVX form for the instructions below this point.
3345 def MOVNTImr : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
3346 "movnti{l}\t{$src, $dst|$dst, $src}",
[(nontemporalstore (i32 GR32:$src), addr:$dst)],
IIC_SSE_MOVNT>,
3349 TB, Requires<[HasSSE2]>;
3350 def MOVNTI_64mr : RI<0xC3, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
3351 "movnti{q}\t{$src, $dst|$dst, $src}",
[(nontemporalstore (i64 GR64:$src), addr:$dst)],
IIC_SSE_MOVNT>,
TB, Requires<[HasSSE2]>;
}
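// Non-temporal stores are weakly ordered with respect to other stores, so
// streaming code should normally execute an SFENCE before the written data
// is handed off to another agent.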
3357 //===----------------------------------------------------------------------===//
3358 // SSE 1 & 2 - Prefetch and memory fence
3359 //===----------------------------------------------------------------------===//
3361 // Prefetch intrinsic.
3362 let Predicates = [HasSSE1] in {
3363 def PREFETCHT0 : I<0x18, MRM1m, (outs), (ins i8mem:$src),
3364 "prefetcht0\t$src", [(prefetch addr:$src, imm, (i32 3), (i32 1))],
3365 IIC_SSE_PREFETCH>, TB;
3366 def PREFETCHT1 : I<0x18, MRM2m, (outs), (ins i8mem:$src),
3367 "prefetcht1\t$src", [(prefetch addr:$src, imm, (i32 2), (i32 1))],
3368 IIC_SSE_PREFETCH>, TB;
3369 def PREFETCHT2 : I<0x18, MRM3m, (outs), (ins i8mem:$src),
3370 "prefetcht2\t$src", [(prefetch addr:$src, imm, (i32 1), (i32 1))],
3371 IIC_SSE_PREFETCH>, TB;
3372 def PREFETCHNTA : I<0x18, MRM0m, (outs), (ins i8mem:$src),
3373 "prefetchnta\t$src", [(prefetch addr:$src, imm, (i32 0), (i32 1))],
IIC_SSE_PREFETCH>, TB;
}
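// In the prefetch patterns above the operands are (address, rw, locality,
// cache type): prefetcht0/t1/t2 correspond to temporal-locality hints
// 3/2/1, prefetchnta to locality 0, and all four use cache type 1 (data).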
3378 def CLFLUSH : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
3379 "clflush\t$src", [(int_x86_sse2_clflush addr:$src)],
3380 IIC_SSE_PREFETCH>, TB, Requires<[HasSSE2]>;
3382 // Pause. This "instruction" is encoded as "rep; nop", so even though it
3383 // was introduced with SSE2, it's backward compatible.
3384 def PAUSE : I<0x90, RawFrm, (outs), (ins), "pause", [], IIC_SSE_PAUSE>, REP;
3386 // Load, store, and memory fence
3387 def SFENCE : I<0xAE, MRM_F8, (outs), (ins),
3388 "sfence", [(int_x86_sse_sfence)], IIC_SSE_SFENCE>,
3389 TB, Requires<[HasSSE1]>;
3390 def LFENCE : I<0xAE, MRM_E8, (outs), (ins),
3391 "lfence", [(int_x86_sse2_lfence)], IIC_SSE_LFENCE>,
3392 TB, Requires<[HasSSE2]>;
3393 def MFENCE : I<0xAE, MRM_F0, (outs), (ins),
3394 "mfence", [(int_x86_sse2_mfence)], IIC_SSE_MFENCE>,
3395 TB, Requires<[HasSSE2]>;
3397 def : Pat<(X86SFence), (SFENCE)>;
3398 def : Pat<(X86LFence), (LFENCE)>;
3399 def : Pat<(X86MFence), (MFENCE)>;
3401 //===----------------------------------------------------------------------===//
3402 // SSE 1 & 2 - Load/Store XCSR register
3403 //===----------------------------------------------------------------------===//
3405 def VLDMXCSR : VPSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
3406 "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)],
3407 IIC_SSE_LDMXCSR>, VEX;
3408 def VSTMXCSR : VPSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
3409 "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)],
3410 IIC_SSE_STMXCSR>, VEX;
3412 def LDMXCSR : PSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
3413 "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)],
3415 def STMXCSR : PSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
3416 "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)],
3419 //===---------------------------------------------------------------------===//
3420 // SSE2 - Move Aligned/Unaligned Packed Integer Instructions
3421 //===---------------------------------------------------------------------===//
3423 let ExeDomain = SSEPackedInt in { // SSE integer instructions
3425 let neverHasSideEffects = 1 in {
3426 def VMOVDQArr : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3427 "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_RR>,
3429 def VMOVDQAYrr : VPDI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
3430 "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_RR>,
3433 def VMOVDQUrr : VSSI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3434 "movdqu\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVU_P_RR>,
3436 def VMOVDQUYrr : VSSI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
3437 "movdqu\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVU_P_RR>,
3441 let isCodeGenOnly = 1 in {
3442 def VMOVDQArr_REV : VPDI<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
3443 "movdqa\t{$src, $dst|$dst, $src}", [],
3446 def VMOVDQAYrr_REV : VPDI<0x7F, MRMDestReg, (outs VR256:$dst), (ins VR256:$src),
3447 "movdqa\t{$src, $dst|$dst, $src}", [],
3450 def VMOVDQUrr_REV : VSSI<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
3451 "movdqu\t{$src, $dst|$dst, $src}", [],
3454 def VMOVDQUYrr_REV : VSSI<0x7F, MRMDestReg, (outs VR256:$dst), (ins VR256:$src),
3455 "movdqu\t{$src, $dst|$dst, $src}", [],
3460 let canFoldAsLoad = 1, mayLoad = 1 in {
3461 def VMOVDQArm : VPDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3462 "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_RM>,
3464 def VMOVDQAYrm : VPDI<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
3465 "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_RM>,
3467 let Predicates = [HasAVX] in {
3468 def VMOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3469 "vmovdqu\t{$src, $dst|$dst, $src}",[], IIC_SSE_MOVU_P_RM>,
3471 def VMOVDQUYrm : I<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
3472 "vmovdqu\t{$src, $dst|$dst, $src}",[], IIC_SSE_MOVU_P_RM>,
3477 let mayStore = 1 in {
3478 def VMOVDQAmr : VPDI<0x7F, MRMDestMem, (outs),
3479 (ins i128mem:$dst, VR128:$src),
3480 "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_MR>,
3482 def VMOVDQAYmr : VPDI<0x7F, MRMDestMem, (outs),
3483 (ins i256mem:$dst, VR256:$src),
3484 "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_MR>,
3486 let Predicates = [HasAVX] in {
3487 def VMOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
3488 "vmovdqu\t{$src, $dst|$dst, $src}",[], IIC_SSE_MOVU_P_MR>,
3490 def VMOVDQUYmr : I<0x7F, MRMDestMem, (outs), (ins i256mem:$dst, VR256:$src),
3491 "vmovdqu\t{$src, $dst|$dst, $src}",[], IIC_SSE_MOVU_P_MR>,
3496 let neverHasSideEffects = 1 in
3497 def MOVDQArr : PDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3498 "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_RR>;
3500 def MOVDQUrr : I<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3501 "movdqu\t{$src, $dst|$dst, $src}",
3502 [], IIC_SSE_MOVU_P_RR>, XS, Requires<[HasSSE2]>;
3505 let isCodeGenOnly = 1 in {
3506 def MOVDQArr_REV : PDI<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
3507 "movdqa\t{$src, $dst|$dst, $src}", [],
3510 def MOVDQUrr_REV : I<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
3511 "movdqu\t{$src, $dst|$dst, $src}",
[], IIC_SSE_MOVU_P_RR>, XS, Requires<[HasSSE2]>;
}
3515 let canFoldAsLoad = 1, mayLoad = 1 in {
3516 def MOVDQArm : PDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3517 "movdqa\t{$src, $dst|$dst, $src}",
[/*(set VR128:$dst, (alignedloadv2i64 addr:$src))*/],
IIC_SSE_MOVA_P_RM>;
3520 def MOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3521 "movdqu\t{$src, $dst|$dst, $src}",
[/*(set VR128:$dst, (loadv2i64 addr:$src))*/],
IIC_SSE_MOVU_P_RM>,
XS, Requires<[HasSSE2]>;
}
3527 let mayStore = 1 in {
3528 def MOVDQAmr : PDI<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
3529 "movdqa\t{$src, $dst|$dst, $src}",
[/*(alignedstore (v2i64 VR128:$src), addr:$dst)*/],
IIC_SSE_MOVA_P_MR>;
3532 def MOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
3533 "movdqu\t{$src, $dst|$dst, $src}",
[/*(store (v2i64 VR128:$src), addr:$dst)*/],
IIC_SSE_MOVU_P_MR>,
XS, Requires<[HasSSE2]>;
}
// Intrinsic forms of the MOVDQU store
3540 def VMOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
3541 "vmovdqu\t{$src, $dst|$dst, $src}",
[(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)],
IIC_SSE_MOVU_P_MR>,
3544 XS, VEX, Requires<[HasAVX]>;
3546 def MOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
3547 "movdqu\t{$src, $dst|$dst, $src}",
[(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)],
IIC_SSE_MOVU_P_MR>,
3550 XS, Requires<[HasSSE2]>;
3552 } // ExeDomain = SSEPackedInt
3554 let Predicates = [HasAVX] in {
3555 def : Pat<(int_x86_avx_storeu_dq_256 addr:$dst, VR256:$src),
(VMOVDQUYmr addr:$dst, VR256:$src)>;
}
3559 //===---------------------------------------------------------------------===//
3560 // SSE2 - Packed Integer Arithmetic Instructions
3561 //===---------------------------------------------------------------------===//
3563 def SSE_PMADD : OpndItins<
IIC_SSE_PMADD, IIC_SSE_PMADD
>;
3567 let ExeDomain = SSEPackedInt in { // SSE integer instructions
3569 multiclass PDI_binop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId,
3570 RegisterClass RC, PatFrag memop_frag,
X86MemOperand x86memop,
OpndItins itins,
bit IsCommutable = 0,
bit Is2Addr = 1> {
3575 let isCommutable = IsCommutable in
3576 def rr : PDI<opc, MRMSrcReg, (outs RC:$dst),
(ins RC:$src1, RC:$src2),
!if(Is2Addr,
3579 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3580 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3581 [(set RC:$dst, (IntId RC:$src1, RC:$src2))], itins.rr>;
3582 def rm : PDI<opc, MRMSrcMem, (outs RC:$dst),
(ins RC:$src1, x86memop:$src2),
!if(Is2Addr,
3585 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3586 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
[(set RC:$dst, (IntId RC:$src1, (bitconvert (memop_frag addr:$src2))))],
itins.rm>;
}
3591 multiclass PDI_binop_rmi<bits<8> opc, bits<8> opc2, Format ImmForm,
3592 string OpcodeStr, SDNode OpNode,
3593 SDNode OpNode2, RegisterClass RC,
3594 ValueType DstVT, ValueType SrcVT, PatFrag bc_frag,
ShiftOpndItins itins,
bit Is2Addr = 1> {
// src2 is always 128-bit: even the 256-bit AVX2 forms take their shift
// count in an XMM register (or a 128-bit load).
3598 def rr : PDI<opc, MRMSrcReg, (outs RC:$dst),
(ins RC:$src1, VR128:$src2),
!if(Is2Addr,
3601 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3602 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
[(set RC:$dst, (DstVT (OpNode RC:$src1, (SrcVT VR128:$src2))))],
itins.rr>;
3605 def rm : PDI<opc, MRMSrcMem, (outs RC:$dst),
(ins RC:$src1, i128mem:$src2),
!if(Is2Addr,
3608 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3609 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3610 [(set RC:$dst, (DstVT (OpNode RC:$src1,
3611 (bc_frag (memopv2i64 addr:$src2)))))], itins.rm>;
3612 def ri : PDIi8<opc2, ImmForm, (outs RC:$dst),
(ins RC:$src1, i32i8imm:$src2),
!if(Is2Addr,
3615 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3616 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
[(set RC:$dst, (DstVT (OpNode2 RC:$src1, (i32 imm:$src2))))], itins.ri>;
}
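// For example, the PSLLW definitions below instantiate this multiclass so
// that PSLLWrr/PSLLWrm take the shift count from an XMM register or from
// memory (always a 128-bit operand), while PSLLWri matches the
// immediate-count node (X86vshli) directly.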
/// PDI_binop_rm2 - Simple SSE2 binary operator with different src and dst
/// types.
3621 multiclass PDI_binop_rm2<bits<8> opc, string OpcodeStr, SDNode OpNode,
3622 ValueType DstVT, ValueType SrcVT, RegisterClass RC,
PatFrag memop_frag, X86MemOperand x86memop,
OpndItins itins,
3625 bit IsCommutable = 0, bit Is2Addr = 1> {
3626 let isCommutable = IsCommutable in
3627 def rr : PDI<opc, MRMSrcReg, (outs RC:$dst),
(ins RC:$src1, RC:$src2),
!if(Is2Addr,
3630 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3631 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3632 [(set RC:$dst, (DstVT (OpNode (SrcVT RC:$src1), RC:$src2)))]>;
3633 def rm : PDI<opc, MRMSrcMem, (outs RC:$dst),
(ins RC:$src1, x86memop:$src2),
!if(Is2Addr,
3636 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3637 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3638 [(set RC:$dst, (DstVT (OpNode (SrcVT RC:$src1),
(bitconvert (memop_frag addr:$src2)))))]>;
}
3641 } // ExeDomain = SSEPackedInt
3643 // 128-bit Integer Arithmetic
3645 let Predicates = [HasAVX] in {
3646 defm VPADDB : PDI_binop_rm<0xFC, "vpaddb", add, v16i8, VR128, memopv2i64,
i128mem, SSE_INTALU_ITINS_P, 1, 0 /*3addr*/>,
VEX_4V;
3649 defm VPADDW : PDI_binop_rm<0xFD, "vpaddw", add, v8i16, VR128, memopv2i64,
3650 i128mem, SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
3651 defm VPADDD : PDI_binop_rm<0xFE, "vpaddd", add, v4i32, VR128, memopv2i64,
3652 i128mem, SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
3653 defm VPADDQ : PDI_binop_rm<0xD4, "vpaddq", add, v2i64, VR128, memopv2i64,
3654 i128mem, SSE_INTALUQ_ITINS_P, 1, 0>, VEX_4V;
3655 defm VPMULLW : PDI_binop_rm<0xD5, "vpmullw", mul, v8i16, VR128, memopv2i64,
3656 i128mem, SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V;
3657 defm VPSUBB : PDI_binop_rm<0xF8, "vpsubb", sub, v16i8, VR128, memopv2i64,
3658 i128mem, SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
3659 defm VPSUBW : PDI_binop_rm<0xF9, "vpsubw", sub, v8i16, VR128, memopv2i64,
3660 i128mem, SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
3661 defm VPSUBD : PDI_binop_rm<0xFA, "vpsubd", sub, v4i32, VR128, memopv2i64,
3662 i128mem, SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
3663 defm VPSUBQ : PDI_binop_rm<0xFB, "vpsubq", sub, v2i64, VR128, memopv2i64,
3664 i128mem, SSE_INTALUQ_ITINS_P, 0, 0>, VEX_4V;
3665 defm VPMULUDQ : PDI_binop_rm2<0xF4, "vpmuludq", X86pmuludq, v2i64, v4i32, VR128,
memopv2i64, i128mem, SSE_INTMUL_ITINS_P, 1, 0>,
VEX_4V;
3670 defm VPSUBSB : PDI_binop_rm_int<0xE8, "vpsubsb" , int_x86_sse2_psubs_b,
3671 VR128, memopv2i64, i128mem,
3672 SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
3673 defm VPSUBSW : PDI_binop_rm_int<0xE9, "vpsubsw" , int_x86_sse2_psubs_w,
3674 VR128, memopv2i64, i128mem,
3675 SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
3676 defm VPSUBUSB : PDI_binop_rm_int<0xD8, "vpsubusb", int_x86_sse2_psubus_b,
3677 VR128, memopv2i64, i128mem,
3678 SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
3679 defm VPSUBUSW : PDI_binop_rm_int<0xD9, "vpsubusw", int_x86_sse2_psubus_w,
3680 VR128, memopv2i64, i128mem,
3681 SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
3682 defm VPADDSB : PDI_binop_rm_int<0xEC, "vpaddsb" , int_x86_sse2_padds_b,
3683 VR128, memopv2i64, i128mem,
3684 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
3685 defm VPADDSW : PDI_binop_rm_int<0xED, "vpaddsw" , int_x86_sse2_padds_w,
3686 VR128, memopv2i64, i128mem,
3687 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
3688 defm VPADDUSB : PDI_binop_rm_int<0xDC, "vpaddusb", int_x86_sse2_paddus_b,
3689 VR128, memopv2i64, i128mem,
3690 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
3691 defm VPADDUSW : PDI_binop_rm_int<0xDD, "vpaddusw", int_x86_sse2_paddus_w,
3692 VR128, memopv2i64, i128mem,
3693 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
3694 defm VPMULHUW : PDI_binop_rm_int<0xE4, "vpmulhuw", int_x86_sse2_pmulhu_w,
3695 VR128, memopv2i64, i128mem,
3696 SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V;
3697 defm VPMULHW : PDI_binop_rm_int<0xE5, "vpmulhw" , int_x86_sse2_pmulh_w,
3698 VR128, memopv2i64, i128mem,
3699 SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V;
3700 defm VPMADDWD : PDI_binop_rm_int<0xF5, "vpmaddwd", int_x86_sse2_pmadd_wd,
3701 VR128, memopv2i64, i128mem,
3702 SSE_PMADD, 1, 0>, VEX_4V;
3703 defm VPAVGB : PDI_binop_rm_int<0xE0, "vpavgb", int_x86_sse2_pavg_b,
3704 VR128, memopv2i64, i128mem,
3705 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
3706 defm VPAVGW : PDI_binop_rm_int<0xE3, "vpavgw", int_x86_sse2_pavg_w,
3707 VR128, memopv2i64, i128mem,
3708 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
3709 defm VPMINUB : PDI_binop_rm_int<0xDA, "vpminub", int_x86_sse2_pminu_b,
3710 VR128, memopv2i64, i128mem,
3711 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
3712 defm VPMINSW : PDI_binop_rm_int<0xEA, "vpminsw", int_x86_sse2_pmins_w,
3713 VR128, memopv2i64, i128mem,
3714 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
3715 defm VPMAXUB : PDI_binop_rm_int<0xDE, "vpmaxub", int_x86_sse2_pmaxu_b,
3716 VR128, memopv2i64, i128mem,
3717 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
3718 defm VPMAXSW : PDI_binop_rm_int<0xEE, "vpmaxsw", int_x86_sse2_pmaxs_w,
3719 VR128, memopv2i64, i128mem,
3720 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
3721 defm VPSADBW : PDI_binop_rm_int<0xF6, "vpsadbw", int_x86_sse2_psad_bw,
3722 VR128, memopv2i64, i128mem,
SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
}
3726 let Predicates = [HasAVX2] in {
3727 defm VPADDBY : PDI_binop_rm<0xFC, "vpaddb", add, v32i8, VR256, memopv4i64,
3728 i256mem, SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
3729 defm VPADDWY : PDI_binop_rm<0xFD, "vpaddw", add, v16i16, VR256, memopv4i64,
3730 i256mem, SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
3731 defm VPADDDY : PDI_binop_rm<0xFE, "vpaddd", add, v8i32, VR256, memopv4i64,
3732 i256mem, SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
3733 defm VPADDQY : PDI_binop_rm<0xD4, "vpaddq", add, v4i64, VR256, memopv4i64,
3734 i256mem, SSE_INTALUQ_ITINS_P, 1, 0>, VEX_4V;
3735 defm VPMULLWY : PDI_binop_rm<0xD5, "vpmullw", mul, v16i16, VR256, memopv4i64,
3736 i256mem, SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V;
3737 defm VPSUBBY : PDI_binop_rm<0xF8, "vpsubb", sub, v32i8, VR256, memopv4i64,
3738 i256mem, SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
3739 defm VPSUBWY : PDI_binop_rm<0xF9, "vpsubw", sub, v16i16,VR256, memopv4i64,
3740 i256mem, SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
3741 defm VPSUBDY : PDI_binop_rm<0xFA, "vpsubd", sub, v8i32, VR256, memopv4i64,
3742 i256mem, SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
3743 defm VPSUBQY : PDI_binop_rm<0xFB, "vpsubq", sub, v4i64, VR256, memopv4i64,
3744 i256mem, SSE_INTALUQ_ITINS_P, 0, 0>, VEX_4V;
3745 defm VPMULUDQY : PDI_binop_rm2<0xF4, "vpmuludq", X86pmuludq, v4i64, v8i32,
3746 VR256, memopv4i64, i256mem,
3747 SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V;
3750 defm VPSUBSBY : PDI_binop_rm_int<0xE8, "vpsubsb" , int_x86_avx2_psubs_b,
3751 VR256, memopv4i64, i256mem,
3752 SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
3753 defm VPSUBSWY : PDI_binop_rm_int<0xE9, "vpsubsw" , int_x86_avx2_psubs_w,
3754 VR256, memopv4i64, i256mem,
3755 SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
3756 defm VPSUBUSBY : PDI_binop_rm_int<0xD8, "vpsubusb", int_x86_avx2_psubus_b,
3757 VR256, memopv4i64, i256mem,
3758 SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
3759 defm VPSUBUSWY : PDI_binop_rm_int<0xD9, "vpsubusw", int_x86_avx2_psubus_w,
3760 VR256, memopv4i64, i256mem,
3761 SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
3762 defm VPADDSBY : PDI_binop_rm_int<0xEC, "vpaddsb" , int_x86_avx2_padds_b,
3763 VR256, memopv4i64, i256mem,
3764 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
3765 defm VPADDSWY : PDI_binop_rm_int<0xED, "vpaddsw" , int_x86_avx2_padds_w,
3766 VR256, memopv4i64, i256mem,
3767 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
3768 defm VPADDUSBY : PDI_binop_rm_int<0xDC, "vpaddusb", int_x86_avx2_paddus_b,
3769 VR256, memopv4i64, i256mem,
3770 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
3771 defm VPADDUSWY : PDI_binop_rm_int<0xDD, "vpaddusw", int_x86_avx2_paddus_w,
3772 VR256, memopv4i64, i256mem,
3773 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
3774 defm VPMULHUWY : PDI_binop_rm_int<0xE4, "vpmulhuw", int_x86_avx2_pmulhu_w,
3775 VR256, memopv4i64, i256mem,
3776 SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V;
3777 defm VPMULHWY : PDI_binop_rm_int<0xE5, "vpmulhw" , int_x86_avx2_pmulh_w,
3778 VR256, memopv4i64, i256mem,
3779 SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V;
3780 defm VPMADDWDY : PDI_binop_rm_int<0xF5, "vpmaddwd", int_x86_avx2_pmadd_wd,
3781 VR256, memopv4i64, i256mem,
3782 SSE_PMADD, 1, 0>, VEX_4V;
3783 defm VPAVGBY : PDI_binop_rm_int<0xE0, "vpavgb", int_x86_avx2_pavg_b,
3784 VR256, memopv4i64, i256mem,
3785 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
3786 defm VPAVGWY : PDI_binop_rm_int<0xE3, "vpavgw", int_x86_avx2_pavg_w,
3787 VR256, memopv4i64, i256mem,
3788 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
3789 defm VPMINUBY : PDI_binop_rm_int<0xDA, "vpminub", int_x86_avx2_pminu_b,
3790 VR256, memopv4i64, i256mem,
3791 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
3792 defm VPMINSWY : PDI_binop_rm_int<0xEA, "vpminsw", int_x86_avx2_pmins_w,
3793 VR256, memopv4i64, i256mem,
3794 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
3795 defm VPMAXUBY : PDI_binop_rm_int<0xDE, "vpmaxub", int_x86_avx2_pmaxu_b,
3796 VR256, memopv4i64, i256mem,
3797 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
3798 defm VPMAXSWY : PDI_binop_rm_int<0xEE, "vpmaxsw", int_x86_avx2_pmaxs_w,
3799 VR256, memopv4i64, i256mem,
3800 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
3801 defm VPSADBWY : PDI_binop_rm_int<0xF6, "vpsadbw", int_x86_avx2_psad_bw,
3802 VR256, memopv4i64, i256mem,
SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
}
3806 let Constraints = "$src1 = $dst" in {
3807 defm PADDB : PDI_binop_rm<0xFC, "paddb", add, v16i8, VR128, memopv2i64,
3808 i128mem, SSE_INTALU_ITINS_P, 1>;
3809 defm PADDW : PDI_binop_rm<0xFD, "paddw", add, v8i16, VR128, memopv2i64,
3810 i128mem, SSE_INTALU_ITINS_P, 1>;
3811 defm PADDD : PDI_binop_rm<0xFE, "paddd", add, v4i32, VR128, memopv2i64,
3812 i128mem, SSE_INTALU_ITINS_P, 1>;
3813 defm PADDQ : PDI_binop_rm<0xD4, "paddq", add, v2i64, VR128, memopv2i64,
3814 i128mem, SSE_INTALUQ_ITINS_P, 1>;
3815 defm PMULLW : PDI_binop_rm<0xD5, "pmullw", mul, v8i16, VR128, memopv2i64,
3816 i128mem, SSE_INTMUL_ITINS_P, 1>;
3817 defm PSUBB : PDI_binop_rm<0xF8, "psubb", sub, v16i8, VR128, memopv2i64,
3818 i128mem, SSE_INTALU_ITINS_P>;
3819 defm PSUBW : PDI_binop_rm<0xF9, "psubw", sub, v8i16, VR128, memopv2i64,
3820 i128mem, SSE_INTALU_ITINS_P>;
3821 defm PSUBD : PDI_binop_rm<0xFA, "psubd", sub, v4i32, VR128, memopv2i64,
3822 i128mem, SSE_INTALU_ITINS_P>;
3823 defm PSUBQ : PDI_binop_rm<0xFB, "psubq", sub, v2i64, VR128, memopv2i64,
3824 i128mem, SSE_INTALUQ_ITINS_P>;
3825 defm PMULUDQ : PDI_binop_rm2<0xF4, "pmuludq", X86pmuludq, v2i64, v4i32, VR128,
3826 memopv2i64, i128mem, SSE_INTMUL_ITINS_P, 1>;
3829 defm PSUBSB : PDI_binop_rm_int<0xE8, "psubsb" , int_x86_sse2_psubs_b,
3830 VR128, memopv2i64, i128mem,
3831 SSE_INTALU_ITINS_P>;
3832 defm PSUBSW : PDI_binop_rm_int<0xE9, "psubsw" , int_x86_sse2_psubs_w,
3833 VR128, memopv2i64, i128mem,
3834 SSE_INTALU_ITINS_P>;
3835 defm PSUBUSB : PDI_binop_rm_int<0xD8, "psubusb", int_x86_sse2_psubus_b,
3836 VR128, memopv2i64, i128mem,
3837 SSE_INTALU_ITINS_P>;
3838 defm PSUBUSW : PDI_binop_rm_int<0xD9, "psubusw", int_x86_sse2_psubus_w,
3839 VR128, memopv2i64, i128mem,
3840 SSE_INTALU_ITINS_P>;
3841 defm PADDSB : PDI_binop_rm_int<0xEC, "paddsb" , int_x86_sse2_padds_b,
3842 VR128, memopv2i64, i128mem,
3843 SSE_INTALU_ITINS_P, 1>;
3844 defm PADDSW : PDI_binop_rm_int<0xED, "paddsw" , int_x86_sse2_padds_w,
3845 VR128, memopv2i64, i128mem,
3846 SSE_INTALU_ITINS_P, 1>;
3847 defm PADDUSB : PDI_binop_rm_int<0xDC, "paddusb", int_x86_sse2_paddus_b,
3848 VR128, memopv2i64, i128mem,
3849 SSE_INTALU_ITINS_P, 1>;
3850 defm PADDUSW : PDI_binop_rm_int<0xDD, "paddusw", int_x86_sse2_paddus_w,
3851 VR128, memopv2i64, i128mem,
3852 SSE_INTALU_ITINS_P, 1>;
3853 defm PMULHUW : PDI_binop_rm_int<0xE4, "pmulhuw", int_x86_sse2_pmulhu_w,
3854 VR128, memopv2i64, i128mem,
3855 SSE_INTMUL_ITINS_P, 1>;
3856 defm PMULHW : PDI_binop_rm_int<0xE5, "pmulhw" , int_x86_sse2_pmulh_w,
3857 VR128, memopv2i64, i128mem,
3858 SSE_INTMUL_ITINS_P, 1>;
3859 defm PMADDWD : PDI_binop_rm_int<0xF5, "pmaddwd", int_x86_sse2_pmadd_wd,
VR128, memopv2i64, i128mem,
SSE_PMADD, 1>;
3862 defm PAVGB : PDI_binop_rm_int<0xE0, "pavgb", int_x86_sse2_pavg_b,
3863 VR128, memopv2i64, i128mem,
3864 SSE_INTALU_ITINS_P, 1>;
3865 defm PAVGW : PDI_binop_rm_int<0xE3, "pavgw", int_x86_sse2_pavg_w,
3866 VR128, memopv2i64, i128mem,
3867 SSE_INTALU_ITINS_P, 1>;
3868 defm PMINUB : PDI_binop_rm_int<0xDA, "pminub", int_x86_sse2_pminu_b,
3869 VR128, memopv2i64, i128mem,
3870 SSE_INTALU_ITINS_P, 1>;
3871 defm PMINSW : PDI_binop_rm_int<0xEA, "pminsw", int_x86_sse2_pmins_w,
3872 VR128, memopv2i64, i128mem,
3873 SSE_INTALU_ITINS_P, 1>;
3874 defm PMAXUB : PDI_binop_rm_int<0xDE, "pmaxub", int_x86_sse2_pmaxu_b,
3875 VR128, memopv2i64, i128mem,
3876 SSE_INTALU_ITINS_P, 1>;
3877 defm PMAXSW : PDI_binop_rm_int<0xEE, "pmaxsw", int_x86_sse2_pmaxs_w,
3878 VR128, memopv2i64, i128mem,
3879 SSE_INTALU_ITINS_P, 1>;
3880 defm PSADBW : PDI_binop_rm_int<0xF6, "psadbw", int_x86_sse2_psad_bw,
3881 VR128, memopv2i64, i128mem,
3882 SSE_INTALU_ITINS_P, 1>;
3884 } // Constraints = "$src1 = $dst"
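// Note that the saturating padds/paddus and psubs/psubus families above
// are modeled as intrinsics rather than plain add/sub nodes: paddusb, for
// example, clamps 0xFF + 0x01 to 0xFF instead of wrapping to 0x00.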
3886 //===---------------------------------------------------------------------===//
3887 // SSE2 - Packed Integer Logical Instructions
3888 //===---------------------------------------------------------------------===//
3890 let Predicates = [HasAVX] in {
3891 defm VPSLLW : PDI_binop_rmi<0xF1, 0x71, MRM6r, "vpsllw", X86vshl, X86vshli,
3892 VR128, v8i16, v8i16, bc_v8i16,
3893 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
3894 defm VPSLLD : PDI_binop_rmi<0xF2, 0x72, MRM6r, "vpslld", X86vshl, X86vshli,
3895 VR128, v4i32, v4i32, bc_v4i32,
3896 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
3897 defm VPSLLQ : PDI_binop_rmi<0xF3, 0x73, MRM6r, "vpsllq", X86vshl, X86vshli,
3898 VR128, v2i64, v2i64, bc_v2i64,
3899 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
3901 defm VPSRLW : PDI_binop_rmi<0xD1, 0x71, MRM2r, "vpsrlw", X86vsrl, X86vsrli,
3902 VR128, v8i16, v8i16, bc_v8i16,
3903 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
3904 defm VPSRLD : PDI_binop_rmi<0xD2, 0x72, MRM2r, "vpsrld", X86vsrl, X86vsrli,
3905 VR128, v4i32, v4i32, bc_v4i32,
3906 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
3907 defm VPSRLQ : PDI_binop_rmi<0xD3, 0x73, MRM2r, "vpsrlq", X86vsrl, X86vsrli,
3908 VR128, v2i64, v2i64, bc_v2i64,
3909 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
3911 defm VPSRAW : PDI_binop_rmi<0xE1, 0x71, MRM4r, "vpsraw", X86vsra, X86vsrai,
3912 VR128, v8i16, v8i16, bc_v8i16,
3913 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
3914 defm VPSRAD : PDI_binop_rmi<0xE2, 0x72, MRM4r, "vpsrad", X86vsra, X86vsrai,
3915 VR128, v4i32, v4i32, bc_v4i32,
3916 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
3918 let ExeDomain = SSEPackedInt in {
3919 // 128-bit logical shifts.
3920 def VPSLLDQri : PDIi8<0x73, MRM7r,
3921 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
3922 "vpslldq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
3924 (int_x86_sse2_psll_dq_bs VR128:$src1, imm:$src2))]>,
3926 def VPSRLDQri : PDIi8<0x73, MRM3r,
3927 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
3928 "vpsrldq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
3930 (int_x86_sse2_psrl_dq_bs VR128:$src1, imm:$src2))]>,
// PSRADQri doesn't exist in SSE[1-3].
}
3934 } // Predicates = [HasAVX]
3936 let Predicates = [HasAVX2] in {
3937 defm VPSLLWY : PDI_binop_rmi<0xF1, 0x71, MRM6r, "vpsllw", X86vshl, X86vshli,
3938 VR256, v16i16, v8i16, bc_v8i16,
3939 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
3940 defm VPSLLDY : PDI_binop_rmi<0xF2, 0x72, MRM6r, "vpslld", X86vshl, X86vshli,
3941 VR256, v8i32, v4i32, bc_v4i32,
3942 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
3943 defm VPSLLQY : PDI_binop_rmi<0xF3, 0x73, MRM6r, "vpsllq", X86vshl, X86vshli,
3944 VR256, v4i64, v2i64, bc_v2i64,
3945 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
3947 defm VPSRLWY : PDI_binop_rmi<0xD1, 0x71, MRM2r, "vpsrlw", X86vsrl, X86vsrli,
3948 VR256, v16i16, v8i16, bc_v8i16,
3949 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
3950 defm VPSRLDY : PDI_binop_rmi<0xD2, 0x72, MRM2r, "vpsrld", X86vsrl, X86vsrli,
3951 VR256, v8i32, v4i32, bc_v4i32,
3952 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
3953 defm VPSRLQY : PDI_binop_rmi<0xD3, 0x73, MRM2r, "vpsrlq", X86vsrl, X86vsrli,
3954 VR256, v4i64, v2i64, bc_v2i64,
3955 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
3957 defm VPSRAWY : PDI_binop_rmi<0xE1, 0x71, MRM4r, "vpsraw", X86vsra, X86vsrai,
3958 VR256, v16i16, v8i16, bc_v8i16,
3959 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
3960 defm VPSRADY : PDI_binop_rmi<0xE2, 0x72, MRM4r, "vpsrad", X86vsra, X86vsrai,
3961 VR256, v8i32, v4i32, bc_v4i32,
3962 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
3964 let ExeDomain = SSEPackedInt in {
3965 // 256-bit logical shifts.
3966 def VPSLLDQYri : PDIi8<0x73, MRM7r,
3967 (outs VR256:$dst), (ins VR256:$src1, i32i8imm:$src2),
3968 "vpslldq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
3970 (int_x86_avx2_psll_dq_bs VR256:$src1, imm:$src2))]>,
3972 def VPSRLDQYri : PDIi8<0x73, MRM3r,
3973 (outs VR256:$dst), (ins VR256:$src1, i32i8imm:$src2),
3974 "vpsrldq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
3976 (int_x86_avx2_psrl_dq_bs VR256:$src1, imm:$src2))]>,
// PSRADQYri doesn't exist in SSE[1-3].
}
3980 } // Predicates = [HasAVX2]
3982 let Constraints = "$src1 = $dst" in {
3983 defm PSLLW : PDI_binop_rmi<0xF1, 0x71, MRM6r, "psllw", X86vshl, X86vshli,
3984 VR128, v8i16, v8i16, bc_v8i16,
3985 SSE_INTSHIFT_ITINS_P>;
3986 defm PSLLD : PDI_binop_rmi<0xF2, 0x72, MRM6r, "pslld", X86vshl, X86vshli,
3987 VR128, v4i32, v4i32, bc_v4i32,
3988 SSE_INTSHIFT_ITINS_P>;
3989 defm PSLLQ : PDI_binop_rmi<0xF3, 0x73, MRM6r, "psllq", X86vshl, X86vshli,
3990 VR128, v2i64, v2i64, bc_v2i64,
3991 SSE_INTSHIFT_ITINS_P>;
3993 defm PSRLW : PDI_binop_rmi<0xD1, 0x71, MRM2r, "psrlw", X86vsrl, X86vsrli,
3994 VR128, v8i16, v8i16, bc_v8i16,
3995 SSE_INTSHIFT_ITINS_P>;
3996 defm PSRLD : PDI_binop_rmi<0xD2, 0x72, MRM2r, "psrld", X86vsrl, X86vsrli,
3997 VR128, v4i32, v4i32, bc_v4i32,
3998 SSE_INTSHIFT_ITINS_P>;
3999 defm PSRLQ : PDI_binop_rmi<0xD3, 0x73, MRM2r, "psrlq", X86vsrl, X86vsrli,
4000 VR128, v2i64, v2i64, bc_v2i64,
4001 SSE_INTSHIFT_ITINS_P>;
4003 defm PSRAW : PDI_binop_rmi<0xE1, 0x71, MRM4r, "psraw", X86vsra, X86vsrai,
4004 VR128, v8i16, v8i16, bc_v8i16,
4005 SSE_INTSHIFT_ITINS_P>;
4006 defm PSRAD : PDI_binop_rmi<0xE2, 0x72, MRM4r, "psrad", X86vsra, X86vsrai,
4007 VR128, v4i32, v4i32, bc_v4i32,
4008 SSE_INTSHIFT_ITINS_P>;
4010 let ExeDomain = SSEPackedInt in {
4011 // 128-bit logical shifts.
4012 def PSLLDQri : PDIi8<0x73, MRM7r,
4013 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
4014 "pslldq\t{$src2, $dst|$dst, $src2}",
4016 (int_x86_sse2_psll_dq_bs VR128:$src1, imm:$src2))]>;
4017 def PSRLDQri : PDIi8<0x73, MRM3r,
4018 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
4019 "psrldq\t{$src2, $dst|$dst, $src2}",
4021 (int_x86_sse2_psrl_dq_bs VR128:$src1, imm:$src2))]>;
// PSRADQri doesn't exist in SSE[1-3].
}
4024 } // Constraints = "$src1 = $dst"
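// The int_x86_sse2_psll_dq / psrl_dq intrinsics count in bits, while
// PSLLDQ/PSRLDQ shift whole bytes; the patterns below therefore scale the
// immediate with BYTE_imm (a shift-right-by-three transform).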
4026 let Predicates = [HasAVX] in {
4027 def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
4028 (VPSLLDQri VR128:$src1, (BYTE_imm imm:$src2))>;
4029 def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
4030 (VPSRLDQri VR128:$src1, (BYTE_imm imm:$src2))>;
4031 def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
4032 (VPSRLDQri VR128:$src1, (BYTE_imm imm:$src2))>;
// Shift up / down and insert zeros.
4035 def : Pat<(v2i64 (X86vshldq VR128:$src, (i8 imm:$amt))),
4036 (VPSLLDQri VR128:$src, (BYTE_imm imm:$amt))>;
4037 def : Pat<(v2i64 (X86vshrdq VR128:$src, (i8 imm:$amt))),
(VPSRLDQri VR128:$src, (BYTE_imm imm:$amt))>;
}
4041 let Predicates = [HasAVX2] in {
4042 def : Pat<(int_x86_avx2_psll_dq VR256:$src1, imm:$src2),
4043 (VPSLLDQYri VR256:$src1, (BYTE_imm imm:$src2))>;
4044 def : Pat<(int_x86_avx2_psrl_dq VR256:$src1, imm:$src2),
(VPSRLDQYri VR256:$src1, (BYTE_imm imm:$src2))>;
}
4048 let Predicates = [HasSSE2] in {
4049 def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
4050 (PSLLDQri VR128:$src1, (BYTE_imm imm:$src2))>;
4051 def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
4052 (PSRLDQri VR128:$src1, (BYTE_imm imm:$src2))>;
4053 def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
4054 (PSRLDQri VR128:$src1, (BYTE_imm imm:$src2))>;
// Shift up / down and insert zeros.
4057 def : Pat<(v2i64 (X86vshldq VR128:$src, (i8 imm:$amt))),
4058 (PSLLDQri VR128:$src, (BYTE_imm imm:$amt))>;
4059 def : Pat<(v2i64 (X86vshrdq VR128:$src, (i8 imm:$amt))),
(PSRLDQri VR128:$src, (BYTE_imm imm:$amt))>;
}
4063 //===---------------------------------------------------------------------===//
4064 // SSE2 - Packed Integer Comparison Instructions
4065 //===---------------------------------------------------------------------===//
let Predicates = [HasAVX] in {
  defm VPCMPEQB  : PDI_binop_rm<0x74, "vpcmpeqb", X86pcmpeq, v16i8,
                                VR128, memopv2i64, i128mem,
                                SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
  defm VPCMPEQW  : PDI_binop_rm<0x75, "vpcmpeqw", X86pcmpeq, v8i16,
                                VR128, memopv2i64, i128mem,
                                SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
  defm VPCMPEQD  : PDI_binop_rm<0x76, "vpcmpeqd", X86pcmpeq, v4i32,
                                VR128, memopv2i64, i128mem,
                                SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
  defm VPCMPGTB  : PDI_binop_rm<0x64, "vpcmpgtb", X86pcmpgt, v16i8,
                                VR128, memopv2i64, i128mem,
                                SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
  defm VPCMPGTW  : PDI_binop_rm<0x65, "vpcmpgtw", X86pcmpgt, v8i16,
                                VR128, memopv2i64, i128mem,
                                SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
  defm VPCMPGTD  : PDI_binop_rm<0x66, "vpcmpgtd", X86pcmpgt, v4i32,
                                VR128, memopv2i64, i128mem,
                                SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
}
let Predicates = [HasAVX2] in {
  defm VPCMPEQBY : PDI_binop_rm<0x74, "vpcmpeqb", X86pcmpeq, v32i8,
                                VR256, memopv4i64, i256mem,
                                SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
  defm VPCMPEQWY : PDI_binop_rm<0x75, "vpcmpeqw", X86pcmpeq, v16i16,
                                VR256, memopv4i64, i256mem,
                                SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
  defm VPCMPEQDY : PDI_binop_rm<0x76, "vpcmpeqd", X86pcmpeq, v8i32,
                                VR256, memopv4i64, i256mem,
                                SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
  defm VPCMPGTBY : PDI_binop_rm<0x64, "vpcmpgtb", X86pcmpgt, v32i8,
                                VR256, memopv4i64, i256mem,
                                SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
  defm VPCMPGTWY : PDI_binop_rm<0x65, "vpcmpgtw", X86pcmpgt, v16i16,
                                VR256, memopv4i64, i256mem,
                                SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
  defm VPCMPGTDY : PDI_binop_rm<0x66, "vpcmpgtd", X86pcmpgt, v8i32,
                                VR256, memopv4i64, i256mem,
                                SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
}
let Constraints = "$src1 = $dst" in {
  defm PCMPEQB  : PDI_binop_rm<0x74, "pcmpeqb", X86pcmpeq, v16i8,
                               VR128, memopv2i64, i128mem,
                               SSE_INTALU_ITINS_P, 1>;
  defm PCMPEQW  : PDI_binop_rm<0x75, "pcmpeqw", X86pcmpeq, v8i16,
                               VR128, memopv2i64, i128mem,
                               SSE_INTALU_ITINS_P, 1>;
  defm PCMPEQD  : PDI_binop_rm<0x76, "pcmpeqd", X86pcmpeq, v4i32,
                               VR128, memopv2i64, i128mem,
                               SSE_INTALU_ITINS_P, 1>;
  defm PCMPGTB  : PDI_binop_rm<0x64, "pcmpgtb", X86pcmpgt, v16i8,
                               VR128, memopv2i64, i128mem,
                               SSE_INTALU_ITINS_P>;
  defm PCMPGTW  : PDI_binop_rm<0x65, "pcmpgtw", X86pcmpgt, v8i16,
                               VR128, memopv2i64, i128mem,
                               SSE_INTALU_ITINS_P>;
  defm PCMPGTD  : PDI_binop_rm<0x66, "pcmpgtd", X86pcmpgt, v4i32,
                               VR128, memopv2i64, i128mem,
                               SSE_INTALU_ITINS_P>;
} // Constraints = "$src1 = $dst"
//===---------------------------------------------------------------------===//
// SSE2 - Packed Integer Pack Instructions
//===---------------------------------------------------------------------===//
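
// The pack instructions narrow each source element to half its width with
// saturation: packsswb/packssdw use signed saturation, while packuswb
// converts signed words to unsigned bytes. The low half of the result comes
// from $src1 and the high half from $src2.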
let Predicates = [HasAVX] in {
defm VPACKSSWB : PDI_binop_rm_int<0x63, "vpacksswb", int_x86_sse2_packsswb_128,
                                  VR128, memopv2i64, i128mem,
                                  SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
defm VPACKSSDW : PDI_binop_rm_int<0x6B, "vpackssdw", int_x86_sse2_packssdw_128,
                                  VR128, memopv2i64, i128mem,
                                  SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
defm VPACKUSWB : PDI_binop_rm_int<0x67, "vpackuswb", int_x86_sse2_packuswb_128,
                                  VR128, memopv2i64, i128mem,
                                  SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
}
let Predicates = [HasAVX2] in {
defm VPACKSSWBY : PDI_binop_rm_int<0x63, "vpacksswb", int_x86_avx2_packsswb,
                                   VR256, memopv4i64, i256mem,
                                   SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
defm VPACKSSDWY : PDI_binop_rm_int<0x6B, "vpackssdw", int_x86_avx2_packssdw,
                                   VR256, memopv4i64, i256mem,
                                   SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
defm VPACKUSWBY : PDI_binop_rm_int<0x67, "vpackuswb", int_x86_avx2_packuswb,
                                   VR256, memopv4i64, i256mem,
                                   SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
}
let Constraints = "$src1 = $dst" in {
defm PACKSSWB : PDI_binop_rm_int<0x63, "packsswb", int_x86_sse2_packsswb_128,
                                 VR128, memopv2i64, i128mem,
                                 SSE_INTALU_ITINS_P>;
defm PACKSSDW : PDI_binop_rm_int<0x6B, "packssdw", int_x86_sse2_packssdw_128,
                                 VR128, memopv2i64, i128mem,
                                 SSE_INTALU_ITINS_P>;
defm PACKUSWB : PDI_binop_rm_int<0x67, "packuswb", int_x86_sse2_packuswb_128,
                                 VR128, memopv2i64, i128mem,
                                 SSE_INTALU_ITINS_P>;
} // Constraints = "$src1 = $dst"
//===---------------------------------------------------------------------===//
// SSE2 - Packed Integer Shuffle Instructions
//===---------------------------------------------------------------------===//
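
// pshufd selects each dword of the result with a 2-bit field of the
// immediate; pshufhw/pshuflw shuffle only the high/low four words and pass
// the other half of the register through unchanged.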
let ExeDomain = SSEPackedInt in {
multiclass sse2_pshuffle<string OpcodeStr, ValueType vt, SDNode OpNode> {
def ri : Ii8<0x70, MRMSrcReg,
             (outs VR128:$dst), (ins VR128:$src1, i8imm:$src2),
             !strconcat(OpcodeStr,
                        "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR128:$dst, (vt (OpNode VR128:$src1, (i8 imm:$src2))))],
             IIC_SSE_PSHUF>;
def mi : Ii8<0x70, MRMSrcMem,
             (outs VR128:$dst), (ins i128mem:$src1, i8imm:$src2),
             !strconcat(OpcodeStr,
                        "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR128:$dst,
               (vt (OpNode (bitconvert (memopv2i64 addr:$src1)),
                           (i8 imm:$src2))))],
             IIC_SSE_PSHUF>;
}

multiclass sse2_pshuffle_y<string OpcodeStr, ValueType vt, SDNode OpNode> {
def Yri : Ii8<0x70, MRMSrcReg,
              (outs VR256:$dst), (ins VR256:$src1, i8imm:$src2),
              !strconcat(OpcodeStr,
                         "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
              [(set VR256:$dst, (vt (OpNode VR256:$src1, (i8 imm:$src2))))]>;
def Ymi : Ii8<0x70, MRMSrcMem,
              (outs VR256:$dst), (ins i256mem:$src1, i8imm:$src2),
              !strconcat(OpcodeStr,
                         "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
              [(set VR256:$dst,
                (vt (OpNode (bitconvert (memopv4i64 addr:$src1)),
                            (i8 imm:$src2))))]>;
}
} // ExeDomain = SSEPackedInt
let Predicates = [HasAVX] in {
  let AddedComplexity = 5 in
  defm VPSHUFD : sse2_pshuffle<"vpshufd", v4i32, X86PShufd>, TB, OpSize, VEX;

  // SSE2 with ImmT == Imm8 and XS prefix.
  defm VPSHUFHW : sse2_pshuffle<"vpshufhw", v8i16, X86PShufhw>, XS, VEX;

  // SSE2 with ImmT == Imm8 and XD prefix.
  defm VPSHUFLW : sse2_pshuffle<"vpshuflw", v8i16, X86PShuflw>, XD, VEX;

  def : Pat<(v4f32 (X86PShufd (memopv4f32 addr:$src1), (i8 imm:$imm))),
            (VPSHUFDmi addr:$src1, imm:$imm)>;
  def : Pat<(v4f32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
            (VPSHUFDri VR128:$src1, imm:$imm)>;
}
let Predicates = [HasAVX2] in {
  defm VPSHUFD  : sse2_pshuffle_y<"vpshufd", v8i32, X86PShufd>, TB, OpSize, VEX;
  defm VPSHUFHW : sse2_pshuffle_y<"vpshufhw", v16i16, X86PShufhw>, XS, VEX;
  defm VPSHUFLW : sse2_pshuffle_y<"vpshuflw", v16i16, X86PShuflw>, XD, VEX;
}
let Predicates = [HasSSE2] in {
  let AddedComplexity = 5 in
  defm PSHUFD : sse2_pshuffle<"pshufd", v4i32, X86PShufd>, TB, OpSize;

  // SSE2 with ImmT == Imm8 and XS prefix.
  defm PSHUFHW : sse2_pshuffle<"pshufhw", v8i16, X86PShufhw>, XS;

  // SSE2 with ImmT == Imm8 and XD prefix.
  defm PSHUFLW : sse2_pshuffle<"pshuflw", v8i16, X86PShuflw>, XD;

  def : Pat<(v4f32 (X86PShufd (memopv4f32 addr:$src1), (i8 imm:$imm))),
            (PSHUFDmi addr:$src1, imm:$imm)>;
  def : Pat<(v4f32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
            (PSHUFDri VR128:$src1, imm:$imm)>;
}
//===---------------------------------------------------------------------===//
// SSE2 - Packed Integer Unpack Instructions
//===---------------------------------------------------------------------===//
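
// punpckl* interleave elements from the low halves of the two sources;
// punpckh* interleave the high halves. The bc_frag bitcast lets the v2i64
// memory patterns match the element type of each variant.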
let ExeDomain = SSEPackedInt in {
multiclass sse2_unpack<bits<8> opc, string OpcodeStr, ValueType vt,
                       SDNode OpNode, PatFrag bc_frag, bit Is2Addr = 1> {
  def rr : PDI<opc, MRMSrcReg,
      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
      !if(Is2Addr,
          !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
          !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
      [(set VR128:$dst, (vt (OpNode VR128:$src1, VR128:$src2)))],
      IIC_SSE_UNPCK>;
  def rm : PDI<opc, MRMSrcMem,
      (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
      !if(Is2Addr,
          !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
          !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
      [(set VR128:$dst, (OpNode VR128:$src1,
                                (bc_frag (memopv2i64 addr:$src2))))],
      IIC_SSE_UNPCK>;
}

multiclass sse2_unpack_y<bits<8> opc, string OpcodeStr, ValueType vt,
                         SDNode OpNode, PatFrag bc_frag> {
  def Yrr : PDI<opc, MRMSrcReg,
      (outs VR256:$dst), (ins VR256:$src1, VR256:$src2),
      !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
      [(set VR256:$dst, (vt (OpNode VR256:$src1, VR256:$src2)))]>;
  def Yrm : PDI<opc, MRMSrcMem,
      (outs VR256:$dst), (ins VR256:$src1, i256mem:$src2),
      !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
      [(set VR256:$dst, (OpNode VR256:$src1,
                                (bc_frag (memopv4i64 addr:$src2))))]>;
}
let Predicates = [HasAVX] in {
  defm VPUNPCKLBW  : sse2_unpack<0x60, "vpunpcklbw", v16i8, X86Unpckl,
                                 bc_v16i8, 0>, VEX_4V;
  defm VPUNPCKLWD  : sse2_unpack<0x61, "vpunpcklwd", v8i16, X86Unpckl,
                                 bc_v8i16, 0>, VEX_4V;
  defm VPUNPCKLDQ  : sse2_unpack<0x62, "vpunpckldq", v4i32, X86Unpckl,
                                 bc_v4i32, 0>, VEX_4V;
  defm VPUNPCKLQDQ : sse2_unpack<0x6C, "vpunpcklqdq", v2i64, X86Unpckl,
                                 bc_v2i64, 0>, VEX_4V;

  defm VPUNPCKHBW  : sse2_unpack<0x68, "vpunpckhbw", v16i8, X86Unpckh,
                                 bc_v16i8, 0>, VEX_4V;
  defm VPUNPCKHWD  : sse2_unpack<0x69, "vpunpckhwd", v8i16, X86Unpckh,
                                 bc_v8i16, 0>, VEX_4V;
  defm VPUNPCKHDQ  : sse2_unpack<0x6A, "vpunpckhdq", v4i32, X86Unpckh,
                                 bc_v4i32, 0>, VEX_4V;
  defm VPUNPCKHQDQ : sse2_unpack<0x6D, "vpunpckhqdq", v2i64, X86Unpckh,
                                 bc_v2i64, 0>, VEX_4V;
}
let Predicates = [HasAVX2] in {
  defm VPUNPCKLBW  : sse2_unpack_y<0x60, "vpunpcklbw", v32i8, X86Unpckl,
                                   bc_v32i8>, VEX_4V;
  defm VPUNPCKLWD  : sse2_unpack_y<0x61, "vpunpcklwd", v16i16, X86Unpckl,
                                   bc_v16i16>, VEX_4V;
  defm VPUNPCKLDQ  : sse2_unpack_y<0x62, "vpunpckldq", v8i32, X86Unpckl,
                                   bc_v8i32>, VEX_4V;
  defm VPUNPCKLQDQ : sse2_unpack_y<0x6C, "vpunpcklqdq", v4i64, X86Unpckl,
                                   bc_v4i64>, VEX_4V;

  defm VPUNPCKHBW  : sse2_unpack_y<0x68, "vpunpckhbw", v32i8, X86Unpckh,
                                   bc_v32i8>, VEX_4V;
  defm VPUNPCKHWD  : sse2_unpack_y<0x69, "vpunpckhwd", v16i16, X86Unpckh,
                                   bc_v16i16>, VEX_4V;
  defm VPUNPCKHDQ  : sse2_unpack_y<0x6A, "vpunpckhdq", v8i32, X86Unpckh,
                                   bc_v8i32>, VEX_4V;
  defm VPUNPCKHQDQ : sse2_unpack_y<0x6D, "vpunpckhqdq", v4i64, X86Unpckh,
                                   bc_v4i64>, VEX_4V;
}
let Constraints = "$src1 = $dst" in {
  defm PUNPCKLBW  : sse2_unpack<0x60, "punpcklbw", v16i8, X86Unpckl,
                                bc_v16i8>;
  defm PUNPCKLWD  : sse2_unpack<0x61, "punpcklwd", v8i16, X86Unpckl,
                                bc_v8i16>;
  defm PUNPCKLDQ  : sse2_unpack<0x62, "punpckldq", v4i32, X86Unpckl,
                                bc_v4i32>;
  defm PUNPCKLQDQ : sse2_unpack<0x6C, "punpcklqdq", v2i64, X86Unpckl,
                                bc_v2i64>;

  defm PUNPCKHBW  : sse2_unpack<0x68, "punpckhbw", v16i8, X86Unpckh,
                                bc_v16i8>;
  defm PUNPCKHWD  : sse2_unpack<0x69, "punpckhwd", v8i16, X86Unpckh,
                                bc_v8i16>;
  defm PUNPCKHDQ  : sse2_unpack<0x6A, "punpckhdq", v4i32, X86Unpckh,
                                bc_v4i32>;
  defm PUNPCKHQDQ : sse2_unpack<0x6D, "punpckhqdq", v2i64, X86Unpckh,
                                bc_v2i64>;
}
} // ExeDomain = SSEPackedInt
//===---------------------------------------------------------------------===//
// SSE2 - Packed Integer Extract and Insert
//===---------------------------------------------------------------------===//
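
// pinsrw replaces the word selected by the immediate with the low 16 bits
// of a GR32 (or with a 16-bit load); pextrw zero-extends the selected word
// into a GR32.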
let ExeDomain = SSEPackedInt in {
multiclass sse2_pinsrw<bit Is2Addr = 1> {
  def rri : Ii8<0xC4, MRMSrcReg,
       (outs VR128:$dst), (ins VR128:$src1,
        GR32:$src2, i32i8imm:$src3),
       !if(Is2Addr,
           "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
           "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
       [(set VR128:$dst,
         (X86pinsrw VR128:$src1, GR32:$src2, imm:$src3))], IIC_SSE_PINSRW>;
  def rmi : Ii8<0xC4, MRMSrcMem,
                (outs VR128:$dst), (ins VR128:$src1,
                 i16mem:$src2, i32i8imm:$src3),
       !if(Is2Addr,
           "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
           "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
       [(set VR128:$dst,
         (X86pinsrw VR128:$src1, (extloadi16 addr:$src2),
                    imm:$src3))], IIC_SSE_PINSRW>;
}

// Extract
let Predicates = [HasAVX] in
def VPEXTRWri : Ii8<0xC5, MRMSrcReg,
                    (outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
                    "vpextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
                                                imm:$src2))]>, TB, OpSize, VEX;
def PEXTRWri : PDIi8<0xC5, MRMSrcReg,
                    (outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
                    "pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
                                                imm:$src2))], IIC_SSE_PEXTRW>;

// Insert
let Predicates = [HasAVX] in {
  defm VPINSRW : sse2_pinsrw<0>, TB, OpSize, VEX_4V;
  def VPINSRWrr64i : Ii8<0xC4, MRMSrcReg, (outs VR128:$dst),
       (ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
       "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
       []>, TB, OpSize, VEX_4V;
}

let Constraints = "$src1 = $dst" in
  defm PINSRW : sse2_pinsrw, TB, OpSize, Requires<[HasSSE2]>;

} // ExeDomain = SSEPackedInt
//===---------------------------------------------------------------------===//
// SSE2 - Packed Mask Creation
//===---------------------------------------------------------------------===//
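
// pmovmskb gathers the most significant bit of each byte of the source into
// the low bits of a GPR: 16 bits for an XMM source, 32 for a YMM source.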
let ExeDomain = SSEPackedInt in {

def VPMOVMSKBrr  : VPDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
           "pmovmskb\t{$src, $dst|$dst, $src}",
           [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))],
           IIC_SSE_MOVMSK>, VEX;
def VPMOVMSKBr64r : VPDI<0xD7, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
           "pmovmskb\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVMSK>, VEX;

let Predicates = [HasAVX2] in {
def VPMOVMSKBYrr  : VPDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR256:$src),
           "pmovmskb\t{$src, $dst|$dst, $src}",
           [(set GR32:$dst, (int_x86_avx2_pmovmskb VR256:$src))]>, VEX;
def VPMOVMSKBYr64r : VPDI<0xD7, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
           "pmovmskb\t{$src, $dst|$dst, $src}", []>, VEX;
}

def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
           "pmovmskb\t{$src, $dst|$dst, $src}",
           [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))],
           IIC_SSE_MOVMSK>;

} // ExeDomain = SSEPackedInt
//===---------------------------------------------------------------------===//
// SSE2 - Conditional Store
//===---------------------------------------------------------------------===//
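
// maskmovdqu stores only the bytes of $src whose corresponding byte in
// $mask has its most significant bit set. The destination address is
// implicitly EDI/RDI, which is why the definitions below list it in Uses.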
let ExeDomain = SSEPackedInt in {

let Uses = [EDI] in
def VMASKMOVDQU : VPDI<0xF7, MRMSrcReg, (outs),
           (ins VR128:$src, VR128:$mask),
           "maskmovdqu\t{$mask, $src|$src, $mask}",
           [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)],
           IIC_SSE_MASKMOV>, VEX;
let Uses = [RDI] in
def VMASKMOVDQU64 : VPDI<0xF7, MRMSrcReg, (outs),
           (ins VR128:$src, VR128:$mask),
           "maskmovdqu\t{$mask, $src|$src, $mask}",
           [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)],
           IIC_SSE_MASKMOV>, VEX;

let Uses = [EDI] in
def MASKMOVDQU : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
           "maskmovdqu\t{$mask, $src|$src, $mask}",
           [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)],
           IIC_SSE_MASKMOV>;
let Uses = [RDI] in
def MASKMOVDQU64 : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
           "maskmovdqu\t{$mask, $src|$src, $mask}",
           [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)],
           IIC_SSE_MASKMOV>;

} // ExeDomain = SSEPackedInt
//===---------------------------------------------------------------------===//
// SSE2 - Move Doubleword
//===---------------------------------------------------------------------===//
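
// movd moves 32 bits between a GPR or memory and the low element of an XMM
// register; when the XMM register is the destination, the hardware zeroes
// the upper elements.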
//===---------------------------------------------------------------------===//
// Move Int Doubleword to Packed Double Int
//
def VMOVDI2PDIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                        (v4i32 (scalar_to_vector GR32:$src)))], IIC_SSE_MOVDQ>,
                      VEX;
def VMOVDI2PDIrm : VPDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                        (v4i32 (scalar_to_vector (loadi32 addr:$src))))],
                      IIC_SSE_MOVDQ>,
                      VEX;
def VMOV64toPQIrr : VRPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
                        "mov{d|q}\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                          (v2i64 (scalar_to_vector GR64:$src)))],
                        IIC_SSE_MOVDQ>, VEX;
def VMOV64toSDrr : VRPDI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
                       "mov{d|q}\t{$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (bitconvert GR64:$src))],
                       IIC_SSE_MOVDQ>, VEX;

def MOVDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                        (v4i32 (scalar_to_vector GR32:$src)))], IIC_SSE_MOVDQ>;
def MOVDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                        (v4i32 (scalar_to_vector (loadi32 addr:$src))))],
                      IIC_SSE_MOVDQ>;
def MOV64toPQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
                        "mov{d|q}\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                          (v2i64 (scalar_to_vector GR64:$src)))],
                        IIC_SSE_MOVDQ>;
def MOV64toSDrr : RPDI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
                       "mov{d|q}\t{$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (bitconvert GR64:$src))],
                       IIC_SSE_MOVDQ>;
//===---------------------------------------------------------------------===//
// Move Int Doubleword to Single Scalar
//
def VMOVDI2SSrr  : VPDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set FR32:$dst, (bitconvert GR32:$src))],
                      IIC_SSE_MOVDQ>, VEX;

def VMOVDI2SSrm  : VPDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))],
                      IIC_SSE_MOVDQ>,
                      VEX;
def MOVDI2SSrr  : PDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set FR32:$dst, (bitconvert GR32:$src))],
                      IIC_SSE_MOVDQ>;

def MOVDI2SSrm  : PDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))],
                      IIC_SSE_MOVDQ>;
//===---------------------------------------------------------------------===//
// Move Packed Doubleword Int to Packed Double Int
//
def VMOVPDI2DIrr  : VPDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
                       "movd\t{$src, $dst|$dst, $src}",
                       [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
                                        (iPTR 0)))], IIC_SSE_MOVD_ToGP>, VEX;
def VMOVPDI2DImr  : VPDI<0x7E, MRMDestMem, (outs),
                       (ins i32mem:$dst, VR128:$src),
                       "movd\t{$src, $dst|$dst, $src}",
                       [(store (i32 (vector_extract (v4i32 VR128:$src),
                                     (iPTR 0))), addr:$dst)], IIC_SSE_MOVDQ>,
                       VEX;
def MOVPDI2DIrr  : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
                       "movd\t{$src, $dst|$dst, $src}",
                       [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
                                        (iPTR 0)))], IIC_SSE_MOVD_ToGP>;
def MOVPDI2DImr  : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR128:$src),
                       "movd\t{$src, $dst|$dst, $src}",
                       [(store (i32 (vector_extract (v4i32 VR128:$src),
                                     (iPTR 0))), addr:$dst)],
                       IIC_SSE_MOVDQ>;
//===---------------------------------------------------------------------===//
// Move Packed Doubleword Int first element to Doubleword Int
//
def VMOVPQIto64rr : I<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
                      "mov{d|q}\t{$src, $dst|$dst, $src}",
                      [(set GR64:$dst, (vector_extract (v2i64 VR128:$src),
                                                       (iPTR 0)))],
                      IIC_SSE_MOVD_ToGP>,
                      TB, OpSize, VEX, VEX_W, Requires<[HasAVX, In64BitMode]>;

def MOVPQIto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
                        "mov{d|q}\t{$src, $dst|$dst, $src}",
                        [(set GR64:$dst, (vector_extract (v2i64 VR128:$src),
                                                         (iPTR 0)))],
                        IIC_SSE_MOVD_ToGP>;
//===---------------------------------------------------------------------===//
// Bitcast FR64 <-> GR64
//
let Predicates = [HasAVX] in
def VMOV64toSDrm : S2SI<0x7E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
                        "vmovq\t{$src, $dst|$dst, $src}",
                        [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))]>,
                        VEX;
def VMOVSDto64rr : VRPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
                         "mov{d|q}\t{$src, $dst|$dst, $src}",
                         [(set GR64:$dst, (bitconvert FR64:$src))],
                         IIC_SSE_MOVDQ>, VEX;
def VMOVSDto64mr : VRPDI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
                         "movq\t{$src, $dst|$dst, $src}",
                         [(store (i64 (bitconvert FR64:$src)), addr:$dst)],
                         IIC_SSE_MOVDQ>, VEX;

def MOV64toSDrm : S2SI<0x7E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
                       "movq\t{$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))],
                       IIC_SSE_MOVDQ>;
def MOVSDto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
                       "mov{d|q}\t{$src, $dst|$dst, $src}",
                       [(set GR64:$dst, (bitconvert FR64:$src))],
                       IIC_SSE_MOVDQ>;
def MOVSDto64mr : RPDI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
                       "movq\t{$src, $dst|$dst, $src}",
                       [(store (i64 (bitconvert FR64:$src)), addr:$dst)],
                       IIC_SSE_MOVDQ>;
//===---------------------------------------------------------------------===//
// Move Scalar Single to Double Int
//
def VMOVSS2DIrr  : VPDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
                        "movd\t{$src, $dst|$dst, $src}",
                        [(set GR32:$dst, (bitconvert FR32:$src))],
                        IIC_SSE_MOVD_ToGP>, VEX;
def VMOVSS2DImr  : VPDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
                        "movd\t{$src, $dst|$dst, $src}",
                        [(store (i32 (bitconvert FR32:$src)), addr:$dst)],
                        IIC_SSE_MOVDQ>, VEX;
def MOVSS2DIrr  : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set GR32:$dst, (bitconvert FR32:$src))],
                      IIC_SSE_MOVD_ToGP>;
def MOVSS2DImr  : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(store (i32 (bitconvert FR32:$src)), addr:$dst)],
                      IIC_SSE_MOVDQ>;
//===---------------------------------------------------------------------===//
// Patterns and instructions to describe movd/movq to XMM register zero-extends
//
let AddedComplexity = 15 in {
def VMOVZDI2PDIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
                       "movd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (v4i32 (X86vzmovl
                                      (v4i32 (scalar_to_vector GR32:$src)))))],
                       IIC_SSE_MOVDQ>, VEX;
def VMOVZQI2PQIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
                       "mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only
                       [(set VR128:$dst, (v2i64 (X86vzmovl
                                      (v2i64 (scalar_to_vector GR64:$src)))))],
                       IIC_SSE_MOVDQ>,
                       VEX, VEX_W;
}

let AddedComplexity = 15 in {
def MOVZDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
                       "movd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (v4i32 (X86vzmovl
                                      (v4i32 (scalar_to_vector GR32:$src)))))],
                       IIC_SSE_MOVDQ>;
def MOVZQI2PQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
                       "mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only
                       [(set VR128:$dst, (v2i64 (X86vzmovl
                                      (v2i64 (scalar_to_vector GR64:$src)))))],
                       IIC_SSE_MOVDQ>;
}

let AddedComplexity = 20 in {
def VMOVZDI2PDIrm : VPDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
                       "movd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst,
                         (v4i32 (X86vzmovl (v4i32 (scalar_to_vector
                                                   (loadi32 addr:$src))))))],
                       IIC_SSE_MOVDQ>, VEX;
def MOVZDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
                       "movd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst,
                         (v4i32 (X86vzmovl (v4i32 (scalar_to_vector
                                                   (loadi32 addr:$src))))))],
                       IIC_SSE_MOVDQ>;
}
let Predicates = [HasAVX] in {
  // AVX 128-bit movd/movq instructions write zeros in the high 128-bit part.
  let AddedComplexity = 20 in {
    def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
              (VMOVZDI2PDIrm addr:$src)>;
    def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
              (VMOVZDI2PDIrm addr:$src)>;
  }
  // Use regular 128-bit instructions to match 256-bit scalar_to_vec+zext.
  def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
                               (v4i32 (scalar_to_vector GR32:$src)),(i32 0)))),
            (SUBREG_TO_REG (i32 0), (VMOVZDI2PDIrr GR32:$src), sub_xmm)>;
  def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
                               (v2i64 (scalar_to_vector GR64:$src)),(i32 0)))),
            (SUBREG_TO_REG (i64 0), (VMOVZQI2PQIrr GR64:$src), sub_xmm)>;
}

let Predicates = [HasSSE2], AddedComplexity = 20 in {
  def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
            (MOVZDI2PDIrm addr:$src)>;
  def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
            (MOVZDI2PDIrm addr:$src)>;
}
// These are the correct encodings of the instructions so that we know how to
// read correct assembly, even though we continue to emit the wrong ones for
// compatibility with Darwin's buggy assembler.
def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
                (MOV64toPQIrr VR128:$dst, GR64:$src), 0>;
def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
                (MOV64toSDrr FR64:$dst, GR64:$src), 0>;
def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
                (MOVPQIto64rr GR64:$dst, VR128:$src), 0>;
def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
                (MOVSDto64rr GR64:$dst, FR64:$src), 0>;
def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
                (VMOVZQI2PQIrr VR128:$dst, GR64:$src), 0>;
def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
                (MOVZQI2PQIrr VR128:$dst, GR64:$src), 0>;
//===---------------------------------------------------------------------===//
// SSE2 - Move Quadword
//===---------------------------------------------------------------------===//
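
// movq moves 64 bits between memory, a GR64 (the REX.W encoding), and the
// low quadword of an XMM register; loads into an XMM register zero the
// upper quadword.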
//===---------------------------------------------------------------------===//
// Move Quadword Int to Packed Quadword Int
//
def VMOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                    "vmovq\t{$src, $dst|$dst, $src}",
                    [(set VR128:$dst,
                      (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
                    VEX, Requires<[HasAVX]>;
def MOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                    "movq\t{$src, $dst|$dst, $src}",
                    [(set VR128:$dst,
                      (v2i64 (scalar_to_vector (loadi64 addr:$src))))],
                    IIC_SSE_MOVDQ>, XS,
                    Requires<[HasSSE2]>; // SSE2 instruction with XS Prefix
//===---------------------------------------------------------------------===//
// Move Packed Quadword Int to Quadword Int
//
def VMOVPQI2QImr : VPDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
                      "movq\t{$src, $dst|$dst, $src}",
                      [(store (i64 (vector_extract (v2i64 VR128:$src),
                                    (iPTR 0))), addr:$dst)],
                      IIC_SSE_MOVDQ>, VEX;
def MOVPQI2QImr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
                      "movq\t{$src, $dst|$dst, $src}",
                      [(store (i64 (vector_extract (v2i64 VR128:$src),
                                    (iPTR 0))), addr:$dst)],
                      IIC_SSE_MOVDQ>;
//===---------------------------------------------------------------------===//
// Store / copy lower 64 bits of an XMM register.
//
def VMOVLQ128mr : VPDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
                     "movq\t{$src, $dst|$dst, $src}",
                     [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>, VEX;
def MOVLQ128mr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
                     "movq\t{$src, $dst|$dst, $src}",
                     [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)],
                     IIC_SSE_MOVDQ>;
let AddedComplexity = 20 in
def VMOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                     "vmovq\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst,
                       (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
                                                 (loadi64 addr:$src))))))],
                     IIC_SSE_MOVDQ>,
                     XS, VEX, Requires<[HasAVX]>;

let AddedComplexity = 20 in
def MOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                     "movq\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst,
                       (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
                                                 (loadi64 addr:$src))))))],
                     IIC_SSE_MOVDQ>,
                     XS, Requires<[HasSSE2]>;
let Predicates = [HasAVX], AddedComplexity = 20 in {
  def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
            (VMOVZQI2PQIrm addr:$src)>;
  def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4f32 addr:$src)))),
            (VMOVZQI2PQIrm addr:$src)>;
  def : Pat<(v2i64 (X86vzload addr:$src)),
            (VMOVZQI2PQIrm addr:$src)>;
}

let Predicates = [HasSSE2], AddedComplexity = 20 in {
  def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
            (MOVZQI2PQIrm addr:$src)>;
  def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4f32 addr:$src)))),
            (MOVZQI2PQIrm addr:$src)>;
  def : Pat<(v2i64 (X86vzload addr:$src)), (MOVZQI2PQIrm addr:$src)>;
}

let Predicates = [HasAVX] in {
def : Pat<(v4i64 (alignedX86vzload addr:$src)),
          (SUBREG_TO_REG (i32 0), (VMOVAPSrm addr:$src), sub_xmm)>;
def : Pat<(v4i64 (X86vzload addr:$src)),
          (SUBREG_TO_REG (i32 0), (VMOVUPSrm addr:$src), sub_xmm)>;
}
//===---------------------------------------------------------------------===//
// Moving from XMM to XMM and clearing the upper 64 bits. Note: there is a bug
// in the IA32 documentation; movq xmm1, xmm2 does clear the high bits.
//
let AddedComplexity = 15 in
def VMOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "vmovq\t{$src, $dst|$dst, $src}",
                    [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))],
                    IIC_SSE_MOVQ_RR>,
                      XS, VEX, Requires<[HasAVX]>;
let AddedComplexity = 15 in
def MOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "movq\t{$src, $dst|$dst, $src}",
                    [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))],
                    IIC_SSE_MOVQ_RR>,
                      XS, Requires<[HasSSE2]>;

let AddedComplexity = 20 in
def VMOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                        "vmovq\t{$src, $dst|$dst, $src}",
                    [(set VR128:$dst, (v2i64 (X86vzmovl
                                             (loadv2i64 addr:$src))))],
                    IIC_SSE_MOVDQ>,
                      XS, VEX, Requires<[HasAVX]>;
let AddedComplexity = 20 in {
def MOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                        "movq\t{$src, $dst|$dst, $src}",
                    [(set VR128:$dst, (v2i64 (X86vzmovl
                                             (loadv2i64 addr:$src))))],
                    IIC_SSE_MOVDQ>,
                      XS, Requires<[HasSSE2]>;
}

let AddedComplexity = 20 in {
  let Predicates = [HasAVX] in {
    def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
              (VMOVZPQILo2PQIrm addr:$src)>;
    def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
              (VMOVZPQILo2PQIrr VR128:$src)>;
  }
  let Predicates = [HasSSE2] in {
    def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
              (MOVZPQILo2PQIrm addr:$src)>;
    def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
              (MOVZPQILo2PQIrr VR128:$src)>;
  }
}
// Instructions to match in the assembler
def VMOVQs64rr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
                      "movq\t{$src, $dst|$dst, $src}", [],
                      IIC_SSE_MOVDQ>, VEX, VEX_W;
def VMOVQd64rr : VPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
                      "movq\t{$src, $dst|$dst, $src}", [],
                      IIC_SSE_MOVDQ>, VEX, VEX_W;
// Recognize "movd" with GR64 destination, but encode as a "movq"
def VMOVQd64rr_alt : VPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
                          "movd\t{$src, $dst|$dst, $src}", [],
                          IIC_SSE_MOVDQ>, VEX, VEX_W;

// Instructions for the disassembler
// xr = XMM register
// xm = mem64

let Predicates = [HasAVX] in
def VMOVQxrxr: I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                 "vmovq\t{$src, $dst|$dst, $src}", []>, VEX, XS;
def MOVQxrxr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                 "movq\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVQ_RR>, XS;
//===---------------------------------------------------------------------===//
// SSE3 - Replicate Single FP - MOVSHDUP and MOVSLDUP
//===---------------------------------------------------------------------===//
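
// movshdup duplicates the odd-index (high) single-precision elements of the
// source into each pair; movsldup duplicates the even-index (low) elements.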
multiclass sse3_replicate_sfp<bits<8> op, SDNode OpNode, string OpcodeStr,
                              ValueType vt, RegisterClass RC, PatFrag mem_frag,
                              X86MemOperand x86memop> {
def rr : S3SI<op, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
      !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
      [(set RC:$dst, (vt (OpNode RC:$src)))],
      IIC_SSE_MOV_LH>;
def rm : S3SI<op, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
      !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
      [(set RC:$dst, (OpNode (mem_frag addr:$src)))],
      IIC_SSE_MOV_LH>;
}

let Predicates = [HasAVX] in {
  defm VMOVSHDUP  : sse3_replicate_sfp<0x16, X86Movshdup, "vmovshdup",
                                       v4f32, VR128, memopv4f32, f128mem>, VEX;
  defm VMOVSLDUP  : sse3_replicate_sfp<0x12, X86Movsldup, "vmovsldup",
                                       v4f32, VR128, memopv4f32, f128mem>, VEX;
  defm VMOVSHDUPY : sse3_replicate_sfp<0x16, X86Movshdup, "vmovshdup",
                                       v8f32, VR256, memopv8f32, f256mem>, VEX;
  defm VMOVSLDUPY : sse3_replicate_sfp<0x12, X86Movsldup, "vmovsldup",
                                       v8f32, VR256, memopv8f32, f256mem>, VEX;
}
defm MOVSHDUP : sse3_replicate_sfp<0x16, X86Movshdup, "movshdup", v4f32, VR128,
                                   memopv4f32, f128mem>;
defm MOVSLDUP : sse3_replicate_sfp<0x12, X86Movsldup, "movsldup", v4f32, VR128,
                                   memopv4f32, f128mem>;
let Predicates = [HasAVX] in {
  def : Pat<(v4i32 (X86Movshdup VR128:$src)),
            (VMOVSHDUPrr VR128:$src)>;
  def : Pat<(v4i32 (X86Movshdup (bc_v4i32 (memopv2i64 addr:$src)))),
            (VMOVSHDUPrm addr:$src)>;
  def : Pat<(v4i32 (X86Movsldup VR128:$src)),
            (VMOVSLDUPrr VR128:$src)>;
  def : Pat<(v4i32 (X86Movsldup (bc_v4i32 (memopv2i64 addr:$src)))),
            (VMOVSLDUPrm addr:$src)>;
  def : Pat<(v8i32 (X86Movshdup VR256:$src)),
            (VMOVSHDUPYrr VR256:$src)>;
  def : Pat<(v8i32 (X86Movshdup (bc_v8i32 (memopv4i64 addr:$src)))),
            (VMOVSHDUPYrm addr:$src)>;
  def : Pat<(v8i32 (X86Movsldup VR256:$src)),
            (VMOVSLDUPYrr VR256:$src)>;
  def : Pat<(v8i32 (X86Movsldup (bc_v8i32 (memopv4i64 addr:$src)))),
            (VMOVSLDUPYrm addr:$src)>;
}

let Predicates = [HasSSE3] in {
  def : Pat<(v4i32 (X86Movshdup VR128:$src)),
            (MOVSHDUPrr VR128:$src)>;
  def : Pat<(v4i32 (X86Movshdup (bc_v4i32 (memopv2i64 addr:$src)))),
            (MOVSHDUPrm addr:$src)>;
  def : Pat<(v4i32 (X86Movsldup VR128:$src)),
            (MOVSLDUPrr VR128:$src)>;
  def : Pat<(v4i32 (X86Movsldup (bc_v4i32 (memopv2i64 addr:$src)))),
            (MOVSLDUPrm addr:$src)>;
}
//===---------------------------------------------------------------------===//
// SSE3 - Replicate Double FP - MOVDDUP
//===---------------------------------------------------------------------===//
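
// movddup broadcasts the low double-precision element of the source; the
// 256-bit form does so independently within each 128-bit lane.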
multiclass sse3_replicate_dfp<string OpcodeStr> {
let neverHasSideEffects = 1 in
def rr  : S3DI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
               !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
               [], IIC_SSE_MOV_LH>;
def rm  : S3DI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
               !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
               [(set VR128:$dst,
                 (v2f64 (X86Movddup
                         (scalar_to_vector (loadf64 addr:$src)))))],
               IIC_SSE_MOV_LH>;
}

// FIXME: Merge with the class above when there are patterns for the ymm
// version.
multiclass sse3_replicate_dfp_y<string OpcodeStr> {
def rr  : S3DI<0x12, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
               !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
               [(set VR256:$dst, (v4f64 (X86Movddup VR256:$src)))]>;
def rm  : S3DI<0x12, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
               !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
               [(set VR256:$dst,
                 (v4f64 (X86Movddup
                         (scalar_to_vector (loadf64 addr:$src)))))]>;
}
let Predicates = [HasAVX] in {
  defm VMOVDDUP  : sse3_replicate_dfp<"vmovddup">, VEX;
  defm VMOVDDUPY : sse3_replicate_dfp_y<"vmovddup">, VEX;
}

defm MOVDDUP : sse3_replicate_dfp<"movddup">;

let Predicates = [HasAVX] in {
  def : Pat<(X86Movddup (memopv2f64 addr:$src)),
            (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
  def : Pat<(X86Movddup (bc_v2f64 (memopv4f32 addr:$src))),
            (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
  def : Pat<(X86Movddup (bc_v2f64 (memopv2i64 addr:$src))),
            (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
  def : Pat<(X86Movddup (bc_v2f64
                             (v2i64 (scalar_to_vector (loadi64 addr:$src))))),
            (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;

  // 256-bit version
  def : Pat<(X86Movddup (memopv4f64 addr:$src)),
            (VMOVDDUPYrm addr:$src)>;
  def : Pat<(X86Movddup (memopv4i64 addr:$src)),
            (VMOVDDUPYrm addr:$src)>;
  def : Pat<(X86Movddup (v4i64 (scalar_to_vector (loadi64 addr:$src)))),
            (VMOVDDUPYrm addr:$src)>;
  def : Pat<(X86Movddup (v4i64 VR256:$src)),
            (VMOVDDUPYrr VR256:$src)>;
}

let Predicates = [HasSSE3] in {
  def : Pat<(X86Movddup (memopv2f64 addr:$src)),
            (MOVDDUPrm addr:$src)>;
  def : Pat<(X86Movddup (bc_v2f64 (memopv4f32 addr:$src))),
            (MOVDDUPrm addr:$src)>;
  def : Pat<(X86Movddup (bc_v2f64 (memopv2i64 addr:$src))),
            (MOVDDUPrm addr:$src)>;
  def : Pat<(X86Movddup (bc_v2f64
                             (v2i64 (scalar_to_vector (loadi64 addr:$src))))),
            (MOVDDUPrm addr:$src)>;
}
//===---------------------------------------------------------------------===//
// SSE3 - Move Unaligned Integer
//===---------------------------------------------------------------------===//
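
// lddqu is an unaligned 128-bit integer load that can be cheaper than
// movdqu when the access crosses a cache-line boundary on some
// implementations; it is only exposed through the intrinsic here.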
let Predicates = [HasAVX] in {
  def VLDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                     "vlddqu\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>, VEX;
  def VLDDQUYrm : S3DI<0xF0, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
                     "vlddqu\t{$src, $dst|$dst, $src}",
                     [(set VR256:$dst, (int_x86_avx_ldu_dq_256 addr:$src))]>, VEX;
}
def LDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                   "lddqu\t{$src, $dst|$dst, $src}",
                   [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))],
                   IIC_SSE_LDDQU>;
//===---------------------------------------------------------------------===//
// SSE3 - Arithmetic
//===---------------------------------------------------------------------===//
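
// addsubps/addsubpd subtract in the even-index elements and add in the
// odd-index ones, e.g. addsubps computes { a0-b0, a1+b1, a2-b2, a3+b3 }.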
multiclass sse3_addsub<Intrinsic Int, string OpcodeStr, RegisterClass RC,
                       X86MemOperand x86memop, OpndItins itins,
                       bit Is2Addr = 1> {
  def rr : I<0xD0, MRMSrcReg,
       (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (Int RC:$src1, RC:$src2))], itins.rr>;
  def rm : I<0xD0, MRMSrcMem,
       (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (Int RC:$src1, (memop addr:$src2)))], itins.rm>;
}
let Predicates = [HasAVX] in {
  let ExeDomain = SSEPackedSingle in {
    defm VADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "vaddsubps", VR128,
                                 f128mem, SSE_ALU_F32P, 0>, TB, XD, VEX_4V;
    defm VADDSUBPSY : sse3_addsub<int_x86_avx_addsub_ps_256, "vaddsubps", VR256,
                                 f256mem, SSE_ALU_F32P, 0>, TB, XD, VEX_4V;
  }
  let ExeDomain = SSEPackedDouble in {
    defm VADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "vaddsubpd", VR128,
                                 f128mem, SSE_ALU_F64P, 0>, TB, OpSize, VEX_4V;
    defm VADDSUBPDY : sse3_addsub<int_x86_avx_addsub_pd_256, "vaddsubpd", VR256,
                                 f256mem, SSE_ALU_F64P, 0>, TB, OpSize, VEX_4V;
  }
}
let Constraints = "$src1 = $dst", Predicates = [HasSSE3] in {
  let ExeDomain = SSEPackedSingle in
  defm ADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "addsubps", VR128,
                              f128mem, SSE_ALU_F32P>, TB, XD;
  let ExeDomain = SSEPackedDouble in
  defm ADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "addsubpd", VR128,
                              f128mem, SSE_ALU_F64P>, TB, OpSize;
}
//===---------------------------------------------------------------------===//
// SSE3 Instructions
//===---------------------------------------------------------------------===//

// Horizontal ops
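// The horizontal ops add (hadd*) or subtract (hsub*) adjacent pairs of
// elements within each source; the low half of the result comes from $src1
// and the high half from $src2.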
multiclass S3D_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
                   X86MemOperand x86memop, SDNode OpNode, bit Is2Addr = 1> {
  def rr : S3DI<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
         !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
         !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
      [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], IIC_SSE_HADDSUB_RR>;

  def rm : S3DI<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
         !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
         !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
      [(set RC:$dst, (vt (OpNode RC:$src1, (memop addr:$src2))))],
      IIC_SSE_HADDSUB_RM>;
}
multiclass S3_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
                  X86MemOperand x86memop, SDNode OpNode, bit Is2Addr = 1> {
  def rr : S3I<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
         !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
         !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
      [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], IIC_SSE_HADDSUB_RR>;

  def rm : S3I<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
         !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
         !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
      [(set RC:$dst, (vt (OpNode RC:$src1, (memop addr:$src2))))],
      IIC_SSE_HADDSUB_RM>;
}
let Predicates = [HasAVX] in {
  let ExeDomain = SSEPackedSingle in {
    defm VHADDPS  : S3D_Int<0x7C, "vhaddps", v4f32, VR128, f128mem,
                            X86fhadd, 0>, VEX_4V;
    defm VHSUBPS  : S3D_Int<0x7D, "vhsubps", v4f32, VR128, f128mem,
                            X86fhsub, 0>, VEX_4V;
    defm VHADDPSY : S3D_Int<0x7C, "vhaddps", v8f32, VR256, f256mem,
                            X86fhadd, 0>, VEX_4V;
    defm VHSUBPSY : S3D_Int<0x7D, "vhsubps", v8f32, VR256, f256mem,
                            X86fhsub, 0>, VEX_4V;
  }
  let ExeDomain = SSEPackedDouble in {
    defm VHADDPD  : S3_Int <0x7C, "vhaddpd", v2f64, VR128, f128mem,
                            X86fhadd, 0>, VEX_4V;
    defm VHSUBPD  : S3_Int <0x7D, "vhsubpd", v2f64, VR128, f128mem,
                            X86fhsub, 0>, VEX_4V;
    defm VHADDPDY : S3_Int <0x7C, "vhaddpd", v4f64, VR256, f256mem,
                            X86fhadd, 0>, VEX_4V;
    defm VHSUBPDY : S3_Int <0x7D, "vhsubpd", v4f64, VR256, f256mem,
                            X86fhsub, 0>, VEX_4V;
  }
}

let Constraints = "$src1 = $dst" in {
  let ExeDomain = SSEPackedSingle in {
    defm HADDPS : S3D_Int<0x7C, "haddps", v4f32, VR128, f128mem, X86fhadd>;
    defm HSUBPS : S3D_Int<0x7D, "hsubps", v4f32, VR128, f128mem, X86fhsub>;
  }
  let ExeDomain = SSEPackedDouble in {
    defm HADDPD : S3_Int<0x7C, "haddpd", v2f64, VR128, f128mem, X86fhadd>;
    defm HSUBPD : S3_Int<0x7D, "hsubpd", v2f64, VR128, f128mem, X86fhsub>;
  }
}
//===---------------------------------------------------------------------===//
// SSSE3 - Packed Absolute Instructions
//===---------------------------------------------------------------------===//
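
// pabs{b,w,d} replace each element with its absolute value. There is no
// saturation: the minimum signed value maps to itself.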
/// SS3I_unop_rm_int - Simple SSSE3 unary op whose type can be v*{i8,i16,i32}.
multiclass SS3I_unop_rm_int<bits<8> opc, string OpcodeStr,
                            Intrinsic IntId128> {
  def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
                    (ins VR128:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (IntId128 VR128:$src))], IIC_SSE_PABS_RR>,
                    OpSize;

  def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
                    (ins i128mem:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst,
                      (IntId128
                       (bitconvert (memopv2i64 addr:$src))))], IIC_SSE_PABS_RM>,
                    OpSize;
}

/// SS3I_unop_rm_int_y - Simple SSSE3 unary op whose type can be v*{i8,i16,i32}.
multiclass SS3I_unop_rm_int_y<bits<8> opc, string OpcodeStr,
                              Intrinsic IntId256> {
  def rr256 : SS38I<opc, MRMSrcReg, (outs VR256:$dst),
                    (ins VR256:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR256:$dst, (IntId256 VR256:$src))]>,
                    OpSize;

  def rm256 : SS38I<opc, MRMSrcMem, (outs VR256:$dst),
                    (ins i256mem:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR256:$dst,
                      (IntId256
                       (bitconvert (memopv4i64 addr:$src))))]>, OpSize;
}
let Predicates = [HasAVX] in {
  defm VPABSB  : SS3I_unop_rm_int<0x1C, "vpabsb",
                                  int_x86_ssse3_pabs_b_128>, VEX;
  defm VPABSW  : SS3I_unop_rm_int<0x1D, "vpabsw",
                                  int_x86_ssse3_pabs_w_128>, VEX;
  defm VPABSD  : SS3I_unop_rm_int<0x1E, "vpabsd",
                                  int_x86_ssse3_pabs_d_128>, VEX;
}

let Predicates = [HasAVX2] in {
  defm VPABSB  : SS3I_unop_rm_int_y<0x1C, "vpabsb",
                                    int_x86_avx2_pabs_b>, VEX;
  defm VPABSW  : SS3I_unop_rm_int_y<0x1D, "vpabsw",
                                    int_x86_avx2_pabs_w>, VEX;
  defm VPABSD  : SS3I_unop_rm_int_y<0x1E, "vpabsd",
                                    int_x86_avx2_pabs_d>, VEX;
}

defm PABSB : SS3I_unop_rm_int<0x1C, "pabsb",
                              int_x86_ssse3_pabs_b_128>;
defm PABSW : SS3I_unop_rm_int<0x1D, "pabsw",
                              int_x86_ssse3_pabs_w_128>;
defm PABSD : SS3I_unop_rm_int<0x1E, "pabsd",
                              int_x86_ssse3_pabs_d_128>;
//===---------------------------------------------------------------------===//
// SSSE3 - Packed Binary Operator Instructions
//===---------------------------------------------------------------------===//
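
// Besides the horizontal add/sub forms, this group covers pshufb (a
// byte-granular shuffle whose indices come from $src2), psign* (negate,
// zero, or pass each element of $src1 according to the sign of the matching
// element of $src2), pmaddubsw (unsigned-by-signed byte multiplies summed
// pairwise with signed saturation), and pmulhrsw (a fixed-point multiply
// returning the rounded high 16 bits of each product).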
def SSE_PHADDSUBD : OpndItins<
  IIC_SSE_PHADDSUBD_RR, IIC_SSE_PHADDSUBD_RM
>;
def SSE_PHADDSUBSW : OpndItins<
  IIC_SSE_PHADDSUBSW_RR, IIC_SSE_PHADDSUBSW_RM
>;
def SSE_PHADDSUBW : OpndItins<
  IIC_SSE_PHADDSUBW_RR, IIC_SSE_PHADDSUBW_RM
>;
def SSE_PSHUFB : OpndItins<
  IIC_SSE_PSHUFB_RR, IIC_SSE_PSHUFB_RM
>;
def SSE_PSIGN : OpndItins<
  IIC_SSE_PSIGN_RR, IIC_SSE_PSIGN_RM
>;
def SSE_PMULHRSW : OpndItins<
  IIC_SSE_PMULHRSW, IIC_SSE_PMULHRSW
>;
/// SS3I_binop_rm - Simple SSSE3 bin op
multiclass SS3I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
                         ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
                         X86MemOperand x86memop, OpndItins itins,
                         bit Is2Addr = 1> {
  let isCommutable = 1 in
  def rr : SS38I<opc, MRMSrcReg, (outs RC:$dst),
       (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
         !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
         !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2)))], itins.rr>,
       OpSize;
  def rm : SS38I<opc, MRMSrcMem, (outs RC:$dst),
       (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
         !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
         !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst,
         (OpVT (OpNode RC:$src1,
                (bitconvert (memop_frag addr:$src2)))))], itins.rm>, OpSize;
}

/// SS3I_binop_rm_int - Simple SSSE3 bin op whose type can be v*{i8,i16,i32}.
multiclass SS3I_binop_rm_int<bits<8> opc, string OpcodeStr,
                             Intrinsic IntId128, OpndItins itins,
                             bit Is2Addr = 1> {
  let isCommutable = 1 in
  def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
       (ins VR128:$src1, VR128:$src2),
       !if(Is2Addr,
         !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
         !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
       OpSize;
  def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
       (ins VR128:$src1, i128mem:$src2),
       !if(Is2Addr,
         !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
         !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst,
         (IntId128 VR128:$src1,
          (bitconvert (memopv2i64 addr:$src2))))]>, OpSize;
}

multiclass SS3I_binop_rm_int_y<bits<8> opc, string OpcodeStr,
                               Intrinsic IntId256> {
  let isCommutable = 1 in
  def rr256 : SS38I<opc, MRMSrcReg, (outs VR256:$dst),
       (ins VR256:$src1, VR256:$src2),
       !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
       [(set VR256:$dst, (IntId256 VR256:$src1, VR256:$src2))]>,
       OpSize;
  def rm256 : SS38I<opc, MRMSrcMem, (outs VR256:$dst),
       (ins VR256:$src1, i256mem:$src2),
       !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
       [(set VR256:$dst,
         (IntId256 VR256:$src1,
          (bitconvert (memopv4i64 addr:$src2))))]>, OpSize;
}
let ImmT = NoImm, Predicates = [HasAVX] in {
let isCommutable = 0 in {
  defm VPHADDW    : SS3I_binop_rm<0x01, "vphaddw", X86hadd, v8i16, VR128,
                                  memopv2i64, i128mem,
                                  SSE_PHADDSUBW, 0>, VEX_4V;
  defm VPHADDD    : SS3I_binop_rm<0x02, "vphaddd", X86hadd, v4i32, VR128,
                                  memopv2i64, i128mem,
                                  SSE_PHADDSUBD, 0>, VEX_4V;
  defm VPHSUBW    : SS3I_binop_rm<0x05, "vphsubw", X86hsub, v8i16, VR128,
                                  memopv2i64, i128mem,
                                  SSE_PHADDSUBW, 0>, VEX_4V;
  defm VPHSUBD    : SS3I_binop_rm<0x06, "vphsubd", X86hsub, v4i32, VR128,
                                  memopv2i64, i128mem,
                                  SSE_PHADDSUBD, 0>, VEX_4V;
  defm VPSIGNB    : SS3I_binop_rm<0x08, "vpsignb", X86psign, v16i8, VR128,
                                  memopv2i64, i128mem,
                                  SSE_PSIGN, 0>, VEX_4V;
  defm VPSIGNW    : SS3I_binop_rm<0x09, "vpsignw", X86psign, v8i16, VR128,
                                  memopv2i64, i128mem,
                                  SSE_PSIGN, 0>, VEX_4V;
  defm VPSIGND    : SS3I_binop_rm<0x0A, "vpsignd", X86psign, v4i32, VR128,
                                  memopv2i64, i128mem,
                                  SSE_PSIGN, 0>, VEX_4V;
  defm VPSHUFB    : SS3I_binop_rm<0x00, "vpshufb", X86pshufb, v16i8, VR128,
                                  memopv2i64, i128mem,
                                  SSE_PSHUFB, 0>, VEX_4V;
  defm VPHADDSW   : SS3I_binop_rm_int<0x03, "vphaddsw",
                                      int_x86_ssse3_phadd_sw_128,
                                      SSE_PHADDSUBSW, 0>, VEX_4V;
  defm VPHSUBSW   : SS3I_binop_rm_int<0x07, "vphsubsw",
                                      int_x86_ssse3_phsub_sw_128,
                                      SSE_PHADDSUBSW, 0>, VEX_4V;
  defm VPMADDUBSW : SS3I_binop_rm_int<0x04, "vpmaddubsw",
                                      int_x86_ssse3_pmadd_ub_sw_128,
                                      SSE_PMADD, 0>, VEX_4V;
}
defm VPMULHRSW    : SS3I_binop_rm_int<0x0B, "vpmulhrsw",
                                      int_x86_ssse3_pmul_hr_sw_128,
                                      SSE_PMULHRSW, 0>, VEX_4V;
}
let ImmT = NoImm, Predicates = [HasAVX2] in {
let isCommutable = 0 in {
  defm VPHADDWY   : SS3I_binop_rm<0x01, "vphaddw", X86hadd, v16i16, VR256,
                                  memopv4i64, i256mem,
                                  SSE_PHADDSUBW, 0>, VEX_4V;
  defm VPHADDDY   : SS3I_binop_rm<0x02, "vphaddd", X86hadd, v8i32, VR256,
                                  memopv4i64, i256mem,
                                  SSE_PHADDSUBW, 0>, VEX_4V;
  defm VPHSUBWY   : SS3I_binop_rm<0x05, "vphsubw", X86hsub, v16i16, VR256,
                                  memopv4i64, i256mem,
                                  SSE_PHADDSUBW, 0>, VEX_4V;
  defm VPHSUBDY   : SS3I_binop_rm<0x06, "vphsubd", X86hsub, v8i32, VR256,
                                  memopv4i64, i256mem,
                                  SSE_PHADDSUBW, 0>, VEX_4V;
  defm VPSIGNBY   : SS3I_binop_rm<0x08, "vpsignb", X86psign, v32i8, VR256,
                                  memopv4i64, i256mem,
                                  SSE_PHADDSUBW, 0>, VEX_4V;
  defm VPSIGNWY   : SS3I_binop_rm<0x09, "vpsignw", X86psign, v16i16, VR256,
                                  memopv4i64, i256mem,
                                  SSE_PHADDSUBW, 0>, VEX_4V;
  defm VPSIGNDY   : SS3I_binop_rm<0x0A, "vpsignd", X86psign, v8i32, VR256,
                                  memopv4i64, i256mem,
                                  SSE_PHADDSUBW, 0>, VEX_4V;
  defm VPSHUFBY   : SS3I_binop_rm<0x00, "vpshufb", X86pshufb, v32i8, VR256,
                                  memopv4i64, i256mem,
                                  SSE_PHADDSUBW, 0>, VEX_4V;
  defm VPHADDSW   : SS3I_binop_rm_int_y<0x03, "vphaddsw",
                                        int_x86_avx2_phadd_sw>, VEX_4V;
  defm VPHSUBSW   : SS3I_binop_rm_int_y<0x07, "vphsubsw",
                                        int_x86_avx2_phsub_sw>, VEX_4V;
  defm VPMADDUBSW : SS3I_binop_rm_int_y<0x04, "vpmaddubsw",
                                        int_x86_avx2_pmadd_ub_sw>, VEX_4V;
}
defm VPMULHRSW    : SS3I_binop_rm_int_y<0x0B, "vpmulhrsw",
                                        int_x86_avx2_pmul_hr_sw>, VEX_4V;
}
// None of these have i8 immediate fields.
let ImmT = NoImm, Constraints = "$src1 = $dst" in {
let isCommutable = 0 in {
  defm PHADDW    : SS3I_binop_rm<0x01, "phaddw", X86hadd, v8i16, VR128,
                                 memopv2i64, i128mem, SSE_PHADDSUBW>;
  defm PHADDD    : SS3I_binop_rm<0x02, "phaddd", X86hadd, v4i32, VR128,
                                 memopv2i64, i128mem, SSE_PHADDSUBD>;
  defm PHSUBW    : SS3I_binop_rm<0x05, "phsubw", X86hsub, v8i16, VR128,
                                 memopv2i64, i128mem, SSE_PHADDSUBW>;
  defm PHSUBD    : SS3I_binop_rm<0x06, "phsubd", X86hsub, v4i32, VR128,
                                 memopv2i64, i128mem, SSE_PHADDSUBD>;
  defm PSIGNB    : SS3I_binop_rm<0x08, "psignb", X86psign, v16i8, VR128,
                                 memopv2i64, i128mem, SSE_PSIGN>;
  defm PSIGNW    : SS3I_binop_rm<0x09, "psignw", X86psign, v8i16, VR128,
                                 memopv2i64, i128mem, SSE_PSIGN>;
  defm PSIGND    : SS3I_binop_rm<0x0A, "psignd", X86psign, v4i32, VR128,
                                 memopv2i64, i128mem, SSE_PSIGN>;
  defm PSHUFB    : SS3I_binop_rm<0x00, "pshufb", X86pshufb, v16i8, VR128,
                                 memopv2i64, i128mem, SSE_PSHUFB>;
  defm PHADDSW   : SS3I_binop_rm_int<0x03, "phaddsw",
                                     int_x86_ssse3_phadd_sw_128,
                                     SSE_PHADDSUBSW>;
  defm PHSUBSW   : SS3I_binop_rm_int<0x07, "phsubsw",
                                     int_x86_ssse3_phsub_sw_128,
                                     SSE_PHADDSUBSW>;
  defm PMADDUBSW : SS3I_binop_rm_int<0x04, "pmaddubsw",
                                     int_x86_ssse3_pmadd_ub_sw_128, SSE_PMADD>;
}
defm PMULHRSW    : SS3I_binop_rm_int<0x0B, "pmulhrsw",
                                     int_x86_ssse3_pmul_hr_sw_128,
                                     SSE_PMULHRSW>;
}
//===---------------------------------------------------------------------===//
// SSSE3 - Packed Align Instruction Patterns
//===---------------------------------------------------------------------===//
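
// palignr concatenates the two sources (first operand in the high half),
// shifts the pair right by the immediate number of bytes, and keeps the low
// half. This is why the patterns below pass $src2 before $src1.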
multiclass ssse3_palign<string asm, bit Is2Addr = 1> {
  let neverHasSideEffects = 1 in {
  def R128rr : SS3AI<0x0F, MRMSrcReg, (outs VR128:$dst),
      (ins VR128:$src1, VR128:$src2, i8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                  "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [], IIC_SSE_PALIGNR>, OpSize;
  let mayLoad = 1 in
  def R128rm : SS3AI<0x0F, MRMSrcMem, (outs VR128:$dst),
      (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                  "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [], IIC_SSE_PALIGNR>, OpSize;
  }
}

multiclass ssse3_palign_y<string asm, bit Is2Addr = 1> {
  let neverHasSideEffects = 1 in {
  def R256rr : SS3AI<0x0F, MRMSrcReg, (outs VR256:$dst),
      (ins VR256:$src1, VR256:$src2, i8imm:$src3),
      !strconcat(asm,
                 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
      []>, OpSize;
  let mayLoad = 1 in
  def R256rm : SS3AI<0x0F, MRMSrcMem, (outs VR256:$dst),
      (ins VR256:$src1, i256mem:$src2, i8imm:$src3),
      !strconcat(asm,
                 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
      []>, OpSize;
  }
}
let Predicates = [HasAVX] in
  defm VPALIGN : ssse3_palign<"vpalignr", 0>, VEX_4V;
let Predicates = [HasAVX2] in
  defm VPALIGN : ssse3_palign_y<"vpalignr", 0>, VEX_4V;
let Constraints = "$src1 = $dst", Predicates = [HasSSSE3] in
  defm PALIGN : ssse3_palign<"palignr">;
let Predicates = [HasAVX2] in {
def : Pat<(v8i32 (X86PAlign VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPALIGNR256rr VR256:$src2, VR256:$src1, imm:$imm)>;
def : Pat<(v8f32 (X86PAlign VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPALIGNR256rr VR256:$src2, VR256:$src1, imm:$imm)>;
def : Pat<(v16i16 (X86PAlign VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPALIGNR256rr VR256:$src2, VR256:$src1, imm:$imm)>;
def : Pat<(v32i8 (X86PAlign VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPALIGNR256rr VR256:$src2, VR256:$src1, imm:$imm)>;
}

let Predicates = [HasAVX] in {
def : Pat<(v4i32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (VPALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
def : Pat<(v4f32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (VPALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
def : Pat<(v8i16 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (VPALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
def : Pat<(v16i8 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (VPALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
}

let Predicates = [HasSSSE3] in {
def : Pat<(v4i32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
def : Pat<(v4f32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
def : Pat<(v8i16 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
def : Pat<(v16i8 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
}
//===---------------------------------------------------------------------===//
// SSE3 - Thread synchronization
//===---------------------------------------------------------------------===//
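
// monitor arms address monitoring for the location in EAX/RAX (ECX and EDX
// carry extensions and hints); mwait then waits, using the hints in ECX/EAX,
// until a store hits the monitored line. Both read their operands from
// fixed registers, hence the Uses lists below.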
let usesCustomInserter = 1 in {
def MONITOR : PseudoI<(outs), (ins i32mem:$src1, GR32:$src2, GR32:$src3),
                [(int_x86_sse3_monitor addr:$src1, GR32:$src2, GR32:$src3)]>,
                Requires<[HasSSE3]>;
}

let Uses = [EAX, ECX, EDX] in
def MONITORrrr : I<0x01, MRM_C8, (outs), (ins), "monitor", [], IIC_SSE_MONITOR>,
                   TB, Requires<[HasSSE3]>;
let Uses = [ECX, EAX] in
def MWAITrr   : I<0x01, MRM_C9, (outs), (ins), "mwait",
                  [(int_x86_sse3_mwait ECX, EAX)], IIC_SSE_MWAIT>,
                  TB, Requires<[HasSSE3]>;
def : InstAlias<"mwait %eax, %ecx", (MWAITrr)>, Requires<[In32BitMode]>;
def : InstAlias<"mwait %rax, %rcx", (MWAITrr)>, Requires<[In64BitMode]>;

def : InstAlias<"monitor %eax, %ecx, %edx", (MONITORrrr)>,
      Requires<[In32BitMode]>;
def : InstAlias<"monitor %rax, %rcx, %rdx", (MONITORrrr)>,
      Requires<[In64BitMode]>;
//===----------------------------------------------------------------------===//
// SSE4.1 - Packed Move with Sign/Zero Extend
//===----------------------------------------------------------------------===//
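
// pmovsx*/pmovzx* sign- or zero-extend the low elements of the source to
// the next wider element type. The memory forms here load just 64 bits,
// since only the low half of an XMM register is consumed.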
multiclass SS41I_binop_rm_int8<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
  def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;

  def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
       [(set VR128:$dst,
         (IntId (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))]>,
       OpSize;
}

multiclass SS41I_binop_rm_int16_y<bits<8> opc, string OpcodeStr,
                                  Intrinsic IntId> {
  def Yrr : SS48I<opc, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
                  !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                  [(set VR256:$dst, (IntId VR128:$src))]>, OpSize;

  def Yrm : SS48I<opc, MRMSrcMem, (outs VR256:$dst), (ins i128mem:$src),
                  !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                  [(set VR256:$dst, (IntId (load addr:$src)))]>, OpSize;
}
5511 let Predicates = [HasAVX] in {
5512 defm VPMOVSXBW : SS41I_binop_rm_int8<0x20, "vpmovsxbw", int_x86_sse41_pmovsxbw>,
5514 defm VPMOVSXWD : SS41I_binop_rm_int8<0x23, "vpmovsxwd", int_x86_sse41_pmovsxwd>,
5516 defm VPMOVSXDQ : SS41I_binop_rm_int8<0x25, "vpmovsxdq", int_x86_sse41_pmovsxdq>,
5518 defm VPMOVZXBW : SS41I_binop_rm_int8<0x30, "vpmovzxbw", int_x86_sse41_pmovzxbw>,
5520 defm VPMOVZXWD : SS41I_binop_rm_int8<0x33, "vpmovzxwd", int_x86_sse41_pmovzxwd>,
5522 defm VPMOVZXDQ : SS41I_binop_rm_int8<0x35, "vpmovzxdq", int_x86_sse41_pmovzxdq>,
let Predicates = [HasAVX2] in {
defm VPMOVSXBW : SS41I_binop_rm_int16_y<0x20, "vpmovsxbw",
                                        int_x86_avx2_pmovsxbw>, VEX;
defm VPMOVSXWD : SS41I_binop_rm_int16_y<0x23, "vpmovsxwd",
                                        int_x86_avx2_pmovsxwd>, VEX;
defm VPMOVSXDQ : SS41I_binop_rm_int16_y<0x25, "vpmovsxdq",
                                        int_x86_avx2_pmovsxdq>, VEX;
defm VPMOVZXBW : SS41I_binop_rm_int16_y<0x30, "vpmovzxbw",
                                        int_x86_avx2_pmovzxbw>, VEX;
defm VPMOVZXWD : SS41I_binop_rm_int16_y<0x33, "vpmovzxwd",
                                        int_x86_avx2_pmovzxwd>, VEX;
defm VPMOVZXDQ : SS41I_binop_rm_int16_y<0x35, "vpmovzxdq",
                                        int_x86_avx2_pmovzxdq>, VEX;
}
defm PMOVSXBW   : SS41I_binop_rm_int8<0x20, "pmovsxbw", int_x86_sse41_pmovsxbw>;
defm PMOVSXWD   : SS41I_binop_rm_int8<0x23, "pmovsxwd", int_x86_sse41_pmovsxwd>;
defm PMOVSXDQ   : SS41I_binop_rm_int8<0x25, "pmovsxdq", int_x86_sse41_pmovsxdq>;
defm PMOVZXBW   : SS41I_binop_rm_int8<0x30, "pmovzxbw", int_x86_sse41_pmovzxbw>;
defm PMOVZXWD   : SS41I_binop_rm_int8<0x33, "pmovzxwd", int_x86_sse41_pmovzxwd>;
defm PMOVZXDQ   : SS41I_binop_rm_int8<0x35, "pmovzxdq", int_x86_sse41_pmovzxdq>;
let Predicates = [HasAVX] in {
  // Common patterns involving scalar load.
  def : Pat<(int_x86_sse41_pmovsxbw (vzmovl_v2i64 addr:$src)),
            (VPMOVSXBWrm addr:$src)>;
  def : Pat<(int_x86_sse41_pmovsxbw (vzload_v2i64 addr:$src)),
            (VPMOVSXBWrm addr:$src)>;

  def : Pat<(int_x86_sse41_pmovsxwd (vzmovl_v2i64 addr:$src)),
            (VPMOVSXWDrm addr:$src)>;
  def : Pat<(int_x86_sse41_pmovsxwd (vzload_v2i64 addr:$src)),
            (VPMOVSXWDrm addr:$src)>;

  def : Pat<(int_x86_sse41_pmovsxdq (vzmovl_v2i64 addr:$src)),
            (VPMOVSXDQrm addr:$src)>;
  def : Pat<(int_x86_sse41_pmovsxdq (vzload_v2i64 addr:$src)),
            (VPMOVSXDQrm addr:$src)>;

  def : Pat<(int_x86_sse41_pmovzxbw (vzmovl_v2i64 addr:$src)),
            (VPMOVZXBWrm addr:$src)>;
  def : Pat<(int_x86_sse41_pmovzxbw (vzload_v2i64 addr:$src)),
            (VPMOVZXBWrm addr:$src)>;

  def : Pat<(int_x86_sse41_pmovzxwd (vzmovl_v2i64 addr:$src)),
            (VPMOVZXWDrm addr:$src)>;
  def : Pat<(int_x86_sse41_pmovzxwd (vzload_v2i64 addr:$src)),
            (VPMOVZXWDrm addr:$src)>;

  def : Pat<(int_x86_sse41_pmovzxdq (vzmovl_v2i64 addr:$src)),
            (VPMOVZXDQrm addr:$src)>;
  def : Pat<(int_x86_sse41_pmovzxdq (vzload_v2i64 addr:$src)),
            (VPMOVZXDQrm addr:$src)>;
}
let Predicates = [HasSSE41] in {
  // Common patterns involving scalar load.
  def : Pat<(int_x86_sse41_pmovsxbw (vzmovl_v2i64 addr:$src)),
            (PMOVSXBWrm addr:$src)>;
  def : Pat<(int_x86_sse41_pmovsxbw (vzload_v2i64 addr:$src)),
            (PMOVSXBWrm addr:$src)>;

  def : Pat<(int_x86_sse41_pmovsxwd (vzmovl_v2i64 addr:$src)),
            (PMOVSXWDrm addr:$src)>;
  def : Pat<(int_x86_sse41_pmovsxwd (vzload_v2i64 addr:$src)),
            (PMOVSXWDrm addr:$src)>;

  def : Pat<(int_x86_sse41_pmovsxdq (vzmovl_v2i64 addr:$src)),
            (PMOVSXDQrm addr:$src)>;
  def : Pat<(int_x86_sse41_pmovsxdq (vzload_v2i64 addr:$src)),
            (PMOVSXDQrm addr:$src)>;

  def : Pat<(int_x86_sse41_pmovzxbw (vzmovl_v2i64 addr:$src)),
            (PMOVZXBWrm addr:$src)>;
  def : Pat<(int_x86_sse41_pmovzxbw (vzload_v2i64 addr:$src)),
            (PMOVZXBWrm addr:$src)>;

  def : Pat<(int_x86_sse41_pmovzxwd (vzmovl_v2i64 addr:$src)),
            (PMOVZXWDrm addr:$src)>;
  def : Pat<(int_x86_sse41_pmovzxwd (vzload_v2i64 addr:$src)),
            (PMOVZXWDrm addr:$src)>;

  def : Pat<(int_x86_sse41_pmovzxdq (vzmovl_v2i64 addr:$src)),
            (PMOVZXDQrm addr:$src)>;
  def : Pat<(int_x86_sse41_pmovzxdq (vzload_v2i64 addr:$src)),
            (PMOVZXDQrm addr:$src)>;
}
let Predicates = [HasAVX2] in {
  let AddedComplexity = 15 in {
    def : Pat<(v4i64 (X86vzmovly (v4i32 VR128:$src))),
              (VPMOVZXDQYrr VR128:$src)>;
    def : Pat<(v8i32 (X86vzmovly (v8i16 VR128:$src))),
              (VPMOVZXWDYrr VR128:$src)>;
  }

  def : Pat<(v4i64 (X86vsmovl (v4i32 VR128:$src))), (VPMOVSXDQYrr VR128:$src)>;
  def : Pat<(v8i32 (X86vsmovl (v8i16 VR128:$src))), (VPMOVSXWDYrr VR128:$src)>;
}
let Predicates = [HasAVX] in {
  def : Pat<(v2i64 (X86vsmovl (v4i32 VR128:$src))), (VPMOVSXDQrr VR128:$src)>;
  def : Pat<(v4i32 (X86vsmovl (v8i16 VR128:$src))), (VPMOVSXWDrr VR128:$src)>;
}
let Predicates = [HasSSE41] in {
  def : Pat<(v2i64 (X86vsmovl (v4i32 VR128:$src))), (PMOVSXDQrr VR128:$src)>;
  def : Pat<(v4i32 (X86vsmovl (v8i16 VR128:$src))), (PMOVSXWDrr VR128:$src)>;
}
multiclass SS41I_binop_rm_int4<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
  def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;

  def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                 [(set VR128:$dst,
                   (IntId (bitconvert (v4i32 (scalar_to_vector (loadi32 addr:$src))))))]>,
                 OpSize;
}
multiclass SS41I_binop_rm_int8_y<bits<8> opc, string OpcodeStr,
                                 Intrinsic IntId> {
  def Yrr : SS48I<opc, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
                  !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                  [(set VR256:$dst, (IntId VR128:$src))]>, OpSize;

  // The pattern performs a 64-bit load, so use a 64-bit memory operand.
  def Yrm : SS48I<opc, MRMSrcMem, (outs VR256:$dst), (ins i64mem:$src),
                  !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                  [(set VR256:$dst,
                    (IntId (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))]>,
                  OpSize;
}
let Predicates = [HasAVX] in {
defm VPMOVSXBD : SS41I_binop_rm_int4<0x21, "vpmovsxbd", int_x86_sse41_pmovsxbd>,
                                     VEX;
defm VPMOVSXWQ : SS41I_binop_rm_int4<0x24, "vpmovsxwq", int_x86_sse41_pmovsxwq>,
                                     VEX;
defm VPMOVZXBD : SS41I_binop_rm_int4<0x31, "vpmovzxbd", int_x86_sse41_pmovzxbd>,
                                     VEX;
defm VPMOVZXWQ : SS41I_binop_rm_int4<0x34, "vpmovzxwq", int_x86_sse41_pmovzxwq>,
                                     VEX;
}
let Predicates = [HasAVX2] in {
defm VPMOVSXBD : SS41I_binop_rm_int8_y<0x21, "vpmovsxbd",
                                       int_x86_avx2_pmovsxbd>, VEX;
defm VPMOVSXWQ : SS41I_binop_rm_int8_y<0x24, "vpmovsxwq",
                                       int_x86_avx2_pmovsxwq>, VEX;
defm VPMOVZXBD : SS41I_binop_rm_int8_y<0x31, "vpmovzxbd",
                                       int_x86_avx2_pmovzxbd>, VEX;
defm VPMOVZXWQ : SS41I_binop_rm_int8_y<0x34, "vpmovzxwq",
                                       int_x86_avx2_pmovzxwq>, VEX;
}

defm PMOVSXBD   : SS41I_binop_rm_int4<0x21, "pmovsxbd", int_x86_sse41_pmovsxbd>;
defm PMOVSXWQ   : SS41I_binop_rm_int4<0x24, "pmovsxwq", int_x86_sse41_pmovsxwq>;
defm PMOVZXBD   : SS41I_binop_rm_int4<0x31, "pmovzxbd", int_x86_sse41_pmovzxbd>;
defm PMOVZXWQ   : SS41I_binop_rm_int4<0x34, "pmovzxwq", int_x86_sse41_pmovzxwq>;
let Predicates = [HasAVX] in {
  // Common patterns involving scalar load
  def : Pat<(int_x86_sse41_pmovsxbd (vzmovl_v4i32 addr:$src)),
            (VPMOVSXBDrm addr:$src)>;
  def : Pat<(int_x86_sse41_pmovsxwq (vzmovl_v4i32 addr:$src)),
            (VPMOVSXWQrm addr:$src)>;

  def : Pat<(int_x86_sse41_pmovzxbd (vzmovl_v4i32 addr:$src)),
            (VPMOVZXBDrm addr:$src)>;
  def : Pat<(int_x86_sse41_pmovzxwq (vzmovl_v4i32 addr:$src)),
            (VPMOVZXWQrm addr:$src)>;
}
let Predicates = [HasSSE41] in {
  // Common patterns involving scalar load
  def : Pat<(int_x86_sse41_pmovsxbd (vzmovl_v4i32 addr:$src)),
            (PMOVSXBDrm addr:$src)>;
  def : Pat<(int_x86_sse41_pmovsxwq (vzmovl_v4i32 addr:$src)),
            (PMOVSXWQrm addr:$src)>;

  def : Pat<(int_x86_sse41_pmovzxbd (vzmovl_v4i32 addr:$src)),
            (PMOVZXBDrm addr:$src)>;
  def : Pat<(int_x86_sse41_pmovzxwq (vzmovl_v4i32 addr:$src)),
            (PMOVZXWQrm addr:$src)>;
}
multiclass SS41I_binop_rm_int2<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
  def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;

  // Expecting an i16 load any-extended to an i32 value.
  def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i16mem:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                 [(set VR128:$dst, (IntId (bitconvert
                     (v4i32 (scalar_to_vector (loadi16_anyext addr:$src))))))]>,
                 OpSize;
}
multiclass SS41I_binop_rm_int4_y<bits<8> opc, string OpcodeStr,
                                 Intrinsic IntId> {
  def Yrr : SS48I<opc, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
                  !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                  [(set VR256:$dst, (IntId VR128:$src))]>, OpSize;

  // Expecting an i32 load; the pattern performs a 32-bit load, so use a
  // 32-bit memory operand.
  def Yrm : SS48I<opc, MRMSrcMem, (outs VR256:$dst), (ins i32mem:$src),
                  !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                  [(set VR256:$dst, (IntId (bitconvert
                      (v4i32 (scalar_to_vector (loadi32 addr:$src))))))]>,
                  OpSize;
}
let Predicates = [HasAVX] in {
defm VPMOVSXBQ : SS41I_binop_rm_int2<0x22, "vpmovsxbq", int_x86_sse41_pmovsxbq>,
                                     VEX;
defm VPMOVZXBQ : SS41I_binop_rm_int2<0x32, "vpmovzxbq", int_x86_sse41_pmovzxbq>,
                                     VEX;
}
let Predicates = [HasAVX2] in {
defm VPMOVSXBQ : SS41I_binop_rm_int4_y<0x22, "vpmovsxbq",
                                       int_x86_avx2_pmovsxbq>, VEX;
defm VPMOVZXBQ : SS41I_binop_rm_int4_y<0x32, "vpmovzxbq",
                                       int_x86_avx2_pmovzxbq>, VEX;
}

defm PMOVSXBQ   : SS41I_binop_rm_int2<0x22, "pmovsxbq", int_x86_sse41_pmovsxbq>;
defm PMOVZXBQ   : SS41I_binop_rm_int2<0x32, "pmovzxbq", int_x86_sse41_pmovzxbq>;
let Predicates = [HasAVX] in {
  // Common patterns involving scalar load
  def : Pat<(int_x86_sse41_pmovsxbq
              (bitconvert (v4i32 (X86vzmovl
                           (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
            (VPMOVSXBQrm addr:$src)>;

  def : Pat<(int_x86_sse41_pmovzxbq
              (bitconvert (v4i32 (X86vzmovl
                           (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
            (VPMOVZXBQrm addr:$src)>;
}
let Predicates = [HasSSE41] in {
  // Common patterns involving scalar load
  def : Pat<(int_x86_sse41_pmovsxbq
              (bitconvert (v4i32 (X86vzmovl
                           (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
            (PMOVSXBQrm addr:$src)>;

  def : Pat<(int_x86_sse41_pmovzxbq
              (bitconvert (v4i32 (X86vzmovl
                           (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
            (PMOVZXBQrm addr:$src)>;
}
//===----------------------------------------------------------------------===//
// SSE4.1 - Extract Instructions
//===----------------------------------------------------------------------===//
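// For example, "pextrd $1, %xmm0, %eax" (AT&T syntax) copies the second of
// the four 32-bit elements of %xmm0 into %eax; the mr forms below store the
// selected element directly to memory instead.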
/// SS41I_extract8 - SSE 4.1 extract 8 bits to 32 bit reg or 8 bit mem
multiclass SS41I_extract8<bits<8> opc, string OpcodeStr> {
  def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
                 (ins VR128:$src1, i32i8imm:$src2),
                 !strconcat(OpcodeStr,
                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 [(set GR32:$dst, (X86pextrb (v16i8 VR128:$src1), imm:$src2))]>,
                 OpSize;
  let neverHasSideEffects = 1, mayStore = 1 in
  def mr : SS4AIi8<opc, MRMDestMem, (outs),
                 (ins i8mem:$dst, VR128:$src1, i32i8imm:$src2),
                 !strconcat(OpcodeStr,
                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 []>, OpSize;
// There's an AssertZext in the way of writing the store pattern
// (store (i8 (trunc (X86pextrb (v16i8 VR128:$src1), imm:$src2))), addr:$dst)
}
let Predicates = [HasAVX] in {
  defm VPEXTRB : SS41I_extract8<0x14, "vpextrb">, VEX;
  def  VPEXTRBrr64 : SS4AIi8<0x14, MRMDestReg, (outs GR64:$dst),
         (ins VR128:$src1, i32i8imm:$src2),
         "vpextrb\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>, OpSize, VEX;
}

defm PEXTRB      : SS41I_extract8<0x14, "pextrb">;
/// SS41I_extract16 - SSE 4.1 extract 16 bits to memory destination
multiclass SS41I_extract16<bits<8> opc, string OpcodeStr> {
  let neverHasSideEffects = 1, mayStore = 1 in
  def mr : SS4AIi8<opc, MRMDestMem, (outs),
                 (ins i16mem:$dst, VR128:$src1, i32i8imm:$src2),
                 !strconcat(OpcodeStr,
                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 []>, OpSize;
// There's an AssertZext in the way of writing the store pattern
// (store (i16 (trunc (X86pextrw (v8i16 VR128:$src1), imm:$src2))), addr:$dst)
}

let Predicates = [HasAVX] in
  defm VPEXTRW : SS41I_extract16<0x15, "vpextrw">, VEX;

defm PEXTRW      : SS41I_extract16<0x15, "pextrw">;
/// SS41I_extract32 - SSE 4.1 extract 32 bits to int reg or memory destination
multiclass SS41I_extract32<bits<8> opc, string OpcodeStr> {
  def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
                 (ins VR128:$src1, i32i8imm:$src2),
                 !strconcat(OpcodeStr,
                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 [(set GR32:$dst,
                  (extractelt (v4i32 VR128:$src1), imm:$src2))]>, OpSize;
  def mr : SS4AIi8<opc, MRMDestMem, (outs),
                 (ins i32mem:$dst, VR128:$src1, i32i8imm:$src2),
                 !strconcat(OpcodeStr,
                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 [(store (extractelt (v4i32 VR128:$src1), imm:$src2),
                          addr:$dst)]>, OpSize;
}

let Predicates = [HasAVX] in
  defm VPEXTRD : SS41I_extract32<0x16, "vpextrd">, VEX;

defm PEXTRD      : SS41I_extract32<0x16, "pextrd">;
/// SS41I_extract64 - SSE 4.1 extract 64 bits to int reg or memory destination
multiclass SS41I_extract64<bits<8> opc, string OpcodeStr> {
  def rr : SS4AIi8<opc, MRMDestReg, (outs GR64:$dst),
                 (ins VR128:$src1, i32i8imm:$src2),
                 !strconcat(OpcodeStr,
                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 [(set GR64:$dst,
                  (extractelt (v2i64 VR128:$src1), imm:$src2))]>, OpSize, REX_W;
  def mr : SS4AIi8<opc, MRMDestMem, (outs),
                 (ins i64mem:$dst, VR128:$src1, i32i8imm:$src2),
                 !strconcat(OpcodeStr,
                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 [(store (extractelt (v2i64 VR128:$src1), imm:$src2),
                          addr:$dst)]>, OpSize, REX_W;
}

let Predicates = [HasAVX] in
  defm VPEXTRQ : SS41I_extract64<0x16, "vpextrq">, VEX, VEX_W;

defm PEXTRQ      : SS41I_extract64<0x16, "pextrq">;
/// SS41I_extractf32 - SSE 4.1 extract 32 bits fp value to int reg or memory
/// destination
multiclass SS41I_extractf32<bits<8> opc, string OpcodeStr> {
  def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
                 (ins VR128:$src1, i32i8imm:$src2),
                 !strconcat(OpcodeStr,
                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 [(set GR32:$dst,
                    (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2))]>,
                 OpSize;
  def mr : SS4AIi8<opc, MRMDestMem, (outs),
                 (ins f32mem:$dst, VR128:$src1, i32i8imm:$src2),
                 !strconcat(OpcodeStr,
                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 [(store (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2),
                          addr:$dst)]>, OpSize;
}
let ExeDomain = SSEPackedSingle in {
  let Predicates = [HasAVX] in {
    defm VEXTRACTPS : SS41I_extractf32<0x17, "vextractps">, VEX;
    def VEXTRACTPSrr64 : SS4AIi8<0x17, MRMDestReg, (outs GR64:$dst),
                    (ins VR128:$src1, i32i8imm:$src2),
                    "vextractps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    []>, OpSize, VEX;
  }
  defm EXTRACTPS   : SS41I_extractf32<0x17, "extractps">;
}
// Also match an EXTRACTPS store when the store is done as f32 instead of i32.
def : Pat<(store (f32 (bitconvert (extractelt (bc_v4i32 (v4f32 VR128:$src1)),
                                              imm:$src2))),
                 addr:$dst),
          (VEXTRACTPSmr addr:$dst, VR128:$src1, imm:$src2)>,
          Requires<[HasAVX]>;
def : Pat<(store (f32 (bitconvert (extractelt (bc_v4i32 (v4f32 VR128:$src1)),
                                              imm:$src2))),
                 addr:$dst),
          (EXTRACTPSmr addr:$dst, VR128:$src1, imm:$src2)>,
          Requires<[HasSSE41]>;
//===----------------------------------------------------------------------===//
// SSE4.1 - Insert Instructions
//===----------------------------------------------------------------------===//
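// The legacy SSE forms are two-address (the Constraints below tie $src1 to
// $dst), while the VEX-encoded forms take a separate destination; Is2Addr
// selects the matching assembly string in each multiclass.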
multiclass SS41I_insert8<bits<8> opc, string asm, bit Is2Addr = 1> {
  def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
      (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                   "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst,
        (X86pinsrb VR128:$src1, GR32:$src2, imm:$src3))]>, OpSize;
  def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
      (ins VR128:$src1, i8mem:$src2, i32i8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                   "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst,
        (X86pinsrb VR128:$src1, (extloadi8 addr:$src2),
                   imm:$src3))]>, OpSize;
}

let Predicates = [HasAVX] in
  defm VPINSRB : SS41I_insert8<0x20, "vpinsrb", 0>, VEX_4V;
let Constraints = "$src1 = $dst" in
  defm PINSRB  : SS41I_insert8<0x20, "pinsrb">;
multiclass SS41I_insert32<bits<8> opc, string asm, bit Is2Addr = 1> {
  def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
      (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                   "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst,
        (v4i32 (insertelt VR128:$src1, GR32:$src2, imm:$src3)))]>,
      OpSize;
  def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
      (ins VR128:$src1, i32mem:$src2, i32i8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                   "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst,
        (v4i32 (insertelt VR128:$src1, (loadi32 addr:$src2),
                          imm:$src3)))]>, OpSize;
}

let Predicates = [HasAVX] in
  defm VPINSRD : SS41I_insert32<0x22, "vpinsrd", 0>, VEX_4V;
let Constraints = "$src1 = $dst" in
  defm PINSRD : SS41I_insert32<0x22, "pinsrd">;
multiclass SS41I_insert64<bits<8> opc, string asm, bit Is2Addr = 1> {
  def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
      (ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                   "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst,
        (v2i64 (insertelt VR128:$src1, GR64:$src2, imm:$src3)))]>,
      OpSize;
  def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
      (ins VR128:$src1, i64mem:$src2, i32i8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                   "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst,
        (v2i64 (insertelt VR128:$src1, (loadi64 addr:$src2),
                          imm:$src3)))]>, OpSize;
}

let Predicates = [HasAVX] in
  defm VPINSRQ : SS41I_insert64<0x22, "vpinsrq", 0>, VEX_4V, VEX_W;
let Constraints = "$src1 = $dst" in
  defm PINSRQ : SS41I_insert64<0x22, "pinsrq">, REX_W;
// insertps has a few different modes. The first two forms below are optimized
// inserts that never zero arbitrary elements in the destination vector; the
// form that matches the intrinsic may also zero arbitrary elements in the
// target vector.
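// In the insertps immediate, bits [7:6] pick the source element (register
// form only), bits [5:4] pick the destination element, and bits [3:0] form a
// mask of destination elements to be zeroed.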
multiclass SS41I_insertf32<bits<8> opc, string asm, bit Is2Addr = 1> {
  def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
      (ins VR128:$src1, VR128:$src2, u32u8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                   "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst,
        (X86insrtps VR128:$src1, VR128:$src2, imm:$src3))]>,
      OpSize;
  def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
      (ins VR128:$src1, f32mem:$src2, u32u8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                   "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst,
        (X86insrtps VR128:$src1,
                   (v4f32 (scalar_to_vector (loadf32 addr:$src2))),
                    imm:$src3))]>, OpSize;
}

let ExeDomain = SSEPackedSingle in {
  let Predicates = [HasAVX] in
    defm VINSERTPS : SS41I_insertf32<0x21, "vinsertps", 0>, VEX_4V;
  let Constraints = "$src1 = $dst" in
    defm INSERTPS : SS41I_insertf32<0x21, "insertps">;
}
//===----------------------------------------------------------------------===//
// SSE4.1 - Round Instructions
//===----------------------------------------------------------------------===//
multiclass sse41_fp_unop_rm<bits<8> opcps, bits<8> opcpd, string OpcodeStr,
                            X86MemOperand x86memop, RegisterClass RC,
                            PatFrag mem_frag32, PatFrag mem_frag64,
                            Intrinsic V4F32Int, Intrinsic V2F64Int> {
let ExeDomain = SSEPackedSingle in {
  // Vector intrinsic operation, reg
  def PSr : SS4AIi8<opcps, MRMSrcReg,
                    (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
                    !strconcat(OpcodeStr,
                    "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    [(set RC:$dst, (V4F32Int RC:$src1, imm:$src2))]>,
                    OpSize;

  // Vector intrinsic operation, mem
  def PSm : SS4AIi8<opcps, MRMSrcMem,
                    (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
                    !strconcat(OpcodeStr,
                    "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    [(set RC:$dst,
                          (V4F32Int (mem_frag32 addr:$src1),imm:$src2))]>,
                    OpSize;
} // ExeDomain = SSEPackedSingle

let ExeDomain = SSEPackedDouble in {
  // Vector intrinsic operation, reg
  def PDr : SS4AIi8<opcpd, MRMSrcReg,
                    (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
                    !strconcat(OpcodeStr,
                    "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    [(set RC:$dst, (V2F64Int RC:$src1, imm:$src2))]>,
                    OpSize;

  // Vector intrinsic operation, mem
  def PDm : SS4AIi8<opcpd, MRMSrcMem,
                    (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
                    !strconcat(OpcodeStr,
                    "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    [(set RC:$dst,
                          (V2F64Int (mem_frag64 addr:$src1),imm:$src2))]>,
                    OpSize;
} // ExeDomain = SSEPackedDouble
}
multiclass sse41_fp_binop_rm<bits<8> opcss, bits<8> opcsd,
                             string OpcodeStr,
                             Intrinsic F32Int,
                             Intrinsic F64Int, bit Is2Addr = 1> {
let ExeDomain = GenericDomain in {
  // Operation, reg.
  def SSr : SS4AIi8<opcss, MRMSrcReg,
      (outs FR32:$dst), (ins FR32:$src1, FR32:$src2, i32i8imm:$src3),
      !if(Is2Addr,
          !strconcat(OpcodeStr,
              "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
          !strconcat(OpcodeStr,
              "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      []>, OpSize;

  // Intrinsic operation, reg.
  def SSr_Int : SS4AIi8<opcss, MRMSrcReg,
      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
      !if(Is2Addr,
          !strconcat(OpcodeStr,
              "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
          !strconcat(OpcodeStr,
              "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst, (F32Int VR128:$src1, VR128:$src2, imm:$src3))]>,
      OpSize;

  // Intrinsic operation, mem.
  def SSm : SS4AIi8<opcss, MRMSrcMem,
      (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2, i32i8imm:$src3),
      !if(Is2Addr,
          !strconcat(OpcodeStr,
              "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
          !strconcat(OpcodeStr,
              "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst,
           (F32Int VR128:$src1, sse_load_f32:$src2, imm:$src3))]>,
      OpSize;

  // Operation, reg.
  def SDr : SS4AIi8<opcsd, MRMSrcReg,
      (outs FR64:$dst), (ins FR64:$src1, FR64:$src2, i32i8imm:$src3),
      !if(Is2Addr,
          !strconcat(OpcodeStr,
              "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
          !strconcat(OpcodeStr,
              "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      []>, OpSize;

  // Intrinsic operation, reg.
  def SDr_Int : SS4AIi8<opcsd, MRMSrcReg,
      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
      !if(Is2Addr,
          !strconcat(OpcodeStr,
              "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
          !strconcat(OpcodeStr,
              "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst, (F64Int VR128:$src1, VR128:$src2, imm:$src3))]>,
      OpSize;

  // Intrinsic operation, mem.
  def SDm : SS4AIi8<opcsd, MRMSrcMem,
      (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2, i32i8imm:$src3),
      !if(Is2Addr,
          !strconcat(OpcodeStr,
              "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
          !strconcat(OpcodeStr,
              "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst,
            (F64Int VR128:$src1, sse_load_f64:$src2, imm:$src3))]>,
      OpSize;
} // ExeDomain = GenericDomain
}
// FP round - roundss, roundps, roundsd, roundpd
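// The low two immediate bits select the rounding mode (00 = nearest,
// 01 = down, 10 = up, 11 = truncate), bit 2 selects MXCSR.RC instead, and
// bit 3 suppresses the precision exception; hence 0x1/0x2/0x3 in the
// floor/ceil/trunc patterns below, 0x4 for frint and 0xC for fnearbyint.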
let Predicates = [HasAVX] in {
  defm VROUND  : sse41_fp_unop_rm<0x08, 0x09, "vround", f128mem, VR128,
                                  memopv4f32, memopv2f64,
                                  int_x86_sse41_round_ps,
                                  int_x86_sse41_round_pd>, VEX;
  defm VROUNDY : sse41_fp_unop_rm<0x08, 0x09, "vround", f256mem, VR256,
                                  memopv8f32, memopv4f64,
                                  int_x86_avx_round_ps_256,
                                  int_x86_avx_round_pd_256>, VEX;
  defm VROUND  : sse41_fp_binop_rm<0x0A, 0x0B, "vround",
                                   int_x86_sse41_round_ss,
                                   int_x86_sse41_round_sd, 0>, VEX_4V, VEX_LIG;

  def : Pat<(ffloor FR32:$src),
            (VROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x1))>;
  def : Pat<(f64 (ffloor FR64:$src)),
            (VROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x1))>;
  def : Pat<(f32 (fnearbyint FR32:$src)),
            (VROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0xC))>;
  def : Pat<(f64 (fnearbyint FR64:$src)),
            (VROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0xC))>;
  def : Pat<(f32 (fceil FR32:$src)),
            (VROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x2))>;
  def : Pat<(f64 (fceil FR64:$src)),
            (VROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x2))>;
  def : Pat<(f32 (frint FR32:$src)),
            (VROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x4))>;
  def : Pat<(f64 (frint FR64:$src)),
            (VROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x4))>;
  def : Pat<(f32 (ftrunc FR32:$src)),
            (VROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x3))>;
  def : Pat<(f64 (ftrunc FR64:$src)),
            (VROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x3))>;
}
defm ROUND  : sse41_fp_unop_rm<0x08, 0x09, "round", f128mem, VR128,
                               memopv4f32, memopv2f64,
                               int_x86_sse41_round_ps, int_x86_sse41_round_pd>;
let Constraints = "$src1 = $dst" in
defm ROUND  : sse41_fp_binop_rm<0x0A, 0x0B, "round",
                                int_x86_sse41_round_ss, int_x86_sse41_round_sd>;

def : Pat<(ffloor FR32:$src),
          (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x1))>;
def : Pat<(f64 (ffloor FR64:$src)),
          (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x1))>;
def : Pat<(f32 (fnearbyint FR32:$src)),
          (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0xC))>;
def : Pat<(f64 (fnearbyint FR64:$src)),
          (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0xC))>;
def : Pat<(f32 (fceil FR32:$src)),
          (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x2))>;
def : Pat<(f64 (fceil FR64:$src)),
          (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x2))>;
def : Pat<(f32 (frint FR32:$src)),
          (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x4))>;
def : Pat<(f64 (frint FR64:$src)),
          (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x4))>;
def : Pat<(f32 (ftrunc FR32:$src)),
          (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x3))>;
def : Pat<(f64 (ftrunc FR64:$src)),
          (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x3))>;
//===----------------------------------------------------------------------===//
// SSE4.1 - Packed Bit Test
//===----------------------------------------------------------------------===//
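// ptest sets ZF when (src1 & src2) is all zeros and CF when (src1 & ~src2)
// is all zeros, so a mask can be tested with a conditional branch and no
// separate compare.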
// ptest - we lower to this in X86ISelLowering, primarily from the Intel
// intrinsic that corresponds to it.
let Defs = [EFLAGS], Predicates = [HasAVX] in {
def VPTESTrr  : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
                "vptest\t{$src2, $src1|$src1, $src2}",
                [(set EFLAGS, (X86ptest VR128:$src1, (v2i64 VR128:$src2)))]>,
                OpSize, VEX;
def VPTESTrm  : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
                "vptest\t{$src2, $src1|$src1, $src2}",
                [(set EFLAGS,(X86ptest VR128:$src1, (memopv2i64 addr:$src2)))]>,
                OpSize, VEX;

def VPTESTYrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR256:$src1, VR256:$src2),
                "vptest\t{$src2, $src1|$src1, $src2}",
                [(set EFLAGS, (X86ptest VR256:$src1, (v4i64 VR256:$src2)))]>,
                OpSize, VEX;
def VPTESTYrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR256:$src1, i256mem:$src2),
                "vptest\t{$src2, $src1|$src1, $src2}",
                [(set EFLAGS,(X86ptest VR256:$src1, (memopv4i64 addr:$src2)))]>,
                OpSize, VEX;
}

let Defs = [EFLAGS] in {
def PTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
              "ptest\t{$src2, $src1|$src1, $src2}",
              [(set EFLAGS, (X86ptest VR128:$src1, (v2i64 VR128:$src2)))]>,
              OpSize;
def PTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
              "ptest\t{$src2, $src1|$src1, $src2}",
              [(set EFLAGS, (X86ptest VR128:$src1, (memopv2i64 addr:$src2)))]>,
              OpSize;
}
// The bit test instructions below are AVX only
multiclass avx_bittest<bits<8> opc, string OpcodeStr, RegisterClass RC,
                       X86MemOperand x86memop, PatFrag mem_frag, ValueType vt> {
  def rr : SS48I<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
            !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
            [(set EFLAGS, (X86testp RC:$src1, (vt RC:$src2)))]>, OpSize, VEX;
  def rm : SS48I<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
            !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
            [(set EFLAGS, (X86testp RC:$src1, (mem_frag addr:$src2)))]>,
            OpSize, VEX;
}

let Defs = [EFLAGS], Predicates = [HasAVX] in {
let ExeDomain = SSEPackedSingle in {
defm VTESTPS  : avx_bittest<0x0E, "vtestps", VR128, f128mem, memopv4f32, v4f32>;
defm VTESTPSY : avx_bittest<0x0E, "vtestps", VR256, f256mem, memopv8f32, v8f32>;
}
let ExeDomain = SSEPackedDouble in {
defm VTESTPD  : avx_bittest<0x0F, "vtestpd", VR128, f128mem, memopv2f64, v2f64>;
defm VTESTPDY : avx_bittest<0x0F, "vtestpd", VR256, f256mem, memopv4f64, v4f64>;
}
}
//===----------------------------------------------------------------------===//
// SSE4.1 - Misc Instructions
//===----------------------------------------------------------------------===//
let Defs = [EFLAGS], Predicates = [HasPOPCNT] in {
  def POPCNT16rr : I<0xB8, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
                     "popcnt{w}\t{$src, $dst|$dst, $src}",
                     [(set GR16:$dst, (ctpop GR16:$src)), (implicit EFLAGS)]>,
                     OpSize, XS;
  def POPCNT16rm : I<0xB8, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
                     "popcnt{w}\t{$src, $dst|$dst, $src}",
                     [(set GR16:$dst, (ctpop (loadi16 addr:$src))),
                      (implicit EFLAGS)]>, OpSize, XS;

  def POPCNT32rr : I<0xB8, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
                     "popcnt{l}\t{$src, $dst|$dst, $src}",
                     [(set GR32:$dst, (ctpop GR32:$src)), (implicit EFLAGS)]>,
                     XS;
  def POPCNT32rm : I<0xB8, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
                     "popcnt{l}\t{$src, $dst|$dst, $src}",
                     [(set GR32:$dst, (ctpop (loadi32 addr:$src))),
                      (implicit EFLAGS)]>, XS;

  def POPCNT64rr : RI<0xB8, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
                      "popcnt{q}\t{$src, $dst|$dst, $src}",
                      [(set GR64:$dst, (ctpop GR64:$src)), (implicit EFLAGS)]>,
                      XS;
  def POPCNT64rm : RI<0xB8, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
                      "popcnt{q}\t{$src, $dst|$dst, $src}",
                      [(set GR64:$dst, (ctpop (loadi64 addr:$src))),
                       (implicit EFLAGS)]>, XS;
}
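// popcnt counts the set bits in its source; ZF is set when the source is
// zero, and OF, SF, AF, CF and PF are cleared.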
// SS41I_unop_rm_int_v16 - SSE 4.1 unary operator whose type is v8i16.
multiclass SS41I_unop_rm_int_v16<bits<8> opc, string OpcodeStr,
                                 Intrinsic IntId128> {
  def rr128 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
                    (ins VR128:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (IntId128 VR128:$src))]>, OpSize;
  def rm128 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
                    (ins i128mem:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst,
                      (IntId128
                       (bitconvert (memopv2i64 addr:$src))))]>, OpSize;
}

let Predicates = [HasAVX] in
defm VPHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "vphminposuw",
                                          int_x86_sse41_phminposuw>, VEX;
defm PHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "phminposuw",
                                         int_x86_sse41_phminposuw>;
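// phminposuw returns the minimum unsigned word of the source in bits [15:0]
// of the destination, its index in bits [18:16], and zeros elsewhere.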
/// SS41I_binop_rm_int - Simple SSE 4.1 binary operator
multiclass SS41I_binop_rm_int<bits<8> opc, string OpcodeStr,
                              Intrinsic IntId128, bit Is2Addr = 1> {
  let isCommutable = 1 in
  def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
       (ins VR128:$src1, VR128:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>, OpSize;
  def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
       (ins VR128:$src1, i128mem:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst,
         (IntId128 VR128:$src1,
          (bitconvert (memopv2i64 addr:$src2))))]>, OpSize;
}
/// SS41I_binop_rm_int_y - 256-bit form of the simple SSE 4.1 binary operator
multiclass SS41I_binop_rm_int_y<bits<8> opc, string OpcodeStr,
                                Intrinsic IntId256> {
  let isCommutable = 1 in
  def Yrr : SS48I<opc, MRMSrcReg, (outs VR256:$dst),
       (ins VR256:$src1, VR256:$src2),
       !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
       [(set VR256:$dst, (IntId256 VR256:$src1, VR256:$src2))]>, OpSize;
  def Yrm : SS48I<opc, MRMSrcMem, (outs VR256:$dst),
       (ins VR256:$src1, i256mem:$src2),
       !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
       [(set VR256:$dst,
         (IntId256 VR256:$src1,
          (bitconvert (memopv4i64 addr:$src2))))]>, OpSize;
}
let Predicates = [HasAVX] in {
  let isCommutable = 0 in
  defm VPACKUSDW : SS41I_binop_rm_int<0x2B, "vpackusdw", int_x86_sse41_packusdw,
                                                         0>, VEX_4V;
  defm VPMINSB   : SS41I_binop_rm_int<0x38, "vpminsb",   int_x86_sse41_pminsb,
                                                         0>, VEX_4V;
  defm VPMINSD   : SS41I_binop_rm_int<0x39, "vpminsd",   int_x86_sse41_pminsd,
                                                         0>, VEX_4V;
  defm VPMINUD   : SS41I_binop_rm_int<0x3B, "vpminud",   int_x86_sse41_pminud,
                                                         0>, VEX_4V;
  defm VPMINUW   : SS41I_binop_rm_int<0x3A, "vpminuw",   int_x86_sse41_pminuw,
                                                         0>, VEX_4V;
  defm VPMAXSB   : SS41I_binop_rm_int<0x3C, "vpmaxsb",   int_x86_sse41_pmaxsb,
                                                         0>, VEX_4V;
  defm VPMAXSD   : SS41I_binop_rm_int<0x3D, "vpmaxsd",   int_x86_sse41_pmaxsd,
                                                         0>, VEX_4V;
  defm VPMAXUD   : SS41I_binop_rm_int<0x3F, "vpmaxud",   int_x86_sse41_pmaxud,
                                                         0>, VEX_4V;
  defm VPMAXUW   : SS41I_binop_rm_int<0x3E, "vpmaxuw",   int_x86_sse41_pmaxuw,
                                                         0>, VEX_4V;
  defm VPMULDQ   : SS41I_binop_rm_int<0x28, "vpmuldq",   int_x86_sse41_pmuldq,
                                                         0>, VEX_4V;
}
let Predicates = [HasAVX2] in {
  let isCommutable = 0 in
  defm VPACKUSDW : SS41I_binop_rm_int_y<0x2B, "vpackusdw",
                                        int_x86_avx2_packusdw>, VEX_4V;
  defm VPMINSB   : SS41I_binop_rm_int_y<0x38, "vpminsb",
                                        int_x86_avx2_pmins_b>, VEX_4V;
  defm VPMINSD   : SS41I_binop_rm_int_y<0x39, "vpminsd",
                                        int_x86_avx2_pmins_d>, VEX_4V;
  defm VPMINUD   : SS41I_binop_rm_int_y<0x3B, "vpminud",
                                        int_x86_avx2_pminu_d>, VEX_4V;
  defm VPMINUW   : SS41I_binop_rm_int_y<0x3A, "vpminuw",
                                        int_x86_avx2_pminu_w>, VEX_4V;
  defm VPMAXSB   : SS41I_binop_rm_int_y<0x3C, "vpmaxsb",
                                        int_x86_avx2_pmaxs_b>, VEX_4V;
  defm VPMAXSD   : SS41I_binop_rm_int_y<0x3D, "vpmaxsd",
                                        int_x86_avx2_pmaxs_d>, VEX_4V;
  defm VPMAXUD   : SS41I_binop_rm_int_y<0x3F, "vpmaxud",
                                        int_x86_avx2_pmaxu_d>, VEX_4V;
  defm VPMAXUW   : SS41I_binop_rm_int_y<0x3E, "vpmaxuw",
                                        int_x86_avx2_pmaxu_w>, VEX_4V;
  defm VPMULDQ   : SS41I_binop_rm_int_y<0x28, "vpmuldq",
                                        int_x86_avx2_pmul_dq>, VEX_4V;
}
let Constraints = "$src1 = $dst" in {
  let isCommutable = 0 in
  defm PACKUSDW : SS41I_binop_rm_int<0x2B, "packusdw", int_x86_sse41_packusdw>;
  defm PMINSB   : SS41I_binop_rm_int<0x38, "pminsb",   int_x86_sse41_pminsb>;
  defm PMINSD   : SS41I_binop_rm_int<0x39, "pminsd",   int_x86_sse41_pminsd>;
  defm PMINUD   : SS41I_binop_rm_int<0x3B, "pminud",   int_x86_sse41_pminud>;
  defm PMINUW   : SS41I_binop_rm_int<0x3A, "pminuw",   int_x86_sse41_pminuw>;
  defm PMAXSB   : SS41I_binop_rm_int<0x3C, "pmaxsb",   int_x86_sse41_pmaxsb>;
  defm PMAXSD   : SS41I_binop_rm_int<0x3D, "pmaxsd",   int_x86_sse41_pmaxsd>;
  defm PMAXUD   : SS41I_binop_rm_int<0x3F, "pmaxud",   int_x86_sse41_pmaxud>;
  defm PMAXUW   : SS41I_binop_rm_int<0x3E, "pmaxuw",   int_x86_sse41_pmaxuw>;
  defm PMULDQ   : SS41I_binop_rm_int<0x28, "pmuldq",   int_x86_sse41_pmuldq>;
}
/// SS48I_binop_rm - Simple SSE41 binary operator.
multiclass SS48I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
                          ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
                          X86MemOperand x86memop, bit Is2Addr = 1> {
  let isCommutable = 1 in
  def rr : SS48I<opc, MRMSrcReg, (outs RC:$dst),
       (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2)))]>, OpSize;
  def rm : SS48I<opc, MRMSrcMem, (outs RC:$dst),
       (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst,
         (OpVT (OpNode RC:$src1,
          (bitconvert (memop_frag addr:$src2)))))]>, OpSize;
}
let Predicates = [HasAVX] in {
  defm VPMULLD  : SS48I_binop_rm<0x40, "vpmulld", mul, v4i32, VR128,
                                 memopv2i64, i128mem, 0>, VEX_4V;
  defm VPCMPEQQ : SS48I_binop_rm<0x29, "vpcmpeqq", X86pcmpeq, v2i64, VR128,
                                 memopv2i64, i128mem, 0>, VEX_4V;
}
let Predicates = [HasAVX2] in {
  defm VPMULLDY  : SS48I_binop_rm<0x40, "vpmulld", mul, v8i32, VR256,
                                  memopv4i64, i256mem, 0>, VEX_4V;
  defm VPCMPEQQY : SS48I_binop_rm<0x29, "vpcmpeqq", X86pcmpeq, v4i64, VR256,
                                  memopv4i64, i256mem, 0>, VEX_4V;
}

let Constraints = "$src1 = $dst" in {
  defm PMULLD  : SS48I_binop_rm<0x40, "pmulld", mul, v4i32, VR128,
                                memopv2i64, i128mem>;
  defm PCMPEQQ : SS48I_binop_rm<0x29, "pcmpeqq", X86pcmpeq, v2i64, VR128,
                                memopv2i64, i128mem>;
}
/// SS41I_binop_rmi_int - SSE 4.1 binary operator with 8-bit immediate
multiclass SS41I_binop_rmi_int<bits<8> opc, string OpcodeStr,
                 Intrinsic IntId, RegisterClass RC, PatFrag memop_frag,
                 X86MemOperand x86memop, bit Is2Addr = 1> {
  let isCommutable = 1 in
  def rri : SS4AIi8<opc, MRMSrcReg, (outs RC:$dst),
        (ins RC:$src1, RC:$src2, u32u8imm:$src3),
        !if(Is2Addr,
            !strconcat(OpcodeStr,
                "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
            !strconcat(OpcodeStr,
                "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
        [(set RC:$dst, (IntId RC:$src1, RC:$src2, imm:$src3))]>,
        OpSize;
  def rmi : SS4AIi8<opc, MRMSrcMem, (outs RC:$dst),
        (ins RC:$src1, x86memop:$src2, u32u8imm:$src3),
        !if(Is2Addr,
            !strconcat(OpcodeStr,
                "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
            !strconcat(OpcodeStr,
                "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
        [(set RC:$dst,
          (IntId RC:$src1,
           (bitconvert (memop_frag addr:$src2)), imm:$src3))]>,
        OpSize;
}
let Predicates = [HasAVX] in {
  let isCommutable = 0 in {
  let ExeDomain = SSEPackedSingle in {
  defm VBLENDPS : SS41I_binop_rmi_int<0x0C, "vblendps", int_x86_sse41_blendps,
                                      VR128, memopv4f32, f128mem, 0>, VEX_4V;
  defm VBLENDPSY : SS41I_binop_rmi_int<0x0C, "vblendps",
            int_x86_avx_blend_ps_256, VR256, memopv8f32, f256mem, 0>, VEX_4V;
  }
  let ExeDomain = SSEPackedDouble in {
  defm VBLENDPD : SS41I_binop_rmi_int<0x0D, "vblendpd", int_x86_sse41_blendpd,
                                      VR128, memopv2f64, f128mem, 0>, VEX_4V;
  defm VBLENDPDY : SS41I_binop_rmi_int<0x0D, "vblendpd",
            int_x86_avx_blend_pd_256, VR256, memopv4f64, f256mem, 0>, VEX_4V;
  }
  defm VPBLENDW : SS41I_binop_rmi_int<0x0E, "vpblendw", int_x86_sse41_pblendw,
                                      VR128, memopv2i64, i128mem, 0>, VEX_4V;
  defm VMPSADBW : SS41I_binop_rmi_int<0x42, "vmpsadbw", int_x86_sse41_mpsadbw,
                                      VR128, memopv2i64, i128mem, 0>, VEX_4V;
  }
  let ExeDomain = SSEPackedSingle in
  defm VDPPS : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_sse41_dpps,
                                   VR128, memopv4f32, f128mem, 0>, VEX_4V;
  let ExeDomain = SSEPackedDouble in
  defm VDPPD : SS41I_binop_rmi_int<0x41, "vdppd", int_x86_sse41_dppd,
                                   VR128, memopv2f64, f128mem, 0>, VEX_4V;
  let ExeDomain = SSEPackedSingle in
  defm VDPPSY : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_avx_dp_ps_256,
                                    VR256, memopv8f32, i256mem, 0>, VEX_4V;
}
let Predicates = [HasAVX2] in {
  let isCommutable = 0 in {
  defm VPBLENDWY : SS41I_binop_rmi_int<0x0E, "vpblendw", int_x86_avx2_pblendw,
                                       VR256, memopv4i64, i256mem, 0>, VEX_4V;
  defm VMPSADBWY : SS41I_binop_rmi_int<0x42, "vmpsadbw", int_x86_avx2_mpsadbw,
                                       VR256, memopv4i64, i256mem, 0>, VEX_4V;
  }
}
let Constraints = "$src1 = $dst" in {
  let isCommutable = 0 in {
  let ExeDomain = SSEPackedSingle in
  defm BLENDPS : SS41I_binop_rmi_int<0x0C, "blendps", int_x86_sse41_blendps,
                                     VR128, memopv4f32, f128mem>;
  let ExeDomain = SSEPackedDouble in
  defm BLENDPD : SS41I_binop_rmi_int<0x0D, "blendpd", int_x86_sse41_blendpd,
                                     VR128, memopv2f64, f128mem>;
  defm PBLENDW : SS41I_binop_rmi_int<0x0E, "pblendw", int_x86_sse41_pblendw,
                                     VR128, memopv2i64, i128mem>;
  defm MPSADBW : SS41I_binop_rmi_int<0x42, "mpsadbw", int_x86_sse41_mpsadbw,
                                     VR128, memopv2i64, i128mem>;
  }
  let ExeDomain = SSEPackedSingle in
  defm DPPS : SS41I_binop_rmi_int<0x40, "dpps", int_x86_sse41_dpps,
                                  VR128, memopv4f32, f128mem>;
  let ExeDomain = SSEPackedDouble in
  defm DPPD : SS41I_binop_rmi_int<0x41, "dppd", int_x86_sse41_dppd,
                                  VR128, memopv2f64, f128mem>;
}
/// SS41I_quaternary_int_avx - AVX SSE 4.1 with 4 operands
multiclass SS41I_quaternary_int_avx<bits<8> opc, string OpcodeStr,
                                    RegisterClass RC, X86MemOperand x86memop,
                                    PatFrag mem_frag, Intrinsic IntId> {
  def rr : Ii8<opc, MRMSrcReg, (outs RC:$dst),
                  (ins RC:$src1, RC:$src2, RC:$src3),
                  !strconcat(OpcodeStr,
                    "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
                  [(set RC:$dst, (IntId RC:$src1, RC:$src2, RC:$src3))],
                  IIC_DEFAULT, SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM;

  def rm : Ii8<opc, MRMSrcMem, (outs RC:$dst),
                  (ins RC:$src1, x86memop:$src2, RC:$src3),
                  !strconcat(OpcodeStr,
                    "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
                  [(set RC:$dst,
                    (IntId RC:$src1, (bitconvert (mem_frag addr:$src2)),
                     RC:$src3))],
                  IIC_DEFAULT, SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM;
}
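// Variable blends choose each element according to the most significant bit
// of the corresponding mask element, which is why the vselect patterns below
// can feed an arbitrary vector in as the condition.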
let Predicates = [HasAVX] in {
let ExeDomain = SSEPackedDouble in {
defm VBLENDVPD  : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR128, f128mem,
                                           memopv2f64, int_x86_sse41_blendvpd>;
defm VBLENDVPDY : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR256, f256mem,
                                         memopv4f64, int_x86_avx_blendv_pd_256>;
} // ExeDomain = SSEPackedDouble
let ExeDomain = SSEPackedSingle in {
defm VBLENDVPS  : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR128, f128mem,
                                           memopv4f32, int_x86_sse41_blendvps>;
defm VBLENDVPSY : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR256, f256mem,
                                         memopv8f32, int_x86_avx_blendv_ps_256>;
} // ExeDomain = SSEPackedSingle
defm VPBLENDVB  : SS41I_quaternary_int_avx<0x4C, "vpblendvb", VR128, i128mem,
                                           memopv2i64, int_x86_sse41_pblendvb>;
}

let Predicates = [HasAVX2] in {
defm VPBLENDVBY : SS41I_quaternary_int_avx<0x4C, "vpblendvb", VR256, i256mem,
                                           memopv4i64, int_x86_avx2_pblendvb>;
}
let Predicates = [HasAVX] in {
  def : Pat<(v16i8 (vselect (v16i8 VR128:$mask), (v16i8 VR128:$src1),
                            (v16i8 VR128:$src2))),
            (VPBLENDVBrr VR128:$src2, VR128:$src1, VR128:$mask)>;
  def : Pat<(v4i32 (vselect (v4i32 VR128:$mask), (v4i32 VR128:$src1),
                            (v4i32 VR128:$src2))),
            (VBLENDVPSrr VR128:$src2, VR128:$src1, VR128:$mask)>;
  def : Pat<(v4f32 (vselect (v4i32 VR128:$mask), (v4f32 VR128:$src1),
                            (v4f32 VR128:$src2))),
            (VBLENDVPSrr VR128:$src2, VR128:$src1, VR128:$mask)>;
  def : Pat<(v2i64 (vselect (v2i64 VR128:$mask), (v2i64 VR128:$src1),
                            (v2i64 VR128:$src2))),
            (VBLENDVPDrr VR128:$src2, VR128:$src1, VR128:$mask)>;
  def : Pat<(v2f64 (vselect (v2i64 VR128:$mask), (v2f64 VR128:$src1),
                            (v2f64 VR128:$src2))),
            (VBLENDVPDrr VR128:$src2, VR128:$src1, VR128:$mask)>;
  def : Pat<(v8i32 (vselect (v8i32 VR256:$mask), (v8i32 VR256:$src1),
                            (v8i32 VR256:$src2))),
            (VBLENDVPSYrr VR256:$src2, VR256:$src1, VR256:$mask)>;
  def : Pat<(v8f32 (vselect (v8i32 VR256:$mask), (v8f32 VR256:$src1),
                            (v8f32 VR256:$src2))),
            (VBLENDVPSYrr VR256:$src2, VR256:$src1, VR256:$mask)>;
  def : Pat<(v4i64 (vselect (v4i64 VR256:$mask), (v4i64 VR256:$src1),
                            (v4i64 VR256:$src2))),
            (VBLENDVPDYrr VR256:$src2, VR256:$src1, VR256:$mask)>;
  def : Pat<(v4f64 (vselect (v4i64 VR256:$mask), (v4f64 VR256:$src1),
                            (v4f64 VR256:$src2))),
            (VBLENDVPDYrr VR256:$src2, VR256:$src1, VR256:$mask)>;

  def : Pat<(v8f32 (X86Blendps (v8f32 VR256:$src1), (v8f32 VR256:$src2),
                               (imm:$mask))),
            (VBLENDPSYrri VR256:$src2, VR256:$src1, imm:$mask)>;
  def : Pat<(v4f64 (X86Blendpd (v4f64 VR256:$src1), (v4f64 VR256:$src2),
                               (imm:$mask))),
            (VBLENDPDYrri VR256:$src2, VR256:$src1, imm:$mask)>;

  def : Pat<(v8i16 (X86Blendpw (v8i16 VR128:$src1), (v8i16 VR128:$src2),
                               (imm:$mask))),
            (VPBLENDWrri VR128:$src2, VR128:$src1, imm:$mask)>;
  def : Pat<(v4f32 (X86Blendps (v4f32 VR128:$src1), (v4f32 VR128:$src2),
                               (imm:$mask))),
            (VBLENDPSrri VR128:$src2, VR128:$src1, imm:$mask)>;
  def : Pat<(v2f64 (X86Blendpd (v2f64 VR128:$src1), (v2f64 VR128:$src2),
                               (imm:$mask))),
            (VBLENDPDrri VR128:$src2, VR128:$src1, imm:$mask)>;
}
let Predicates = [HasAVX2] in {
  def : Pat<(v32i8 (vselect (v32i8 VR256:$mask), (v32i8 VR256:$src1),
                            (v32i8 VR256:$src2))),
            (VPBLENDVBYrr VR256:$src2, VR256:$src1, VR256:$mask)>;
  def : Pat<(v16i16 (X86Blendpw (v16i16 VR256:$src1), (v16i16 VR256:$src2),
                                (imm:$mask))),
            (VPBLENDWYrri VR256:$src2, VR256:$src1, imm:$mask)>;
}
/// SS41I_ternary_int - SSE 4.1 ternary operator
let Uses = [XMM0], Constraints = "$src1 = $dst" in {
  multiclass SS41I_ternary_int<bits<8> opc, string OpcodeStr, PatFrag mem_frag,
                               X86MemOperand x86memop, Intrinsic IntId> {
    def rr0 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
                    (ins VR128:$src1, VR128:$src2),
                    !strconcat(OpcodeStr,
                     "\t{$src2, $dst|$dst, $src2}"),
                    [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2, XMM0))]>,
                    OpSize;

    def rm0 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
                    (ins VR128:$src1, x86memop:$src2),
                    !strconcat(OpcodeStr,
                     "\t{$src2, $dst|$dst, $src2}"),
                    [(set VR128:$dst,
                      (IntId VR128:$src1,
                       (bitconvert (mem_frag addr:$src2)), XMM0))]>, OpSize;
  }
}

let ExeDomain = SSEPackedDouble in
defm BLENDVPD : SS41I_ternary_int<0x15, "blendvpd", memopv2f64, f128mem,
                                  int_x86_sse41_blendvpd>;
let ExeDomain = SSEPackedSingle in
defm BLENDVPS : SS41I_ternary_int<0x14, "blendvps", memopv4f32, f128mem,
                                  int_x86_sse41_blendvps>;
defm PBLENDVB : SS41I_ternary_int<0x10, "pblendvb", memopv2i64, i128mem,
                                  int_x86_sse41_pblendvb>;
// Aliases with the implicit xmm0 argument
def : InstAlias<"blendvpd\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}",
                (BLENDVPDrr0 VR128:$dst, VR128:$src2)>;
def : InstAlias<"blendvpd\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}",
                (BLENDVPDrm0 VR128:$dst, f128mem:$src2)>;
def : InstAlias<"blendvps\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}",
                (BLENDVPSrr0 VR128:$dst, VR128:$src2)>;
def : InstAlias<"blendvps\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}",
                (BLENDVPSrm0 VR128:$dst, f128mem:$src2)>;
def : InstAlias<"pblendvb\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}",
                (PBLENDVBrr0 VR128:$dst, VR128:$src2)>;
def : InstAlias<"pblendvb\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}",
                (PBLENDVBrm0 VR128:$dst, i128mem:$src2)>;
let Predicates = [HasSSE41] in {
  def : Pat<(v16i8 (vselect (v16i8 XMM0), (v16i8 VR128:$src1),
                            (v16i8 VR128:$src2))),
            (PBLENDVBrr0 VR128:$src2, VR128:$src1)>;
  def : Pat<(v4i32 (vselect (v4i32 XMM0), (v4i32 VR128:$src1),
                            (v4i32 VR128:$src2))),
            (BLENDVPSrr0 VR128:$src2, VR128:$src1)>;
  def : Pat<(v4f32 (vselect (v4i32 XMM0), (v4f32 VR128:$src1),
                            (v4f32 VR128:$src2))),
            (BLENDVPSrr0 VR128:$src2, VR128:$src1)>;
  def : Pat<(v2i64 (vselect (v2i64 XMM0), (v2i64 VR128:$src1),
                            (v2i64 VR128:$src2))),
            (BLENDVPDrr0 VR128:$src2, VR128:$src1)>;
  def : Pat<(v2f64 (vselect (v2i64 XMM0), (v2f64 VR128:$src1),
                            (v2f64 VR128:$src2))),
            (BLENDVPDrr0 VR128:$src2, VR128:$src1)>;

  def : Pat<(v8i16 (X86Blendpw (v8i16 VR128:$src1), (v8i16 VR128:$src2),
                               (imm:$mask))),
            (PBLENDWrri VR128:$src2, VR128:$src1, imm:$mask)>;
  def : Pat<(v4f32 (X86Blendps (v4f32 VR128:$src1), (v4f32 VR128:$src2),
                               (imm:$mask))),
            (BLENDPSrri VR128:$src2, VR128:$src1, imm:$mask)>;
  def : Pat<(v2f64 (X86Blendpd (v2f64 VR128:$src1), (v2f64 VR128:$src2),
                               (imm:$mask))),
            (BLENDPDrri VR128:$src2, VR128:$src1, imm:$mask)>;
}
let Predicates = [HasAVX] in
def VMOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                        "vmovntdqa\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
                        OpSize, VEX;
let Predicates = [HasAVX2] in
def VMOVNTDQAYrm : SS48I<0x2A, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
                         "vmovntdqa\t{$src, $dst|$dst, $src}",
                         [(set VR256:$dst, (int_x86_avx2_movntdqa addr:$src))]>,
                         OpSize, VEX;
def MOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                       "movntdqa\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
                       OpSize;
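// movntdqa is a non-temporal (streaming) load hint; it is primarily useful
// for reading from write-combining memory without polluting the caches.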
//===----------------------------------------------------------------------===//
// SSE4.2 - Compare Instructions
//===----------------------------------------------------------------------===//
/// SS42I_binop_rm - Simple SSE 4.2 binary operator
multiclass SS42I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
                          ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
                          X86MemOperand x86memop, bit Is2Addr = 1> {
  def rr : SS428I<opc, MRMSrcReg, (outs RC:$dst),
       (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2)))]>,
       OpSize;
  def rm : SS428I<opc, MRMSrcMem, (outs RC:$dst),
       (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst,
         (OpVT (OpNode RC:$src1, (memop_frag addr:$src2))))]>, OpSize;
}
let Predicates = [HasAVX] in
  defm VPCMPGTQ : SS42I_binop_rm<0x37, "vpcmpgtq", X86pcmpgt, v2i64, VR128,
                                 memopv2i64, i128mem, 0>, VEX_4V;

let Predicates = [HasAVX2] in
  defm VPCMPGTQY : SS42I_binop_rm<0x37, "vpcmpgtq", X86pcmpgt, v4i64, VR256,
                                  memopv4i64, i256mem, 0>, VEX_4V;

let Constraints = "$src1 = $dst" in
  defm PCMPGTQ : SS42I_binop_rm<0x37, "pcmpgtq", X86pcmpgt, v2i64, VR128,
                                memopv2i64, i128mem>;
//===----------------------------------------------------------------------===//
// SSE4.2 - String/text Processing Instructions
//===----------------------------------------------------------------------===//
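// The pcmpistr* forms treat a zero element as the end of a string, while the
// pcmpestr* forms take explicit lengths in EAX and EDX. The *i variants
// return an index in ECX, the *m variants a mask in XMM0; EFLAGS is updated
// in all cases.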
// Packed Compare Implicit Length Strings, Return Mask
multiclass pseudo_pcmpistrm<string asm> {
  def REG : PseudoI<(outs VR128:$dst),
                    (ins VR128:$src1, VR128:$src2, i8imm:$src3),
    [(set VR128:$dst, (int_x86_sse42_pcmpistrm128 VR128:$src1, VR128:$src2,
                                                  imm:$src3))]>;
  def MEM : PseudoI<(outs VR128:$dst),
                    (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
    [(set VR128:$dst, (int_x86_sse42_pcmpistrm128
                       VR128:$src1, (load addr:$src2), imm:$src3))]>;
}
let Defs = [EFLAGS], usesCustomInserter = 1 in {
  let AddedComplexity = 1 in
    defm VPCMPISTRM128 : pseudo_pcmpistrm<"#VPCMPISTRM128">, Requires<[HasAVX]>;
  defm PCMPISTRM128 : pseudo_pcmpistrm<"#PCMPISTRM128">, Requires<[HasSSE42]>;
}
let Defs = [XMM0, EFLAGS], neverHasSideEffects = 1, Predicates = [HasAVX] in {
  def VPCMPISTRM128rr : SS42AI<0x62, MRMSrcReg, (outs),
      (ins VR128:$src1, VR128:$src2, i8imm:$src3),
      "vpcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize, VEX;
  let mayLoad = 1 in
  def VPCMPISTRM128rm : SS42AI<0x62, MRMSrcMem, (outs),
      (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
      "vpcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize, VEX;
}

let Defs = [XMM0, EFLAGS], neverHasSideEffects = 1 in {
  def PCMPISTRM128rr : SS42AI<0x62, MRMSrcReg, (outs),
      (ins VR128:$src1, VR128:$src2, i8imm:$src3),
      "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize;
  let mayLoad = 1 in
  def PCMPISTRM128rm : SS42AI<0x62, MRMSrcMem, (outs),
      (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
      "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize;
}
// Packed Compare Explicit Length Strings, Return Mask
multiclass pseudo_pcmpestrm<string asm> {
  def REG : PseudoI<(outs VR128:$dst),
                    (ins VR128:$src1, VR128:$src3, i8imm:$src5),
    [(set VR128:$dst, (int_x86_sse42_pcmpestrm128
                       VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5))]>;
  def MEM : PseudoI<(outs VR128:$dst),
                    (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
    [(set VR128:$dst, (int_x86_sse42_pcmpestrm128
                       VR128:$src1, EAX, (load addr:$src3), EDX, imm:$src5))]>;
}
let Defs = [EFLAGS], Uses = [EAX, EDX], usesCustomInserter = 1 in {
  let AddedComplexity = 1 in
    defm VPCMPESTRM128 : pseudo_pcmpestrm<"#VPCMPESTRM128">, Requires<[HasAVX]>;
  defm PCMPESTRM128 : pseudo_pcmpestrm<"#PCMPESTRM128">, Requires<[HasSSE42]>;
}
let Predicates = [HasAVX],
    Defs = [XMM0, EFLAGS], Uses = [EAX, EDX], neverHasSideEffects = 1 in {
  def VPCMPESTRM128rr : SS42AI<0x60, MRMSrcReg, (outs),
      (ins VR128:$src1, VR128:$src3, i8imm:$src5),
      "vpcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize, VEX;
  let mayLoad = 1 in
  def VPCMPESTRM128rm : SS42AI<0x60, MRMSrcMem, (outs),
      (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
      "vpcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize, VEX;
}

let Defs = [XMM0, EFLAGS], Uses = [EAX, EDX], neverHasSideEffects = 1 in {
  def PCMPESTRM128rr : SS42AI<0x60, MRMSrcReg, (outs),
      (ins VR128:$src1, VR128:$src3, i8imm:$src5),
      "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize;
  let mayLoad = 1 in
  def PCMPESTRM128rm : SS42AI<0x60, MRMSrcMem, (outs),
      (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
      "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize;
}
// Packed Compare Implicit Length Strings, Return Index
let Defs = [ECX, EFLAGS], neverHasSideEffects = 1 in {
  multiclass SS42AI_pcmpistri<string asm> {
    def rr : SS42AI<0x63, MRMSrcReg, (outs),
      (ins VR128:$src1, VR128:$src2, i8imm:$src3),
      !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
      []>, OpSize;
    let mayLoad = 1 in
    def rm : SS42AI<0x63, MRMSrcMem, (outs),
      (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
      !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
      []>, OpSize;
  }
}

let Predicates = [HasAVX] in
defm VPCMPISTRI : SS42AI_pcmpistri<"vpcmpistri">, VEX;
defm PCMPISTRI  : SS42AI_pcmpistri<"pcmpistri">;
// Packed Compare Explicit Length Strings, Return Index
let Defs = [ECX, EFLAGS], Uses = [EAX, EDX], neverHasSideEffects = 1 in {
  multiclass SS42AI_pcmpestri<string asm> {
    def rr : SS42AI<0x61, MRMSrcReg, (outs),
      (ins VR128:$src1, VR128:$src3, i8imm:$src5),
      !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
      []>, OpSize;
    let mayLoad = 1 in
    def rm : SS42AI<0x61, MRMSrcMem, (outs),
      (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
      !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
      []>, OpSize;
  }
}

let Predicates = [HasAVX] in
defm VPCMPESTRI : SS42AI_pcmpestri<"vpcmpestri">, VEX;
defm PCMPESTRI  : SS42AI_pcmpestri<"pcmpestri">;
//===----------------------------------------------------------------------===//
// SSE4.2 - CRC Instructions
//===----------------------------------------------------------------------===//

// No CRC instructions have AVX equivalents

// crc intrinsic instructions
// This set of instructions is reg/mem only; the variants differ only in the
// sizes of r and m.
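// crc32 accumulates a checksum into the destination register using the
// CRC-32C (Castagnoli) polynomial, not the polynomial used by zlib/Ethernet
// CRC-32.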
let Constraints = "$src1 = $dst" in {
  def CRC32r32m8  : SS42FI<0xF0, MRMSrcMem, (outs GR32:$dst),
                      (ins GR32:$src1, i8mem:$src2),
                      "crc32{b} \t{$src2, $src1|$src1, $src2}",
                       [(set GR32:$dst,
                         (int_x86_sse42_crc32_32_8 GR32:$src1,
                          (load addr:$src2)))]>;
  def CRC32r32r8  : SS42FI<0xF0, MRMSrcReg, (outs GR32:$dst),
                      (ins GR32:$src1, GR8:$src2),
                      "crc32{b} \t{$src2, $src1|$src1, $src2}",
                       [(set GR32:$dst,
                         (int_x86_sse42_crc32_32_8 GR32:$src1, GR8:$src2))]>;
  def CRC32r32m16 : SS42FI<0xF1, MRMSrcMem, (outs GR32:$dst),
                      (ins GR32:$src1, i16mem:$src2),
                      "crc32{w} \t{$src2, $src1|$src1, $src2}",
                       [(set GR32:$dst,
                         (int_x86_sse42_crc32_32_16 GR32:$src1,
                          (load addr:$src2)))]>,
                         OpSize;
  def CRC32r32r16 : SS42FI<0xF1, MRMSrcReg, (outs GR32:$dst),
                      (ins GR32:$src1, GR16:$src2),
                      "crc32{w} \t{$src2, $src1|$src1, $src2}",
                       [(set GR32:$dst,
                         (int_x86_sse42_crc32_32_16 GR32:$src1, GR16:$src2))]>,
                         OpSize;
  def CRC32r32m32 : SS42FI<0xF1, MRMSrcMem, (outs GR32:$dst),
                      (ins GR32:$src1, i32mem:$src2),
                      "crc32{l} \t{$src2, $src1|$src1, $src2}",
                       [(set GR32:$dst,
                         (int_x86_sse42_crc32_32_32 GR32:$src1,
                          (load addr:$src2)))]>;
  def CRC32r32r32 : SS42FI<0xF1, MRMSrcReg, (outs GR32:$dst),
                      (ins GR32:$src1, GR32:$src2),
                      "crc32{l} \t{$src2, $src1|$src1, $src2}",
                       [(set GR32:$dst,
                         (int_x86_sse42_crc32_32_32 GR32:$src1, GR32:$src2))]>;
  def CRC32r64m8  : SS42FI<0xF0, MRMSrcMem, (outs GR64:$dst),
                      (ins GR64:$src1, i8mem:$src2),
                      "crc32{b} \t{$src2, $src1|$src1, $src2}",
                       [(set GR64:$dst,
                         (int_x86_sse42_crc32_64_8 GR64:$src1,
                          (load addr:$src2)))]>,
                         REX_W;
  def CRC32r64r8  : SS42FI<0xF0, MRMSrcReg, (outs GR64:$dst),
                      (ins GR64:$src1, GR8:$src2),
                      "crc32{b} \t{$src2, $src1|$src1, $src2}",
                       [(set GR64:$dst,
                         (int_x86_sse42_crc32_64_8 GR64:$src1, GR8:$src2))]>,
                         REX_W;
  def CRC32r64m64 : SS42FI<0xF1, MRMSrcMem, (outs GR64:$dst),
                      (ins GR64:$src1, i64mem:$src2),
                      "crc32{q} \t{$src2, $src1|$src1, $src2}",
                       [(set GR64:$dst,
                         (int_x86_sse42_crc32_64_64 GR64:$src1,
                          (load addr:$src2)))]>,
                         REX_W;
  def CRC32r64r64 : SS42FI<0xF1, MRMSrcReg, (outs GR64:$dst),
                      (ins GR64:$src1, GR64:$src2),
                      "crc32{q} \t{$src2, $src1|$src1, $src2}",
                       [(set GR64:$dst,
                         (int_x86_sse42_crc32_64_64 GR64:$src1, GR64:$src2))]>,
                         REX_W;
}
//===----------------------------------------------------------------------===//
// AES-NI Instructions
//===----------------------------------------------------------------------===//
multiclass AESI_binop_rm_int<bits<8> opc, string OpcodeStr,
                             Intrinsic IntId128, bit Is2Addr = 1> {
  def rr : AES8I<opc, MRMSrcReg, (outs VR128:$dst),
       (ins VR128:$src1, VR128:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
       OpSize;
  def rm : AES8I<opc, MRMSrcMem, (outs VR128:$dst),
       (ins VR128:$src1, i128mem:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst,
         (IntId128 VR128:$src1, (memopv2i64 addr:$src2)))]>, OpSize;
}
// Perform One Round of an AES Encryption/Decryption Flow
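// Editor's note: each instruction performs exactly one AES round, so a full
// AES-128 encryption is ten of them in sequence: nine aesenc rounds (one per
// middle round key) followed by a single aesenclast for the final round,
// which omits the MixColumns step.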
let Predicates = [HasAVX, HasAES] in {
  defm VAESENC     : AESI_binop_rm_int<0xDC, "vaesenc",
                       int_x86_aesni_aesenc, 0>, VEX_4V;
  defm VAESENCLAST : AESI_binop_rm_int<0xDD, "vaesenclast",
                       int_x86_aesni_aesenclast, 0>, VEX_4V;
  defm VAESDEC     : AESI_binop_rm_int<0xDE, "vaesdec",
                       int_x86_aesni_aesdec, 0>, VEX_4V;
  defm VAESDECLAST : AESI_binop_rm_int<0xDF, "vaesdeclast",
                       int_x86_aesni_aesdeclast, 0>, VEX_4V;
}

let Constraints = "$src1 = $dst" in {
  defm AESENC     : AESI_binop_rm_int<0xDC, "aesenc",
                      int_x86_aesni_aesenc>;
  defm AESENCLAST : AESI_binop_rm_int<0xDD, "aesenclast",
                      int_x86_aesni_aesenclast>;
  defm AESDEC     : AESI_binop_rm_int<0xDE, "aesdec",
                      int_x86_aesni_aesdec>;
  defm AESDECLAST : AESI_binop_rm_int<0xDF, "aesdeclast",
                      int_x86_aesni_aesdeclast>;
}
// Perform the AES InvMixColumn Transformation
let Predicates = [HasAVX, HasAES] in {
  def VAESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
      (ins VR128:$src1),
      "vaesimc\t{$src1, $dst|$dst, $src1}",
      [(set VR128:$dst,
        (int_x86_aesni_aesimc VR128:$src1))]>,
      OpSize, VEX;
  def VAESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
      (ins i128mem:$src1),
      "vaesimc\t{$src1, $dst|$dst, $src1}",
      [(set VR128:$dst, (int_x86_aesni_aesimc (memopv2i64 addr:$src1)))]>,
      OpSize, VEX;
}
def AESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
  (ins VR128:$src1),
  "aesimc\t{$src1, $dst|$dst, $src1}",
  [(set VR128:$dst,
    (int_x86_aesni_aesimc VR128:$src1))]>,
  OpSize;
def AESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
  (ins i128mem:$src1),
  "aesimc\t{$src1, $dst|$dst, $src1}",
  [(set VR128:$dst, (int_x86_aesni_aesimc (memopv2i64 addr:$src1)))]>,
  OpSize;
// AES Round Key Generation Assist
let Predicates = [HasAVX, HasAES] in {
  def VAESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
      (ins VR128:$src1, i8imm:$src2),
      "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
      [(set VR128:$dst,
        (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
      OpSize, VEX;
  def VAESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
      (ins i128mem:$src1, i8imm:$src2),
      "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
      [(set VR128:$dst,
        (int_x86_aesni_aeskeygenassist (memopv2i64 addr:$src1), imm:$src2))]>,
      OpSize, VEX;
}
def AESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
  (ins VR128:$src1, i8imm:$src2),
  "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
  [(set VR128:$dst,
    (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
  OpSize;
def AESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
  (ins i128mem:$src1, i8imm:$src2),
  "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
  [(set VR128:$dst,
    (int_x86_aesni_aeskeygenassist (memopv2i64 addr:$src1), imm:$src2))]>,
  OpSize;
//===----------------------------------------------------------------------===//
// PCLMUL Instructions
//===----------------------------------------------------------------------===//
// AVX carry-less Multiplication instructions
def VPCLMULQDQrr : AVXPCLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
           (ins VR128:$src1, VR128:$src2, i8imm:$src3),
           "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
           [(set VR128:$dst,
             (int_x86_pclmulqdq VR128:$src1, VR128:$src2, imm:$src3))]>;

def VPCLMULQDQrm : AVXPCLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
           (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
           "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
           [(set VR128:$dst, (int_x86_pclmulqdq VR128:$src1,
                              (memopv2i64 addr:$src2), imm:$src3))]>;
// Carry-less Multiplication instructions
let Constraints = "$src1 = $dst" in {
def PCLMULQDQrr : PCLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
           (ins VR128:$src1, VR128:$src2, i8imm:$src3),
           "pclmulqdq\t{$src3, $src2, $dst|$dst, $src2, $src3}",
           [(set VR128:$dst,
             (int_x86_pclmulqdq VR128:$src1, VR128:$src2, imm:$src3))]>;

def PCLMULQDQrm : PCLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
           (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
           "pclmulqdq\t{$src3, $src2, $dst|$dst, $src2, $src3}",
           [(set VR128:$dst, (int_x86_pclmulqdq VR128:$src1,
                              (memopv2i64 addr:$src2), imm:$src3))]>;
} // Constraints = "$src1 = $dst"
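// Editor's note on the alias immediates below: the selector bits of the
// pclmulqdq immediate pick which 64-bit half of each source participates,
// so imm 0x00 ("lqlq") multiplies the two low quadwords and imm 0x11
// ("hqhq") the two high ones, e.g.
//   pclmulhqhqdq %xmm1, %xmm0  ==  pclmulqdq $0x11, %xmm1, %xmm0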
multiclass pclmul_alias<string asm, int immop> {
  def : InstAlias<!strconcat("pclmul", asm, "dq {$src, $dst|$dst, $src}"),
                  (PCLMULQDQrr VR128:$dst, VR128:$src, immop)>;

  def : InstAlias<!strconcat("pclmul", asm, "dq {$src, $dst|$dst, $src}"),
                  (PCLMULQDQrm VR128:$dst, i128mem:$src, immop)>;

  def : InstAlias<!strconcat("vpclmul", asm,
                             "dq {$src2, $src1, $dst|$dst, $src1, $src2}"),
                  (VPCLMULQDQrr VR128:$dst, VR128:$src1, VR128:$src2, immop)>;

  def : InstAlias<!strconcat("vpclmul", asm,
                             "dq {$src2, $src1, $dst|$dst, $src1, $src2}"),
                  (VPCLMULQDQrm VR128:$dst, VR128:$src1, i128mem:$src2, immop)>;
}
defm : pclmul_alias<"hqhq", 0x11>;
defm : pclmul_alias<"hqlq", 0x01>;
defm : pclmul_alias<"lqhq", 0x10>;
defm : pclmul_alias<"lqlq", 0x00>;
//===----------------------------------------------------------------------===//
// SSE4A Instructions
//===----------------------------------------------------------------------===//
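// Editor's note: EXTRQ/INSERTQ operate on bit fields inside the low 64 bits
// of an XMM register ($len bits starting at bit $idx); MOVNTSS/MOVNTSD are
// the scalar non-temporal stores of the group.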
let Predicates = [HasSSE4A] in {

let Constraints = "$src = $dst" in {
def EXTRQI : Ii8<0x78, MRM0r, (outs VR128:$dst),
                 (ins VR128:$src, i8imm:$len, i8imm:$idx),
                 "extrq\t{$idx, $len, $src|$src, $len, $idx}",
                 [(set VR128:$dst, (int_x86_sse4a_extrqi VR128:$src, imm:$len,
                                    imm:$idx))]>, TB, OpSize;
def EXTRQ  : I<0x79, MRMSrcReg, (outs VR128:$dst),
               (ins VR128:$src, VR128:$mask),
               "extrq\t{$mask, $src|$src, $mask}",
               [(set VR128:$dst, (int_x86_sse4a_extrq VR128:$src,
                                  VR128:$mask))]>, TB, OpSize;

def INSERTQI : Ii8<0x78, MRMSrcReg, (outs VR128:$dst),
                   (ins VR128:$src, VR128:$src2, i8imm:$len, i8imm:$idx),
                   "insertq\t{$idx, $len, $src2, $src|$src, $src2, $len, $idx}",
                   [(set VR128:$dst, (int_x86_sse4a_insertqi VR128:$src,
                                      VR128:$src2, imm:$len, imm:$idx))]>, XD;
def INSERTQ  : I<0x79, MRMSrcReg, (outs VR128:$dst),
                 (ins VR128:$src, VR128:$mask),
                 "insertq\t{$mask, $src|$src, $mask}",
                 [(set VR128:$dst, (int_x86_sse4a_insertq VR128:$src,
                                    VR128:$mask))]>, XD;
} // Constraints = "$src = $dst"

def MOVNTSS : I<0x2B, MRMDestMem, (outs), (ins f32mem:$dst, VR128:$src),
                "movntss\t{$src, $dst|$dst, $src}",
                [(int_x86_sse4a_movnt_ss addr:$dst, VR128:$src)]>, XS;

def MOVNTSD : I<0x2B, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                "movntsd\t{$src, $dst|$dst, $src}",
                [(int_x86_sse4a_movnt_sd addr:$dst, VR128:$src)]>, XD;

} // Predicates = [HasSSE4A]
//===----------------------------------------------------------------------===//
// AVX Instructions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// VBROADCAST - Load from memory and broadcast to all elements of the
// destination operand
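//
// Editor's note, with a usage sketch (AT&T syntax):
//   vbroadcastss (%rax), %ymm0   ; ymm0 = 8 copies of one f32 load
// On AVX1 the source must be memory; the register forms below are AVX2-only.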
class avx_broadcast<bits<8> opc, string OpcodeStr, RegisterClass RC,
                    X86MemOperand x86memop, Intrinsic Int> :
  AVX8I<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
        !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
        [(set RC:$dst, (Int addr:$src))]>, VEX;

// AVX2 adds register forms
class avx2_broadcast_reg<bits<8> opc, string OpcodeStr, RegisterClass RC,
                         Intrinsic Int> :
  AVX28I<opc, MRMSrcReg, (outs RC:$dst), (ins VR128:$src),
         !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
         [(set RC:$dst, (Int VR128:$src))]>, VEX;

let ExeDomain = SSEPackedSingle in {
  def VBROADCASTSSrm  : avx_broadcast<0x18, "vbroadcastss", VR128, f32mem,
                                      int_x86_avx_vbroadcast_ss>;
  def VBROADCASTSSYrm : avx_broadcast<0x18, "vbroadcastss", VR256, f32mem,
                                      int_x86_avx_vbroadcast_ss_256>;
}
let ExeDomain = SSEPackedDouble in
def VBROADCASTSDYrm : avx_broadcast<0x19, "vbroadcastsd", VR256, f64mem,
                                    int_x86_avx_vbroadcast_sd_256>;
def VBROADCASTF128  : avx_broadcast<0x1A, "vbroadcastf128", VR256, f128mem,
                                    int_x86_avx_vbroadcastf128_pd_256>;

let ExeDomain = SSEPackedSingle in {
  def VBROADCASTSSrr  : avx2_broadcast_reg<0x18, "vbroadcastss", VR128,
                                           int_x86_avx2_vbroadcast_ss_ps>;
  def VBROADCASTSSYrr : avx2_broadcast_reg<0x18, "vbroadcastss", VR256,
                                           int_x86_avx2_vbroadcast_ss_ps_256>;
}
let ExeDomain = SSEPackedDouble in
def VBROADCASTSDYrr : avx2_broadcast_reg<0x19, "vbroadcastsd", VR256,
                                         int_x86_avx2_vbroadcast_sd_pd_256>;

let Predicates = [HasAVX2] in
def VBROADCASTI128 : avx_broadcast<0x5A, "vbroadcasti128", VR256, i128mem,
                                   int_x86_avx2_vbroadcasti128>;

let Predicates = [HasAVX] in
def : Pat<(int_x86_avx_vbroadcastf128_ps_256 addr:$src),
          (VBROADCASTF128 addr:$src)>;
//===----------------------------------------------------------------------===//
// VINSERTF128 - Insert packed floating-point values
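//
// Editor's note: bit 0 of the immediate selects the destination half, so
//   vinsertf128 $1, %xmm1, %ymm2, %ymm0
// keeps the low 128 bits of ymm2 and places xmm1 in the upper half.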
let neverHasSideEffects = 1, ExeDomain = SSEPackedSingle in {
def VINSERTF128rr : AVXAIi8<0x18, MRMSrcReg, (outs VR256:$dst),
          (ins VR256:$src1, VR128:$src2, i8imm:$src3),
          "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, VEX_4V;
let mayLoad = 1 in
def VINSERTF128rm : AVXAIi8<0x18, MRMSrcMem, (outs VR256:$dst),
          (ins VR256:$src1, f128mem:$src2, i8imm:$src3),
          "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, VEX_4V;
}
let Predicates = [HasAVX] in {
def : Pat<(vinsertf128_insert:$ins (v8f32 VR256:$src1), (v4f32 VR128:$src2),
                                   (i32 imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v4f64 VR256:$src1), (v2f64 VR128:$src2),
                                   (i32 imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;

def : Pat<(vinsertf128_insert:$ins (v8f32 VR256:$src1), (loadv4f32 addr:$src2),
                                   (i32 imm)),
          (VINSERTF128rm VR256:$src1, addr:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v4f64 VR256:$src1), (loadv2f64 addr:$src2),
                                   (i32 imm)),
          (VINSERTF128rm VR256:$src1, addr:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
}

let Predicates = [HasAVX1Only] in {
def : Pat<(vinsertf128_insert:$ins (v4i64 VR256:$src1), (v2i64 VR128:$src2),
                                   (i32 imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v8i32 VR256:$src1), (v4i32 VR128:$src2),
                                   (i32 imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v32i8 VR256:$src1), (v16i8 VR128:$src2),
                                   (i32 imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v16i16 VR256:$src1), (v8i16 VR128:$src2),
                                   (i32 imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;

def : Pat<(vinsertf128_insert:$ins (v4i64 VR256:$src1), (loadv2i64 addr:$src2),
                                   (i32 imm)),
          (VINSERTF128rm VR256:$src1, addr:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
}
//===----------------------------------------------------------------------===//
// VEXTRACTF128 - Extract packed floating-point values
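//
// Editor's note: the inverse of vinsertf128; bit 0 of the immediate picks
// which 128-bit half is extracted, e.g.
//   vextractf128 $1, %ymm0, %xmm1   ; xmm1 = upper half of ymm0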
let neverHasSideEffects = 1, ExeDomain = SSEPackedSingle in {
def VEXTRACTF128rr : AVXAIi8<0x19, MRMDestReg, (outs VR128:$dst),
          (ins VR256:$src1, i8imm:$src2),
          "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
          []>, VEX;
let mayStore = 1 in
def VEXTRACTF128mr : AVXAIi8<0x19, MRMDestMem, (outs),
          (ins f128mem:$dst, VR256:$src1, i8imm:$src2),
          "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
          []>, VEX;
}
// Extract and store.
let Predicates = [HasAVX] in {
  def : Pat<(alignedstore (int_x86_avx_vextractf128_ps_256 VR256:$src1,
                           imm:$src2), addr:$dst),
            (VEXTRACTF128mr addr:$dst, VR256:$src1, imm:$src2)>;
  def : Pat<(alignedstore (int_x86_avx_vextractf128_pd_256 VR256:$src1,
                           imm:$src2), addr:$dst),
            (VEXTRACTF128mr addr:$dst, VR256:$src1, imm:$src2)>;
  def : Pat<(alignedstore (int_x86_avx_vextractf128_si_256 VR256:$src1,
                           imm:$src2), addr:$dst),
            (VEXTRACTF128mr addr:$dst, VR256:$src1, imm:$src2)>;
}
let Predicates = [HasAVX] in {
def : Pat<(int_x86_avx_vextractf128_pd_256 VR256:$src1, imm:$src2),
          (VEXTRACTF128rr VR256:$src1, imm:$src2)>;
def : Pat<(int_x86_avx_vextractf128_ps_256 VR256:$src1, imm:$src2),
          (VEXTRACTF128rr VR256:$src1, imm:$src2)>;
def : Pat<(int_x86_avx_vextractf128_si_256 VR256:$src1, imm:$src2),
          (VEXTRACTF128rr VR256:$src1, imm:$src2)>;

def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
          (v4f32 (VEXTRACTF128rr
                    (v8f32 VR256:$src1),
                    (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
          (v2f64 (VEXTRACTF128rr
                    (v4f64 VR256:$src1),
                    (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
}
let Predicates = [HasAVX1Only] in {
def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
          (v2i64 (VEXTRACTF128rr
                    (v4i64 VR256:$src1),
                    (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
          (v4i32 (VEXTRACTF128rr
                    (v8i32 VR256:$src1),
                    (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
          (v8i16 (VEXTRACTF128rr
                    (v16i16 VR256:$src1),
                    (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
          (v16i8 (VEXTRACTF128rr
                    (v32i8 VR256:$src1),
                    (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
}
//===----------------------------------------------------------------------===//
// VMASKMOV - Conditional SIMD Packed Loads and Stores
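//
// Editor's note: the condition comes from the sign bit of each element of
// the mask operand; masked-off elements load as zero, and masked-off store
// lanes leave memory untouched (faults on those lanes are suppressed).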
multiclass avx_movmask_rm<bits<8> opc_rm, bits<8> opc_mr, string OpcodeStr,
                          Intrinsic IntLd, Intrinsic IntLd256,
                          Intrinsic IntSt, Intrinsic IntSt256> {
  def rm  : AVX8I<opc_rm, MRMSrcMem, (outs VR128:$dst),
             (ins VR128:$src1, f128mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR128:$dst, (IntLd addr:$src2, VR128:$src1))]>,
             VEX_4V;
  def Yrm : AVX8I<opc_rm, MRMSrcMem, (outs VR256:$dst),
             (ins VR256:$src1, f256mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR256:$dst, (IntLd256 addr:$src2, VR256:$src1))]>,
             VEX_4V;
  def mr  : AVX8I<opc_mr, MRMDestMem, (outs),
             (ins f128mem:$dst, VR128:$src1, VR128:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(IntSt addr:$dst, VR128:$src1, VR128:$src2)]>, VEX_4V;
  def Ymr : AVX8I<opc_mr, MRMDestMem, (outs),
             (ins f256mem:$dst, VR256:$src1, VR256:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(IntSt256 addr:$dst, VR256:$src1, VR256:$src2)]>, VEX_4V;
}

let ExeDomain = SSEPackedSingle in
defm VMASKMOVPS : avx_movmask_rm<0x2C, 0x2E, "vmaskmovps",
                                 int_x86_avx_maskload_ps,
                                 int_x86_avx_maskload_ps_256,
                                 int_x86_avx_maskstore_ps,
                                 int_x86_avx_maskstore_ps_256>;
let ExeDomain = SSEPackedDouble in
defm VMASKMOVPD : avx_movmask_rm<0x2D, 0x2F, "vmaskmovpd",
                                 int_x86_avx_maskload_pd,
                                 int_x86_avx_maskload_pd_256,
                                 int_x86_avx_maskstore_pd,
                                 int_x86_avx_maskstore_pd_256>;
//===----------------------------------------------------------------------===//
// VPERMIL - Permute Single and Double Floating-Point Values
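//
// Editor's note: vpermilps/vpermilpd shuffle elements only within each
// 128-bit lane. The rr/rm forms take per-element control from a vector, the
// ri/mi forms from an 8-bit immediate, e.g.
//   vpermilps $0x1b, %ymm0, %ymm1   ; reverse the four floats in each lane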
multiclass avx_permil<bits<8> opc_rm, bits<8> opc_rmi, string OpcodeStr,
                      RegisterClass RC, X86MemOperand x86memop_f,
                      X86MemOperand x86memop_i, PatFrag i_frag,
                      Intrinsic IntVar, ValueType vt> {
  def rr : AVX8I<opc_rm, MRMSrcReg, (outs RC:$dst),
             (ins RC:$src1, RC:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst, (IntVar RC:$src1, RC:$src2))]>, VEX_4V;
  def rm : AVX8I<opc_rm, MRMSrcMem, (outs RC:$dst),
             (ins RC:$src1, x86memop_i:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst, (IntVar RC:$src1,
                             (bitconvert (i_frag addr:$src2))))]>, VEX_4V;

  def ri : AVXAIi8<opc_rmi, MRMSrcReg, (outs RC:$dst),
             (ins RC:$src1, i8imm:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst, (vt (X86VPermilp RC:$src1, (i8 imm:$src2))))]>, VEX;
  def mi : AVXAIi8<opc_rmi, MRMSrcMem, (outs RC:$dst),
             (ins x86memop_f:$src1, i8imm:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst,
               (vt (X86VPermilp (memop addr:$src1), (i8 imm:$src2))))]>, VEX;
}
let ExeDomain = SSEPackedSingle in {
  defm VPERMILPS  : avx_permil<0x0C, 0x04, "vpermilps", VR128, f128mem, i128mem,
                               memopv2i64, int_x86_avx_vpermilvar_ps, v4f32>;
  defm VPERMILPSY : avx_permil<0x0C, 0x04, "vpermilps", VR256, f256mem, i256mem,
                               memopv4i64, int_x86_avx_vpermilvar_ps_256, v8f32>;
}
let ExeDomain = SSEPackedDouble in {
  defm VPERMILPD  : avx_permil<0x0D, 0x05, "vpermilpd", VR128, f128mem, i128mem,
                               memopv2i64, int_x86_avx_vpermilvar_pd, v2f64>;
  defm VPERMILPDY : avx_permil<0x0D, 0x05, "vpermilpd", VR256, f256mem, i256mem,
                               memopv4i64, int_x86_avx_vpermilvar_pd_256, v4f64>;
}
let Predicates = [HasAVX] in {
def : Pat<(v8i32 (X86VPermilp VR256:$src1, (i8 imm:$imm))),
          (VPERMILPSYri VR256:$src1, imm:$imm)>;
def : Pat<(v4i64 (X86VPermilp VR256:$src1, (i8 imm:$imm))),
          (VPERMILPDYri VR256:$src1, imm:$imm)>;
def : Pat<(v8i32 (X86VPermilp (bc_v8i32 (memopv4i64 addr:$src1)),
                  (i8 imm:$imm))),
          (VPERMILPSYmi addr:$src1, imm:$imm)>;
def : Pat<(v4i64 (X86VPermilp (memopv4i64 addr:$src1), (i8 imm:$imm))),
          (VPERMILPDYmi addr:$src1, imm:$imm)>;

def : Pat<(v2i64 (X86VPermilp VR128:$src1, (i8 imm:$imm))),
          (VPERMILPDri VR128:$src1, imm:$imm)>;
def : Pat<(v2i64 (X86VPermilp (memopv2i64 addr:$src1), (i8 imm:$imm))),
          (VPERMILPDmi addr:$src1, imm:$imm)>;
}
//===----------------------------------------------------------------------===//
// VPERM2F128 - Permute Floating-Point Values in 128-bit chunks
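//
// Editor's note on the immediate: bits [1:0] choose the source of the
// result's low 128-bit lane and bits [5:4] the high lane (0/1 = lanes of
// $src1, 2/3 = lanes of $src2); setting bit 3 or bit 7 zeroes that lane.
//   vperm2f128 $0x01, %ymm1, %ymm1, %ymm0   ; swap the two lanes of ymm1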
let ExeDomain = SSEPackedSingle in {
def VPERM2F128rr : AVXAIi8<0x06, MRMSrcReg, (outs VR256:$dst),
          (ins VR256:$src1, VR256:$src2, i8imm:$src3),
          "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          [(set VR256:$dst, (v8f32 (X86VPerm2x128 VR256:$src1, VR256:$src2,
                                    (i8 imm:$src3))))]>, VEX_4V;
def VPERM2F128rm : AVXAIi8<0x06, MRMSrcMem, (outs VR256:$dst),
          (ins VR256:$src1, f256mem:$src2, i8imm:$src3),
          "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          [(set VR256:$dst, (X86VPerm2x128 VR256:$src1, (memopv8f32 addr:$src2),
                             (i8 imm:$src3)))]>, VEX_4V;
}
let Predicates = [HasAVX] in {
def : Pat<(v4f64 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v4f64 (X86VPerm2x128 VR256:$src1,
                  (memopv4f64 addr:$src2), (i8 imm:$imm))),
          (VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>;
}

let Predicates = [HasAVX1Only] in {
def : Pat<(v8i32 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v4i64 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v32i8 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v16i16 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;

def : Pat<(v8i32 (X86VPerm2x128 VR256:$src1,
                  (bc_v8i32 (memopv4i64 addr:$src2)), (i8 imm:$imm))),
          (VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>;
def : Pat<(v4i64 (X86VPerm2x128 VR256:$src1,
                  (memopv4i64 addr:$src2), (i8 imm:$imm))),
          (VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>;
def : Pat<(v32i8 (X86VPerm2x128 VR256:$src1,
                  (bc_v32i8 (memopv4i64 addr:$src2)), (i8 imm:$imm))),
          (VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>;
def : Pat<(v16i16 (X86VPerm2x128 VR256:$src1,
                   (bc_v16i16 (memopv4i64 addr:$src2)), (i8 imm:$imm))),
          (VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>;
}
//===----------------------------------------------------------------------===//
// VZERO - Zero YMM registers
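//
// Editor's note: vzeroupper clears bits 255:128 of every YMM register and is
// the conventional way to avoid AVX/SSE transition stalls before calling
// legacy SSE code; vzeroall clears the full registers, low halves included.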
let Defs = [YMM0, YMM1, YMM2, YMM3, YMM4, YMM5, YMM6, YMM7,
            YMM8, YMM9, YMM10, YMM11, YMM12, YMM13, YMM14, YMM15] in {
  // Zero All YMM registers
  def VZEROALL : I<0x77, RawFrm, (outs), (ins), "vzeroall",
                   [(int_x86_avx_vzeroall)]>, TB, VEX, VEX_L, Requires<[HasAVX]>;

  // Zero Upper bits of YMM registers
  def VZEROUPPER : I<0x77, RawFrm, (outs), (ins), "vzeroupper",
                     [(int_x86_avx_vzeroupper)]>, TB, VEX, Requires<[HasAVX]>;
}
//===----------------------------------------------------------------------===//
// Half precision conversion instructions
//===----------------------------------------------------------------------===//
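// Editor's note: F16C converts between packed half- and single-precision
// floats. For vcvtps2ph the immediate supplies rounding control; as I read
// the ISA, imm[1:0] selects the rounding mode when imm[2] is clear
// (otherwise MXCSR.RC is used), e.g.
//   vcvtps2ph $0, %xmm0, %xmm1   ; pack 4 x f32 to 4 x f16, round-to-nearest
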
multiclass f16c_ph2ps<RegisterClass RC, X86MemOperand x86memop, Intrinsic Int> {
  def rr : I<0x13, MRMSrcReg, (outs RC:$dst), (ins VR128:$src),
             "vcvtph2ps\t{$src, $dst|$dst, $src}",
             [(set RC:$dst, (Int VR128:$src))]>,
             T8, OpSize, VEX;
  let neverHasSideEffects = 1, mayLoad = 1 in
  def rm : I<0x13, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
             "vcvtph2ps\t{$src, $dst|$dst, $src}", []>, T8, OpSize, VEX;
}

multiclass f16c_ps2ph<RegisterClass RC, X86MemOperand x86memop, Intrinsic Int> {
  def rr : Ii8<0x1D, MRMDestReg, (outs VR128:$dst),
               (ins RC:$src1, i32i8imm:$src2),
               "vcvtps2ph\t{$src2, $src1, $dst|$dst, $src1, $src2}",
               [(set VR128:$dst, (Int RC:$src1, imm:$src2))]>,
               TA, OpSize, VEX;
  let neverHasSideEffects = 1, mayStore = 1 in
  def mr : Ii8<0x1D, MRMDestMem, (outs),
               (ins x86memop:$dst, RC:$src1, i32i8imm:$src2),
               "vcvtps2ph\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
               TA, OpSize, VEX;
}

let Predicates = [HasAVX, HasF16C] in {
  defm VCVTPH2PS  : f16c_ph2ps<VR128, f64mem, int_x86_vcvtph2ps_128>;
  defm VCVTPH2PSY : f16c_ph2ps<VR256, f128mem, int_x86_vcvtph2ps_256>;
  defm VCVTPS2PH  : f16c_ps2ph<VR128, f64mem, int_x86_vcvtps2ph_128>;
  defm VCVTPS2PHY : f16c_ps2ph<VR256, f128mem, int_x86_vcvtps2ph_256>;
}
//===----------------------------------------------------------------------===//
// AVX2 Instructions
//===----------------------------------------------------------------------===//

/// AVX2_binop_rmi_int - AVX2 binary operator with 8-bit immediate
multiclass AVX2_binop_rmi_int<bits<8> opc, string OpcodeStr,
                              Intrinsic IntId, RegisterClass RC,
                              PatFrag memop_frag, X86MemOperand x86memop> {
  let isCommutable = 1 in
  def rri : AVX2AIi8<opc, MRMSrcReg, (outs RC:$dst),
        (ins RC:$src1, RC:$src2, u32u8imm:$src3),
        !strconcat(OpcodeStr,
            "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
        [(set RC:$dst, (IntId RC:$src1, RC:$src2, imm:$src3))]>,
        VEX_4V;
  def rmi : AVX2AIi8<opc, MRMSrcMem, (outs RC:$dst),
        (ins RC:$src1, x86memop:$src2, u32u8imm:$src3),
        !strconcat(OpcodeStr,
            "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
        [(set RC:$dst,
          (IntId RC:$src1,
           (bitconvert (memop_frag addr:$src2)), imm:$src3))]>,
        VEX_4V;
}

let isCommutable = 0 in {
defm VPBLENDD  : AVX2_binop_rmi_int<0x02, "vpblendd", int_x86_avx2_pblendd_128,
                                    VR128, memopv2i64, i128mem>;
defm VPBLENDDY : AVX2_binop_rmi_int<0x02, "vpblendd", int_x86_avx2_pblendd_256,
                                    VR256, memopv4i64, i256mem>;
}
//===----------------------------------------------------------------------===//
// VPBROADCAST - Load from memory and broadcast to all elements of the
// destination operand
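//
// Editor's note: unlike the AVX1 vbroadcasts* forms, these also accept the
// low element of an XMM register as the source, e.g.
//   vpbroadcastb %xmm0, %ymm1   ; replicate byte 0 of xmm0 into 32 bytes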
multiclass avx2_broadcast<bits<8> opc, string OpcodeStr,
                          X86MemOperand x86memop, PatFrag ld_frag,
                          Intrinsic Int128, Intrinsic Int256> {
  def rr : AVX28I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                  !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                  [(set VR128:$dst, (Int128 VR128:$src))]>, VEX;
  def rm : AVX28I<opc, MRMSrcMem, (outs VR128:$dst), (ins x86memop:$src),
                  !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                  [(set VR128:$dst,
                    (Int128 (scalar_to_vector (ld_frag addr:$src))))]>, VEX;
  def Yrr : AVX28I<opc, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
                   !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                   [(set VR256:$dst, (Int256 VR128:$src))]>, VEX;
  def Yrm : AVX28I<opc, MRMSrcMem, (outs VR256:$dst), (ins x86memop:$src),
                   !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                   [(set VR256:$dst,
                     (Int256 (scalar_to_vector (ld_frag addr:$src))))]>, VEX;
}
defm VPBROADCASTB : avx2_broadcast<0x78, "vpbroadcastb", i8mem, loadi8,
                                   int_x86_avx2_pbroadcastb_128,
                                   int_x86_avx2_pbroadcastb_256>;
defm VPBROADCASTW : avx2_broadcast<0x79, "vpbroadcastw", i16mem, loadi16,
                                   int_x86_avx2_pbroadcastw_128,
                                   int_x86_avx2_pbroadcastw_256>;
defm VPBROADCASTD : avx2_broadcast<0x58, "vpbroadcastd", i32mem, loadi32,
                                   int_x86_avx2_pbroadcastd_128,
                                   int_x86_avx2_pbroadcastd_256>;
defm VPBROADCASTQ : avx2_broadcast<0x59, "vpbroadcastq", i64mem, loadi64,
                                   int_x86_avx2_pbroadcastq_128,
                                   int_x86_avx2_pbroadcastq_256>;
let Predicates = [HasAVX2] in {
  def : Pat<(v16i8 (X86VBroadcast (loadi8 addr:$src))),
            (VPBROADCASTBrm addr:$src)>;
  def : Pat<(v32i8 (X86VBroadcast (loadi8 addr:$src))),
            (VPBROADCASTBYrm addr:$src)>;
  def : Pat<(v8i16 (X86VBroadcast (loadi16 addr:$src))),
            (VPBROADCASTWrm addr:$src)>;
  def : Pat<(v16i16 (X86VBroadcast (loadi16 addr:$src))),
            (VPBROADCASTWYrm addr:$src)>;
  def : Pat<(v4i32 (X86VBroadcast (loadi32 addr:$src))),
            (VPBROADCASTDrm addr:$src)>;
  def : Pat<(v8i32 (X86VBroadcast (loadi32 addr:$src))),
            (VPBROADCASTDYrm addr:$src)>;
  def : Pat<(v2i64 (X86VBroadcast (loadi64 addr:$src))),
            (VPBROADCASTQrm addr:$src)>;
  def : Pat<(v4i64 (X86VBroadcast (loadi64 addr:$src))),
            (VPBROADCASTQYrm addr:$src)>;

  def : Pat<(v16i8 (X86VBroadcast (v16i8 VR128:$src))),
            (VPBROADCASTBrr VR128:$src)>;
  def : Pat<(v32i8 (X86VBroadcast (v16i8 VR128:$src))),
            (VPBROADCASTBYrr VR128:$src)>;
  def : Pat<(v8i16 (X86VBroadcast (v8i16 VR128:$src))),
            (VPBROADCASTWrr VR128:$src)>;
  def : Pat<(v16i16 (X86VBroadcast (v8i16 VR128:$src))),
            (VPBROADCASTWYrr VR128:$src)>;
  def : Pat<(v4i32 (X86VBroadcast (v4i32 VR128:$src))),
            (VPBROADCASTDrr VR128:$src)>;
  def : Pat<(v8i32 (X86VBroadcast (v4i32 VR128:$src))),
            (VPBROADCASTDYrr VR128:$src)>;
  def : Pat<(v2i64 (X86VBroadcast (v2i64 VR128:$src))),
            (VPBROADCASTQrr VR128:$src)>;
  def : Pat<(v4i64 (X86VBroadcast (v2i64 VR128:$src))),
            (VPBROADCASTQYrr VR128:$src)>;
  def : Pat<(v4f32 (X86VBroadcast (v4f32 VR128:$src))),
            (VBROADCASTSSrr VR128:$src)>;
  def : Pat<(v8f32 (X86VBroadcast (v4f32 VR128:$src))),
            (VBROADCASTSSYrr VR128:$src)>;
  def : Pat<(v2f64 (X86VBroadcast (v2f64 VR128:$src))),
            (VPBROADCASTQrr VR128:$src)>;
  def : Pat<(v4f64 (X86VBroadcast (v2f64 VR128:$src))),
            (VBROADCASTSDYrr VR128:$src)>;

  // Provide fallbacks for when the load node feeding the patterns above has
  // additional users, which prevents those patterns from being selected.
  let AddedComplexity = 20 in {
    def : Pat<(v4f32 (X86VBroadcast FR32:$src)),
              (VBROADCASTSSrr (COPY_TO_REGCLASS FR32:$src, VR128))>;
    def : Pat<(v8f32 (X86VBroadcast FR32:$src)),
              (VBROADCASTSSYrr (COPY_TO_REGCLASS FR32:$src, VR128))>;
    def : Pat<(v4f64 (X86VBroadcast FR64:$src)),
              (VBROADCASTSDYrr (COPY_TO_REGCLASS FR64:$src, VR128))>;

    def : Pat<(v4i32 (X86VBroadcast GR32:$src)),
              (VBROADCASTSSrr (COPY_TO_REGCLASS GR32:$src, VR128))>;
    def : Pat<(v8i32 (X86VBroadcast GR32:$src)),
              (VBROADCASTSSYrr (COPY_TO_REGCLASS GR32:$src, VR128))>;
    def : Pat<(v4i64 (X86VBroadcast GR64:$src)),
              (VBROADCASTSDYrr (COPY_TO_REGCLASS GR64:$src, VR128))>;
  }
}
// AVX1 broadcast patterns
let Predicates = [HasAVX1Only] in {
  def : Pat<(v8i32 (X86VBroadcast (loadi32 addr:$src))),
            (VBROADCASTSSYrm addr:$src)>;
  def : Pat<(v4i64 (X86VBroadcast (loadi64 addr:$src))),
            (VBROADCASTSDYrm addr:$src)>;
  def : Pat<(v4i32 (X86VBroadcast (loadi32 addr:$src))),
            (VBROADCASTSSrm addr:$src)>;
}
let Predicates = [HasAVX] in {
  def : Pat<(v8f32 (X86VBroadcast (loadf32 addr:$src))),
            (VBROADCASTSSYrm addr:$src)>;
  def : Pat<(v4f64 (X86VBroadcast (loadf64 addr:$src))),
            (VBROADCASTSDYrm addr:$src)>;
  def : Pat<(v4f32 (X86VBroadcast (loadf32 addr:$src))),
            (VBROADCASTSSrm addr:$src)>;

  // Provide fallbacks for when the load node feeding the patterns above has
  // additional users, which prevents those patterns from being selected.
  let AddedComplexity = 20 in {
    // 128bit broadcasts:
    def : Pat<(v4f32 (X86VBroadcast FR32:$src)),
              (VPSHUFDri (COPY_TO_REGCLASS FR32:$src, VR128), 0)>;
    def : Pat<(v8f32 (X86VBroadcast FR32:$src)),
              (VINSERTF128rr (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)),
                (VPSHUFDri (COPY_TO_REGCLASS FR32:$src, VR128), 0), sub_xmm),
                (VPSHUFDri (COPY_TO_REGCLASS FR32:$src, VR128), 0), 1)>;
    def : Pat<(v4f64 (X86VBroadcast FR64:$src)),
              (VINSERTF128rr (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)),
                (VPSHUFDri (COPY_TO_REGCLASS FR64:$src, VR128), 0x44), sub_xmm),
                (VPSHUFDri (COPY_TO_REGCLASS FR64:$src, VR128), 0x44), 1)>;

    def : Pat<(v4i32 (X86VBroadcast GR32:$src)),
              (VPSHUFDri (COPY_TO_REGCLASS GR32:$src, VR128), 0)>;
    def : Pat<(v8i32 (X86VBroadcast GR32:$src)),
              (VINSERTF128rr (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)),
                (VPSHUFDri (COPY_TO_REGCLASS GR32:$src, VR128), 0), sub_xmm),
                (VPSHUFDri (COPY_TO_REGCLASS GR32:$src, VR128), 0), 1)>;
    def : Pat<(v4i64 (X86VBroadcast GR64:$src)),
              (VINSERTF128rr (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)),
                (VPSHUFDri (COPY_TO_REGCLASS GR64:$src, VR128), 0x44), sub_xmm),
                (VPSHUFDri (COPY_TO_REGCLASS GR64:$src, VR128), 0x44), 1)>;
  }
}
//===----------------------------------------------------------------------===//
// VPERM - Permute instructions
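//
// Editor's note: unlike vpermilps/vpermilpd above, these AVX2 permutes move
// elements across the two 128-bit lanes; vpermd/vpermps take per-element
// indices from a vector, vpermq/vpermpd from an immediate, e.g.
//   vpermq $0x4e, %ymm0, %ymm1   ; swap the 128-bit halves of ymm0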
multiclass avx2_perm<bits<8> opc, string OpcodeStr, PatFrag mem_frag,
                     ValueType OpVT> {
  def Yrr : AVX28I<opc, MRMSrcReg, (outs VR256:$dst),
                   (ins VR256:$src1, VR256:$src2),
                   !strconcat(OpcodeStr,
                       "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   [(set VR256:$dst,
                     (OpVT (X86VPermv VR256:$src1, VR256:$src2)))]>, VEX_4V;
  def Yrm : AVX28I<opc, MRMSrcMem, (outs VR256:$dst),
                   (ins VR256:$src1, i256mem:$src2),
                   !strconcat(OpcodeStr,
                       "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   [(set VR256:$dst,
                     (OpVT (X86VPermv VR256:$src1,
                            (bitconvert (mem_frag addr:$src2)))))]>,
                   VEX_4V;
}

defm VPERMD : avx2_perm<0x36, "vpermd", memopv4i64, v8i32>;
let ExeDomain = SSEPackedSingle in
defm VPERMPS : avx2_perm<0x16, "vpermps", memopv8f32, v8f32>;
multiclass avx2_perm_imm<bits<8> opc, string OpcodeStr, PatFrag mem_frag,
                         ValueType OpVT> {
  def Yri : AVX2AIi8<opc, MRMSrcReg, (outs VR256:$dst),
                     (ins VR256:$src1, i8imm:$src2),
                     !strconcat(OpcodeStr,
                         "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                     [(set VR256:$dst,
                       (OpVT (X86VPermi VR256:$src1, (i8 imm:$src2))))]>, VEX;
  def Ymi : AVX2AIi8<opc, MRMSrcMem, (outs VR256:$dst),
                     (ins i256mem:$src1, i8imm:$src2),
                     !strconcat(OpcodeStr,
                         "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                     [(set VR256:$dst,
                       (OpVT (X86VPermi (mem_frag addr:$src1),
                              (i8 imm:$src2))))]>, VEX;
}

defm VPERMQ : avx2_perm_imm<0x00, "vpermq", memopv4i64, v4i64>, VEX_W;
let ExeDomain = SSEPackedDouble in
defm VPERMPD : avx2_perm_imm<0x01, "vpermpd", memopv4f64, v4f64>, VEX_W;
//===----------------------------------------------------------------------===//
// VPERM2I128 - Permute Integer Values in 128-bit chunks
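//
// Editor's note: same lane-selector immediate encoding as vperm2f128 above,
// applied to the integer domain.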
def VPERM2I128rr : AVX2AIi8<0x46, MRMSrcReg, (outs VR256:$dst),
          (ins VR256:$src1, VR256:$src2, i8imm:$src3),
          "vperm2i128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          [(set VR256:$dst, (v4i64 (X86VPerm2x128 VR256:$src1, VR256:$src2,
                                    (i8 imm:$src3))))]>, VEX_4V;
def VPERM2I128rm : AVX2AIi8<0x46, MRMSrcMem, (outs VR256:$dst),
          (ins VR256:$src1, f256mem:$src2, i8imm:$src3),
          "vperm2i128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          [(set VR256:$dst, (X86VPerm2x128 VR256:$src1, (memopv4i64 addr:$src2),
                             (i8 imm:$src3)))]>, VEX_4V;
let Predicates = [HasAVX2] in {
def : Pat<(v8i32 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPERM2I128rr VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v32i8 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPERM2I128rr VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v16i16 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPERM2I128rr VR256:$src1, VR256:$src2, imm:$imm)>;

def : Pat<(v32i8 (X86VPerm2x128 VR256:$src1, (bc_v32i8 (memopv4i64 addr:$src2)),
                  (i8 imm:$imm))),
          (VPERM2I128rm VR256:$src1, addr:$src2, imm:$imm)>;
def : Pat<(v16i16 (X86VPerm2x128 VR256:$src1,
                   (bc_v16i16 (memopv4i64 addr:$src2)), (i8 imm:$imm))),
          (VPERM2I128rm VR256:$src1, addr:$src2, imm:$imm)>;
def : Pat<(v8i32 (X86VPerm2x128 VR256:$src1, (bc_v8i32 (memopv4i64 addr:$src2)),
                  (i8 imm:$imm))),
          (VPERM2I128rm VR256:$src1, addr:$src2, imm:$imm)>;
}
//===----------------------------------------------------------------------===//
// VINSERTI128 - Insert packed integer values

let neverHasSideEffects = 1 in {
def VINSERTI128rr : AVX2AIi8<0x38, MRMSrcReg, (outs VR256:$dst),
          (ins VR256:$src1, VR128:$src2, i8imm:$src3),
          "vinserti128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, VEX_4V;
let mayLoad = 1 in
def VINSERTI128rm : AVX2AIi8<0x38, MRMSrcMem, (outs VR256:$dst),
          (ins VR256:$src1, i128mem:$src2, i8imm:$src3),
          "vinserti128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, VEX_4V;
}
let Predicates = [HasAVX2] in {
def : Pat<(vinsertf128_insert:$ins (v4i64 VR256:$src1), (v2i64 VR128:$src2),
                                   (i32 imm)),
          (VINSERTI128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v8i32 VR256:$src1), (v4i32 VR128:$src2),
                                   (i32 imm)),
          (VINSERTI128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v32i8 VR256:$src1), (v16i8 VR128:$src2),
                                   (i32 imm)),
          (VINSERTI128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v16i16 VR256:$src1), (v8i16 VR128:$src2),
                                   (i32 imm)),
          (VINSERTI128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;

def : Pat<(vinsertf128_insert:$ins (v4i64 VR256:$src1), (loadv2i64 addr:$src2),
                                   (i32 imm)),
          (VINSERTI128rm VR256:$src1, addr:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
}
//===----------------------------------------------------------------------===//
// VEXTRACTI128 - Extract packed integer values

def VEXTRACTI128rr : AVX2AIi8<0x39, MRMDestReg, (outs VR128:$dst),
          (ins VR256:$src1, i8imm:$src2),
          "vextracti128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
          [(set VR128:$dst,
            (int_x86_avx2_vextracti128 VR256:$src1, imm:$src2))]>,
          VEX;
let neverHasSideEffects = 1, mayStore = 1 in
def VEXTRACTI128mr : AVX2AIi8<0x39, MRMDestMem, (outs),
          (ins i128mem:$dst, VR256:$src1, i8imm:$src2),
          "vextracti128\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>, VEX;
let Predicates = [HasAVX2] in {
def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
          (v2i64 (VEXTRACTI128rr
                    (v4i64 VR256:$src1),
                    (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
          (v4i32 (VEXTRACTI128rr
                    (v8i32 VR256:$src1),
                    (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
          (v8i16 (VEXTRACTI128rr
                    (v16i16 VR256:$src1),
                    (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
          (v16i8 (VEXTRACTI128rr
                    (v32i8 VR256:$src1),
                    (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
}
//===----------------------------------------------------------------------===//
// VPMASKMOV - Conditional SIMD Integer Packed Loads and Stores
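//
// Editor's note: the integer (AVX2) counterpart of vmaskmovps/pd above, with
// the same per-element sign-bit masking; element width is dword or qword.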
multiclass avx2_pmovmask<string OpcodeStr,
                         Intrinsic IntLd128, Intrinsic IntLd256,
                         Intrinsic IntSt128, Intrinsic IntSt256> {
  def rm  : AVX28I<0x8c, MRMSrcMem, (outs VR128:$dst),
             (ins VR128:$src1, i128mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR128:$dst, (IntLd128 addr:$src2, VR128:$src1))]>, VEX_4V;
  def Yrm : AVX28I<0x8c, MRMSrcMem, (outs VR256:$dst),
             (ins VR256:$src1, i256mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR256:$dst, (IntLd256 addr:$src2, VR256:$src1))]>, VEX_4V;
  def mr  : AVX28I<0x8e, MRMDestMem, (outs),
             (ins i128mem:$dst, VR128:$src1, VR128:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(IntSt128 addr:$dst, VR128:$src1, VR128:$src2)]>, VEX_4V;
  def Ymr : AVX28I<0x8e, MRMDestMem, (outs),
             (ins i256mem:$dst, VR256:$src1, VR256:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(IntSt256 addr:$dst, VR256:$src1, VR256:$src2)]>, VEX_4V;
}

defm VPMASKMOVD : avx2_pmovmask<"vpmaskmovd",
                                int_x86_avx2_maskload_d,
                                int_x86_avx2_maskload_d_256,
                                int_x86_avx2_maskstore_d,
                                int_x86_avx2_maskstore_d_256>;
defm VPMASKMOVQ : avx2_pmovmask<"vpmaskmovq",
                                int_x86_avx2_maskload_q,
                                int_x86_avx2_maskload_q_256,
                                int_x86_avx2_maskstore_q,
                                int_x86_avx2_maskstore_q_256>, VEX_W;
//===----------------------------------------------------------------------===//
// Variable Bit Shifts
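//
// Editor's note: these shift each element by the count held in the matching
// element of the second operand. For vpsllv*/vpsrlv*, counts greater than or
// equal to the element width produce zero, e.g.
//   vpsllvd %xmm1, %xmm0, %xmm2   ; xmm2[i] = xmm0[i] << xmm1[i]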
multiclass avx2_var_shift<bits<8> opc, string OpcodeStr, SDNode OpNode,
                          ValueType vt128, ValueType vt256> {
  def rr  : AVX28I<opc, MRMSrcReg, (outs VR128:$dst),
             (ins VR128:$src1, VR128:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR128:$dst,
               (vt128 (OpNode VR128:$src1, (vt128 VR128:$src2))))]>,
             VEX_4V;
  def rm  : AVX28I<opc, MRMSrcMem, (outs VR128:$dst),
             (ins VR128:$src1, i128mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR128:$dst,
               (vt128 (OpNode VR128:$src1,
                       (vt128 (bitconvert (memopv2i64 addr:$src2))))))]>,
             VEX_4V;
  def Yrr : AVX28I<opc, MRMSrcReg, (outs VR256:$dst),
             (ins VR256:$src1, VR256:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR256:$dst,
               (vt256 (OpNode VR256:$src1, (vt256 VR256:$src2))))]>,
             VEX_4V;
  def Yrm : AVX28I<opc, MRMSrcMem, (outs VR256:$dst),
             (ins VR256:$src1, i256mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR256:$dst,
               (vt256 (OpNode VR256:$src1,
                       (vt256 (bitconvert (memopv4i64 addr:$src2))))))]>,
             VEX_4V;
}

defm VPSLLVD : avx2_var_shift<0x47, "vpsllvd", shl, v4i32, v8i32>;
defm VPSLLVQ : avx2_var_shift<0x47, "vpsllvq", shl, v2i64, v4i64>, VEX_W;
defm VPSRLVD : avx2_var_shift<0x45, "vpsrlvd", srl, v4i32, v8i32>;
defm VPSRLVQ : avx2_var_shift<0x45, "vpsrlvq", srl, v2i64, v4i64>, VEX_W;
defm VPSRAVD : avx2_var_shift<0x46, "vpsravd", sra, v4i32, v8i32>;
//===----------------------------------------------------------------------===//
// VGATHER - GATHER Operations
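//
// Editor's note: gathers load elements through a vector of indices. The mask
// register is both an input (the sign bit enables an element) and an output
// ($mask_wb): mask bits are cleared as elements arrive, which is what makes
// the instruction restartable after a fault. The patterns are empty;
// selection presumably happens outside the usual DAG patterns, via the
// gather intrinsics.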
multiclass avx2_gather<bits<8> opc, string OpcodeStr, RegisterClass RC256,
                       X86MemOperand memop128, X86MemOperand memop256> {
  def rm  : AVX28I<opc, MRMSrcMem, (outs VR128:$dst, VR128:$mask_wb),
            (ins VR128:$src1, memop128:$src2, VR128:$mask),
            !strconcat(OpcodeStr,
              "\t{$mask, $src2, $dst|$dst, $src2, $mask}"),
            []>, VEX_4VOp3;
  def Yrm : AVX28I<opc, MRMSrcMem, (outs RC256:$dst, RC256:$mask_wb),
            (ins RC256:$src1, memop256:$src2, RC256:$mask),
            !strconcat(OpcodeStr,
              "\t{$mask, $src2, $dst|$dst, $src2, $mask}"),
            []>, VEX_4VOp3, VEX_L;
}

let mayLoad = 1, Constraints = "$src1 = $dst, $mask = $mask_wb" in {
  defm VGATHERDPD : avx2_gather<0x92, "vgatherdpd", VR256, vx64mem, vx64mem>, VEX_W;
  defm VGATHERQPD : avx2_gather<0x93, "vgatherqpd", VR256, vx64mem, vy64mem>, VEX_W;
  defm VGATHERDPS : avx2_gather<0x92, "vgatherdps", VR256, vx32mem, vy32mem>;
  defm VGATHERQPS : avx2_gather<0x93, "vgatherqps", VR128, vx32mem, vy32mem>;
  defm VPGATHERDQ : avx2_gather<0x90, "vpgatherdq", VR256, vx64mem, vx64mem>, VEX_W;
  defm VPGATHERQQ : avx2_gather<0x91, "vpgatherqq", VR256, vx64mem, vy64mem>, VEX_W;
  defm VPGATHERDD : avx2_gather<0x90, "vpgatherdd", VR256, vx32mem, vy32mem>;
  defm VPGATHERQD : avx2_gather<0x91, "vpgatherqd", VR128, vx32mem, vy32mem>;
}