//===-- X86InstrSSE.td - SSE Instruction Set ---------------*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the X86 SSE instruction set, defining the instructions,
// and properties of the instructions which are needed for code generation,
// machine code emission, and analysis.
//
//===----------------------------------------------------------------------===//
class OpndItins<InstrItinClass arg_rr, InstrItinClass arg_rm> {
  InstrItinClass rr = arg_rr;
  InstrItinClass rm = arg_rm;
}

class SizeItins<OpndItins arg_s, OpndItins arg_d> {
  OpndItins s = arg_s;
  OpndItins d = arg_d;
}

class ShiftOpndItins<InstrItinClass arg_rr, InstrItinClass arg_rm,
                     InstrItinClass arg_ri> {
  InstrItinClass rr = arg_rr;
  InstrItinClass rm = arg_rm;
  InstrItinClass ri = arg_ri;
}
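// A minimal usage sketch (not a def from this file): an instruction
// multiclass receives one of these bundles and attaches itins.rr to its
// register-register form and itins.rm to its register-memory form;
// ShiftOpndItins adds itins.ri for the register-immediate shift encodings.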
// scalar
def SSE_ALU_F32S : OpndItins<
  IIC_SSE_ALU_F32S_RR, IIC_SSE_ALU_F32S_RM
>;

def SSE_ALU_F64S : OpndItins<
  IIC_SSE_ALU_F64S_RR, IIC_SSE_ALU_F64S_RM
>;

def SSE_ALU_ITINS_S : SizeItins<
  SSE_ALU_F32S, SSE_ALU_F64S
>;

def SSE_MUL_F32S : OpndItins<
  IIC_SSE_MUL_F32S_RR, IIC_SSE_MUL_F32S_RM
>;

def SSE_MUL_F64S : OpndItins<
  IIC_SSE_MUL_F64S_RR, IIC_SSE_MUL_F64S_RM
>;

def SSE_MUL_ITINS_S : SizeItins<
  SSE_MUL_F32S, SSE_MUL_F64S
>;

def SSE_DIV_F32S : OpndItins<
  IIC_SSE_DIV_F32S_RR, IIC_SSE_DIV_F32S_RM
>;

def SSE_DIV_F64S : OpndItins<
  IIC_SSE_DIV_F64S_RR, IIC_SSE_DIV_F64S_RM
>;

def SSE_DIV_ITINS_S : SizeItins<
  SSE_DIV_F32S, SSE_DIV_F64S
>;
// packed
def SSE_ALU_F32P : OpndItins<
  IIC_SSE_ALU_F32P_RR, IIC_SSE_ALU_F32P_RM
>;

def SSE_ALU_F64P : OpndItins<
  IIC_SSE_ALU_F64P_RR, IIC_SSE_ALU_F64P_RM
>;

def SSE_ALU_ITINS_P : SizeItins<
  SSE_ALU_F32P, SSE_ALU_F64P
>;

def SSE_MUL_F32P : OpndItins<
  IIC_SSE_MUL_F32P_RR, IIC_SSE_MUL_F32P_RM
>;

def SSE_MUL_F64P : OpndItins<
  IIC_SSE_MUL_F64P_RR, IIC_SSE_MUL_F64P_RM
>;

def SSE_MUL_ITINS_P : SizeItins<
  SSE_MUL_F32P, SSE_MUL_F64P
>;

def SSE_DIV_F32P : OpndItins<
  IIC_SSE_DIV_F32P_RR, IIC_SSE_DIV_F32P_RM
>;

def SSE_DIV_F64P : OpndItins<
  IIC_SSE_DIV_F64P_RR, IIC_SSE_DIV_F64P_RM
>;

def SSE_DIV_ITINS_P : SizeItins<
  SSE_DIV_F32P, SSE_DIV_F64P
>;
def SSE_BIT_ITINS_P : OpndItins<
  IIC_SSE_BIT_P_RR, IIC_SSE_BIT_P_RM
>;

def SSE_INTALU_ITINS_P : OpndItins<
  IIC_SSE_INTALU_P_RR, IIC_SSE_INTALU_P_RM
>;

def SSE_INTALUQ_ITINS_P : OpndItins<
  IIC_SSE_INTALUQ_P_RR, IIC_SSE_INTALUQ_P_RM
>;

def SSE_INTMUL_ITINS_P : OpndItins<
  IIC_SSE_INTMUL_P_RR, IIC_SSE_INTMUL_P_RM
>;

def SSE_INTSHIFT_ITINS_P : ShiftOpndItins<
  IIC_SSE_INTSH_P_RR, IIC_SSE_INTSH_P_RM, IIC_SSE_INTSH_P_RI
>;

def SSE_MOVA_ITINS : OpndItins<
  IIC_SSE_MOVA_P_RR, IIC_SSE_MOVA_P_RM
>;

def SSE_MOVU_ITINS : OpndItins<
  IIC_SSE_MOVU_P_RR, IIC_SSE_MOVU_P_RM
>;
//===----------------------------------------------------------------------===//
// SSE 1 & 2 Instruction Classes
//===----------------------------------------------------------------------===//
/// sse12_fp_scalar - SSE 1 & 2 scalar instructions class
multiclass sse12_fp_scalar<bits<8> opc, string OpcodeStr, SDNode OpNode,
                           RegisterClass RC, X86MemOperand x86memop,
                           OpndItins itins,
                           bit Is2Addr = 1> {
  let isCommutable = 1 in {
  def rr : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (OpNode RC:$src1, RC:$src2))], itins.rr>;
  }
  def rm : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (OpNode RC:$src1, (load addr:$src2)))], itins.rm>;
}
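// Purely illustrative instantiation (hypothetical operands, not a defm from
// this file):
//   defm ADD : sse12_fp_scalar<0x58, "addss", fadd, FR32, f32mem,
//                              SSE_ALU_F32S>, XS;
// would create ADDrr and ADDrm records. With the default Is2Addr = 1, the
// two-operand asm form ("addss\t{$src2, $dst|$dst, $src2}") is selected;
// passing Is2Addr = 0 picks the three-operand AVX-style form instead.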
/// sse12_fp_scalar_int - SSE 1 & 2 scalar instructions intrinsics class
multiclass sse12_fp_scalar_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
                               string asm, string SSEVer, string FPSizeStr,
                               Operand memopr, ComplexPattern mem_cpat,
                               OpndItins itins,
                               bit Is2Addr = 1> {
  def rr_Int : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (!cast<Intrinsic>(
                 !strconcat("int_x86_sse", SSEVer, "_", OpcodeStr, FPSizeStr))
             RC:$src1, RC:$src2))], itins.rr>;
  def rm_Int : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, memopr:$src2),
       !if(Is2Addr,
           !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (!cast<Intrinsic>(!strconcat("int_x86_sse",
                 SSEVer, "_", OpcodeStr, FPSizeStr))
             RC:$src1, mem_cpat:$src2))], itins.rm>;
}
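// The intrinsic record is looked up by name at TableGen time. As string
// mechanics only (operand strings chosen for illustration):
//   !strconcat("int_x86_sse", "2", "_", "add", "_sd") == "int_x86_sse2_add_sd"
// and !cast<Intrinsic> then resolves that record, i.e. llvm.x86.sse2.add.sd.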
/// sse12_fp_packed - SSE 1 & 2 packed instructions class
multiclass sse12_fp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
                           RegisterClass RC, ValueType vt,
                           X86MemOperand x86memop, PatFrag mem_frag,
                           Domain d, OpndItins itins, bit Is2Addr = 1> {
  let isCommutable = 1 in
  def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], itins.rr, d>;
  def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (OpNode RC:$src1, (mem_frag addr:$src2)))],
       itins.rm, d>;
}
/// sse12_fp_packed_logical_rm - SSE 1 & 2 packed instructions class
multiclass sse12_fp_packed_logical_rm<bits<8> opc, RegisterClass RC, Domain d,
                                      string OpcodeStr, X86MemOperand x86memop,
                                      list<dag> pat_rr, list<dag> pat_rm,
                                      bit Is2Addr = 1,
                                      bit rr_hasSideEffects = 0> {
  let isCommutable = 1, neverHasSideEffects = rr_hasSideEffects in
  def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       pat_rr, IIC_DEFAULT, d>;
  def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       pat_rm, IIC_DEFAULT, d>;
}
/// sse12_fp_packed_int - SSE 1 & 2 packed instructions intrinsics class
multiclass sse12_fp_packed_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
                               string asm, string SSEVer, string FPSizeStr,
                               X86MemOperand x86memop, PatFrag mem_frag,
                               Domain d, OpndItins itins, bit Is2Addr = 1> {
  def rr_Int : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (!cast<Intrinsic>(
                     !strconcat("int_x86_", SSEVer, "_", OpcodeStr, FPSizeStr))
             RC:$src1, RC:$src2))], IIC_DEFAULT, d>;
  def rm_Int : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1,x86memop:$src2),
       !if(Is2Addr,
           !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (!cast<Intrinsic>(
                     !strconcat("int_x86_", SSEVer, "_", OpcodeStr, FPSizeStr))
             RC:$src1, (mem_frag addr:$src2)))], IIC_DEFAULT, d>;
}
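// The Domain operand (SSEPackedSingle or SSEPackedDouble here) records each
// instruction's execution domain; ExecutionDepsFix uses it later to rewrite
// instructions so values avoid costly domain-crossing bypass delays.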
//===----------------------------------------------------------------------===//
// Non-instruction patterns
//===----------------------------------------------------------------------===//
// A vector extract of the first f32/f64 position is a subregister copy
def : Pat<(f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
          (COPY_TO_REGCLASS (v4f32 VR128:$src), FR32)>;
def : Pat<(f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
          (COPY_TO_REGCLASS (v2f64 VR128:$src), FR64)>;

// A 128-bit subvector extract from the first 256-bit vector position
// is a subregister copy that needs no instruction.
def : Pat<(v4i32 (extract_subvector (v8i32 VR256:$src), (iPTR 0))),
          (v4i32 (EXTRACT_SUBREG (v8i32 VR256:$src), sub_xmm))>;
def : Pat<(v4f32 (extract_subvector (v8f32 VR256:$src), (iPTR 0))),
          (v4f32 (EXTRACT_SUBREG (v8f32 VR256:$src), sub_xmm))>;

def : Pat<(v2i64 (extract_subvector (v4i64 VR256:$src), (iPTR 0))),
          (v2i64 (EXTRACT_SUBREG (v4i64 VR256:$src), sub_xmm))>;
def : Pat<(v2f64 (extract_subvector (v4f64 VR256:$src), (iPTR 0))),
          (v2f64 (EXTRACT_SUBREG (v4f64 VR256:$src), sub_xmm))>;

def : Pat<(v8i16 (extract_subvector (v16i16 VR256:$src), (iPTR 0))),
          (v8i16 (EXTRACT_SUBREG (v16i16 VR256:$src), sub_xmm))>;
def : Pat<(v16i8 (extract_subvector (v32i8 VR256:$src), (iPTR 0))),
          (v16i8 (EXTRACT_SUBREG (v32i8 VR256:$src), sub_xmm))>;
// A 128-bit subvector insert to the first 256-bit vector position
// is a subregister copy that needs no instruction.
let AddedComplexity = 25 in { // to give priority over vinsertf128rm
def : Pat<(insert_subvector undef, (v2i64 VR128:$src), (iPTR 0)),
          (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
def : Pat<(insert_subvector undef, (v2f64 VR128:$src), (iPTR 0)),
          (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
def : Pat<(insert_subvector undef, (v4i32 VR128:$src), (iPTR 0)),
          (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
def : Pat<(insert_subvector undef, (v4f32 VR128:$src), (iPTR 0)),
          (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
def : Pat<(insert_subvector undef, (v8i16 VR128:$src), (iPTR 0)),
          (INSERT_SUBREG (v16i16 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
def : Pat<(insert_subvector undef, (v16i8 VR128:$src), (iPTR 0)),
          (INSERT_SUBREG (v32i8 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
}
// Implicitly promote a 32-bit scalar to a vector.
def : Pat<(v4f32 (scalar_to_vector FR32:$src)),
          (COPY_TO_REGCLASS FR32:$src, VR128)>;
def : Pat<(v8f32 (scalar_to_vector FR32:$src)),
          (COPY_TO_REGCLASS FR32:$src, VR128)>;
// Implicitly promote a 64-bit scalar to a vector.
def : Pat<(v2f64 (scalar_to_vector FR64:$src)),
          (COPY_TO_REGCLASS FR64:$src, VR128)>;
def : Pat<(v4f64 (scalar_to_vector FR64:$src)),
          (COPY_TO_REGCLASS FR64:$src, VR128)>;
// Bitcasts between 128-bit vector types. Return the original type since
// no instruction is needed for the conversion.
let Predicates = [HasSSE2] in {
  def : Pat<(v2i64 (bitconvert (v4i32 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v2i64 (bitconvert (v8i16 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v2i64 (bitconvert (v16i8 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v2i64 (bitconvert (v2f64 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v2i64 (bitconvert (v4f32 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v2i64 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v8i16 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v16i8 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v2f64 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v4f32 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v2i64 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v4i32 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v16i8 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v2f64 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v4f32 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v2i64 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v4i32 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v8i16 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v2f64 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v4f32 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v2i64 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v4i32 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v8i16 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v16i8 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v2f64 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v2i64 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v4i32 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v8i16 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v16i8 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v4f32 VR128:$src))), (v2f64 VR128:$src)>;
}
// Bitcasts between 256-bit vector types. Return the original type since
// no instruction is needed for the conversion.
let Predicates = [HasAVX] in {
  def : Pat<(v4f64  (bitconvert (v8f32 VR256:$src))),  (v4f64 VR256:$src)>;
  def : Pat<(v4f64  (bitconvert (v8i32 VR256:$src))),  (v4f64 VR256:$src)>;
  def : Pat<(v4f64  (bitconvert (v4i64 VR256:$src))),  (v4f64 VR256:$src)>;
  def : Pat<(v4f64  (bitconvert (v16i16 VR256:$src))), (v4f64 VR256:$src)>;
  def : Pat<(v4f64  (bitconvert (v32i8 VR256:$src))),  (v4f64 VR256:$src)>;
  def : Pat<(v8f32  (bitconvert (v8i32 VR256:$src))),  (v8f32 VR256:$src)>;
  def : Pat<(v8f32  (bitconvert (v4i64 VR256:$src))),  (v8f32 VR256:$src)>;
  def : Pat<(v8f32  (bitconvert (v4f64 VR256:$src))),  (v8f32 VR256:$src)>;
  def : Pat<(v8f32  (bitconvert (v32i8 VR256:$src))),  (v8f32 VR256:$src)>;
  def : Pat<(v8f32  (bitconvert (v16i16 VR256:$src))), (v8f32 VR256:$src)>;
  def : Pat<(v4i64  (bitconvert (v8f32 VR256:$src))),  (v4i64 VR256:$src)>;
  def : Pat<(v4i64  (bitconvert (v8i32 VR256:$src))),  (v4i64 VR256:$src)>;
  def : Pat<(v4i64  (bitconvert (v4f64 VR256:$src))),  (v4i64 VR256:$src)>;
  def : Pat<(v4i64  (bitconvert (v32i8 VR256:$src))),  (v4i64 VR256:$src)>;
  def : Pat<(v4i64  (bitconvert (v16i16 VR256:$src))), (v4i64 VR256:$src)>;
  def : Pat<(v32i8  (bitconvert (v4f64 VR256:$src))),  (v32i8 VR256:$src)>;
  def : Pat<(v32i8  (bitconvert (v4i64 VR256:$src))),  (v32i8 VR256:$src)>;
  def : Pat<(v32i8  (bitconvert (v8f32 VR256:$src))),  (v32i8 VR256:$src)>;
  def : Pat<(v32i8  (bitconvert (v8i32 VR256:$src))),  (v32i8 VR256:$src)>;
  def : Pat<(v32i8  (bitconvert (v16i16 VR256:$src))), (v32i8 VR256:$src)>;
  def : Pat<(v8i32  (bitconvert (v32i8 VR256:$src))),  (v8i32 VR256:$src)>;
  def : Pat<(v8i32  (bitconvert (v16i16 VR256:$src))), (v8i32 VR256:$src)>;
  def : Pat<(v8i32  (bitconvert (v8f32 VR256:$src))),  (v8i32 VR256:$src)>;
  def : Pat<(v8i32  (bitconvert (v4i64 VR256:$src))),  (v8i32 VR256:$src)>;
  def : Pat<(v8i32  (bitconvert (v4f64 VR256:$src))),  (v8i32 VR256:$src)>;
  def : Pat<(v16i16 (bitconvert (v8f32 VR256:$src))),  (v16i16 VR256:$src)>;
  def : Pat<(v16i16 (bitconvert (v8i32 VR256:$src))),  (v16i16 VR256:$src)>;
  def : Pat<(v16i16 (bitconvert (v4i64 VR256:$src))),  (v16i16 VR256:$src)>;
  def : Pat<(v16i16 (bitconvert (v4f64 VR256:$src))),  (v16i16 VR256:$src)>;
  def : Pat<(v16i16 (bitconvert (v32i8 VR256:$src))),  (v16i16 VR256:$src)>;
}
// Alias instructions that map fld0 to xorps for sse or vxorps for avx.
// This is expanded by ExpandPostRAPseudos.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
    isPseudo = 1 in {
  def FsFLD0SS : I<0, Pseudo, (outs FR32:$dst), (ins), "",
                   [(set FR32:$dst, fp32imm0)]>, Requires<[HasSSE1]>;
  def FsFLD0SD : I<0, Pseudo, (outs FR64:$dst), (ins), "",
                   [(set FR64:$dst, fpimm0)]>, Requires<[HasSSE2]>;
}
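// After register allocation, ExpandPostRAPseudos rewrites these pseudos;
// materializing +0.0 typically becomes a zeroing idiom such as
// "xorps %xmm0, %xmm0" (or "vxorps" under AVX), which most cores recognize
// as dependency-breaking.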
//===----------------------------------------------------------------------===//
// AVX & SSE - Zero/One Vectors
//===----------------------------------------------------------------------===//
// Alias instruction that maps zero vector to pxor / xorp* for sse.
// This is expanded by ExpandPostRAPseudos to an xorps / vxorps, and then
// swizzled by ExecutionDepsFix to pxor.
// We set canFoldAsLoad because this can be converted to a constant-pool
// load of an all-zeros value if folding it would be beneficial.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
    isPseudo = 1 in {
def V_SET0 : I<0, Pseudo, (outs VR128:$dst), (ins), "",
               [(set VR128:$dst, (v4f32 immAllZerosV))]>;
}

def : Pat<(v2f64 immAllZerosV), (V_SET0)>;
def : Pat<(v4i32 immAllZerosV), (V_SET0)>;
def : Pat<(v2i64 immAllZerosV), (V_SET0)>;
def : Pat<(v8i16 immAllZerosV), (V_SET0)>;
def : Pat<(v16i8 immAllZerosV), (V_SET0)>;
// The same as done above, but for AVX. The 256-bit AVX1 ISA doesn't support
// PI, and doesn't need it, because on Sandy Bridge the register is set to
// zero at the rename stage without using any execution unit, so SET0PSY
// and SET0PDY can be used for vector int instructions without penalty.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
    isPseudo = 1, Predicates = [HasAVX] in {
def AVX_SET0 : I<0, Pseudo, (outs VR256:$dst), (ins), "",
                 [(set VR256:$dst, (v8f32 immAllZerosV))]>;
}

let Predicates = [HasAVX] in
  def : Pat<(v4f64 immAllZerosV), (AVX_SET0)>;

let Predicates = [HasAVX2] in {
  def : Pat<(v4i64 immAllZerosV), (AVX_SET0)>;
  def : Pat<(v8i32 immAllZerosV), (AVX_SET0)>;
  def : Pat<(v16i16 immAllZerosV), (AVX_SET0)>;
  def : Pat<(v32i8 immAllZerosV), (AVX_SET0)>;
}
// AVX1 has no support for 256-bit integer instructions, but since the 128-bit
// VPXOR instruction writes zero to its upper part, it's safe to build zeros.
let Predicates = [HasAVX1Only] in {
def : Pat<(v32i8 immAllZerosV), (SUBREG_TO_REG (i8 0), (V_SET0), sub_xmm)>;
def : Pat<(bc_v32i8 (v8f32 immAllZerosV)),
          (SUBREG_TO_REG (i8 0), (V_SET0), sub_xmm)>;

def : Pat<(v16i16 immAllZerosV), (SUBREG_TO_REG (i16 0), (V_SET0), sub_xmm)>;
def : Pat<(bc_v16i16 (v8f32 immAllZerosV)),
          (SUBREG_TO_REG (i16 0), (V_SET0), sub_xmm)>;

def : Pat<(v8i32 immAllZerosV), (SUBREG_TO_REG (i32 0), (V_SET0), sub_xmm)>;
def : Pat<(bc_v8i32 (v8f32 immAllZerosV)),
          (SUBREG_TO_REG (i32 0), (V_SET0), sub_xmm)>;

def : Pat<(v4i64 immAllZerosV), (SUBREG_TO_REG (i64 0), (V_SET0), sub_xmm)>;
def : Pat<(bc_v4i64 (v8f32 immAllZerosV)),
          (SUBREG_TO_REG (i64 0), (V_SET0), sub_xmm)>;
}
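// The SUBREG_TO_REG trick works because every VEX-encoded write to an xmm
// register implicitly zeroes bits 255:128 of the containing ymm register, so
// a 128-bit V_SET0 placed in sub_xmm already yields a full 256-bit zero.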
// We set canFoldAsLoad because this can be converted to a constant-pool
// load of an all-ones value if folding it would be beneficial.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
    isPseudo = 1 in {
  def V_SETALLONES : I<0, Pseudo, (outs VR128:$dst), (ins), "",
                       [(set VR128:$dst, (v4i32 immAllOnesV))]>;
  let Predicates = [HasAVX2] in
  def AVX2_SETALLONES : I<0, Pseudo, (outs VR256:$dst), (ins), "",
                          [(set VR256:$dst, (v8i32 immAllOnesV))]>;
}
//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move FP Scalar Instructions
//
// Move Instructions. Register-to-register movss/movsd is not used for FR32/64
// register copies because it's a partial register update;
// FsMOVAPSrr/FsMOVAPDrr is used instead. Register-to-register movss/movsd is
// not modeled as an INSERT_SUBREG because INSERT_SUBREG requires that the
// insert be implementable in terms of a copy, and, as just mentioned, we
// don't use movss/movsd for copies.
//===----------------------------------------------------------------------===//
class sse12_move_rr<RegisterClass RC, SDNode OpNode, ValueType vt, string asm> :
      SI<0x10, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, RC:$src2), asm,
      [(set VR128:$dst, (vt (OpNode VR128:$src1,
                             (scalar_to_vector RC:$src2))))],
      IIC_SSE_MOV_S_RR>;

// Loading from memory automatically zeroes the upper bits.
class sse12_move_rm<RegisterClass RC, X86MemOperand x86memop,
                    PatFrag mem_pat, string OpcodeStr> :
      SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
         !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
         [(set RC:$dst, (mem_pat addr:$src))],
         IIC_SSE_MOV_S_RM>;
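// Semantics being modeled (a summary, not a definition): with register
// operands MOVSS/MOVSD only replace the low 32/64 bits of the destination,
// leaving the upper lanes of $src1 intact -- hence the scalar_to_vector
// merge pattern above. With a memory operand they instead zero the upper
// lanes, which is why sse12_move_rm can load straight into a scalar class.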
def VMOVSSrr : sse12_move_rr<FR32, X86Movss, v4f32,
                "movss\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XS, VEX_4V,
                VEX_LIG;
def VMOVSDrr : sse12_move_rr<FR64, X86Movsd, v2f64,
                "movsd\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XD, VEX_4V,
                VEX_LIG;

// For the disassembler
let isCodeGenOnly = 1 in {
  def VMOVSSrr_REV : SI<0x11, MRMDestReg, (outs VR128:$dst),
                        (ins VR128:$src1, FR32:$src2),
                        "movss\t{$src2, $src1, $dst|$dst, $src1, $src2}", [],
                        IIC_SSE_MOV_S_RR>, XS, VEX_4V, VEX_LIG;
  def VMOVSDrr_REV : SI<0x11, MRMDestReg, (outs VR128:$dst),
                        (ins VR128:$src1, FR64:$src2),
                        "movsd\t{$src2, $src1, $dst|$dst, $src1, $src2}", [],
                        IIC_SSE_MOV_S_RR>, XD, VEX_4V, VEX_LIG;
}
let canFoldAsLoad = 1, isReMaterializable = 1 in {
  def VMOVSSrm : sse12_move_rm<FR32, f32mem, loadf32, "movss">, XS, VEX,
                 VEX_LIG;
  let AddedComplexity = 20 in
    def VMOVSDrm : sse12_move_rm<FR64, f64mem, loadf64, "movsd">, XD, VEX,
                   VEX_LIG;
}

def VMOVSSmr : SI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
                  "movss\t{$src, $dst|$dst, $src}",
                  [(store FR32:$src, addr:$dst)], IIC_SSE_MOV_S_MR>,
                  XS, VEX, VEX_LIG;
def VMOVSDmr : SI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
                  "movsd\t{$src, $dst|$dst, $src}",
                  [(store FR64:$src, addr:$dst)], IIC_SSE_MOV_S_MR>,
                  XD, VEX, VEX_LIG;
let Constraints = "$src1 = $dst" in {
  def MOVSSrr : sse12_move_rr<FR32, X86Movss, v4f32,
                          "movss\t{$src2, $dst|$dst, $src2}">, XS;
  def MOVSDrr : sse12_move_rr<FR64, X86Movsd, v2f64,
                          "movsd\t{$src2, $dst|$dst, $src2}">, XD;

  // For the disassembler
  let isCodeGenOnly = 1 in {
    def MOVSSrr_REV : SI<0x11, MRMDestReg, (outs VR128:$dst),
                         (ins VR128:$src1, FR32:$src2),
                         "movss\t{$src2, $dst|$dst, $src2}", [],
                         IIC_SSE_MOV_S_RR>, XS;
    def MOVSDrr_REV : SI<0x11, MRMDestReg, (outs VR128:$dst),
                         (ins VR128:$src1, FR64:$src2),
                         "movsd\t{$src2, $dst|$dst, $src2}", [],
                         IIC_SSE_MOV_S_RR>, XD;
  }
}

let canFoldAsLoad = 1, isReMaterializable = 1 in {
  def MOVSSrm : sse12_move_rm<FR32, f32mem, loadf32, "movss">, XS;

  let AddedComplexity = 20 in
    def MOVSDrm : sse12_move_rm<FR64, f64mem, loadf64, "movsd">, XD;
}

def MOVSSmr : SSI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
                  "movss\t{$src, $dst|$dst, $src}",
                  [(store FR32:$src, addr:$dst)], IIC_SSE_MOV_S_MR>;
def MOVSDmr : SDI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
                  "movsd\t{$src, $dst|$dst, $src}",
                  [(store FR64:$src, addr:$dst)], IIC_SSE_MOV_S_MR>;
let Predicates = [HasAVX] in {
  let AddedComplexity = 15 in {
  // Move scalar to XMM zero-extended, zeroing a VR128 then do a
  // MOVS{S,D} to the lower bits.
  def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32:$src)))),
            (VMOVSSrr (v4f32 (V_SET0)), FR32:$src)>;
  def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
            (VMOVSSrr (v4f32 (V_SET0)), (COPY_TO_REGCLASS VR128:$src, FR32))>;
  def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
            (VMOVSSrr (v4i32 (V_SET0)), (COPY_TO_REGCLASS VR128:$src, FR32))>;
  def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64:$src)))),
            (VMOVSDrr (v2f64 (V_SET0)), FR64:$src)>;

  // Move low f32 and clear high bits.
  def : Pat<(v8f32 (X86vzmovl (v8f32 VR256:$src))),
            (SUBREG_TO_REG (i32 0),
             (VMOVSSrr (v4f32 (V_SET0)),
                       (EXTRACT_SUBREG (v8f32 VR256:$src), sub_xmm)), sub_xmm)>;
  def : Pat<(v8i32 (X86vzmovl (v8i32 VR256:$src))),
            (SUBREG_TO_REG (i32 0),
             (VMOVSSrr (v4i32 (V_SET0)),
                       (EXTRACT_SUBREG (v8i32 VR256:$src), sub_xmm)), sub_xmm)>;
  }
  let AddedComplexity = 20 in {
  // MOVSSrm zeros the high parts of the register; represent this
  // with SUBREG_TO_REG. The AVX versions also write: DST[255:128] <- 0
  def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
            (COPY_TO_REGCLASS (VMOVSSrm addr:$src), VR128)>;
  def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
            (COPY_TO_REGCLASS (VMOVSSrm addr:$src), VR128)>;
  def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
            (COPY_TO_REGCLASS (VMOVSSrm addr:$src), VR128)>;

  // MOVSDrm zeros the high parts of the register; represent this
  // with SUBREG_TO_REG. The AVX versions also write: DST[255:128] <- 0
  def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
            (COPY_TO_REGCLASS (VMOVSDrm addr:$src), VR128)>;
  def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
            (COPY_TO_REGCLASS (VMOVSDrm addr:$src), VR128)>;
  def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
            (COPY_TO_REGCLASS (VMOVSDrm addr:$src), VR128)>;
  def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
            (COPY_TO_REGCLASS (VMOVSDrm addr:$src), VR128)>;
  def : Pat<(v2f64 (X86vzload addr:$src)),
            (COPY_TO_REGCLASS (VMOVSDrm addr:$src), VR128)>;

  // Represent the same patterns above but in the form they appear for
  // 256-bit types
  def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
                   (v4i32 (scalar_to_vector (loadi32 addr:$src))), (iPTR 0)))),
            (SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_xmm)>;
  def : Pat<(v8f32 (X86vzmovl (insert_subvector undef,
                   (v4f32 (scalar_to_vector (loadf32 addr:$src))), (iPTR 0)))),
            (SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_xmm)>;
  def : Pat<(v4f64 (X86vzmovl (insert_subvector undef,
                   (v2f64 (scalar_to_vector (loadf64 addr:$src))), (iPTR 0)))),
            (SUBREG_TO_REG (i32 0), (VMOVSDrm addr:$src), sub_xmm)>;

  def : Pat<(v8f32 (X86vzmovl (insert_subvector undef,
                   (v4f32 (scalar_to_vector FR32:$src)), (iPTR 0)))),
            (SUBREG_TO_REG (i32 0),
                           (v4f32 (VMOVSSrr (v4f32 (V_SET0)), FR32:$src)),
                           sub_xmm)>;
  def : Pat<(v4f64 (X86vzmovl (insert_subvector undef,
                   (v2f64 (scalar_to_vector FR64:$src)), (iPTR 0)))),
            (SUBREG_TO_REG (i64 0),
                           (v2f64 (VMOVSDrr (v2f64 (V_SET0)), FR64:$src)),
                           sub_xmm)>;
  def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
                   (v2i64 (scalar_to_vector (loadi64 addr:$src))), (iPTR 0)))),
            (SUBREG_TO_REG (i64 0), (VMOVSDrm addr:$src), sub_xmm)>;

  // Move low f64 and clear high bits.
  def : Pat<(v4f64 (X86vzmovl (v4f64 VR256:$src))),
            (SUBREG_TO_REG (i32 0),
             (VMOVSDrr (v2f64 (V_SET0)),
                       (EXTRACT_SUBREG (v4f64 VR256:$src), sub_xmm)), sub_xmm)>;

  def : Pat<(v4i64 (X86vzmovl (v4i64 VR256:$src))),
            (SUBREG_TO_REG (i32 0),
             (VMOVSDrr (v2i64 (V_SET0)),
                       (EXTRACT_SUBREG (v4i64 VR256:$src), sub_xmm)), sub_xmm)>;
  }
  // Extract and store.
  def : Pat<(store (f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
                   addr:$dst),
            (VMOVSSmr addr:$dst, (COPY_TO_REGCLASS (v4f32 VR128:$src), FR32))>;
  def : Pat<(store (f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
                   addr:$dst),
            (VMOVSDmr addr:$dst, (COPY_TO_REGCLASS (v2f64 VR128:$src), FR64))>;

  // Shuffle with VMOVSS
  def : Pat<(v4i32 (X86Movss VR128:$src1, VR128:$src2)),
            (VMOVSSrr (v4i32 VR128:$src1),
                      (COPY_TO_REGCLASS (v4i32 VR128:$src2), FR32))>;
  def : Pat<(v4f32 (X86Movss VR128:$src1, VR128:$src2)),
            (VMOVSSrr (v4f32 VR128:$src1),
                      (COPY_TO_REGCLASS (v4f32 VR128:$src2), FR32))>;

  // 256-bit variants
  def : Pat<(v8i32 (X86Movss VR256:$src1, VR256:$src2)),
            (SUBREG_TO_REG (i32 0),
              (VMOVSSrr (EXTRACT_SUBREG (v8i32 VR256:$src1), sub_xmm),
                        (EXTRACT_SUBREG (v8i32 VR256:$src2), sub_xmm)),
              sub_xmm)>;
  def : Pat<(v8f32 (X86Movss VR256:$src1, VR256:$src2)),
            (SUBREG_TO_REG (i32 0),
              (VMOVSSrr (EXTRACT_SUBREG (v8f32 VR256:$src1), sub_xmm),
                        (EXTRACT_SUBREG (v8f32 VR256:$src2), sub_xmm)),
              sub_xmm)>;

  // Shuffle with VMOVSD
  def : Pat<(v2i64 (X86Movsd VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v2f64 (X86Movsd VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v4f32 (X86Movsd VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v4i32 (X86Movsd VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;

  // 256-bit variants
  def : Pat<(v4i64 (X86Movsd VR256:$src1, VR256:$src2)),
            (SUBREG_TO_REG (i32 0),
              (VMOVSDrr (EXTRACT_SUBREG (v4i64 VR256:$src1), sub_xmm),
                        (EXTRACT_SUBREG (v4i64 VR256:$src2), sub_xmm)),
              sub_xmm)>;
  def : Pat<(v4f64 (X86Movsd VR256:$src1, VR256:$src2)),
            (SUBREG_TO_REG (i32 0),
              (VMOVSDrr (EXTRACT_SUBREG (v4f64 VR256:$src1), sub_xmm),
                        (EXTRACT_SUBREG (v4f64 VR256:$src2), sub_xmm)),
              sub_xmm)>;

  // FIXME: Instead of a X86Movlps there should be a X86Movsd here; the
  // problem is during lowering, where it's not possible to recognize the fold
  // because it has two uses through a bitcast. One use disappears at isel
  // time and the fold opportunity reappears.
  def : Pat<(v2f64 (X86Movlpd VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v2i64 (X86Movlpd VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v4f32 (X86Movlps VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v4i32 (X86Movlps VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
}
let Predicates = [UseSSE1] in {
  let AddedComplexity = 15 in {
  // Move scalar to XMM zero-extended, zeroing a VR128 then do a
  // MOVSS to the lower bits.
  def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32:$src)))),
            (MOVSSrr (v4f32 (V_SET0)), FR32:$src)>;
  def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
            (MOVSSrr (v4f32 (V_SET0)), (COPY_TO_REGCLASS VR128:$src, FR32))>;
  def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
            (MOVSSrr (v4i32 (V_SET0)), (COPY_TO_REGCLASS VR128:$src, FR32))>;
  }

  let AddedComplexity = 20 in {
  // MOVSSrm already zeros the high parts of the register.
  def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
            (COPY_TO_REGCLASS (MOVSSrm addr:$src), VR128)>;
  def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
            (COPY_TO_REGCLASS (MOVSSrm addr:$src), VR128)>;
  def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
            (COPY_TO_REGCLASS (MOVSSrm addr:$src), VR128)>;
  }

  // Extract and store.
  def : Pat<(store (f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
                   addr:$dst),
            (MOVSSmr addr:$dst, (COPY_TO_REGCLASS VR128:$src, FR32))>;

  // Shuffle with MOVSS
  def : Pat<(v4i32 (X86Movss VR128:$src1, VR128:$src2)),
            (MOVSSrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR32))>;
  def : Pat<(v4f32 (X86Movss VR128:$src1, VR128:$src2)),
            (MOVSSrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR32))>;
}
let Predicates = [UseSSE2] in {
  let AddedComplexity = 15 in {
  // Move scalar to XMM zero-extended, zeroing a VR128 then do a
  // MOVSD to the lower bits.
  def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64:$src)))),
            (MOVSDrr (v2f64 (V_SET0)), FR64:$src)>;
  }

  let AddedComplexity = 20 in {
  // MOVSDrm already zeros the high parts of the register.
  def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
            (COPY_TO_REGCLASS (MOVSDrm addr:$src), VR128)>;
  def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
            (COPY_TO_REGCLASS (MOVSDrm addr:$src), VR128)>;
  def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
            (COPY_TO_REGCLASS (MOVSDrm addr:$src), VR128)>;
  def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
            (COPY_TO_REGCLASS (MOVSDrm addr:$src), VR128)>;
  def : Pat<(v2f64 (X86vzload addr:$src)),
            (COPY_TO_REGCLASS (MOVSDrm addr:$src), VR128)>;
  }

  // Extract and store.
  def : Pat<(store (f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
                   addr:$dst),
            (MOVSDmr addr:$dst, (COPY_TO_REGCLASS VR128:$src, FR64))>;

  // Shuffle with MOVSD
  def : Pat<(v2i64 (X86Movsd VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v2f64 (X86Movsd VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v4f32 (X86Movsd VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v4i32 (X86Movsd VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;

  // FIXME: Instead of a X86Movlps there should be a X86Movsd here; the
  // problem is during lowering, where it's not possible to recognize the fold
  // because it has two uses through a bitcast. One use disappears at isel
  // time and the fold opportunity reappears.
  def : Pat<(v2f64 (X86Movlpd VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v2i64 (X86Movlpd VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v4f32 (X86Movlps VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v4i32 (X86Movlps VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
}
//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move Aligned/Unaligned FP Instructions
//===----------------------------------------------------------------------===//
multiclass sse12_mov_packed<bits<8> opc, RegisterClass RC,
                            X86MemOperand x86memop, PatFrag ld_frag,
                            string asm, Domain d,
                            OpndItins itins,
                            bit IsReMaterializable = 1> {
let neverHasSideEffects = 1 in
  def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
              !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [], itins.rr, d>;
let canFoldAsLoad = 1, isReMaterializable = IsReMaterializable in
  def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
              !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
              [(set RC:$dst, (ld_frag addr:$src))], itins.rm, d>;
}
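// For illustration: each defm below (e.g. VMOVAPS) expands into a rr record
// with no pattern (plain register copies are selected elsewhere) and a rm
// record whose load pattern lets canFoldAsLoad/isReMaterializable enable
// load folding and rematerialization.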
defm VMOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
                                "movaps", SSEPackedSingle, SSE_MOVA_ITINS>,
                                TB, VEX;
defm VMOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
                                "movapd", SSEPackedDouble, SSE_MOVA_ITINS>,
                                TB, OpSize, VEX;
defm VMOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
                                "movups", SSEPackedSingle, SSE_MOVU_ITINS>,
                                TB, VEX;
defm VMOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
                                "movupd", SSEPackedDouble, SSE_MOVU_ITINS, 0>,
                                TB, OpSize, VEX;

defm VMOVAPSY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv8f32,
                                 "movaps", SSEPackedSingle, SSE_MOVA_ITINS>,
                                 TB, VEX, VEX_L;
defm VMOVAPDY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv4f64,
                                 "movapd", SSEPackedDouble, SSE_MOVA_ITINS>,
                                 TB, OpSize, VEX, VEX_L;
defm VMOVUPSY : sse12_mov_packed<0x10, VR256, f256mem, loadv8f32,
                                 "movups", SSEPackedSingle, SSE_MOVU_ITINS>,
                                 TB, VEX, VEX_L;
defm VMOVUPDY : sse12_mov_packed<0x10, VR256, f256mem, loadv4f64,
                                 "movupd", SSEPackedDouble, SSE_MOVU_ITINS, 0>,
                                 TB, OpSize, VEX, VEX_L;

defm MOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
                               "movaps", SSEPackedSingle, SSE_MOVA_ITINS>,
                               TB;
defm MOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
                               "movapd", SSEPackedDouble, SSE_MOVA_ITINS>,
                               TB, OpSize;
defm MOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
                               "movups", SSEPackedSingle, SSE_MOVU_ITINS>,
                               TB;
defm MOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
                               "movupd", SSEPackedDouble, SSE_MOVU_ITINS, 0>,
                               TB, OpSize;
def VMOVAPSmr : VPSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                     "movaps\t{$src, $dst|$dst, $src}",
                     [(alignedstore (v4f32 VR128:$src), addr:$dst)],
                     IIC_SSE_MOVA_P_MR>, VEX;
def VMOVAPDmr : VPDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                     "movapd\t{$src, $dst|$dst, $src}",
                     [(alignedstore (v2f64 VR128:$src), addr:$dst)],
                     IIC_SSE_MOVA_P_MR>, VEX;
def VMOVUPSmr : VPSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                     "movups\t{$src, $dst|$dst, $src}",
                     [(store (v4f32 VR128:$src), addr:$dst)],
                     IIC_SSE_MOVU_P_MR>, VEX;
def VMOVUPDmr : VPDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                     "movupd\t{$src, $dst|$dst, $src}",
                     [(store (v2f64 VR128:$src), addr:$dst)],
                     IIC_SSE_MOVU_P_MR>, VEX;
def VMOVAPSYmr : VPSI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                      "movaps\t{$src, $dst|$dst, $src}",
                      [(alignedstore256 (v8f32 VR256:$src), addr:$dst)],
                      IIC_SSE_MOVA_P_MR>, VEX, VEX_L;
def VMOVAPDYmr : VPDI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                      "movapd\t{$src, $dst|$dst, $src}",
                      [(alignedstore256 (v4f64 VR256:$src), addr:$dst)],
                      IIC_SSE_MOVA_P_MR>, VEX, VEX_L;
def VMOVUPSYmr : VPSI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                      "movups\t{$src, $dst|$dst, $src}",
                      [(store (v8f32 VR256:$src), addr:$dst)],
                      IIC_SSE_MOVU_P_MR>, VEX, VEX_L;
def VMOVUPDYmr : VPDI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                      "movupd\t{$src, $dst|$dst, $src}",
                      [(store (v4f64 VR256:$src), addr:$dst)],
                      IIC_SSE_MOVU_P_MR>, VEX, VEX_L;
// For disassembler
let isCodeGenOnly = 1 in {
  def VMOVAPSrr_REV : VPSI<0x29, MRMDestReg, (outs VR128:$dst),
                           (ins VR128:$src),
                           "movaps\t{$src, $dst|$dst, $src}", [],
                           IIC_SSE_MOVA_P_RR>, VEX;
  def VMOVAPDrr_REV : VPDI<0x29, MRMDestReg, (outs VR128:$dst),
                           (ins VR128:$src),
                           "movapd\t{$src, $dst|$dst, $src}", [],
                           IIC_SSE_MOVA_P_RR>, VEX;
  def VMOVUPSrr_REV : VPSI<0x11, MRMDestReg, (outs VR128:$dst),
                           (ins VR128:$src),
                           "movups\t{$src, $dst|$dst, $src}", [],
                           IIC_SSE_MOVU_P_RR>, VEX;
  def VMOVUPDrr_REV : VPDI<0x11, MRMDestReg, (outs VR128:$dst),
                           (ins VR128:$src),
                           "movupd\t{$src, $dst|$dst, $src}", [],
                           IIC_SSE_MOVU_P_RR>, VEX;
  def VMOVAPSYrr_REV : VPSI<0x29, MRMDestReg, (outs VR256:$dst),
                            (ins VR256:$src),
                            "movaps\t{$src, $dst|$dst, $src}", [],
                            IIC_SSE_MOVA_P_RR>, VEX, VEX_L;
  def VMOVAPDYrr_REV : VPDI<0x29, MRMDestReg, (outs VR256:$dst),
                            (ins VR256:$src),
                            "movapd\t{$src, $dst|$dst, $src}", [],
                            IIC_SSE_MOVA_P_RR>, VEX, VEX_L;
  def VMOVUPSYrr_REV : VPSI<0x11, MRMDestReg, (outs VR256:$dst),
                            (ins VR256:$src),
                            "movups\t{$src, $dst|$dst, $src}", [],
                            IIC_SSE_MOVU_P_RR>, VEX, VEX_L;
  def VMOVUPDYrr_REV : VPDI<0x11, MRMDestReg, (outs VR256:$dst),
                            (ins VR256:$src),
                            "movupd\t{$src, $dst|$dst, $src}", [],
                            IIC_SSE_MOVU_P_RR>, VEX, VEX_L;
}
let Predicates = [HasAVX] in {
def : Pat<(v8i32 (X86vzmovl
                  (insert_subvector undef, (v4i32 VR128:$src), (iPTR 0)))),
          (SUBREG_TO_REG (i32 0), (VMOVAPSrr VR128:$src), sub_xmm)>;
def : Pat<(v4i64 (X86vzmovl
                  (insert_subvector undef, (v2i64 VR128:$src), (iPTR 0)))),
          (SUBREG_TO_REG (i32 0), (VMOVAPSrr VR128:$src), sub_xmm)>;
def : Pat<(v8f32 (X86vzmovl
                  (insert_subvector undef, (v4f32 VR128:$src), (iPTR 0)))),
          (SUBREG_TO_REG (i32 0), (VMOVAPSrr VR128:$src), sub_xmm)>;
def : Pat<(v4f64 (X86vzmovl
                  (insert_subvector undef, (v2f64 VR128:$src), (iPTR 0)))),
          (SUBREG_TO_REG (i32 0), (VMOVAPSrr VR128:$src), sub_xmm)>;
}

def : Pat<(int_x86_avx_storeu_ps_256 addr:$dst, VR256:$src),
          (VMOVUPSYmr addr:$dst, VR256:$src)>;
def : Pat<(int_x86_avx_storeu_pd_256 addr:$dst, VR256:$src),
          (VMOVUPDYmr addr:$dst, VR256:$src)>;
def MOVAPSmr : PSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movaps\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v4f32 VR128:$src), addr:$dst)],
                   IIC_SSE_MOVA_P_MR>;
def MOVAPDmr : PDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movapd\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v2f64 VR128:$src), addr:$dst)],
                   IIC_SSE_MOVA_P_MR>;
def MOVUPSmr : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movups\t{$src, $dst|$dst, $src}",
                   [(store (v4f32 VR128:$src), addr:$dst)],
                   IIC_SSE_MOVU_P_MR>;
def MOVUPDmr : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movupd\t{$src, $dst|$dst, $src}",
                   [(store (v2f64 VR128:$src), addr:$dst)],
                   IIC_SSE_MOVU_P_MR>;
// For disassembler
let isCodeGenOnly = 1 in {
  def MOVAPSrr_REV : PSI<0x29, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
                         "movaps\t{$src, $dst|$dst, $src}", [],
                         IIC_SSE_MOVA_P_RR>;
  def MOVAPDrr_REV : PDI<0x29, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
                         "movapd\t{$src, $dst|$dst, $src}", [],
                         IIC_SSE_MOVA_P_RR>;
  def MOVUPSrr_REV : PSI<0x11, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
                         "movups\t{$src, $dst|$dst, $src}", [],
                         IIC_SSE_MOVU_P_RR>;
  def MOVUPDrr_REV : PDI<0x11, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
                         "movupd\t{$src, $dst|$dst, $src}", [],
                         IIC_SSE_MOVU_P_RR>;
}
let Predicates = [HasAVX] in {
  def : Pat<(int_x86_sse_storeu_ps addr:$dst, VR128:$src),
            (VMOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(int_x86_sse2_storeu_pd addr:$dst, VR128:$src),
            (VMOVUPDmr addr:$dst, VR128:$src)>;
}

let Predicates = [UseSSE1] in
  def : Pat<(int_x86_sse_storeu_ps addr:$dst, VR128:$src),
            (MOVUPSmr addr:$dst, VR128:$src)>;
let Predicates = [UseSSE2] in
  def : Pat<(int_x86_sse2_storeu_pd addr:$dst, VR128:$src),
            (MOVUPDmr addr:$dst, VR128:$src)>;
// Use vmovaps/vmovups for AVX integer load/store.
let Predicates = [HasAVX] in {
  // 128-bit load/store
  def : Pat<(alignedloadv2i64 addr:$src),
            (VMOVAPSrm addr:$src)>;
  def : Pat<(loadv2i64 addr:$src),
            (VMOVUPSrm addr:$src)>;

  def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
            (VMOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
            (VMOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
            (VMOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
            (VMOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v2i64 VR128:$src), addr:$dst),
            (VMOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v4i32 VR128:$src), addr:$dst),
            (VMOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v8i16 VR128:$src), addr:$dst),
            (VMOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v16i8 VR128:$src), addr:$dst),
            (VMOVUPSmr addr:$dst, VR128:$src)>;

  // 256-bit load/store
  def : Pat<(alignedloadv4i64 addr:$src),
            (VMOVAPSYrm addr:$src)>;
  def : Pat<(loadv4i64 addr:$src),
            (VMOVUPSYrm addr:$src)>;
  def : Pat<(alignedstore256 (v4i64 VR256:$src), addr:$dst),
            (VMOVAPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(alignedstore256 (v8i32 VR256:$src), addr:$dst),
            (VMOVAPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(alignedstore256 (v16i16 VR256:$src), addr:$dst),
            (VMOVAPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(alignedstore256 (v32i8 VR256:$src), addr:$dst),
            (VMOVAPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(store (v4i64 VR256:$src), addr:$dst),
            (VMOVUPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(store (v8i32 VR256:$src), addr:$dst),
            (VMOVUPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(store (v16i16 VR256:$src), addr:$dst),
            (VMOVUPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(store (v32i8 VR256:$src), addr:$dst),
            (VMOVUPSYmr addr:$dst, VR256:$src)>;

  // Special patterns for storing subvector extracts of lower 128-bits.
  // It's cheaper to just use VMOVAPS/VMOVUPS instead of VEXTRACTF128mr.
  def : Pat<(alignedstore (v2f64 (extract_subvector
                                  (v4f64 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVAPDmr addr:$dst, (v2f64 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
  def : Pat<(alignedstore (v4f32 (extract_subvector
                                  (v8f32 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVAPSmr addr:$dst, (v4f32 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
  def : Pat<(alignedstore (v2i64 (extract_subvector
                                  (v4i64 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVAPDmr addr:$dst, (v2i64 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
  def : Pat<(alignedstore (v4i32 (extract_subvector
                                  (v8i32 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVAPSmr addr:$dst, (v4i32 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
  def : Pat<(alignedstore (v8i16 (extract_subvector
                                  (v16i16 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVAPSmr addr:$dst, (v8i16 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
  def : Pat<(alignedstore (v16i8 (extract_subvector
                                  (v32i8 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVAPSmr addr:$dst, (v16i8 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;

  def : Pat<(store (v2f64 (extract_subvector
                           (v4f64 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVUPDmr addr:$dst, (v2f64 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
  def : Pat<(store (v4f32 (extract_subvector
                           (v8f32 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVUPSmr addr:$dst, (v4f32 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
  def : Pat<(store (v2i64 (extract_subvector
                           (v4i64 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVUPDmr addr:$dst, (v2i64 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
  def : Pat<(store (v4i32 (extract_subvector
                           (v8i32 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVUPSmr addr:$dst, (v4i32 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
  def : Pat<(store (v8i16 (extract_subvector
                           (v16i16 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVUPSmr addr:$dst, (v8i16 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
  def : Pat<(store (v16i8 (extract_subvector
                           (v32i8 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVUPSmr addr:$dst, (v16i8 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
}
// Use movaps / movups for SSE integer load / store (one byte shorter).
// The instructions selected below are then converted to MOVDQA/MOVDQU
// during the SSE domain pass.
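// For example, "movaps %xmm1, %xmm0" encodes as 0F 28 C1, while the
// equivalent "movdqa %xmm1, %xmm0" is 66 0F 6F C1; the mandatory 66 prefix
// accounts for the extra byte.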
let Predicates = [UseSSE1] in {
  def : Pat<(alignedloadv2i64 addr:$src),
            (MOVAPSrm addr:$src)>;
  def : Pat<(loadv2i64 addr:$src),
            (MOVUPSrm addr:$src)>;

  def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
            (MOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
            (MOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
            (MOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
            (MOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v2i64 VR128:$src), addr:$dst),
            (MOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v4i32 VR128:$src), addr:$dst),
            (MOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v8i16 VR128:$src), addr:$dst),
            (MOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v16i8 VR128:$src), addr:$dst),
            (MOVUPSmr addr:$dst, VR128:$src)>;
}
// Alias instruction to do FR32 or FR64 reg-to-reg copy using movaps. Upper
// bits are disregarded. FIXME: Set encoding to pseudo!
let neverHasSideEffects = 1 in {
def FsVMOVAPSrr : VPSI<0x28, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
                       "movaps\t{$src, $dst|$dst, $src}", [],
                       IIC_SSE_MOVA_P_RR>, VEX;
def FsVMOVAPDrr : VPDI<0x28, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
                       "movapd\t{$src, $dst|$dst, $src}", [],
                       IIC_SSE_MOVA_P_RR>, VEX;
def FsMOVAPSrr : PSI<0x28, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
                     "movaps\t{$src, $dst|$dst, $src}", [],
                     IIC_SSE_MOVA_P_RR>;
def FsMOVAPDrr : PDI<0x28, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
                     "movapd\t{$src, $dst|$dst, $src}", [],
                     IIC_SSE_MOVA_P_RR>;
}
// Alias instruction to load FR32 or FR64 from f128mem using movaps. Upper
// bits are disregarded. FIXME: Set encoding to pseudo!
let canFoldAsLoad = 1, isReMaterializable = 1 in {
let isCodeGenOnly = 1 in {
  def FsVMOVAPSrm : VPSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
                         "movaps\t{$src, $dst|$dst, $src}",
                         [(set FR32:$dst, (alignedloadfsf32 addr:$src))],
                         IIC_SSE_MOVA_P_RM>, VEX;
  def FsVMOVAPDrm : VPDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
                         "movapd\t{$src, $dst|$dst, $src}",
                         [(set FR64:$dst, (alignedloadfsf64 addr:$src))],
                         IIC_SSE_MOVA_P_RM>, VEX;
}
def FsMOVAPSrm : PSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
                     "movaps\t{$src, $dst|$dst, $src}",
                     [(set FR32:$dst, (alignedloadfsf32 addr:$src))],
                     IIC_SSE_MOVA_P_RM>;
def FsMOVAPDrm : PDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
                     "movapd\t{$src, $dst|$dst, $src}",
                     [(set FR64:$dst, (alignedloadfsf64 addr:$src))],
                     IIC_SSE_MOVA_P_RM>;
}
//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move Low packed FP Instructions
//===----------------------------------------------------------------------===//
multiclass sse12_mov_hilo_packed<bits<8>opc, RegisterClass RC,
                                 SDNode psnode, SDNode pdnode, string base_opc,
                                 string asm_opr, InstrItinClass itin> {
  def PSrm : PI<opc, MRMSrcMem,
         (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
         !strconcat(base_opc, "s", asm_opr),
     [(set VR128:$dst,
       (psnode VR128:$src1,
        (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))))],
              itin, SSEPackedSingle>, TB;

  def PDrm : PI<opc, MRMSrcMem,
         (outs RC:$dst), (ins RC:$src1, f64mem:$src2),
         !strconcat(base_opc, "d", asm_opr),
     [(set RC:$dst, (v2f64 (pdnode RC:$src1,
                            (scalar_to_vector (loadf64 addr:$src2)))))],
              itin, SSEPackedDouble>, TB, OpSize;
}
let AddedComplexity = 20 in {
  defm VMOVL : sse12_mov_hilo_packed<0x12, VR128, X86Movlps, X86Movlpd, "movlp",
                     "\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                     IIC_SSE_MOV_LH>, VEX_4V;
}
let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
  defm MOVL : sse12_mov_hilo_packed<0x12, VR128, X86Movlps, X86Movlpd, "movlp",
                                    "\t{$src2, $dst|$dst, $src2}",
                                    IIC_SSE_MOV_LH>;
}

def VMOVLPSmr : VPSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                     "movlps\t{$src, $dst|$dst, $src}",
                     [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
                                   (iPTR 0))), addr:$dst)],
                     IIC_SSE_MOV_LH>, VEX;
def VMOVLPDmr : VPDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                     "movlpd\t{$src, $dst|$dst, $src}",
                     [(store (f64 (vector_extract (v2f64 VR128:$src),
                                   (iPTR 0))), addr:$dst)],
                     IIC_SSE_MOV_LH>, VEX;
def MOVLPSmr : PSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
                                 (iPTR 0))), addr:$dst)],
                   IIC_SSE_MOV_LH>;
def MOVLPDmr : PDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlpd\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract (v2f64 VR128:$src),
                                 (iPTR 0))), addr:$dst)],
                   IIC_SSE_MOV_LH>;
let Predicates = [HasAVX] in {
  // Shuffle with VMOVLPS
  def : Pat<(v4f32 (X86Movlps VR128:$src1, (load addr:$src2))),
            (VMOVLPSrm VR128:$src1, addr:$src2)>;
  def : Pat<(v4i32 (X86Movlps VR128:$src1, (load addr:$src2))),
            (VMOVLPSrm VR128:$src1, addr:$src2)>;

  // Shuffle with VMOVLPD
  def : Pat<(v2f64 (X86Movlpd VR128:$src1, (load addr:$src2))),
            (VMOVLPDrm VR128:$src1, addr:$src2)>;
  def : Pat<(v2i64 (X86Movlpd VR128:$src1, (load addr:$src2))),
            (VMOVLPDrm VR128:$src1, addr:$src2)>;

  // Store patterns
  def : Pat<(store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)),
                   addr:$src1),
            (VMOVLPSmr addr:$src1, VR128:$src2)>;
  def : Pat<(store (v4i32 (X86Movlps
                   (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2)), addr:$src1),
            (VMOVLPSmr addr:$src1, VR128:$src2)>;
  def : Pat<(store (v2f64 (X86Movlpd (load addr:$src1), VR128:$src2)),
                   addr:$src1),
            (VMOVLPDmr addr:$src1, VR128:$src2)>;
  def : Pat<(store (v2i64 (X86Movlpd (load addr:$src1), VR128:$src2)),
                   addr:$src1),
            (VMOVLPDmr addr:$src1, VR128:$src2)>;
}
let Predicates = [UseSSE1] in {
  // (store (vector_shuffle (load addr), v2, <4, 5, 2, 3>), addr) using MOVLPS
  def : Pat<(store (i64 (vector_extract (bc_v2i64 (v4f32 VR128:$src2)),
                                 (iPTR 0))), addr:$src1),
            (MOVLPSmr addr:$src1, VR128:$src2)>;

  // Shuffle with MOVLPS
  def : Pat<(v4f32 (X86Movlps VR128:$src1, (load addr:$src2))),
            (MOVLPSrm VR128:$src1, addr:$src2)>;
  def : Pat<(v4i32 (X86Movlps VR128:$src1, (load addr:$src2))),
            (MOVLPSrm VR128:$src1, addr:$src2)>;
  def : Pat<(X86Movlps VR128:$src1,
                 (bc_v4f32 (v2i64 (scalar_to_vector (loadi64 addr:$src2))))),
            (MOVLPSrm VR128:$src1, addr:$src2)>;

  // Store patterns
  def : Pat<(store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)),
                   addr:$src1),
            (MOVLPSmr addr:$src1, VR128:$src2)>;
  def : Pat<(store (v4i32 (X86Movlps
                   (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2)),
                   addr:$src1),
            (MOVLPSmr addr:$src1, VR128:$src2)>;
}
let Predicates = [UseSSE2] in {
  // Shuffle with MOVLPD
  def : Pat<(v2f64 (X86Movlpd VR128:$src1, (load addr:$src2))),
            (MOVLPDrm VR128:$src1, addr:$src2)>;
  def : Pat<(v2i64 (X86Movlpd VR128:$src1, (load addr:$src2))),
            (MOVLPDrm VR128:$src1, addr:$src2)>;

  // Store patterns
  def : Pat<(store (v2f64 (X86Movlpd (load addr:$src1), VR128:$src2)),
                   addr:$src1),
            (MOVLPDmr addr:$src1, VR128:$src2)>;
  def : Pat<(store (v2i64 (X86Movlpd (load addr:$src1), VR128:$src2)),
                   addr:$src1),
            (MOVLPDmr addr:$src1, VR128:$src2)>;
}
//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move Hi packed FP Instructions
//===----------------------------------------------------------------------===//
let AddedComplexity = 20 in {
  defm VMOVH : sse12_mov_hilo_packed<0x16, VR128, X86Movlhps, X86Movlhpd, "movhp",
                     "\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                     IIC_SSE_MOV_LH>, VEX_4V;
}
let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
  defm MOVH : sse12_mov_hilo_packed<0x16, VR128, X86Movlhps, X86Movlhpd, "movhp",
                                    "\t{$src2, $dst|$dst, $src2}",
                                    IIC_SSE_MOV_LH>;
}
// v2f64 extract element 1 is always custom lowered to unpack high to low
// and extract element 0 so the non-store version isn't too horrible.
def VMOVHPSmr : VPSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                     "movhps\t{$src, $dst|$dst, $src}",
                     [(store (f64 (vector_extract
                                   (X86Unpckh (bc_v2f64 (v4f32 VR128:$src)),
                                              (bc_v2f64 (v4f32 VR128:$src))),
                                   (iPTR 0))), addr:$dst)], IIC_SSE_MOV_LH>, VEX;
def VMOVHPDmr : VPDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                     "movhpd\t{$src, $dst|$dst, $src}",
                     [(store (f64 (vector_extract
                                   (v2f64 (X86Unpckh VR128:$src, VR128:$src)),
                                   (iPTR 0))), addr:$dst)], IIC_SSE_MOV_LH>, VEX;
def MOVHPSmr : PSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract
                                 (X86Unpckh (bc_v2f64 (v4f32 VR128:$src)),
                                            (bc_v2f64 (v4f32 VR128:$src))),
                                 (iPTR 0))), addr:$dst)], IIC_SSE_MOV_LH>;
def MOVHPDmr : PDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhpd\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract
                                 (v2f64 (X86Unpckh VR128:$src, VR128:$src)),
                                 (iPTR 0))), addr:$dst)], IIC_SSE_MOV_LH>;
let Predicates = [HasAVX] in {
  // VMOVHPS patterns
  def : Pat<(X86Movlhps VR128:$src1,
                 (bc_v4f32 (v2i64 (scalar_to_vector (loadi64 addr:$src2))))),
            (VMOVHPSrm VR128:$src1, addr:$src2)>;
  def : Pat<(X86Movlhps VR128:$src1,
                 (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
            (VMOVHPSrm VR128:$src1, addr:$src2)>;

  // FIXME: Instead of X86Unpckl, there should be a X86Movlhpd here; the
  // problem is during lowering, where it's not possible to recognize the load
  // fold because it has two uses through a bitcast. One use disappears at
  // isel time and the fold opportunity reappears.
  def : Pat<(v2f64 (X86Unpckl VR128:$src1,
                     (scalar_to_vector (loadf64 addr:$src2)))),
            (VMOVHPDrm VR128:$src1, addr:$src2)>;
}
let Predicates = [UseSSE1] in {
  // MOVHPS patterns
  def : Pat<(X86Movlhps VR128:$src1,
                 (bc_v4f32 (v2i64 (scalar_to_vector (loadi64 addr:$src2))))),
            (MOVHPSrm VR128:$src1, addr:$src2)>;
  def : Pat<(X86Movlhps VR128:$src1,
                 (bc_v4f32 (v2i64 (X86vzload addr:$src2)))),
            (MOVHPSrm VR128:$src1, addr:$src2)>;
}
let Predicates = [UseSSE2] in {
  // FIXME: Instead of X86Unpckl, there should be a X86Movlhpd here; the
  // problem is during lowering, where it's not possible to recognize the load
  // fold because it has two uses through a bitcast. One use disappears at
  // isel time and the fold opportunity reappears.
  def : Pat<(v2f64 (X86Unpckl VR128:$src1,
                     (scalar_to_vector (loadf64 addr:$src2)))),
            (MOVHPDrm VR128:$src1, addr:$src2)>;
}
//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move Low to High and High to Low packed FP Instructions
//===----------------------------------------------------------------------===//
let AddedComplexity = 20 in {
  def VMOVLHPSrr : VPSI<0x16, MRMSrcReg, (outs VR128:$dst),
                        (ins VR128:$src1, VR128:$src2),
                        "movlhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                        [(set VR128:$dst,
                          (v4f32 (X86Movlhps VR128:$src1, VR128:$src2)))],
                        IIC_SSE_MOV_LH>,
                        VEX_4V;
  def VMOVHLPSrr : VPSI<0x12, MRMSrcReg, (outs VR128:$dst),
                        (ins VR128:$src1, VR128:$src2),
                        "movhlps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                        [(set VR128:$dst,
                          (v4f32 (X86Movhlps VR128:$src1, VR128:$src2)))],
                        IIC_SSE_MOV_LH>,
                        VEX_4V;
}
let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
  def MOVLHPSrr : PSI<0x16, MRMSrcReg, (outs VR128:$dst),
                      (ins VR128:$src1, VR128:$src2),
                      "movlhps\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                        (v4f32 (X86Movlhps VR128:$src1, VR128:$src2)))],
                      IIC_SSE_MOV_LH>;
  def MOVHLPSrr : PSI<0x12, MRMSrcReg, (outs VR128:$dst),
                      (ins VR128:$src1, VR128:$src2),
                      "movhlps\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                        (v4f32 (X86Movhlps VR128:$src1, VR128:$src2)))],
                      IIC_SSE_MOV_LH>;
}
1368 let Predicates = [HasAVX] in {
1370 def : Pat<(v4i32 (X86Movlhps VR128:$src1, VR128:$src2)),
1371 (VMOVLHPSrr VR128:$src1, VR128:$src2)>;
1372 def : Pat<(v2i64 (X86Movlhps VR128:$src1, VR128:$src2)),
1373 (VMOVLHPSrr (v2i64 VR128:$src1), VR128:$src2)>;
1376 def : Pat<(v4i32 (X86Movhlps VR128:$src1, VR128:$src2)),
1377 (VMOVHLPSrr VR128:$src1, VR128:$src2)>;
1380 let Predicates = [UseSSE1] in {
1382 def : Pat<(v4i32 (X86Movlhps VR128:$src1, VR128:$src2)),
1383 (MOVLHPSrr VR128:$src1, VR128:$src2)>;
1384 def : Pat<(v2i64 (X86Movlhps VR128:$src1, VR128:$src2)),
1385 (MOVLHPSrr (v2i64 VR128:$src1), VR128:$src2)>;
1388 def : Pat<(v4i32 (X86Movhlps VR128:$src1, VR128:$src2)),
1389 (MOVHLPSrr VR128:$src1, VR128:$src2)>;
1392 //===----------------------------------------------------------------------===//
1393 // SSE 1 & 2 - Conversion Instructions
1394 //===----------------------------------------------------------------------===//
1396 def SSE_CVT_PD : OpndItins<
1397 IIC_SSE_CVT_PD_RR, IIC_SSE_CVT_PD_RM
1400 def SSE_CVT_PS : OpndItins<
1401 IIC_SSE_CVT_PS_RR, IIC_SSE_CVT_PS_RM
1404 def SSE_CVT_Scalar : OpndItins<
1405 IIC_SSE_CVT_Scalar_RR, IIC_SSE_CVT_Scalar_RM
1408 def SSE_CVT_SS2SI_32 : OpndItins<
1409 IIC_SSE_CVT_SS2SI32_RR, IIC_SSE_CVT_SS2SI32_RM
1412 def SSE_CVT_SS2SI_64 : OpndItins<
1413 IIC_SSE_CVT_SS2SI64_RR, IIC_SSE_CVT_SS2SI64_RM
1416 def SSE_CVT_SD2SI : OpndItins<
1417 IIC_SSE_CVT_SD2SI_RR, IIC_SSE_CVT_SD2SI_RM
1420 multiclass sse12_cvt_s<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
1421 SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
1422 string asm, OpndItins itins> {
1423 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
1424 [(set DstRC:$dst, (OpNode SrcRC:$src))],
1426 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
1427 [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))],
1431 multiclass sse12_cvt_p<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
1432 X86MemOperand x86memop, string asm, Domain d,
1434 let neverHasSideEffects = 1 in {
1435 def rr : I<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
1438 def rm : I<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
1443 multiclass sse12_vcvt_avx<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
1444 X86MemOperand x86memop, string asm> {
1445 let neverHasSideEffects = 1 in {
1446 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src),
1447 !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>;
1449 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
1450 (ins DstRC:$src1, x86memop:$src),
1451 !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>;
1452 } // neverHasSideEffects = 1
1455 defm VCVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
1456 "cvttss2si\t{$src, $dst|$dst, $src}",
1459 defm VCVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
1460 "cvttss2si{q}\t{$src, $dst|$dst, $src}",
1462 XS, VEX, VEX_W, VEX_LIG;
1463 defm VCVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
1464 "cvttsd2si\t{$src, $dst|$dst, $src}",
1467 defm VCVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
1468 "cvttsd2si{q}\t{$src, $dst|$dst, $src}",
1470 XD, VEX, VEX_W, VEX_LIG;
1472 // The assembler can recognize rr 64-bit instructions by seeing a rxx
1473 // register, but the same isn't true when only using memory operands,
1474 // provide other assembly "l" and "q" forms to address this explicitly
1475 // where appropriate to do so.
1476 defm VCVTSI2SS : sse12_vcvt_avx<0x2A, GR32, FR32, i32mem, "cvtsi2ss">,
1477 XS, VEX_4V, VEX_LIG;
1478 defm VCVTSI2SS64 : sse12_vcvt_avx<0x2A, GR64, FR32, i64mem, "cvtsi2ss{q}">,
1479 XS, VEX_4V, VEX_W, VEX_LIG;
1480 defm VCVTSI2SD : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sd">,
1481 XD, VEX_4V, VEX_LIG;
1482 defm VCVTSI2SD64 : sse12_vcvt_avx<0x2A, GR64, FR64, i64mem, "cvtsi2sd{q}">,
1483 XD, VEX_4V, VEX_W, VEX_LIG;
1485 def : InstAlias<"vcvtsi2sd{l}\t{$src, $src1, $dst|$dst, $src1, $src}",
1486 (VCVTSI2SDrr FR64:$dst, FR64:$src1, GR32:$src)>;
1487 def : InstAlias<"vcvtsi2sd{l}\t{$src, $src1, $dst|$dst, $src1, $src}",
1488 (VCVTSI2SDrm FR64:$dst, FR64:$src1, i32mem:$src)>;
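
// For example (illustration only, hypothetical operands): with a pure memory
// operand the integer width is not visible in the mnemonic, so the aliases
// above let AT&T-syntax code spell the width explicitly:
//   vcvtsi2sdl (%rax), %xmm1, %xmm2   # 32-bit integer source
//   vcvtsi2sdq (%rax), %xmm1, %xmm2   # 64-bit integer source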
let Predicates = [HasAVX] in {
  def : Pat<(f32 (sint_to_fp (loadi32 addr:$src))),
            (VCVTSI2SSrm (f32 (IMPLICIT_DEF)), addr:$src)>;
  def : Pat<(f32 (sint_to_fp (loadi64 addr:$src))),
            (VCVTSI2SS64rm (f32 (IMPLICIT_DEF)), addr:$src)>;
  def : Pat<(f64 (sint_to_fp (loadi32 addr:$src))),
            (VCVTSI2SDrm (f64 (IMPLICIT_DEF)), addr:$src)>;
  def : Pat<(f64 (sint_to_fp (loadi64 addr:$src))),
            (VCVTSI2SD64rm (f64 (IMPLICIT_DEF)), addr:$src)>;

  def : Pat<(f32 (sint_to_fp GR32:$src)),
            (VCVTSI2SSrr (f32 (IMPLICIT_DEF)), GR32:$src)>;
  def : Pat<(f32 (sint_to_fp GR64:$src)),
            (VCVTSI2SS64rr (f32 (IMPLICIT_DEF)), GR64:$src)>;
  def : Pat<(f64 (sint_to_fp GR32:$src)),
            (VCVTSI2SDrr (f64 (IMPLICIT_DEF)), GR32:$src)>;
  def : Pat<(f64 (sint_to_fp GR64:$src)),
            (VCVTSI2SD64rr (f64 (IMPLICIT_DEF)), GR64:$src)>;
}
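
// Note (illustration): the (IMPLICIT_DEF) in the patterns above supplies the
// extra tied source register the three-operand VEX encoding requires; only
// the low scalar lane of the result is consumed, so whatever happens to be in
// the upper lanes of that register is acceptable.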
defm CVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
                      "cvttss2si\t{$src, $dst|$dst, $src}",
                      SSE_CVT_SS2SI_32>, XS;
defm CVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
                      "cvttss2si{q}\t{$src, $dst|$dst, $src}",
                      SSE_CVT_SS2SI_64>, XS, REX_W;
defm CVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
                      "cvttsd2si\t{$src, $dst|$dst, $src}",
                      SSE_CVT_SD2SI>, XD;
defm CVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
                      "cvttsd2si{q}\t{$src, $dst|$dst, $src}",
                      SSE_CVT_SD2SI>, XD, REX_W;
defm CVTSI2SS  : sse12_cvt_s<0x2A, GR32, FR32, sint_to_fp, i32mem, loadi32,
                      "cvtsi2ss\t{$src, $dst|$dst, $src}",
                      SSE_CVT_Scalar>, XS;
defm CVTSI2SS64 : sse12_cvt_s<0x2A, GR64, FR32, sint_to_fp, i64mem, loadi64,
                      "cvtsi2ss{q}\t{$src, $dst|$dst, $src}",
                      SSE_CVT_Scalar>, XS, REX_W;
defm CVTSI2SD  : sse12_cvt_s<0x2A, GR32, FR64, sint_to_fp, i32mem, loadi32,
                      "cvtsi2sd\t{$src, $dst|$dst, $src}",
                      SSE_CVT_Scalar>, XD;
defm CVTSI2SD64 : sse12_cvt_s<0x2A, GR64, FR64, sint_to_fp, i64mem, loadi64,
                      "cvtsi2sd{q}\t{$src, $dst|$dst, $src}",
                      SSE_CVT_Scalar>, XD, REX_W;

// Conversion Instructions Intrinsics - Match intrinsics which expect MM
// and/or XMM operand(s).

multiclass sse12_cvt_sint<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                          Intrinsic Int, Operand memop, ComplexPattern mem_cpat,
                          string asm, OpndItins itins> {
  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
              !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
              [(set DstRC:$dst, (Int SrcRC:$src))], itins.rr>;
  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins memop:$src),
              !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
              [(set DstRC:$dst, (Int mem_cpat:$src))], itins.rm>;
}

multiclass sse12_cvt_sint_3addr<bits<8> opc, RegisterClass SrcRC,
                    RegisterClass DstRC, Intrinsic Int, X86MemOperand x86memop,
                    PatFrag ld_frag, string asm, OpndItins itins,
                    bit Is2Addr = 1> {
  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src2),
              !if(Is2Addr,
                  !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
                  !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
              [(set DstRC:$dst, (Int DstRC:$src1, SrcRC:$src2))],
              itins.rr>;
  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
              (ins DstRC:$src1, x86memop:$src2),
              !if(Is2Addr,
                  !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
                  !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
              [(set DstRC:$dst, (Int DstRC:$src1, (ld_frag addr:$src2)))],
              itins.rm>;
}

defm VCVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32,
                  int_x86_sse2_cvtsd2si, sdmem, sse_load_f64, "cvtsd2si{l}",
                  SSE_CVT_SD2SI>, XD, VEX, VEX_LIG;
defm VCVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64,
                  int_x86_sse2_cvtsd2si64, sdmem, sse_load_f64, "cvtsd2si{q}",
                  SSE_CVT_SD2SI>, XD, VEX, VEX_W, VEX_LIG;

defm CVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
                 sdmem, sse_load_f64, "cvtsd2si{l}", SSE_CVT_SD2SI>, XD;
defm CVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse2_cvtsd2si64,
                   sdmem, sse_load_f64, "cvtsd2si{q}", SSE_CVT_SD2SI>, XD, REX_W;

defm Int_VCVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
          int_x86_sse_cvtsi2ss, i32mem, loadi32, "cvtsi2ss",
          SSE_CVT_Scalar, 0>, XS, VEX_4V;
defm Int_VCVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
          int_x86_sse_cvtsi642ss, i64mem, loadi64, "cvtsi2ss{q}",
          SSE_CVT_Scalar, 0>, XS, VEX_4V,
          VEX_W;
defm Int_VCVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
          int_x86_sse2_cvtsi2sd, i32mem, loadi32, "cvtsi2sd",
          SSE_CVT_Scalar, 0>, XD, VEX_4V;
defm Int_VCVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
          int_x86_sse2_cvtsi642sd, i64mem, loadi64, "cvtsi2sd{q}",
          SSE_CVT_Scalar, 0>, XD,
          VEX_4V, VEX_W;

let Constraints = "$src1 = $dst" in {
  defm Int_CVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
                        int_x86_sse_cvtsi2ss, i32mem, loadi32,
                        "cvtsi2ss", SSE_CVT_Scalar>, XS;
  defm Int_CVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
                        int_x86_sse_cvtsi642ss, i64mem, loadi64,
                        "cvtsi2ss{q}", SSE_CVT_Scalar>, XS, REX_W;
  defm Int_CVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
                        int_x86_sse2_cvtsi2sd, i32mem, loadi32,
                        "cvtsi2sd", SSE_CVT_Scalar>, XD;
  defm Int_CVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
                        int_x86_sse2_cvtsi642sd, i64mem, loadi64,
                        "cvtsi2sd{q}", SSE_CVT_Scalar>, XD, REX_W;
}

// Aliases for intrinsics
defm Int_VCVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
                                    ssmem, sse_load_f32, "cvttss2si",
                                    SSE_CVT_SS2SI_32>, XS, VEX;
defm Int_VCVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
                                    int_x86_sse_cvttss2si64, ssmem, sse_load_f32,
                                    "cvttss2si{q}", SSE_CVT_SS2SI_64>,
                                    XS, VEX, VEX_W;
defm Int_VCVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
                                    sdmem, sse_load_f64, "cvttsd2si",
                                    SSE_CVT_SD2SI>, XD, VEX;
defm Int_VCVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
                                    int_x86_sse2_cvttsd2si64, sdmem, sse_load_f64,
                                    "cvttsd2si{q}", SSE_CVT_SD2SI>,
                                    XD, VEX, VEX_W;
defm Int_CVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
                                    ssmem, sse_load_f32, "cvttss2si",
                                    SSE_CVT_SS2SI_32>, XS;
defm Int_CVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
                                    int_x86_sse_cvttss2si64, ssmem, sse_load_f32,
                                    "cvttss2si{q}", SSE_CVT_SS2SI_64>, XS, REX_W;
defm Int_CVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
                                    sdmem, sse_load_f64, "cvttsd2si",
                                    SSE_CVT_SD2SI>, XD;
defm Int_CVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
                                    int_x86_sse2_cvttsd2si64, sdmem, sse_load_f64,
                                    "cvttsd2si{q}", SSE_CVT_SD2SI>, XD, REX_W;

defm VCVTSS2SI   : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si,
                                  ssmem, sse_load_f32, "cvtss2si{l}",
                                  SSE_CVT_SS2SI_32>, XS, VEX, VEX_LIG;
defm VCVTSS2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse_cvtss2si64,
                                  ssmem, sse_load_f32, "cvtss2si{q}",
                                  SSE_CVT_SS2SI_64>, XS, VEX, VEX_W, VEX_LIG;

defm CVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si,
                               ssmem, sse_load_f32, "cvtss2si{l}",
                               SSE_CVT_SS2SI_32>, XS;
defm CVTSS2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse_cvtss2si64,
                                 ssmem, sse_load_f32, "cvtss2si{q}",
                                 SSE_CVT_SS2SI_64>, XS, REX_W;

defm VCVTDQ2PS   : sse12_cvt_p<0x5B, VR128, VR128, i128mem,
                               "vcvtdq2ps\t{$src, $dst|$dst, $src}",
                               SSEPackedSingle, SSE_CVT_PS>,
                               TB, VEX, Requires<[HasAVX]>;
defm VCVTDQ2PSY  : sse12_cvt_p<0x5B, VR256, VR256, i256mem,
                               "vcvtdq2ps\t{$src, $dst|$dst, $src}",
                               SSEPackedSingle, SSE_CVT_PS>,
                               TB, VEX, VEX_L, Requires<[HasAVX]>;

defm CVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, i128mem,
                            "cvtdq2ps\t{$src, $dst|$dst, $src}",
                            SSEPackedSingle, SSE_CVT_PS>,
                            TB, Requires<[UseSSE2]>;

// Convert scalar double to scalar single
let neverHasSideEffects = 1 in {
def VCVTSD2SSrr  : VSDI<0x5A, MRMSrcReg, (outs FR32:$dst),
                       (ins FR64:$src1, FR64:$src2),
                      "cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}", [],
                      IIC_SSE_CVT_Scalar_RR>, VEX_4V, VEX_LIG;
let mayLoad = 1 in
def VCVTSD2SSrm  : I<0x5A, MRMSrcMem, (outs FR32:$dst),
                     (ins FR64:$src1, f64mem:$src2),
                     "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                     [], IIC_SSE_CVT_Scalar_RM>,
                     XD, Requires<[HasAVX, OptForSize]>, VEX_4V, VEX_LIG;
}

def : Pat<(f32 (fround FR64:$src)), (VCVTSD2SSrr FR64:$src, FR64:$src)>,
          Requires<[HasAVX]>;

def CVTSD2SSrr  : SDI<0x5A, MRMSrcReg, (outs FR32:$dst), (ins FR64:$src),
                      "cvtsd2ss\t{$src, $dst|$dst, $src}",
                      [(set FR32:$dst, (fround FR64:$src))],
                      IIC_SSE_CVT_Scalar_RR>;
def CVTSD2SSrm  : I<0x5A, MRMSrcMem, (outs FR32:$dst), (ins f64mem:$src),
                    "cvtsd2ss\t{$src, $dst|$dst, $src}",
                    [(set FR32:$dst, (fround (loadf64 addr:$src)))],
                    IIC_SSE_CVT_Scalar_RM>,
                    XD,
                  Requires<[UseSSE2, OptForSize]>;

def Int_VCVTSD2SSrr: I<0x5A, MRMSrcReg,
                       (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                       "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                       [(set VR128:$dst,
                         (int_x86_sse2_cvtsd2ss VR128:$src1, VR128:$src2))],
                       IIC_SSE_CVT_Scalar_RR>, XD, VEX_4V, Requires<[HasAVX]>;
def Int_VCVTSD2SSrm: I<0x5A, MRMSrcMem,
                       (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2),
                       "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                       [(set VR128:$dst, (int_x86_sse2_cvtsd2ss
                                          VR128:$src1, sse_load_f64:$src2))],
                       IIC_SSE_CVT_Scalar_RM>, XD, VEX_4V, Requires<[HasAVX]>;

let Constraints = "$src1 = $dst" in {
def Int_CVTSD2SSrr: I<0x5A, MRMSrcReg,
                      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                      "cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                      [(set VR128:$dst,
                        (int_x86_sse2_cvtsd2ss VR128:$src1, VR128:$src2))],
                      IIC_SSE_CVT_Scalar_RR>, XD, Requires<[UseSSE2]>;
def Int_CVTSD2SSrm: I<0x5A, MRMSrcMem,
                      (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2),
                      "cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                      [(set VR128:$dst, (int_x86_sse2_cvtsd2ss
                                         VR128:$src1, sse_load_f64:$src2))],
                      IIC_SSE_CVT_Scalar_RM>, XD, Requires<[UseSSE2]>;
}

// Convert scalar single to scalar double
// SSE2 instructions with XS prefix
let neverHasSideEffects = 1 in {
def VCVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst),
                    (ins FR32:$src1, FR32:$src2),
                    "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    [], IIC_SSE_CVT_Scalar_RR>,
                    XS, Requires<[HasAVX]>, VEX_4V, VEX_LIG;
let mayLoad = 1 in
def VCVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst),
                    (ins FR32:$src1, f32mem:$src2),
                    "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    [], IIC_SSE_CVT_Scalar_RM>,
                    XS, VEX_4V, VEX_LIG, Requires<[HasAVX, OptForSize]>;
}

def : Pat<(f64 (fextend FR32:$src)),
          (VCVTSS2SDrr FR32:$src, FR32:$src)>, Requires<[HasAVX]>;
def : Pat<(fextend (loadf32 addr:$src)),
          (VCVTSS2SDrm (f32 (IMPLICIT_DEF)), addr:$src)>, Requires<[HasAVX]>;

def : Pat<(extloadf32 addr:$src),
          (VCVTSS2SDrm (f32 (IMPLICIT_DEF)), addr:$src)>,
          Requires<[HasAVX, OptForSize]>;
def : Pat<(extloadf32 addr:$src),
          (VCVTSS2SDrr (f32 (IMPLICIT_DEF)), (VMOVSSrm addr:$src))>,
          Requires<[HasAVX, OptForSpeed]>;

def CVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst), (ins FR32:$src),
                   "cvtss2sd\t{$src, $dst|$dst, $src}",
                   [(set FR64:$dst, (fextend FR32:$src))],
                   IIC_SSE_CVT_Scalar_RR>, XS,
                 Requires<[UseSSE2]>;
def CVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst), (ins f32mem:$src),
                   "cvtss2sd\t{$src, $dst|$dst, $src}",
                   [(set FR64:$dst, (extloadf32 addr:$src))],
                   IIC_SSE_CVT_Scalar_RM>, XS,
                 Requires<[UseSSE2, OptForSize]>;

// extload f32 -> f64. This matches load+fextend because we have a hack in
// the isel (PreprocessForFPConvert) that can introduce loads after dag
// combine.
// Since these loads aren't folded into the fextend, we have to match it
// explicitly here.
def : Pat<(fextend (loadf32 addr:$src)),
          (CVTSS2SDrm addr:$src)>, Requires<[UseSSE2]>;
def : Pat<(extloadf32 addr:$src),
          (CVTSS2SDrr (MOVSSrm addr:$src))>, Requires<[UseSSE2, OptForSpeed]>;
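
// For example (illustration, hypothetical operands): under OptForSize the
// load folds into the conversion, giving roughly
//   cvtss2sd (%rax), %xmm0
// while the OptForSpeed pattern above splits it into
//   movss    (%rax), %xmm0
//   cvtss2sd %xmm0, %xmm0
// so the conversion reads a register freshly written by the load instead of
// merging into the stale upper bits of the destination.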
def Int_VCVTSS2SDrr: I<0x5A, MRMSrcReg,
                      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                    "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    [(set VR128:$dst,
                      (int_x86_sse2_cvtss2sd VR128:$src1, VR128:$src2))],
                    IIC_SSE_CVT_Scalar_RR>, XS, VEX_4V, Requires<[HasAVX]>;
def Int_VCVTSS2SDrm: I<0x5A, MRMSrcMem,
                      (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2),
                    "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    [(set VR128:$dst,
                      (int_x86_sse2_cvtss2sd VR128:$src1, sse_load_f32:$src2))],
                    IIC_SSE_CVT_Scalar_RM>, XS, VEX_4V, Requires<[HasAVX]>;
let Constraints = "$src1 = $dst" in { // SSE2 instructions with XS prefix
def Int_CVTSS2SDrr: I<0x5A, MRMSrcReg,
                      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                    "cvtss2sd\t{$src2, $dst|$dst, $src2}",
                    [(set VR128:$dst,
                      (int_x86_sse2_cvtss2sd VR128:$src1, VR128:$src2))],
                    IIC_SSE_CVT_Scalar_RR>, XS, Requires<[UseSSE2]>;
def Int_CVTSS2SDrm: I<0x5A, MRMSrcMem,
                      (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2),
                    "cvtss2sd\t{$src2, $dst|$dst, $src2}",
                    [(set VR128:$dst,
                      (int_x86_sse2_cvtss2sd VR128:$src1, sse_load_f32:$src2))],
                    IIC_SSE_CVT_Scalar_RM>, XS, Requires<[UseSSE2]>;
}

// Convert packed single/double fp to doubleword
def VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtps2dq\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))],
                       IIC_SSE_CVT_PS_RR>, VEX;
def VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                       "cvtps2dq\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst,
                         (int_x86_sse2_cvtps2dq (memopv4f32 addr:$src)))],
                       IIC_SSE_CVT_PS_RM>, VEX;
def VCVTPS2DQYrr : VPDI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
                        "cvtps2dq\t{$src, $dst|$dst, $src}",
                        [(set VR256:$dst,
                          (int_x86_avx_cvt_ps2dq_256 VR256:$src))],
                        IIC_SSE_CVT_PS_RR>, VEX, VEX_L;
def VCVTPS2DQYrm : VPDI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
                        "cvtps2dq\t{$src, $dst|$dst, $src}",
                        [(set VR256:$dst,
                          (int_x86_avx_cvt_ps2dq_256 (memopv8f32 addr:$src)))],
                        IIC_SSE_CVT_PS_RM>, VEX, VEX_L;
def CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                     "cvtps2dq\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))],
                     IIC_SSE_CVT_PS_RR>;
def CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                     "cvtps2dq\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst,
                       (int_x86_sse2_cvtps2dq (memopv4f32 addr:$src)))],
                     IIC_SSE_CVT_PS_RM>;

// Convert Packed Double FP to Packed DW Integers
let Predicates = [HasAVX] in {
// The assembler can recognize rr 256-bit instructions by seeing a ymm
// register, but the same isn't true when using memory operands instead.
// Provide other assembly rr and rm forms to address this explicitly.
def VCVTPD2DQrr  : SDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "vcvtpd2dq\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
                       VEX;

// XMM only
def : InstAlias<"vcvtpd2dqx\t{$src, $dst|$dst, $src}",
                (VCVTPD2DQrr VR128:$dst, VR128:$src)>;
def VCVTPD2DQXrm : SDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                       "vcvtpd2dqx\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst,
                         (int_x86_sse2_cvtpd2dq (memopv2f64 addr:$src)))]>, VEX;

// YMM only
def VCVTPD2DQYrr : SDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
                       "vcvtpd2dq{y}\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst,
                         (int_x86_avx_cvt_pd2dq_256 VR256:$src))]>, VEX, VEX_L;
def VCVTPD2DQYrm : SDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
                       "vcvtpd2dq{y}\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst,
                         (int_x86_avx_cvt_pd2dq_256 (memopv4f64 addr:$src)))]>,
                       VEX, VEX_L;
def : InstAlias<"vcvtpd2dq\t{$src, $dst|$dst, $src}",
                (VCVTPD2DQYrr VR128:$dst, VR256:$src)>;
}
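
// For example (illustration, hypothetical operands): "vcvtpd2dq %ymm0, %xmm0"
// is unambiguous because the ymm register names the 256-bit form, but
// "vcvtpd2dq (%rax), %xmm0" could mean either source width. The suffixed
// forms above make the intent explicit:
//   vcvtpd2dqx (%rax), %xmm0   # 128-bit memory source
//   vcvtpd2dqy (%rax), %xmm0   # 256-bit memory source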
def CVTPD2DQrm  : SDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                      "cvtpd2dq\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                        (int_x86_sse2_cvtpd2dq (memopv2f64 addr:$src)))],
                      IIC_SSE_CVT_PD_RM>;
def CVTPD2DQrr  : SDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                      "cvtpd2dq\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))],
                      IIC_SSE_CVT_PD_RR>;

// Convert with truncation packed single/double fp to doubleword
// SSE2 packed instructions with XS prefix
def VCVTTPS2DQrr : VS2SI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                         "cvttps2dq\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst,
                           (int_x86_sse2_cvttps2dq VR128:$src))],
                         IIC_SSE_CVT_PS_RR>, VEX;
def VCVTTPS2DQrm : VS2SI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                         "cvttps2dq\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst, (int_x86_sse2_cvttps2dq
                                            (memopv4f32 addr:$src)))],
                         IIC_SSE_CVT_PS_RM>, VEX;
def VCVTTPS2DQYrr : VS2SI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
                          "cvttps2dq\t{$src, $dst|$dst, $src}",
                          [(set VR256:$dst,
                            (int_x86_avx_cvtt_ps2dq_256 VR256:$src))],
                          IIC_SSE_CVT_PS_RR>, VEX, VEX_L;
def VCVTTPS2DQYrm : VS2SI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
                          "cvttps2dq\t{$src, $dst|$dst, $src}",
                          [(set VR256:$dst, (int_x86_avx_cvtt_ps2dq_256
                                             (memopv8f32 addr:$src)))],
                          IIC_SSE_CVT_PS_RM>, VEX, VEX_L;

def CVTTPS2DQrr : S2SI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvttps2dq\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvttps2dq VR128:$src))],
                       IIC_SSE_CVT_PS_RR>;
def CVTTPS2DQrm : S2SI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                       "cvttps2dq\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst,
                         (int_x86_sse2_cvttps2dq (memopv4f32 addr:$src)))],
                       IIC_SSE_CVT_PS_RM>;

let Predicates = [HasAVX] in {
  def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
            (VCVTDQ2PSrr VR128:$src)>;
  def : Pat<(v4f32 (sint_to_fp (bc_v4i32 (memopv2i64 addr:$src)))),
            (VCVTDQ2PSrm addr:$src)>;

  def : Pat<(int_x86_sse2_cvtdq2ps VR128:$src),
            (VCVTDQ2PSrr VR128:$src)>;
  def : Pat<(int_x86_sse2_cvtdq2ps (bc_v4i32 (memopv2i64 addr:$src))),
            (VCVTDQ2PSrm addr:$src)>;

  def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
            (VCVTTPS2DQrr VR128:$src)>;
  def : Pat<(v4i32 (fp_to_sint (memopv4f32 addr:$src))),
            (VCVTTPS2DQrm addr:$src)>;

  def : Pat<(v8f32 (sint_to_fp (v8i32 VR256:$src))),
            (VCVTDQ2PSYrr VR256:$src)>;
  def : Pat<(v8f32 (sint_to_fp (bc_v8i32 (memopv4i64 addr:$src)))),
            (VCVTDQ2PSYrm addr:$src)>;

  def : Pat<(v8i32 (fp_to_sint (v8f32 VR256:$src))),
            (VCVTTPS2DQYrr VR256:$src)>;
  def : Pat<(v8i32 (fp_to_sint (memopv8f32 addr:$src))),
            (VCVTTPS2DQYrm addr:$src)>;
}

let Predicates = [UseSSE2] in {
  def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
            (CVTDQ2PSrr VR128:$src)>;
  def : Pat<(v4f32 (sint_to_fp (bc_v4i32 (memopv2i64 addr:$src)))),
            (CVTDQ2PSrm addr:$src)>;

  def : Pat<(int_x86_sse2_cvtdq2ps VR128:$src),
            (CVTDQ2PSrr VR128:$src)>;
  def : Pat<(int_x86_sse2_cvtdq2ps (bc_v4i32 (memopv2i64 addr:$src))),
            (CVTDQ2PSrm addr:$src)>;

  def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
            (CVTTPS2DQrr VR128:$src)>;
  def : Pat<(v4i32 (fp_to_sint (memopv4f32 addr:$src))),
            (CVTTPS2DQrm addr:$src)>;
}
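
// For example (illustration): fp_to_sint maps to the truncating "cvtt" forms
// above because C-style casts round toward zero, e.g. (int)2.7 == 2 and
// (int)-2.7 == -2, which is exactly what cvttps2dq computes per lane.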
def VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "cvttpd2dq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                          (int_x86_sse2_cvttpd2dq VR128:$src))],
                        IIC_SSE_CVT_PD_RR>, VEX;

// The assembler can recognize rr 256-bit instructions by seeing a ymm
// register, but the same isn't true when using memory operands instead.
// Provide other assembly rr and rm forms to address this explicitly.

// XMM only
def : InstAlias<"vcvttpd2dqx\t{$src, $dst|$dst, $src}",
                (VCVTTPD2DQrr VR128:$dst, VR128:$src)>;
def VCVTTPD2DQXrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                         "cvttpd2dqx\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
                                            (memopv2f64 addr:$src)))],
                         IIC_SSE_CVT_PD_RM>, VEX;

// YMM only
def VCVTTPD2DQYrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
                         "cvttpd2dq{y}\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst,
                           (int_x86_avx_cvtt_pd2dq_256 VR256:$src))],
                         IIC_SSE_CVT_PD_RR>, VEX, VEX_L;
def VCVTTPD2DQYrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
                         "cvttpd2dq{y}\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst,
                           (int_x86_avx_cvtt_pd2dq_256 (memopv4f64 addr:$src)))],
                         IIC_SSE_CVT_PD_RM>, VEX, VEX_L;
def : InstAlias<"vcvttpd2dq\t{$src, $dst|$dst, $src}",
                (VCVTTPD2DQYrr VR128:$dst, VR256:$src)>;

let Predicates = [HasAVX] in {
  def : Pat<(v4i32 (fp_to_sint (v4f64 VR256:$src))),
            (VCVTTPD2DQYrr VR256:$src)>;
  def : Pat<(v4i32 (fp_to_sint (memopv4f64 addr:$src))),
            (VCVTTPD2DQYrm addr:$src)>;
} // Predicates = [HasAVX]

def CVTTPD2DQrr : PDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                      "cvttpd2dq\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))],
                      IIC_SSE_CVT_PD_RR>;
def CVTTPD2DQrm : PDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                      "cvttpd2dq\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
                                         (memopv2f64 addr:$src)))],
                      IIC_SSE_CVT_PD_RM>;

// Convert packed single to packed double
let Predicates = [HasAVX] in {
// SSE2 instructions without OpSize prefix
def VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                    "vcvtps2pd\t{$src, $dst|$dst, $src}",
                    [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))],
                    IIC_SSE_CVT_PD_RR>, TB, VEX;
def VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
                    "vcvtps2pd\t{$src, $dst|$dst, $src}",
                    [(set VR128:$dst, (v2f64 (extloadv2f32 addr:$src)))],
                    IIC_SSE_CVT_PD_RM>, TB, VEX;
def VCVTPS2PDYrr : I<0x5A, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
                     "vcvtps2pd\t{$src, $dst|$dst, $src}",
                     [(set VR256:$dst,
                       (int_x86_avx_cvt_ps2_pd_256 VR128:$src))],
                     IIC_SSE_CVT_PD_RR>, TB, VEX, VEX_L;
def VCVTPS2PDYrm : I<0x5A, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
                     "vcvtps2pd\t{$src, $dst|$dst, $src}",
                     [(set VR256:$dst,
                       (int_x86_avx_cvt_ps2_pd_256 (memopv4f32 addr:$src)))],
                     IIC_SSE_CVT_PD_RM>, TB, VEX, VEX_L;
}

let Predicates = [UseSSE2] in {
def CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                   "cvtps2pd\t{$src, $dst|$dst, $src}",
                   [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))],
                   IIC_SSE_CVT_PD_RR>, TB;
def CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
                   "cvtps2pd\t{$src, $dst|$dst, $src}",
                   [(set VR128:$dst, (v2f64 (extloadv2f32 addr:$src)))],
                   IIC_SSE_CVT_PD_RM>, TB;
}

// Convert Packed DW Integers to Packed Double FP
let Predicates = [HasAVX] in {
let neverHasSideEffects = 1, mayLoad = 1 in
def VCVTDQ2PDrm  : S2SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                     "vcvtdq2pd\t{$src, $dst|$dst, $src}",
                     []>, VEX;
def VCVTDQ2PDrr  : S2SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                     "vcvtdq2pd\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst,
                       (int_x86_sse2_cvtdq2pd VR128:$src))]>, VEX;
def VCVTDQ2PDYrm : S2SI<0xE6, MRMSrcMem, (outs VR256:$dst), (ins i128mem:$src),
                     "vcvtdq2pd\t{$src, $dst|$dst, $src}",
                     [(set VR256:$dst,
                       (int_x86_avx_cvtdq2_pd_256
                        (bitconvert (memopv2i64 addr:$src))))]>, VEX, VEX_L;
def VCVTDQ2PDYrr : S2SI<0xE6, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
                     "vcvtdq2pd\t{$src, $dst|$dst, $src}",
                     [(set VR256:$dst,
                       (int_x86_avx_cvtdq2_pd_256 VR128:$src))]>, VEX, VEX_L;
}

let neverHasSideEffects = 1, mayLoad = 1 in
def CVTDQ2PDrm  : S2SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                       "cvtdq2pd\t{$src, $dst|$dst, $src}", [],
                       IIC_SSE_CVT_PD_RM>;
def CVTDQ2PDrr  : S2SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtdq2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))],
                       IIC_SSE_CVT_PD_RR>;

// AVX 256-bit register conversion intrinsics
let Predicates = [HasAVX] in {
  def : Pat<(v4f64 (sint_to_fp (v4i32 VR128:$src))),
            (VCVTDQ2PDYrr VR128:$src)>;
  def : Pat<(v4f64 (sint_to_fp (bc_v4i32 (memopv2i64 addr:$src)))),
            (VCVTDQ2PDYrm addr:$src)>;
} // Predicates = [HasAVX]

// Convert packed double to packed single
// The assembler can recognize rr 256-bit instructions by seeing a ymm
// register, but the same isn't true when using memory operands instead.
// Provide other assembly rr and rm forms to address this explicitly.
def VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtpd2ps\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))],
                       IIC_SSE_CVT_PD_RR>, VEX;

// XMM only
def : InstAlias<"vcvtpd2psx\t{$src, $dst|$dst, $src}",
                (VCVTPD2PSrr VR128:$dst, VR128:$src)>;
def VCVTPD2PSXrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                        "cvtpd2psx\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                          (int_x86_sse2_cvtpd2ps (memopv2f64 addr:$src)))],
                        IIC_SSE_CVT_PD_RM>, VEX;

// YMM only
def VCVTPD2PSYrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
                        "cvtpd2ps{y}\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                          (int_x86_avx_cvt_pd2_ps_256 VR256:$src))],
                        IIC_SSE_CVT_PD_RR>, VEX, VEX_L;
def VCVTPD2PSYrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
                        "cvtpd2ps{y}\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                          (int_x86_avx_cvt_pd2_ps_256 (memopv4f64 addr:$src)))],
                        IIC_SSE_CVT_PD_RM>, VEX, VEX_L;
def : InstAlias<"vcvtpd2ps\t{$src, $dst|$dst, $src}",
                (VCVTPD2PSYrr VR128:$dst, VR256:$src)>;

def CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                     "cvtpd2ps\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))],
                     IIC_SSE_CVT_PD_RR>;
def CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                     "cvtpd2ps\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst,
                       (int_x86_sse2_cvtpd2ps (memopv2f64 addr:$src)))],
                     IIC_SSE_CVT_PD_RM>;

// AVX 256-bit register conversion intrinsics
// FIXME: Migrate SSE conversion intrinsics matching to use patterns as below
// whenever possible to avoid declaring two versions of each one.
let Predicates = [HasAVX] in {
  def : Pat<(int_x86_avx_cvtdq2_ps_256 VR256:$src),
            (VCVTDQ2PSYrr VR256:$src)>;
  def : Pat<(int_x86_avx_cvtdq2_ps_256 (bitconvert (memopv4i64 addr:$src))),
            (VCVTDQ2PSYrm addr:$src)>;

  // Match fround and fextend for 128/256-bit conversions
  def : Pat<(v4f32 (X86vfpround (v2f64 VR128:$src))),
            (VCVTPD2PSrr VR128:$src)>;
  def : Pat<(v4f32 (X86vfpround (memopv2f64 addr:$src))),
            (VCVTPD2PSXrm addr:$src)>;
  def : Pat<(v4f32 (fround (v4f64 VR256:$src))),
            (VCVTPD2PSYrr VR256:$src)>;
  def : Pat<(v4f32 (fround (loadv4f64 addr:$src))),
            (VCVTPD2PSYrm addr:$src)>;

  def : Pat<(v2f64 (X86vfpext (v4f32 VR128:$src))),
            (VCVTPS2PDrr VR128:$src)>;
  def : Pat<(v4f64 (fextend (v4f32 VR128:$src))),
            (VCVTPS2PDYrr VR128:$src)>;
  def : Pat<(v4f64 (extloadv4f32 addr:$src)),
            (VCVTPS2PDYrm addr:$src)>;
}

let Predicates = [UseSSE2] in {
  // Match fround and fextend for 128-bit conversions
  def : Pat<(v4f32 (X86vfpround (v2f64 VR128:$src))),
            (CVTPD2PSrr VR128:$src)>;
  def : Pat<(v4f32 (X86vfpround (memopv2f64 addr:$src))),
            (CVTPD2PSrm addr:$src)>;

  def : Pat<(v2f64 (X86vfpext (v4f32 VR128:$src))),
            (CVTPS2PDrr VR128:$src)>;
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Compare Instructions
//===----------------------------------------------------------------------===//

// sse12_cmp_scalar - sse 1 & 2 compare scalar instructions
multiclass sse12_cmp_scalar<RegisterClass RC, X86MemOperand x86memop,
                            Operand CC, SDNode OpNode, ValueType VT,
                            PatFrag ld_frag, string asm, string asm_alt,
                            OpndItins itins> {
  def rr : SIi8<0xC2, MRMSrcReg,
                (outs RC:$dst), (ins RC:$src1, RC:$src2, CC:$cc), asm,
                [(set RC:$dst, (OpNode (VT RC:$src1), RC:$src2, imm:$cc))],
                itins.rr>;
  def rm : SIi8<0xC2, MRMSrcMem,
                (outs RC:$dst), (ins RC:$src1, x86memop:$src2, CC:$cc), asm,
                [(set RC:$dst, (OpNode (VT RC:$src1),
                                       (ld_frag addr:$src2), imm:$cc))],
                itins.rm>;

  // Accept explicit immediate argument form instead of comparison code.
  let neverHasSideEffects = 1 in {
    def rr_alt : SIi8<0xC2, MRMSrcReg, (outs RC:$dst),
                      (ins RC:$src1, RC:$src2, i8imm:$cc), asm_alt, [],
                      IIC_SSE_ALU_F32S_RR>;
    let mayLoad = 1 in
    def rm_alt : SIi8<0xC2, MRMSrcMem, (outs RC:$dst),
                      (ins RC:$src1, x86memop:$src2, i8imm:$cc), asm_alt, [],
                      IIC_SSE_ALU_F32S_RM>;
  }
}

defm VCMPSS : sse12_cmp_scalar<FR32, f32mem, AVXCC, X86cmpss, f32, loadf32,
                 "cmp${cc}ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                 "cmpss\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
                 SSE_ALU_F32S>,
                 XS, VEX_4V, VEX_LIG;
defm VCMPSD : sse12_cmp_scalar<FR64, f64mem, AVXCC, X86cmpsd, f64, loadf64,
                 "cmp${cc}sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                 "cmpsd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
                 SSE_ALU_F32S>, // same latency as 32 bit compare
                 XD, VEX_4V, VEX_LIG;
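
// For example (illustration, hypothetical operands): the rr/rm forms accept a
// condition-code mnemonic while the *_alt forms accept a raw immediate, so
// these two spellings assemble to the same instruction:
//   vcmpeqss %xmm1, %xmm2, %xmm0
//   vcmpss   $0, %xmm1, %xmm2, %xmm0   # 0 = EQ predicate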
let Constraints = "$src1 = $dst" in {
  defm CMPSS : sse12_cmp_scalar<FR32, f32mem, SSECC, X86cmpss, f32, loadf32,
                  "cmp${cc}ss\t{$src2, $dst|$dst, $src2}",
                  "cmpss\t{$cc, $src2, $dst|$dst, $src2, $cc}", SSE_ALU_F32S>,
                  XS;
  defm CMPSD : sse12_cmp_scalar<FR64, f64mem, SSECC, X86cmpsd, f64, loadf64,
                  "cmp${cc}sd\t{$src2, $dst|$dst, $src2}",
                  "cmpsd\t{$cc, $src2, $dst|$dst, $src2, $cc}",
                  SSE_ALU_F32S>, // same latency as 32 bit compare
                  XD;
}

multiclass sse12_cmp_scalar_int<X86MemOperand x86memop, Operand CC,
                         Intrinsic Int, string asm, OpndItins itins> {
  def rr : SIi8<0xC2, MRMSrcReg, (outs VR128:$dst),
                      (ins VR128:$src1, VR128:$src, CC:$cc), asm,
                        [(set VR128:$dst, (Int VR128:$src1,
                                               VR128:$src, imm:$cc))],
                        itins.rr>;
  def rm : SIi8<0xC2, MRMSrcMem, (outs VR128:$dst),
                      (ins VR128:$src1, x86memop:$src, CC:$cc), asm,
                        [(set VR128:$dst, (Int VR128:$src1,
                                               (load addr:$src), imm:$cc))],
                        itins.rm>;
}

// Aliases to match intrinsics which expect XMM operand(s).
defm Int_VCMPSS : sse12_cmp_scalar_int<f32mem, AVXCC, int_x86_sse_cmp_ss,
                     "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}",
                     SSE_ALU_F32S>,
                     XS, VEX_4V;
defm Int_VCMPSD : sse12_cmp_scalar_int<f64mem, AVXCC, int_x86_sse2_cmp_sd,
                     "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}",
                     SSE_ALU_F32S>, // same latency as f32
                     XD, VEX_4V;
let Constraints = "$src1 = $dst" in {
  defm Int_CMPSS : sse12_cmp_scalar_int<f32mem, SSECC, int_x86_sse_cmp_ss,
                       "cmp${cc}ss\t{$src, $dst|$dst, $src}",
                       SSE_ALU_F32S>, XS;
  defm Int_CMPSD : sse12_cmp_scalar_int<f64mem, SSECC, int_x86_sse2_cmp_sd,
                       "cmp${cc}sd\t{$src, $dst|$dst, $src}",
                       SSE_ALU_F32S>, // same latency as f32
                       XD;
}

// sse12_ord_cmp - Unordered/Ordered scalar fp compare and set EFLAGS
multiclass sse12_ord_cmp<bits<8> opc, RegisterClass RC, SDNode OpNode,
                            ValueType vt, X86MemOperand x86memop,
                            PatFrag ld_frag, string OpcodeStr, Domain d> {
  def rr: PI<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
                     !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
                     [(set EFLAGS, (OpNode (vt RC:$src1), RC:$src2))],
                     IIC_SSE_COMIS_RR, d>;
  def rm: PI<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
                     !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
                     [(set EFLAGS, (OpNode (vt RC:$src1),
                                           (ld_frag addr:$src2)))],
                                           IIC_SSE_COMIS_RM, d>;
}

let Defs = [EFLAGS] in {
  defm VUCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
                                  "ucomiss", SSEPackedSingle>, TB, VEX, VEX_LIG;
  defm VUCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
                                  "ucomisd", SSEPackedDouble>, TB, OpSize, VEX,
                                  VEX_LIG;
  let Pattern = []<dag> in {
    defm VCOMISS  : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
                                    "comiss", SSEPackedSingle>, TB, VEX,
                                    VEX_LIG;
    defm VCOMISD  : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
                                    "comisd", SSEPackedDouble>, TB, OpSize, VEX,
                                    VEX_LIG;
  }

  defm Int_VUCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
                            load, "ucomiss", SSEPackedSingle>, TB, VEX;
  defm Int_VUCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
                            load, "ucomisd", SSEPackedDouble>, TB, OpSize, VEX;

  defm Int_VCOMISS  : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem,
                            load, "comiss", SSEPackedSingle>, TB, VEX;
  defm Int_VCOMISD  : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem,
                            load, "comisd", SSEPackedDouble>, TB, OpSize, VEX;
  defm UCOMISS  : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
                                  "ucomiss", SSEPackedSingle>, TB;
  defm UCOMISD  : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
                                  "ucomisd", SSEPackedDouble>, TB, OpSize;

  let Pattern = []<dag> in {
    defm COMISS  : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
                                    "comiss", SSEPackedSingle>, TB;
    defm COMISD  : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
                                    "comisd", SSEPackedDouble>, TB, OpSize;
  }

  defm Int_UCOMISS  : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
                              load, "ucomiss", SSEPackedSingle>, TB;
  defm Int_UCOMISD  : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
                              load, "ucomisd", SSEPackedDouble>, TB, OpSize;

  defm Int_COMISS  : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem, load,
                                  "comiss", SSEPackedSingle>, TB;
  defm Int_COMISD  : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem, load,
                                  "comisd", SSEPackedDouble>, TB, OpSize;
} // Defs = [EFLAGS]
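
// For example (illustration, hypothetical operands): after
//   ucomiss %xmm1, %xmm0     # sets ZF/PF/CF from the fp comparison
// a branch such as "ja" (CF=0 and ZF=0) tests "%xmm0 > %xmm1"; an unordered
// result (a NaN operand) sets ZF, PF and CF, so PF distinguishes NaNs.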
// sse12_cmp_packed - sse 1 & 2 compare packed instructions
multiclass sse12_cmp_packed<RegisterClass RC, X86MemOperand x86memop,
                            Operand CC, Intrinsic Int, string asm,
                            string asm_alt, Domain d> {
  def rri : PIi8<0xC2, MRMSrcReg,
             (outs RC:$dst), (ins RC:$src1, RC:$src2, CC:$cc), asm,
             [(set RC:$dst, (Int RC:$src1, RC:$src2, imm:$cc))],
             IIC_SSE_CMPP_RR, d>;
  def rmi : PIi8<0xC2, MRMSrcMem,
             (outs RC:$dst), (ins RC:$src1, x86memop:$src2, CC:$cc), asm,
             [(set RC:$dst, (Int RC:$src1, (memop addr:$src2), imm:$cc))],
             IIC_SSE_CMPP_RM, d>;

  // Accept explicit immediate argument form instead of comparison code.
  let neverHasSideEffects = 1 in {
    def rri_alt : PIi8<0xC2, MRMSrcReg,
               (outs RC:$dst), (ins RC:$src1, RC:$src2, i8imm:$cc),
               asm_alt, [], IIC_SSE_CMPP_RR, d>;
    def rmi_alt : PIi8<0xC2, MRMSrcMem,
               (outs RC:$dst), (ins RC:$src1, x86memop:$src2, i8imm:$cc),
               asm_alt, [], IIC_SSE_CMPP_RM, d>;
  }
}

defm VCMPPS : sse12_cmp_packed<VR128, f128mem, AVXCC, int_x86_sse_cmp_ps,
               "cmp${cc}ps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
               "cmpps\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
               SSEPackedSingle>, TB, VEX_4V;
defm VCMPPD : sse12_cmp_packed<VR128, f128mem, AVXCC, int_x86_sse2_cmp_pd,
               "cmp${cc}pd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
               "cmppd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
               SSEPackedDouble>, TB, OpSize, VEX_4V;
defm VCMPPSY : sse12_cmp_packed<VR256, f256mem, AVXCC, int_x86_avx_cmp_ps_256,
               "cmp${cc}ps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
               "cmpps\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
               SSEPackedSingle>, TB, VEX_4V, VEX_L;
defm VCMPPDY : sse12_cmp_packed<VR256, f256mem, AVXCC, int_x86_avx_cmp_pd_256,
               "cmp${cc}pd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
               "cmppd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
               SSEPackedDouble>, TB, OpSize, VEX_4V, VEX_L;
let Constraints = "$src1 = $dst" in {
  defm CMPPS : sse12_cmp_packed<VR128, f128mem, SSECC, int_x86_sse_cmp_ps,
                 "cmp${cc}ps\t{$src2, $dst|$dst, $src2}",
                 "cmpps\t{$cc, $src2, $dst|$dst, $src2, $cc}",
                 SSEPackedSingle>, TB;
  defm CMPPD : sse12_cmp_packed<VR128, f128mem, SSECC, int_x86_sse2_cmp_pd,
                 "cmp${cc}pd\t{$src2, $dst|$dst, $src2}",
                 "cmppd\t{$cc, $src2, $dst|$dst, $src2, $cc}",
                 SSEPackedDouble>, TB, OpSize;
}

let Predicates = [HasAVX] in {
def : Pat<(v4i32 (X86cmpp (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
          (VCMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>;
def : Pat<(v4i32 (X86cmpp (v4f32 VR128:$src1), (memop addr:$src2), imm:$cc)),
          (VCMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>;
def : Pat<(v2i64 (X86cmpp (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
          (VCMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
def : Pat<(v2i64 (X86cmpp (v2f64 VR128:$src1), (memop addr:$src2), imm:$cc)),
          (VCMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;

def : Pat<(v8i32 (X86cmpp (v8f32 VR256:$src1), VR256:$src2, imm:$cc)),
          (VCMPPSYrri (v8f32 VR256:$src1), (v8f32 VR256:$src2), imm:$cc)>;
def : Pat<(v8i32 (X86cmpp (v8f32 VR256:$src1), (memop addr:$src2), imm:$cc)),
          (VCMPPSYrmi (v8f32 VR256:$src1), addr:$src2, imm:$cc)>;
def : Pat<(v4i64 (X86cmpp (v4f64 VR256:$src1), VR256:$src2, imm:$cc)),
          (VCMPPDYrri VR256:$src1, VR256:$src2, imm:$cc)>;
def : Pat<(v4i64 (X86cmpp (v4f64 VR256:$src1), (memop addr:$src2), imm:$cc)),
          (VCMPPDYrmi VR256:$src1, addr:$src2, imm:$cc)>;
}

let Predicates = [UseSSE1] in {
def : Pat<(v4i32 (X86cmpp (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
          (CMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>;
def : Pat<(v4i32 (X86cmpp (v4f32 VR128:$src1), (memop addr:$src2), imm:$cc)),
          (CMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>;
}

let Predicates = [UseSSE2] in {
def : Pat<(v2i64 (X86cmpp (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
          (CMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
def : Pat<(v2i64 (X86cmpp (v2f64 VR128:$src1), (memop addr:$src2), imm:$cc)),
          (CMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Shuffle Instructions
//===----------------------------------------------------------------------===//

/// sse12_shuffle - sse 1 & 2 shuffle instructions
multiclass sse12_shuffle<RegisterClass RC, X86MemOperand x86memop,
                         ValueType vt, string asm, PatFrag mem_frag,
                         Domain d, bit IsConvertibleToThreeAddress = 0> {
  def rmi : PIi8<0xC6, MRMSrcMem, (outs RC:$dst),
                   (ins RC:$src1, x86memop:$src2, i8imm:$src3), asm,
                   [(set RC:$dst, (vt (X86Shufp RC:$src1, (mem_frag addr:$src2),
                                       (i8 imm:$src3))))], IIC_SSE_SHUFP, d>;
  let isConvertibleToThreeAddress = IsConvertibleToThreeAddress in
    def rri : PIi8<0xC6, MRMSrcReg, (outs RC:$dst),
                     (ins RC:$src1, RC:$src2, i8imm:$src3), asm,
                     [(set RC:$dst, (vt (X86Shufp RC:$src1, RC:$src2,
                                         (i8 imm:$src3))))], IIC_SSE_SHUFP, d>;
}

defm VSHUFPS  : sse12_shuffle<VR128, f128mem, v4f32,
           "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
           memopv4f32, SSEPackedSingle>, TB, VEX_4V;
defm VSHUFPSY : sse12_shuffle<VR256, f256mem, v8f32,
           "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
           memopv8f32, SSEPackedSingle>, TB, VEX_4V, VEX_L;
defm VSHUFPD  : sse12_shuffle<VR128, f128mem, v2f64,
           "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
           memopv2f64, SSEPackedDouble>, TB, OpSize, VEX_4V;
defm VSHUFPDY : sse12_shuffle<VR256, f256mem, v4f64,
           "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
           memopv4f64, SSEPackedDouble>, TB, OpSize, VEX_4V, VEX_L;
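
// For example (illustration, hypothetical operands): shufps selects the two
// low result elements from $src1 and the two high result elements from
// $src2, with a 2-bit lane index per element packed into the immediate. In
// AT&T syntax
//   shufps $0x1b, %xmm1, %xmm0   # 0x1b = 0b00'01'10'11
// yields xmm0 = { xmm0[3], xmm0[2], xmm1[1], xmm1[0] }, element 0 first.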
let Constraints = "$src1 = $dst" in {
  defm SHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
                    "shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                    memopv4f32, SSEPackedSingle, 1 /* cvt to pshufd */>,
                    TB;
  defm SHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
                    "shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                    memopv2f64, SSEPackedDouble, 1 /* cvt to pshufd */>,
                    TB, OpSize;
}

let Predicates = [HasAVX] in {
  def : Pat<(v4i32 (X86Shufp VR128:$src1,
                       (bc_v4i32 (memopv2i64 addr:$src2)), (i8 imm:$imm))),
            (VSHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
  def : Pat<(v4i32 (X86Shufp VR128:$src1, VR128:$src2, (i8 imm:$imm))),
            (VSHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;

  def : Pat<(v2i64 (X86Shufp VR128:$src1,
                       (memopv2i64 addr:$src2), (i8 imm:$imm))),
            (VSHUFPDrmi VR128:$src1, addr:$src2, imm:$imm)>;
  def : Pat<(v2i64 (X86Shufp VR128:$src1, VR128:$src2, (i8 imm:$imm))),
            (VSHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;

  // 256-bit patterns
  def : Pat<(v8i32 (X86Shufp VR256:$src1, VR256:$src2, (i8 imm:$imm))),
            (VSHUFPSYrri VR256:$src1, VR256:$src2, imm:$imm)>;
  def : Pat<(v8i32 (X86Shufp VR256:$src1,
                      (bc_v8i32 (memopv4i64 addr:$src2)), (i8 imm:$imm))),
            (VSHUFPSYrmi VR256:$src1, addr:$src2, imm:$imm)>;

  def : Pat<(v4i64 (X86Shufp VR256:$src1, VR256:$src2, (i8 imm:$imm))),
            (VSHUFPDYrri VR256:$src1, VR256:$src2, imm:$imm)>;
  def : Pat<(v4i64 (X86Shufp VR256:$src1,
                      (memopv4i64 addr:$src2), (i8 imm:$imm))),
            (VSHUFPDYrmi VR256:$src1, addr:$src2, imm:$imm)>;
}

let Predicates = [UseSSE1] in {
  def : Pat<(v4i32 (X86Shufp VR128:$src1,
                       (bc_v4i32 (memopv2i64 addr:$src2)), (i8 imm:$imm))),
            (SHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
  def : Pat<(v4i32 (X86Shufp VR128:$src1, VR128:$src2, (i8 imm:$imm))),
            (SHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
}

let Predicates = [UseSSE2] in {
  // Generic SHUFPD patterns
  def : Pat<(v2i64 (X86Shufp VR128:$src1,
                       (memopv2i64 addr:$src2), (i8 imm:$imm))),
            (SHUFPDrmi VR128:$src1, addr:$src2, imm:$imm)>;
  def : Pat<(v2i64 (X86Shufp VR128:$src1, VR128:$src2, (i8 imm:$imm))),
            (SHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Unpack Instructions
//===----------------------------------------------------------------------===//

/// sse12_unpack_interleave - sse 1 & 2 unpack and interleave
multiclass sse12_unpack_interleave<bits<8> opc, SDNode OpNode, ValueType vt,
                                   PatFrag mem_frag, RegisterClass RC,
                                   X86MemOperand x86memop, string asm,
                                   Domain d> {
    def rr : PI<opc, MRMSrcReg,
                (outs RC:$dst), (ins RC:$src1, RC:$src2),
                asm, [(set RC:$dst,
                           (vt (OpNode RC:$src1, RC:$src2)))],
                           IIC_SSE_UNPCK, d>;
    def rm : PI<opc, MRMSrcMem,
                (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
                asm, [(set RC:$dst,
                           (vt (OpNode RC:$src1,
                                       (mem_frag addr:$src2))))],
                                       IIC_SSE_UNPCK, d>;
}

defm VUNPCKHPS: sse12_unpack_interleave<0x15, X86Unpckh, v4f32, memopv4f32,
      VR128, f128mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                     SSEPackedSingle>, TB, VEX_4V;
defm VUNPCKHPD: sse12_unpack_interleave<0x15, X86Unpckh, v2f64, memopv2f64,
      VR128, f128mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                     SSEPackedDouble>, TB, OpSize, VEX_4V;
defm VUNPCKLPS: sse12_unpack_interleave<0x14, X86Unpckl, v4f32, memopv4f32,
      VR128, f128mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                     SSEPackedSingle>, TB, VEX_4V;
defm VUNPCKLPD: sse12_unpack_interleave<0x14, X86Unpckl, v2f64, memopv2f64,
      VR128, f128mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                     SSEPackedDouble>, TB, OpSize, VEX_4V;

defm VUNPCKHPSY: sse12_unpack_interleave<0x15, X86Unpckh, v8f32, memopv8f32,
      VR256, f256mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                     SSEPackedSingle>, TB, VEX_4V, VEX_L;
defm VUNPCKHPDY: sse12_unpack_interleave<0x15, X86Unpckh, v4f64, memopv4f64,
      VR256, f256mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                     SSEPackedDouble>, TB, OpSize, VEX_4V, VEX_L;
defm VUNPCKLPSY: sse12_unpack_interleave<0x14, X86Unpckl, v8f32, memopv8f32,
      VR256, f256mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                     SSEPackedSingle>, TB, VEX_4V, VEX_L;
defm VUNPCKLPDY: sse12_unpack_interleave<0x14, X86Unpckl, v4f64, memopv4f64,
      VR256, f256mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                     SSEPackedDouble>, TB, OpSize, VEX_4V, VEX_L;

let Constraints = "$src1 = $dst" in {
  defm UNPCKHPS: sse12_unpack_interleave<0x15, X86Unpckh, v4f32, memopv4f32,
        VR128, f128mem, "unpckhps\t{$src2, $dst|$dst, $src2}",
                       SSEPackedSingle>, TB;
  defm UNPCKHPD: sse12_unpack_interleave<0x15, X86Unpckh, v2f64, memopv2f64,
        VR128, f128mem, "unpckhpd\t{$src2, $dst|$dst, $src2}",
                       SSEPackedDouble>, TB, OpSize;
  defm UNPCKLPS: sse12_unpack_interleave<0x14, X86Unpckl, v4f32, memopv4f32,
        VR128, f128mem, "unpcklps\t{$src2, $dst|$dst, $src2}",
                       SSEPackedSingle>, TB;
  defm UNPCKLPD: sse12_unpack_interleave<0x14, X86Unpckl, v2f64, memopv2f64,
        VR128, f128mem, "unpcklpd\t{$src2, $dst|$dst, $src2}",
                       SSEPackedDouble>, TB, OpSize;
} // Constraints = "$src1 = $dst"

let Predicates = [HasAVX1Only] in {
  def : Pat<(v8i32 (X86Unpckl VR256:$src1, (bc_v8i32 (memopv4i64 addr:$src2)))),
            (VUNPCKLPSYrm VR256:$src1, addr:$src2)>;
  def : Pat<(v8i32 (X86Unpckl VR256:$src1, VR256:$src2)),
            (VUNPCKLPSYrr VR256:$src1, VR256:$src2)>;
  def : Pat<(v8i32 (X86Unpckh VR256:$src1, (bc_v8i32 (memopv4i64 addr:$src2)))),
            (VUNPCKHPSYrm VR256:$src1, addr:$src2)>;
  def : Pat<(v8i32 (X86Unpckh VR256:$src1, VR256:$src2)),
            (VUNPCKHPSYrr VR256:$src1, VR256:$src2)>;

  def : Pat<(v4i64 (X86Unpckl VR256:$src1, (memopv4i64 addr:$src2))),
            (VUNPCKLPDYrm VR256:$src1, addr:$src2)>;
  def : Pat<(v4i64 (X86Unpckl VR256:$src1, VR256:$src2)),
            (VUNPCKLPDYrr VR256:$src1, VR256:$src2)>;
  def : Pat<(v4i64 (X86Unpckh VR256:$src1, (memopv4i64 addr:$src2))),
            (VUNPCKHPDYrm VR256:$src1, addr:$src2)>;
  def : Pat<(v4i64 (X86Unpckh VR256:$src1, VR256:$src2)),
            (VUNPCKHPDYrr VR256:$src1, VR256:$src2)>;
}

let Predicates = [HasAVX] in {
  // FIXME: Instead of X86Movddup, there should be a X86Unpckl here, the
  // problem is during lowering, where it's not possible to recognize the load
  // fold because it has two uses through a bitcast. One use disappears at
  // isel time and the fold opportunity reappears.
  def : Pat<(v2f64 (X86Movddup VR128:$src)),
            (VUNPCKLPDrr VR128:$src, VR128:$src)>;
}

let Predicates = [UseSSE2] in {
  // FIXME: Instead of X86Movddup, there should be a X86Unpckl here, the
  // problem is during lowering, where it's not possible to recognize the load
  // fold because it has two uses through a bitcast. One use disappears at
  // isel time and the fold opportunity reappears.
  def : Pat<(v2f64 (X86Movddup VR128:$src)),
            (UNPCKLPDrr VR128:$src, VR128:$src)>;
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Extract Floating-Point Sign mask
//===----------------------------------------------------------------------===//

/// sse12_extr_sign_mask - sse 1 & 2 sign-mask extraction
multiclass sse12_extr_sign_mask<RegisterClass RC, Intrinsic Int, string asm,
                                Domain d> {
  def rr32 : PI<0x50, MRMSrcReg, (outs GR32:$dst), (ins RC:$src),
                !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
                [(set GR32:$dst, (Int RC:$src))], IIC_SSE_MOVMSK, d>;
  def rr64 : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins RC:$src),
                !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [],
                IIC_SSE_MOVMSK, d>, REX_W;
}

let Predicates = [HasAVX] in {
  defm VMOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps,
                                        "movmskps", SSEPackedSingle>, TB, VEX;
  defm VMOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd,
                                        "movmskpd", SSEPackedDouble>, TB,
                                        OpSize, VEX;
  defm VMOVMSKPSY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_ps_256,
                                         "movmskps", SSEPackedSingle>, TB,
                                         VEX, VEX_L;
  defm VMOVMSKPDY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_pd_256,
                                         "movmskpd", SSEPackedDouble>, TB,
                                         OpSize, VEX, VEX_L;

  def : Pat<(i32 (X86fgetsign FR32:$src)),
            (VMOVMSKPSrr32 (COPY_TO_REGCLASS FR32:$src, VR128))>;
  def : Pat<(i64 (X86fgetsign FR32:$src)),
            (VMOVMSKPSrr64 (COPY_TO_REGCLASS FR32:$src, VR128))>;
  def : Pat<(i32 (X86fgetsign FR64:$src)),
            (VMOVMSKPDrr32 (COPY_TO_REGCLASS FR64:$src, VR128))>;
  def : Pat<(i64 (X86fgetsign FR64:$src)),
            (VMOVMSKPDrr64 (COPY_TO_REGCLASS FR64:$src, VR128))>;

  // Assembler Only
  def VMOVMSKPSr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
             "movmskps\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVMSK,
             SSEPackedSingle>, TB, VEX;
  def VMOVMSKPDr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
             "movmskpd\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVMSK,
             SSEPackedDouble>, TB,
             OpSize, VEX;
  def VMOVMSKPSYr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
             "movmskps\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVMSK,
             SSEPackedSingle>, TB, VEX, VEX_L;
  def VMOVMSKPDYr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
             "movmskpd\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVMSK,
             SSEPackedDouble>, TB,
             OpSize, VEX, VEX_L;
}

defm MOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps, "movmskps",
                                     SSEPackedSingle>, TB;
defm MOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd, "movmskpd",
                                     SSEPackedDouble>, TB, OpSize;
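
// For example (illustration, hypothetical operands): movmskps packs the sign
// bit of each single-precision lane into the low bits of a GPR, so a
// vectorized "any element negative?" test can be written as:
//   movmskps %xmm0, %eax
//   testl    %eax, %eax
//   jne      .Lhas_negative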
def : Pat<(i32 (X86fgetsign FR32:$src)),
          (MOVMSKPSrr32 (COPY_TO_REGCLASS FR32:$src, VR128))>,
          Requires<[UseSSE1]>;
def : Pat<(i64 (X86fgetsign FR32:$src)),
          (MOVMSKPSrr64 (COPY_TO_REGCLASS FR32:$src, VR128))>,
          Requires<[UseSSE1]>;
def : Pat<(i32 (X86fgetsign FR64:$src)),
          (MOVMSKPDrr32 (COPY_TO_REGCLASS FR64:$src, VR128))>,
          Requires<[UseSSE2]>;
def : Pat<(i64 (X86fgetsign FR64:$src)),
          (MOVMSKPDrr64 (COPY_TO_REGCLASS FR64:$src, VR128))>,
          Requires<[UseSSE2]>;

//===---------------------------------------------------------------------===//
// SSE2 - Packed Integer Logical Instructions
//===---------------------------------------------------------------------===//

let ExeDomain = SSEPackedInt in { // SSE integer instructions

/// PDI_binop_rm - Simple SSE2 binary operator.
multiclass PDI_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
                        ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
                        X86MemOperand x86memop,
                        OpndItins itins,
                        bit IsCommutable = 0,
                        bit Is2Addr = 1> {
  let isCommutable = IsCommutable in
  def rr : PDI<opc, MRMSrcReg, (outs RC:$dst),
       (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2)))], itins.rr>;
  def rm : PDI<opc, MRMSrcMem, (outs RC:$dst),
       (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (OpVT (OpNode RC:$src1,
                                    (bitconvert (memop_frag addr:$src2)))))],
       itins.rm>;
}
} // ExeDomain = SSEPackedInt

// These are ordered here for pattern ordering requirements with the fp
// versions.

let Predicates = [HasAVX] in {
defm VPAND : PDI_binop_rm<0xDB, "vpand", and, v2i64, VR128, memopv2i64,
                          i128mem, SSE_BIT_ITINS_P, 1, 0>, VEX_4V;
defm VPOR  : PDI_binop_rm<0xEB, "vpor" , or, v2i64, VR128, memopv2i64,
                          i128mem, SSE_BIT_ITINS_P, 1, 0>, VEX_4V;
defm VPXOR : PDI_binop_rm<0xEF, "vpxor", xor, v2i64, VR128, memopv2i64,
                          i128mem, SSE_BIT_ITINS_P, 1, 0>, VEX_4V;
defm VPANDN : PDI_binop_rm<0xDF, "vpandn", X86andnp, v2i64, VR128, memopv2i64,
                           i128mem, SSE_BIT_ITINS_P, 0, 0>, VEX_4V;
}

let Constraints = "$src1 = $dst" in {
defm PAND : PDI_binop_rm<0xDB, "pand", and, v2i64, VR128, memopv2i64,
                         i128mem, SSE_BIT_ITINS_P, 1>;
defm POR  : PDI_binop_rm<0xEB, "por" , or, v2i64, VR128, memopv2i64,
                         i128mem, SSE_BIT_ITINS_P, 1>;
defm PXOR : PDI_binop_rm<0xEF, "pxor", xor, v2i64, VR128, memopv2i64,
                         i128mem, SSE_BIT_ITINS_P, 1>;
defm PANDN : PDI_binop_rm<0xDF, "pandn", X86andnp, v2i64, VR128, memopv2i64,
                          i128mem, SSE_BIT_ITINS_P, 0>;
} // Constraints = "$src1 = $dst"

let Predicates = [HasAVX2] in {
defm VPANDY : PDI_binop_rm<0xDB, "vpand", and, v4i64, VR256, memopv4i64,
                           i256mem, SSE_BIT_ITINS_P, 1, 0>, VEX_4V, VEX_L;
defm VPORY  : PDI_binop_rm<0xEB, "vpor", or, v4i64, VR256, memopv4i64,
                           i256mem, SSE_BIT_ITINS_P, 1, 0>, VEX_4V, VEX_L;
defm VPXORY : PDI_binop_rm<0xEF, "vpxor", xor, v4i64, VR256, memopv4i64,
                           i256mem, SSE_BIT_ITINS_P, 1, 0>, VEX_4V, VEX_L;
defm VPANDNY : PDI_binop_rm<0xDF, "vpandn", X86andnp, v4i64, VR256, memopv4i64,
                            i256mem, SSE_BIT_ITINS_P, 0, 0>, VEX_4V, VEX_L;
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Logical Instructions
//===----------------------------------------------------------------------===//

/// sse12_fp_alias_pack_logical - SSE 1 & 2 aliased packed FP logical ops
///
multiclass sse12_fp_alias_pack_logical<bits<8> opc, string OpcodeStr,
                                       SDNode OpNode, OpndItins itins> {
  defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
              FR32, f32, f128mem, memopfsf32, SSEPackedSingle, itins, 0>,
              TB, VEX_4V;

  defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
        FR64, f64, f128mem, memopfsf64, SSEPackedDouble, itins, 0>,
        TB, OpSize, VEX_4V;

  let Constraints = "$src1 = $dst" in {
    defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, FR32,
                f32, f128mem, memopfsf32, SSEPackedSingle, itins>,
                TB;

    defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, FR64,
                f64, f128mem, memopfsf64, SSEPackedDouble, itins>,
                TB, OpSize;
  }
}

// Alias bitwise logical operations using SSE logical ops on packed FP values.
defm FsAND  : sse12_fp_alias_pack_logical<0x54, "and", X86fand,
              SSE_BIT_ITINS_P>;
defm FsOR   : sse12_fp_alias_pack_logical<0x56, "or", X86for,
              SSE_BIT_ITINS_P>;
defm FsXOR  : sse12_fp_alias_pack_logical<0x57, "xor", X86fxor,
              SSE_BIT_ITINS_P>;

let neverHasSideEffects = 1, Pattern = []<dag>, isCommutable = 0 in
  defm FsANDN : sse12_fp_alias_pack_logical<0x55, "andn", undef,
                SSE_BIT_ITINS_P>;

/// sse12_fp_packed_logical - SSE 1 & 2 packed FP logical ops
///
multiclass sse12_fp_packed_logical<bits<8> opc, string OpcodeStr,
                                   SDNode OpNode> {
  // In AVX no need to add a pattern for 128-bit logical rr ps, because they
  // are all promoted to v2i64, and the patterns are covered by the int
  // version. This is needed in SSE only, because v2i64 isn't supported on
  // SSE1, but only on SSE2.
2764 defm V#NAME#PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
2765 !strconcat(OpcodeStr, "ps"), f128mem, [],
2766 [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
2767 (memopv2i64 addr:$src2)))], 0, 1>, TB, VEX_4V;
2769 defm V#NAME#PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
2770 !strconcat(OpcodeStr, "pd"), f128mem,
2771 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
2772 (bc_v2i64 (v2f64 VR128:$src2))))],
2773 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
2774 (memopv2i64 addr:$src2)))], 0>,
2776 let Constraints = "$src1 = $dst" in {
2777 defm PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
2778 !strconcat(OpcodeStr, "ps"), f128mem,
2779 [(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))],
2780 [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
2781 (memopv2i64 addr:$src2)))]>, TB;
2783 defm PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
2784 !strconcat(OpcodeStr, "pd"), f128mem,
2785 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
2786 (bc_v2i64 (v2f64 VR128:$src2))))],
2787 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
2788 (memopv2i64 addr:$src2)))]>, TB, OpSize;
2792 /// sse12_fp_packed_logical_y - AVX 256-bit SSE 1 & 2 logical ops forms
2794 multiclass sse12_fp_packed_logical_y<bits<8> opc, string OpcodeStr,
2796 defm PSY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedSingle,
2797 !strconcat(OpcodeStr, "ps"), f256mem,
2798 [(set VR256:$dst, (v4i64 (OpNode VR256:$src1, VR256:$src2)))],
2799 [(set VR256:$dst, (OpNode (bc_v4i64 (v8f32 VR256:$src1)),
2800 (memopv4i64 addr:$src2)))], 0>, TB, VEX_4V, VEX_L;
2802 defm PDY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedDouble,
2803 !strconcat(OpcodeStr, "pd"), f256mem,
2804 [(set VR256:$dst, (OpNode (bc_v4i64 (v4f64 VR256:$src1)),
2805 (bc_v4i64 (v4f64 VR256:$src2))))],
2806 [(set VR256:$dst, (OpNode (bc_v4i64 (v4f64 VR256:$src1)),
2807 (memopv4i64 addr:$src2)))], 0>,
2808 TB, OpSize, VEX_4V, VEX_L;
2811 // AVX 256-bit packed logical ops forms
2812 defm VAND : sse12_fp_packed_logical_y<0x54, "and", and>;
2813 defm VOR : sse12_fp_packed_logical_y<0x56, "or", or>;
2814 defm VXOR : sse12_fp_packed_logical_y<0x57, "xor", xor>;
2815 defm VANDN : sse12_fp_packed_logical_y<0x55, "andn", X86andnp>;
2817 defm AND : sse12_fp_packed_logical<0x54, "and", and>;
2818 defm OR : sse12_fp_packed_logical<0x56, "or", or>;
2819 defm XOR : sse12_fp_packed_logical<0x57, "xor", xor>;
2820 let isCommutable = 0 in
2821 defm ANDN : sse12_fp_packed_logical<0x55, "andn", X86andnp>;
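
// Note (informational): X86andnp follows the hardware semantics of
// ANDNPS/ANDNPD and PANDN: the result is (NOT src1) AND src2, i.e. the first
// operand is the one that gets complemented, which is why the andn forms are
// never marked commutable.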

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Arithmetic Instructions
//===----------------------------------------------------------------------===//

/// basic_sse12_fp_binop_xxx - SSE 1 & 2 binops come in both scalar and
/// vector forms.
///
/// In addition, we also have a special variant of the scalar form here to
/// represent the associated intrinsic operation. This form is unlike the
/// plain scalar form, in that it takes an entire vector (instead of a scalar)
/// and leaves the top elements unmodified (therefore these cannot be commuted).
///
/// These three forms can each be reg+reg or reg+mem.
///
/// FIXME: once all 256-bit intrinsics are matched, cleanup and refactor those
/// classes below.

multiclass basic_sse12_fp_binop_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
                                  SizeItins itins,
                                  bit Is2Addr = 1> {
  defm SS : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "ss"),
                            OpNode, FR32, f32mem,
                            itins.s, Is2Addr>, XS;
  defm SD : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "sd"),
                            OpNode, FR64, f64mem,
                            itins.d, Is2Addr>, XD;
}

multiclass basic_sse12_fp_binop_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
                                  SizeItins itins,
                                  bit Is2Addr = 1> {
  defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR128,
              v4f32, f128mem, memopv4f32, SSEPackedSingle, itins.s, Is2Addr>,
              TB;
  defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR128,
              v2f64, f128mem, memopv2f64, SSEPackedDouble, itins.d, Is2Addr>,
              TB, OpSize;
}

multiclass basic_sse12_fp_binop_p_y<bits<8> opc, string OpcodeStr,
                                    SDNode OpNode,
                                    SizeItins itins> {
  defm PSY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR256,
                v8f32, f256mem, memopv8f32, SSEPackedSingle, itins.s, 0>,
                TB, VEX_L;
  defm PDY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR256,
                v4f64, f256mem, memopv4f64, SSEPackedDouble, itins.d, 0>,
                TB, OpSize, VEX_L;
}

multiclass basic_sse12_fp_binop_s_int<bits<8> opc, string OpcodeStr,
                                      SizeItins itins,
                                      bit Is2Addr = 1> {
  defm SS : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
                   !strconcat(OpcodeStr, "ss"), "", "_ss", ssmem, sse_load_f32,
                   itins.s, Is2Addr>, XS;
  defm SD : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
                   !strconcat(OpcodeStr, "sd"), "2", "_sd", sdmem, sse_load_f64,
                   itins.d, Is2Addr>, XD;
}

multiclass basic_sse12_fp_binop_p_int<bits<8> opc, string OpcodeStr,
                                      SizeItins itins,
                                      bit Is2Addr = 1> {
  defm PS : sse12_fp_packed_int<opc, OpcodeStr, VR128,
                !strconcat(OpcodeStr, "ps"), "sse", "_ps", f128mem, memopv4f32,
                SSEPackedSingle, itins.s, Is2Addr>,
                TB;

  defm PD : sse12_fp_packed_int<opc, OpcodeStr, VR128,
                !strconcat(OpcodeStr, "pd"), "sse2", "_pd", f128mem, memopv2f64,
                SSEPackedDouble, itins.d, Is2Addr>,
                TB, OpSize;
}

multiclass basic_sse12_fp_binop_p_y_int<bits<8> opc, string OpcodeStr,
                                        SizeItins itins> {
  defm PSY : sse12_fp_packed_int<opc, OpcodeStr, VR256,
                !strconcat(OpcodeStr, "ps"), "avx", "_ps_256", f256mem, memopv8f32,
                SSEPackedSingle, itins.s, 0>, TB, VEX_L;

  defm PDY : sse12_fp_packed_int<opc, OpcodeStr, VR256,
                !strconcat(OpcodeStr, "pd"), "avx", "_pd_256", f256mem, memopv4f64,
                SSEPackedDouble, itins.d, 0>, TB, OpSize, VEX_L;
}

// Binary Arithmetic instructions
defm VADD : basic_sse12_fp_binop_s<0x58, "add", fadd, SSE_ALU_ITINS_S, 0>,
            basic_sse12_fp_binop_s_int<0x58, "add", SSE_ALU_ITINS_S, 0>,
            VEX_4V, VEX_LIG;
defm VADD : basic_sse12_fp_binop_p<0x58, "add", fadd, SSE_ALU_ITINS_P, 0>,
            basic_sse12_fp_binop_p_y<0x58, "add", fadd, SSE_ALU_ITINS_P>,
            VEX_4V;
defm VMUL : basic_sse12_fp_binop_s<0x59, "mul", fmul, SSE_MUL_ITINS_S, 0>,
            basic_sse12_fp_binop_s_int<0x59, "mul", SSE_MUL_ITINS_S, 0>,
            VEX_4V, VEX_LIG;
defm VMUL : basic_sse12_fp_binop_p<0x59, "mul", fmul, SSE_MUL_ITINS_P, 0>,
            basic_sse12_fp_binop_p_y<0x59, "mul", fmul, SSE_MUL_ITINS_P>,
            VEX_4V;

let isCommutable = 0 in {
  defm VSUB : basic_sse12_fp_binop_s<0x5C, "sub", fsub, SSE_ALU_ITINS_S, 0>,
              basic_sse12_fp_binop_s_int<0x5C, "sub", SSE_ALU_ITINS_S, 0>,
              VEX_4V, VEX_LIG;
  defm VSUB : basic_sse12_fp_binop_p<0x5C, "sub", fsub, SSE_ALU_ITINS_P, 0>,
              basic_sse12_fp_binop_p_y<0x5C, "sub", fsub, SSE_ALU_ITINS_P>,
              VEX_4V;
  defm VDIV : basic_sse12_fp_binop_s<0x5E, "div", fdiv, SSE_DIV_ITINS_S, 0>,
              basic_sse12_fp_binop_s_int<0x5E, "div", SSE_DIV_ITINS_S, 0>,
              VEX_4V, VEX_LIG;
  defm VDIV : basic_sse12_fp_binop_p<0x5E, "div", fdiv, SSE_DIV_ITINS_P, 0>,
              basic_sse12_fp_binop_p_y<0x5E, "div", fdiv, SSE_DIV_ITINS_P>,
              VEX_4V;
  defm VMAX : basic_sse12_fp_binop_s<0x5F, "max", X86fmax, SSE_ALU_ITINS_S, 0>,
              basic_sse12_fp_binop_s_int<0x5F, "max", SSE_ALU_ITINS_S, 0>,
              VEX_4V, VEX_LIG;
  defm VMAX : basic_sse12_fp_binop_p<0x5F, "max", X86fmax, SSE_ALU_ITINS_P, 0>,
              basic_sse12_fp_binop_p_int<0x5F, "max", SSE_ALU_ITINS_P, 0>,
              basic_sse12_fp_binop_p_y<0x5F, "max", X86fmax, SSE_ALU_ITINS_P>,
              basic_sse12_fp_binop_p_y_int<0x5F, "max", SSE_ALU_ITINS_P>,
              VEX_4V;
  defm VMIN : basic_sse12_fp_binop_s<0x5D, "min", X86fmin, SSE_ALU_ITINS_S, 0>,
              basic_sse12_fp_binop_s_int<0x5D, "min", SSE_ALU_ITINS_S, 0>,
              VEX_4V, VEX_LIG;
  defm VMIN : basic_sse12_fp_binop_p<0x5D, "min", X86fmin, SSE_ALU_ITINS_P, 0>,
              basic_sse12_fp_binop_p_int<0x5D, "min", SSE_ALU_ITINS_P, 0>,
              basic_sse12_fp_binop_p_y_int<0x5D, "min", SSE_ALU_ITINS_P>,
              basic_sse12_fp_binop_p_y<0x5D, "min", X86fmin, SSE_ALU_ITINS_P>,
              VEX_4V;
}

let Constraints = "$src1 = $dst" in {
  defm ADD : basic_sse12_fp_binop_s<0x58, "add", fadd, SSE_ALU_ITINS_S>,
             basic_sse12_fp_binop_p<0x58, "add", fadd, SSE_ALU_ITINS_P>,
             basic_sse12_fp_binop_s_int<0x58, "add", SSE_ALU_ITINS_S>;
  defm MUL : basic_sse12_fp_binop_s<0x59, "mul", fmul, SSE_MUL_ITINS_S>,
             basic_sse12_fp_binop_p<0x59, "mul", fmul, SSE_MUL_ITINS_P>,
             basic_sse12_fp_binop_s_int<0x59, "mul", SSE_MUL_ITINS_S>;

  let isCommutable = 0 in {
    defm SUB : basic_sse12_fp_binop_s<0x5C, "sub", fsub, SSE_ALU_ITINS_S>,
               basic_sse12_fp_binop_p<0x5C, "sub", fsub, SSE_ALU_ITINS_P>,
               basic_sse12_fp_binop_s_int<0x5C, "sub", SSE_ALU_ITINS_S>;
    defm DIV : basic_sse12_fp_binop_s<0x5E, "div", fdiv, SSE_DIV_ITINS_S>,
               basic_sse12_fp_binop_p<0x5E, "div", fdiv, SSE_DIV_ITINS_P>,
               basic_sse12_fp_binop_s_int<0x5E, "div", SSE_DIV_ITINS_S>;
    defm MAX : basic_sse12_fp_binop_s<0x5F, "max", X86fmax, SSE_ALU_ITINS_S>,
               basic_sse12_fp_binop_p<0x5F, "max", X86fmax, SSE_ALU_ITINS_P>,
               basic_sse12_fp_binop_s_int<0x5F, "max", SSE_ALU_ITINS_S>,
               basic_sse12_fp_binop_p_int<0x5F, "max", SSE_ALU_ITINS_P>;
    defm MIN : basic_sse12_fp_binop_s<0x5D, "min", X86fmin, SSE_ALU_ITINS_S>,
               basic_sse12_fp_binop_p<0x5D, "min", X86fmin, SSE_ALU_ITINS_P>,
               basic_sse12_fp_binop_s_int<0x5D, "min", SSE_ALU_ITINS_S>,
               basic_sse12_fp_binop_p_int<0x5D, "min", SSE_ALU_ITINS_P>;
  }
}

let isCodeGenOnly = 1 in {
  defm VMAXC: basic_sse12_fp_binop_s<0x5F, "max", X86fmaxc, SSE_ALU_ITINS_S, 0>,
              VEX_4V, VEX_LIG;
  defm VMAXC: basic_sse12_fp_binop_p<0x5F, "max", X86fmaxc, SSE_ALU_ITINS_P, 0>,
              basic_sse12_fp_binop_p_y<0x5F, "max", X86fmaxc, SSE_ALU_ITINS_P>, VEX_4V;
  defm VMINC: basic_sse12_fp_binop_s<0x5D, "min", X86fminc, SSE_ALU_ITINS_S, 0>,
              VEX_4V, VEX_LIG;
  defm VMINC: basic_sse12_fp_binop_p<0x5D, "min", X86fminc, SSE_ALU_ITINS_P, 0>,
              basic_sse12_fp_binop_p_y<0x5D, "min", X86fminc, SSE_ALU_ITINS_P>, VEX_4V;
  let Constraints = "$src1 = $dst" in {
    defm MAXC: basic_sse12_fp_binop_s<0x5F, "max", X86fmaxc, SSE_ALU_ITINS_S>,
               basic_sse12_fp_binop_p<0x5F, "max", X86fmaxc, SSE_ALU_ITINS_P>;
    defm MINC: basic_sse12_fp_binop_s<0x5D, "min", X86fminc, SSE_ALU_ITINS_S>,
               basic_sse12_fp_binop_p<0x5D, "min", X86fminc, SSE_ALU_ITINS_P>;
  }
}
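
// Note (informational): the *C variants above select on X86fmaxc/X86fminc,
// the commutable forms of fmax/fmin that can be used when NaN and signed-zero
// ordering do not matter (e.g. under relaxed FP math). Being isCodeGenOnly,
// they introduce no new assembler mnemonics; they encode identically to the
// plain MAX*/MIN* instructions.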
/// In addition, we also have a special variant of the scalar form here to
/// represent the associated intrinsic operation. This form is unlike the
/// plain scalar form, in that it takes an entire vector (instead of a
/// scalar) and leaves the top elements undefined.
///
/// And, we have a special variant form for a full-vector intrinsic form.

def SSE_SQRTP : OpndItins<
  IIC_SSE_SQRTP_RR, IIC_SSE_SQRTP_RM
>;

def SSE_SQRTS : OpndItins<
  IIC_SSE_SQRTS_RR, IIC_SSE_SQRTS_RM
>;

def SSE_RCPP : OpndItins<
  IIC_SSE_RCPP_RR, IIC_SSE_RCPP_RM
>;

def SSE_RCPS : OpndItins<
  IIC_SSE_RCPS_RR, IIC_SSE_RCPS_RM
>;

/// sse1_fp_unop_s - SSE1 unops in scalar form.
multiclass sse1_fp_unop_s<bits<8> opc, string OpcodeStr,
                          SDNode OpNode, Intrinsic F32Int, OpndItins itins> {
  def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
                !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
                [(set FR32:$dst, (OpNode FR32:$src))]>;
  // For scalar unary operations, fold a load into the operation
  // only in OptForSize mode. It eliminates an instruction, but it also
  // eliminates a whole-register clobber (the load), so it introduces a
  // partial register update condition.
  def SSm : I<opc, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
              !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
              [(set FR32:$dst, (OpNode (load addr:$src)))], itins.rm>, XS,
              Requires<[UseSSE1, OptForSize]>;
  def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                    !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (F32Int VR128:$src))], itins.rr>;
  def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins ssmem:$src),
                    !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (F32Int sse_load_f32:$src))], itins.rm>;
}

/// sse1_fp_unop_s_avx - AVX SSE1 unops in scalar form.
multiclass sse1_fp_unop_s_avx<bits<8> opc, string OpcodeStr> {
  def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
                !strconcat(OpcodeStr,
                           "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
  let mayLoad = 1 in {
  def SSm : SSI<opc, MRMSrcMem, (outs FR32:$dst), (ins FR32:$src1,f32mem:$src2),
                !strconcat(OpcodeStr,
                           "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
  def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst),
                    (ins VR128:$src1, ssmem:$src2),
                    !strconcat(OpcodeStr,
                               "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
  }
}
/// sse1_fp_unop_p - SSE1 unops in packed form.
multiclass sse1_fp_unop_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
                          OpndItins itins> {
  def PSr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
                [(set VR128:$dst, (v4f32 (OpNode VR128:$src)))], itins.rr>;
  def PSm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
                [(set VR128:$dst, (OpNode (memopv4f32 addr:$src)))], itins.rm>;
}

/// sse1_fp_unop_p_y - AVX 256-bit SSE1 unops in packed form.
multiclass sse1_fp_unop_p_y<bits<8> opc, string OpcodeStr, SDNode OpNode,
                            OpndItins itins> {
  def PSYr : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
                 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
                 [(set VR256:$dst, (v8f32 (OpNode VR256:$src)))],
                 itins.rr>;
  def PSYm : PSI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
                 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
                 [(set VR256:$dst, (OpNode (memopv8f32 addr:$src)))],
                 itins.rm>;
}

/// sse1_fp_unop_p_int - SSE1 intrinsic unops in packed forms.
multiclass sse1_fp_unop_p_int<bits<8> opc, string OpcodeStr,
                              Intrinsic V4F32Int, OpndItins itins> {
  def PSr_Int : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                    !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (V4F32Int VR128:$src))],
                    itins.rr>;
  def PSm_Int : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                    !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (V4F32Int (memopv4f32 addr:$src)))],
                    itins.rm>;
}

/// sse1_fp_unop_p_y_int - AVX 256-bit intrinsic unops in packed forms.
multiclass sse1_fp_unop_p_y_int<bits<8> opc, string OpcodeStr,
                                Intrinsic V4F32Int, OpndItins itins> {
  def PSYr_Int : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
                     !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
                     [(set VR256:$dst, (V4F32Int VR256:$src))],
                     itins.rr>;
  def PSYm_Int : PSI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
                     !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
                     [(set VR256:$dst, (V4F32Int (memopv8f32 addr:$src)))],
                     itins.rm>;
}

/// sse2_fp_unop_s - SSE2 unops in scalar form.
multiclass sse2_fp_unop_s<bits<8> opc, string OpcodeStr,
                          SDNode OpNode, Intrinsic F64Int, OpndItins itins> {
  def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
                !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
                [(set FR64:$dst, (OpNode FR64:$src))], itins.rr>;
  // See the comments in sse1_fp_unop_s for why this is OptForSize.
  def SDm : I<opc, MRMSrcMem, (outs FR64:$dst), (ins f64mem:$src),
              !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
              [(set FR64:$dst, (OpNode (load addr:$src)))], itins.rm>, XD,
              Requires<[UseSSE2, OptForSize]>;
  def SDr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                    !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (F64Int VR128:$src))], itins.rr>;
  def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins sdmem:$src),
                    !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (F64Int sse_load_f64:$src))], itins.rm>;
}

/// sse2_fp_unop_s_avx - AVX SSE2 unops in scalar form.
let hasSideEffects = 0 in
multiclass sse2_fp_unop_s_avx<bits<8> opc, string OpcodeStr> {
  def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
                !strconcat(OpcodeStr,
                           "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
  let mayLoad = 1 in {
  def SDm : SDI<opc, MRMSrcMem, (outs FR64:$dst), (ins FR64:$src1,f64mem:$src2),
                !strconcat(OpcodeStr,
                           "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
  def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst),
                    (ins VR128:$src1, sdmem:$src2),
                    !strconcat(OpcodeStr,
                               "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
  }
}

/// sse2_fp_unop_p - SSE2 unops in vector forms.
multiclass sse2_fp_unop_p<bits<8> opc, string OpcodeStr,
                          SDNode OpNode, OpndItins itins> {
  def PDr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
                [(set VR128:$dst, (v2f64 (OpNode VR128:$src)))], itins.rr>;
  def PDm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
                [(set VR128:$dst, (OpNode (memopv2f64 addr:$src)))], itins.rm>;
}

/// sse2_fp_unop_p_y - AVX SSE2 256-bit unops in vector forms.
multiclass sse2_fp_unop_p_y<bits<8> opc, string OpcodeStr, SDNode OpNode,
                            OpndItins itins> {
  def PDYr : PDI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
                 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
                 [(set VR256:$dst, (v4f64 (OpNode VR256:$src)))],
                 itins.rr>;
  def PDYm : PDI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
                 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
                 [(set VR256:$dst, (OpNode (memopv4f64 addr:$src)))],
                 itins.rm>;
}

/// sse2_fp_unop_p_int - SSE2 intrinsic unops in vector forms.
multiclass sse2_fp_unop_p_int<bits<8> opc, string OpcodeStr,
                              Intrinsic V2F64Int, OpndItins itins> {
  def PDr_Int : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                    !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (V2F64Int VR128:$src))],
                    itins.rr>;
  def PDm_Int : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                    !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (V2F64Int (memopv2f64 addr:$src)))],
                    itins.rm>;
}

/// sse2_fp_unop_p_y_int - AVX 256-bit intrinsic unops in vector forms.
multiclass sse2_fp_unop_p_y_int<bits<8> opc, string OpcodeStr,
                                Intrinsic V2F64Int, OpndItins itins> {
  def PDYr_Int : PDI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
                     !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
                     [(set VR256:$dst, (V2F64Int VR256:$src))],
                     itins.rr>;
  def PDYm_Int : PDI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
                     !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
                     [(set VR256:$dst, (V2F64Int (memopv4f64 addr:$src)))],
                     itins.rm>;
}

let Predicates = [HasAVX] in {

  defm VSQRT  : sse1_fp_unop_s_avx<0x51, "vsqrt">,
                sse2_fp_unop_s_avx<0x51, "vsqrt">, VEX_4V, VEX_LIG;

  defm VSQRT  : sse1_fp_unop_p<0x51, "vsqrt", fsqrt, SSE_SQRTP>,
                sse2_fp_unop_p<0x51, "vsqrt", fsqrt, SSE_SQRTP>,
                sse1_fp_unop_p_y<0x51, "vsqrt", fsqrt, SSE_SQRTP>,
                sse2_fp_unop_p_y<0x51, "vsqrt", fsqrt, SSE_SQRTP>,
                sse1_fp_unop_p_int<0x51, "vsqrt", int_x86_sse_sqrt_ps,
                                   SSE_SQRTP>,
                sse2_fp_unop_p_int<0x51, "vsqrt", int_x86_sse2_sqrt_pd,
                                   SSE_SQRTP>,
                sse1_fp_unop_p_y_int<0x51, "vsqrt", int_x86_avx_sqrt_ps_256,
                                     SSE_SQRTP>,
                sse2_fp_unop_p_y_int<0x51, "vsqrt", int_x86_avx_sqrt_pd_256,
                                     SSE_SQRTP>,
                VEX;

  // Reciprocal approximations. Note that these typically require refinement
  // in order to obtain suitable precision.
  defm VRSQRT : sse1_fp_unop_s_avx<0x52, "vrsqrt">, VEX_4V, VEX_LIG;
  defm VRSQRT : sse1_fp_unop_p<0x52, "vrsqrt", X86frsqrt, SSE_SQRTP>,
                sse1_fp_unop_p_y<0x52, "vrsqrt", X86frsqrt, SSE_SQRTP>,
                sse1_fp_unop_p_y_int<0x52, "vrsqrt", int_x86_avx_rsqrt_ps_256,
                                     SSE_SQRTP>,
                sse1_fp_unop_p_int<0x52, "vrsqrt", int_x86_sse_rsqrt_ps,
                                   SSE_SQRTP>, VEX;

  defm VRCP   : sse1_fp_unop_s_avx<0x53, "vrcp">, VEX_4V, VEX_LIG;
  defm VRCP   : sse1_fp_unop_p<0x53, "vrcp", X86frcp, SSE_RCPP>,
                sse1_fp_unop_p_y<0x53, "vrcp", X86frcp, SSE_RCPP>,
                sse1_fp_unop_p_y_int<0x53, "vrcp", int_x86_avx_rcp_ps_256,
                                     SSE_RCPP>,
                sse1_fp_unop_p_int<0x53, "vrcp", int_x86_sse_rcp_ps,
                                   SSE_RCPP>, VEX;
}

def : Pat<(f32 (fsqrt FR32:$src)),
          (VSQRTSSr (f32 (IMPLICIT_DEF)), FR32:$src)>, Requires<[HasAVX]>;
def : Pat<(f32 (fsqrt (load addr:$src))),
          (VSQRTSSm (f32 (IMPLICIT_DEF)), addr:$src)>,
          Requires<[HasAVX, OptForSize]>;
def : Pat<(f64 (fsqrt FR64:$src)),
          (VSQRTSDr (f64 (IMPLICIT_DEF)), FR64:$src)>, Requires<[HasAVX]>;
def : Pat<(f64 (fsqrt (load addr:$src))),
          (VSQRTSDm (f64 (IMPLICIT_DEF)), addr:$src)>,
          Requires<[HasAVX, OptForSize]>;

def : Pat<(f32 (X86frsqrt FR32:$src)),
          (VRSQRTSSr (f32 (IMPLICIT_DEF)), FR32:$src)>, Requires<[HasAVX]>;
def : Pat<(f32 (X86frsqrt (load addr:$src))),
          (VRSQRTSSm (f32 (IMPLICIT_DEF)), addr:$src)>,
          Requires<[HasAVX, OptForSize]>;

def : Pat<(f32 (X86frcp FR32:$src)),
          (VRCPSSr (f32 (IMPLICIT_DEF)), FR32:$src)>, Requires<[HasAVX]>;
def : Pat<(f32 (X86frcp (load addr:$src))),
          (VRCPSSm (f32 (IMPLICIT_DEF)), addr:$src)>,
          Requires<[HasAVX, OptForSize]>;

let Predicates = [HasAVX] in {
  def : Pat<(int_x86_sse_sqrt_ss VR128:$src),
            (COPY_TO_REGCLASS (VSQRTSSr (f32 (IMPLICIT_DEF)),
                                        (COPY_TO_REGCLASS VR128:$src, FR32)),
                              VR128)>;
  def : Pat<(int_x86_sse_sqrt_ss sse_load_f32:$src),
            (VSQRTSSm_Int (v4f32 (IMPLICIT_DEF)), sse_load_f32:$src)>;

  def : Pat<(int_x86_sse2_sqrt_sd VR128:$src),
            (COPY_TO_REGCLASS (VSQRTSDr (f64 (IMPLICIT_DEF)),
                                        (COPY_TO_REGCLASS VR128:$src, FR64)),
                              VR128)>;
  def : Pat<(int_x86_sse2_sqrt_sd sse_load_f64:$src),
            (VSQRTSDm_Int (v2f64 (IMPLICIT_DEF)), sse_load_f64:$src)>;

  def : Pat<(int_x86_sse_rsqrt_ss VR128:$src),
            (COPY_TO_REGCLASS (VRSQRTSSr (f32 (IMPLICIT_DEF)),
                                         (COPY_TO_REGCLASS VR128:$src, FR32)),
                              VR128)>;
  def : Pat<(int_x86_sse_rsqrt_ss sse_load_f32:$src),
            (VRSQRTSSm_Int (v4f32 (IMPLICIT_DEF)), sse_load_f32:$src)>;

  def : Pat<(int_x86_sse_rcp_ss VR128:$src),
            (COPY_TO_REGCLASS (VRCPSSr (f32 (IMPLICIT_DEF)),
                                       (COPY_TO_REGCLASS VR128:$src, FR32)),
                              VR128)>;
  def : Pat<(int_x86_sse_rcp_ss sse_load_f32:$src),
            (VRCPSSm_Int (v4f32 (IMPLICIT_DEF)), sse_load_f32:$src)>;
}

// Square root.
defm SQRT  : sse1_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse_sqrt_ss,
                            SSE_SQRTS>,
             sse1_fp_unop_p<0x51, "sqrt", fsqrt, SSE_SQRTS>,
             sse1_fp_unop_p_int<0x51, "sqrt", int_x86_sse_sqrt_ps, SSE_SQRTS>,
             sse2_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse2_sqrt_sd,
                            SSE_SQRTS>,
             sse2_fp_unop_p<0x51, "sqrt", fsqrt, SSE_SQRTS>,
             sse2_fp_unop_p_int<0x51, "sqrt", int_x86_sse2_sqrt_pd, SSE_SQRTS>;

/// sse1_fp_unop_rw - SSE1 unops where vector form has a read-write operand.
multiclass sse1_fp_unop_rw<bits<8> opc, string OpcodeStr, SDNode OpNode,
                           Intrinsic F32Int, OpndItins itins> {
  def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
                !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
                [(set FR32:$dst, (OpNode FR32:$src))]>;
  // For scalar unary operations, fold a load into the operation
  // only in OptForSize mode. It eliminates an instruction, but it also
  // eliminates a whole-register clobber (the load), so it introduces a
  // partial register update condition.
  def SSm : I<opc, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
              !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
              [(set FR32:$dst, (OpNode (load addr:$src)))], itins.rm>, XS,
              Requires<[UseSSE1, OptForSize]>;
  let Constraints = "$src1 = $dst" in {
    def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst),
                      (ins VR128:$src1, VR128:$src2),
                      !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
                      [], itins.rr>;
    def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst),
                      (ins VR128:$src1, ssmem:$src2),
                      !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
                      [], itins.rm>;
  }
}

// Reciprocal approximations. Note that these typically require refinement
// in order to obtain suitable precision.
defm RSQRT : sse1_fp_unop_rw<0x52, "rsqrt", X86frsqrt, int_x86_sse_rsqrt_ss,
                             SSE_SQRTS>,
             sse1_fp_unop_p<0x52, "rsqrt", X86frsqrt, SSE_SQRTS>,
             sse1_fp_unop_p_int<0x52, "rsqrt", int_x86_sse_rsqrt_ps,
                                SSE_SQRTS>;
let Predicates = [UseSSE1] in {
  def : Pat<(int_x86_sse_rsqrt_ss VR128:$src),
            (RSQRTSSr_Int VR128:$src, VR128:$src)>;
}

defm RCP   : sse1_fp_unop_rw<0x53, "rcp", X86frcp, int_x86_sse_rcp_ss,
                             SSE_RCPS>,
             sse1_fp_unop_p<0x53, "rcp", X86frcp, SSE_RCPS>,
             sse1_fp_unop_p_int<0x53, "rcp", int_x86_sse_rcp_ps, SSE_RCPS>;
let Predicates = [UseSSE1] in {
  def : Pat<(int_x86_sse_rcp_ss VR128:$src),
            (RCPSSr_Int VR128:$src, VR128:$src)>;
}
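
// A worked refinement example (informational): rsqrtss/rcpss deliver roughly
// 12 bits of precision, so when more accuracy is needed the hardware estimate
// x0 is typically improved with one Newton-Raphson step, e.g. for the
// reciprocal of a:
//   x1 = x0 * (2 - a * x0)
// which approximately doubles the number of correct bits per iteration.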

// There is no f64 version of the reciprocal approximation instructions.

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Non-temporal stores
//===----------------------------------------------------------------------===//

let AddedComplexity = 400 in { // Prefer non-temporal versions
def VMOVNTPSmr : VPSI<0x2B, MRMDestMem, (outs),
                      (ins f128mem:$dst, VR128:$src),
                      "movntps\t{$src, $dst|$dst, $src}",
                      [(alignednontemporalstore (v4f32 VR128:$src),
                                                addr:$dst)],
                      IIC_SSE_MOVNT>, VEX;
def VMOVNTPDmr : VPDI<0x2B, MRMDestMem, (outs),
                      (ins f128mem:$dst, VR128:$src),
                      "movntpd\t{$src, $dst|$dst, $src}",
                      [(alignednontemporalstore (v2f64 VR128:$src),
                                                addr:$dst)],
                      IIC_SSE_MOVNT>, VEX;

let ExeDomain = SSEPackedInt in
def VMOVNTDQmr : VPDI<0xE7, MRMDestMem, (outs),
                      (ins f128mem:$dst, VR128:$src),
                      "movntdq\t{$src, $dst|$dst, $src}",
                      [(alignednontemporalstore (v2i64 VR128:$src),
                                                addr:$dst)],
                      IIC_SSE_MOVNT>, VEX;

def : Pat<(alignednontemporalstore (v2i64 VR128:$src), addr:$dst),
          (VMOVNTDQmr addr:$dst, VR128:$src)>, Requires<[HasAVX]>;

def VMOVNTPSYmr : VPSI<0x2B, MRMDestMem, (outs),
                       (ins f256mem:$dst, VR256:$src),
                       "movntps\t{$src, $dst|$dst, $src}",
                       [(alignednontemporalstore (v8f32 VR256:$src),
                                                 addr:$dst)],
                       IIC_SSE_MOVNT>, VEX, VEX_L;
def VMOVNTPDYmr : VPDI<0x2B, MRMDestMem, (outs),
                       (ins f256mem:$dst, VR256:$src),
                       "movntpd\t{$src, $dst|$dst, $src}",
                       [(alignednontemporalstore (v4f64 VR256:$src),
                                                 addr:$dst)],
                       IIC_SSE_MOVNT>, VEX, VEX_L;
let ExeDomain = SSEPackedInt in
def VMOVNTDQYmr : VPDI<0xE7, MRMDestMem, (outs),
                       (ins f256mem:$dst, VR256:$src),
                       "movntdq\t{$src, $dst|$dst, $src}",
                       [(alignednontemporalstore (v4i64 VR256:$src),
                                                 addr:$dst)],
                       IIC_SSE_MOVNT>, VEX, VEX_L;
}

let AddedComplexity = 400 in { // Prefer non-temporal versions
def MOVNTPSmr : PSI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                    "movntps\t{$src, $dst|$dst, $src}",
                    [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)],
                    IIC_SSE_MOVNT>;
def MOVNTPDmr : PDI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                    "movntpd\t{$src, $dst|$dst, $src}",
                    [(alignednontemporalstore (v2f64 VR128:$src), addr:$dst)],
                    IIC_SSE_MOVNT>;

let ExeDomain = SSEPackedInt in
def MOVNTDQmr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                    "movntdq\t{$src, $dst|$dst, $src}",
                    [(alignednontemporalstore (v2i64 VR128:$src), addr:$dst)],
                    IIC_SSE_MOVNT>;

def : Pat<(alignednontemporalstore (v2i64 VR128:$src), addr:$dst),
          (MOVNTDQmr addr:$dst, VR128:$src)>, Requires<[UseSSE2]>;

// There is no AVX form for instructions below this point.
def MOVNTImr : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
                 "movnti{l}\t{$src, $dst|$dst, $src}",
                 [(nontemporalstore (i32 GR32:$src), addr:$dst)],
                 IIC_SSE_MOVNT>,
               TB, Requires<[HasSSE2]>;
def MOVNTI_64mr : RI<0xC3, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
                     "movnti{q}\t{$src, $dst|$dst, $src}",
                     [(nontemporalstore (i64 GR64:$src), addr:$dst)],
                     IIC_SSE_MOVNT>,
                  TB, Requires<[HasSSE2]>;
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Prefetch and memory fence
//===----------------------------------------------------------------------===//

// Prefetch intrinsic.
let Predicates = [HasSSE1] in {
def PREFETCHT0 : I<0x18, MRM1m, (outs), (ins i8mem:$src),
    "prefetcht0\t$src", [(prefetch addr:$src, imm, (i32 3), (i32 1))],
    IIC_SSE_PREFETCH>, TB;
def PREFETCHT1 : I<0x18, MRM2m, (outs), (ins i8mem:$src),
    "prefetcht1\t$src", [(prefetch addr:$src, imm, (i32 2), (i32 1))],
    IIC_SSE_PREFETCH>, TB;
def PREFETCHT2 : I<0x18, MRM3m, (outs), (ins i8mem:$src),
    "prefetcht2\t$src", [(prefetch addr:$src, imm, (i32 1), (i32 1))],
    IIC_SSE_PREFETCH>, TB;
def PREFETCHNTA : I<0x18, MRM0m, (outs), (ins i8mem:$src),
    "prefetchnta\t$src", [(prefetch addr:$src, imm, (i32 0), (i32 1))],
    IIC_SSE_PREFETCH>, TB;
}
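
// Note (informational): in the prefetch patterns above, the operands after
// the address follow the target-independent prefetch node's
// (rw, locality, cache-type) order. rw is left as a bare 'imm' since the
// instruction form is the same either way, locality 3..0 maps to
// prefetcht0/t1/t2/nta, and cache-type 1 selects the data cache.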
// Flush cache
def CLFLUSH : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
               "clflush\t$src", [(int_x86_sse2_clflush addr:$src)],
               IIC_SSE_PREFETCH>, TB, Requires<[HasSSE2]>;

// Pause. This "instruction" is encoded as "rep; nop", so even though it
// was introduced with SSE2, it's backward compatible.
def PAUSE : I<0x90, RawFrm, (outs), (ins), "pause", [], IIC_SSE_PAUSE>, REP;

// Load, store, and memory fence
def SFENCE : I<0xAE, MRM_F8, (outs), (ins),
               "sfence", [(int_x86_sse_sfence)], IIC_SSE_SFENCE>,
             TB, Requires<[HasSSE1]>;
def LFENCE : I<0xAE, MRM_E8, (outs), (ins),
               "lfence", [(int_x86_sse2_lfence)], IIC_SSE_LFENCE>,
             TB, Requires<[HasSSE2]>;
def MFENCE : I<0xAE, MRM_F0, (outs), (ins),
               "mfence", [(int_x86_sse2_mfence)], IIC_SSE_MFENCE>,
             TB, Requires<[HasSSE2]>;

def : Pat<(X86SFence), (SFENCE)>;
def : Pat<(X86LFence), (LFENCE)>;
def : Pat<(X86MFence), (MFENCE)>;

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Load/Store XCSR register
//===----------------------------------------------------------------------===//

def VLDMXCSR : VPSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
                    "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)],
                    IIC_SSE_LDMXCSR>, VEX;
def VSTMXCSR : VPSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
                    "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)],
                    IIC_SSE_STMXCSR>, VEX;

def LDMXCSR : PSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
                  "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)],
                  IIC_SSE_LDMXCSR>;
def STMXCSR : PSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
                  "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)],
                  IIC_SSE_STMXCSR>;

//===---------------------------------------------------------------------===//
// SSE2 - Move Aligned/Unaligned Packed Integer Instructions
//===---------------------------------------------------------------------===//

let ExeDomain = SSEPackedInt in { // SSE integer instructions

let neverHasSideEffects = 1 in {
def VMOVDQArr  : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                      "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_RR>,
                      VEX;
def VMOVDQAYrr : VPDI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
                      "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_RR>,
                      VEX, VEX_L;

def VMOVDQUrr  : VSSI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                      "movdqu\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVU_P_RR>,
                      VEX;
def VMOVDQUYrr : VSSI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
                      "movdqu\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVU_P_RR>,
                      VEX, VEX_L;
}

let isCodeGenOnly = 1 in {
def VMOVDQArr_REV  : VPDI<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
                          "movdqa\t{$src, $dst|$dst, $src}", [],
                          IIC_SSE_MOVA_P_RR>,
                          VEX;
def VMOVDQAYrr_REV : VPDI<0x7F, MRMDestReg, (outs VR256:$dst), (ins VR256:$src),
                          "movdqa\t{$src, $dst|$dst, $src}", [],
                          IIC_SSE_MOVA_P_RR>, VEX, VEX_L;
def VMOVDQUrr_REV  : VSSI<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
                          "movdqu\t{$src, $dst|$dst, $src}", [],
                          IIC_SSE_MOVU_P_RR>,
                          VEX;
def VMOVDQUYrr_REV : VSSI<0x7F, MRMDestReg, (outs VR256:$dst), (ins VR256:$src),
                          "movdqu\t{$src, $dst|$dst, $src}", [],
                          IIC_SSE_MOVU_P_RR>, VEX, VEX_L;
}

let canFoldAsLoad = 1, mayLoad = 1 in {
def VMOVDQArm  : VPDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                      "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_RM>,
                      VEX;
def VMOVDQAYrm : VPDI<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
                      "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_RM>,
                      VEX, VEX_L;
let Predicates = [HasAVX] in {
  def VMOVDQUrm  : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                     "vmovdqu\t{$src, $dst|$dst, $src}",[], IIC_SSE_MOVU_P_RM>,
                     XS, VEX;
  def VMOVDQUYrm : I<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
                     "vmovdqu\t{$src, $dst|$dst, $src}",[], IIC_SSE_MOVU_P_RM>,
                     XS, VEX, VEX_L;
}
}

let mayStore = 1 in {
def VMOVDQAmr  : VPDI<0x7F, MRMDestMem, (outs),
                      (ins i128mem:$dst, VR128:$src),
                      "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_MR>,
                      VEX;
def VMOVDQAYmr : VPDI<0x7F, MRMDestMem, (outs),
                      (ins i256mem:$dst, VR256:$src),
                      "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_MR>,
                      VEX, VEX_L;
let Predicates = [HasAVX] in {
  def VMOVDQUmr  : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
                     "vmovdqu\t{$src, $dst|$dst, $src}",[], IIC_SSE_MOVU_P_MR>,
                     XS, VEX;
  def VMOVDQUYmr : I<0x7F, MRMDestMem, (outs), (ins i256mem:$dst, VR256:$src),
                     "vmovdqu\t{$src, $dst|$dst, $src}",[], IIC_SSE_MOVU_P_MR>,
                     XS, VEX, VEX_L;
}
}

let neverHasSideEffects = 1 in
def MOVDQArr : PDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                   "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_RR>;

def MOVDQUrr :   I<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                   "movdqu\t{$src, $dst|$dst, $src}",
                   [], IIC_SSE_MOVU_P_RR>, XS, Requires<[UseSSE2]>;

let isCodeGenOnly = 1 in {
def MOVDQArr_REV : PDI<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
                       "movdqa\t{$src, $dst|$dst, $src}", [],
                       IIC_SSE_MOVA_P_RR>;

def MOVDQUrr_REV :   I<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
                       "movdqu\t{$src, $dst|$dst, $src}",
                       [], IIC_SSE_MOVU_P_RR>, XS, Requires<[UseSSE2]>;
}

let canFoldAsLoad = 1, mayLoad = 1 in {
def MOVDQArm : PDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                   "movdqa\t{$src, $dst|$dst, $src}",
                   [/*(set VR128:$dst, (alignedloadv2i64 addr:$src))*/],
                   IIC_SSE_MOVA_P_RM>;
def MOVDQUrm :   I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                   "movdqu\t{$src, $dst|$dst, $src}",
                   [/*(set VR128:$dst, (loadv2i64 addr:$src))*/],
                   IIC_SSE_MOVU_P_RM>,
                 XS, Requires<[UseSSE2]>;
}

let mayStore = 1 in {
def MOVDQAmr : PDI<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
                   "movdqa\t{$src, $dst|$dst, $src}",
                   [/*(alignedstore (v2i64 VR128:$src), addr:$dst)*/],
                   IIC_SSE_MOVA_P_MR>;
def MOVDQUmr :   I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
                   "movdqu\t{$src, $dst|$dst, $src}",
                   [/*(store (v2i64 VR128:$src), addr:$dst)*/],
                   IIC_SSE_MOVU_P_MR>,
                 XS, Requires<[UseSSE2]>;
}

// Intrinsic forms of MOVDQU load and store
def VMOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
                      "vmovdqu\t{$src, $dst|$dst, $src}",
                      [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)],
                      IIC_SSE_MOVU_P_MR>,
                    XS, VEX, Requires<[HasAVX]>;

def MOVDQUmr_Int :  I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
                      "movdqu\t{$src, $dst|$dst, $src}",
                      [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)],
                      IIC_SSE_MOVU_P_MR>,
                    XS, Requires<[UseSSE2]>;

} // ExeDomain = SSEPackedInt

let Predicates = [HasAVX] in {
  def : Pat<(int_x86_avx_storeu_dq_256 addr:$dst, VR256:$src),
            (VMOVDQUYmr addr:$dst, VR256:$src)>;
}
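
// Note (informational): movdqa requires 16-byte aligned memory (32-byte for
// the Y forms) and faults on a misaligned access, while movdqu accepts any
// alignment. The commented-out patterns above indicate that plain vector
// loads/stores are matched through separate patterns elsewhere in the target
// rather than directly on these instruction definitions.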

//===---------------------------------------------------------------------===//
// SSE2 - Packed Integer Arithmetic Instructions
//===---------------------------------------------------------------------===//

def SSE_PMADD : OpndItins<
  IIC_SSE_PMADD, IIC_SSE_PMADD
>;

let ExeDomain = SSEPackedInt in { // SSE integer instructions

multiclass PDI_binop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId,
                            RegisterClass RC, PatFrag memop_frag,
                            X86MemOperand x86memop,
                            OpndItins itins,
                            bit IsCommutable = 0,
                            bit Is2Addr = 1> {
  let isCommutable = IsCommutable in
  def rr : PDI<opc, MRMSrcReg, (outs RC:$dst),
       (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (IntId RC:$src1, RC:$src2))], itins.rr>;
  def rm : PDI<opc, MRMSrcMem, (outs RC:$dst),
       (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (IntId RC:$src1, (bitconvert (memop_frag addr:$src2))))],
       itins.rm>;
}

multiclass PDI_binop_rmi<bits<8> opc, bits<8> opc2, Format ImmForm,
                         string OpcodeStr, SDNode OpNode,
                         SDNode OpNode2, RegisterClass RC,
                         ValueType DstVT, ValueType SrcVT, PatFrag bc_frag,
                         ShiftOpndItins itins,
                         bit Is2Addr = 1> {
  // src2 is always 128-bit
  def rr : PDI<opc, MRMSrcReg, (outs RC:$dst),
       (ins RC:$src1, VR128:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (DstVT (OpNode RC:$src1, (SrcVT VR128:$src2))))],
       itins.rr>;
  def rm : PDI<opc, MRMSrcMem, (outs RC:$dst),
       (ins RC:$src1, i128mem:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (DstVT (OpNode RC:$src1,
                       (bc_frag (memopv2i64 addr:$src2)))))], itins.rm>;
  def ri : PDIi8<opc2, ImmForm, (outs RC:$dst),
       (ins RC:$src1, i32i8imm:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (DstVT (OpNode2 RC:$src1, (i32 imm:$src2))))], itins.ri>;
}

/// PDI_binop_rm2 - Simple SSE2 binary operator with different src and dst types
multiclass PDI_binop_rm2<bits<8> opc, string OpcodeStr, SDNode OpNode,
                         ValueType DstVT, ValueType SrcVT, RegisterClass RC,
                         PatFrag memop_frag, X86MemOperand x86memop,
                         OpndItins itins,
                         bit IsCommutable = 0, bit Is2Addr = 1> {
  let isCommutable = IsCommutable in
  def rr : PDI<opc, MRMSrcReg, (outs RC:$dst),
       (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (DstVT (OpNode (SrcVT RC:$src1), RC:$src2)))]>;
  def rm : PDI<opc, MRMSrcMem, (outs RC:$dst),
       (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (DstVT (OpNode (SrcVT RC:$src1),
                                     (bitconvert (memop_frag addr:$src2)))))]>;
}
} // ExeDomain = SSEPackedInt
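
// For illustration (not an additional definition): PDI_binop_rm2 is used
// below for pmuludq, where the source and destination types differ. The
// instruction multiplies the even-numbered unsigned 32-bit lanes of its
// sources and produces full 64-bit products, i.e. v4i32 x v4i32 -> v2i64
// for the 128-bit form and v8i32 x v8i32 -> v4i64 for the 256-bit form.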
3711 // 128-bit Integer Arithmetic
3713 let Predicates = [HasAVX] in {
3714 defm VPADDB : PDI_binop_rm<0xFC, "vpaddb", add, v16i8, VR128, memopv2i64,
3715 i128mem, SSE_INTALU_ITINS_P, 1, 0 /*3addr*/>,
3717 defm VPADDW : PDI_binop_rm<0xFD, "vpaddw", add, v8i16, VR128, memopv2i64,
3718 i128mem, SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
3719 defm VPADDD : PDI_binop_rm<0xFE, "vpaddd", add, v4i32, VR128, memopv2i64,
3720 i128mem, SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
3721 defm VPADDQ : PDI_binop_rm<0xD4, "vpaddq", add, v2i64, VR128, memopv2i64,
3722 i128mem, SSE_INTALUQ_ITINS_P, 1, 0>, VEX_4V;
3723 defm VPMULLW : PDI_binop_rm<0xD5, "vpmullw", mul, v8i16, VR128, memopv2i64,
3724 i128mem, SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V;
3725 defm VPSUBB : PDI_binop_rm<0xF8, "vpsubb", sub, v16i8, VR128, memopv2i64,
3726 i128mem, SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
3727 defm VPSUBW : PDI_binop_rm<0xF9, "vpsubw", sub, v8i16, VR128, memopv2i64,
3728 i128mem, SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
3729 defm VPSUBD : PDI_binop_rm<0xFA, "vpsubd", sub, v4i32, VR128, memopv2i64,
3730 i128mem, SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
3731 defm VPSUBQ : PDI_binop_rm<0xFB, "vpsubq", sub, v2i64, VR128, memopv2i64,
3732 i128mem, SSE_INTALUQ_ITINS_P, 0, 0>, VEX_4V;
3733 defm VPMULUDQ : PDI_binop_rm2<0xF4, "vpmuludq", X86pmuludq, v2i64, v4i32, VR128,
3734 memopv2i64, i128mem, SSE_INTMUL_ITINS_P, 1, 0>,
3738 defm VPSUBSB : PDI_binop_rm_int<0xE8, "vpsubsb" , int_x86_sse2_psubs_b,
3739 VR128, memopv2i64, i128mem,
3740 SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
3741 defm VPSUBSW : PDI_binop_rm_int<0xE9, "vpsubsw" , int_x86_sse2_psubs_w,
3742 VR128, memopv2i64, i128mem,
3743 SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
3744 defm VPSUBUSB : PDI_binop_rm_int<0xD8, "vpsubusb", int_x86_sse2_psubus_b,
3745 VR128, memopv2i64, i128mem,
3746 SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
3747 defm VPSUBUSW : PDI_binop_rm_int<0xD9, "vpsubusw", int_x86_sse2_psubus_w,
3748 VR128, memopv2i64, i128mem,
3749 SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
3750 defm VPADDSB : PDI_binop_rm_int<0xEC, "vpaddsb" , int_x86_sse2_padds_b,
3751 VR128, memopv2i64, i128mem,
3752 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
3753 defm VPADDSW : PDI_binop_rm_int<0xED, "vpaddsw" , int_x86_sse2_padds_w,
3754 VR128, memopv2i64, i128mem,
3755 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
3756 defm VPADDUSB : PDI_binop_rm_int<0xDC, "vpaddusb", int_x86_sse2_paddus_b,
3757 VR128, memopv2i64, i128mem,
3758 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
3759 defm VPADDUSW : PDI_binop_rm_int<0xDD, "vpaddusw", int_x86_sse2_paddus_w,
3760 VR128, memopv2i64, i128mem,
3761 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
3762 defm VPMULHUW : PDI_binop_rm_int<0xE4, "vpmulhuw", int_x86_sse2_pmulhu_w,
3763 VR128, memopv2i64, i128mem,
3764 SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V;
3765 defm VPMULHW : PDI_binop_rm_int<0xE5, "vpmulhw" , int_x86_sse2_pmulh_w,
3766 VR128, memopv2i64, i128mem,
3767 SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V;
3768 defm VPMADDWD : PDI_binop_rm_int<0xF5, "vpmaddwd", int_x86_sse2_pmadd_wd,
3769 VR128, memopv2i64, i128mem,
3770 SSE_PMADD, 1, 0>, VEX_4V;
3771 defm VPAVGB : PDI_binop_rm_int<0xE0, "vpavgb", int_x86_sse2_pavg_b,
3772 VR128, memopv2i64, i128mem,
3773 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
3774 defm VPAVGW : PDI_binop_rm_int<0xE3, "vpavgw", int_x86_sse2_pavg_w,
3775 VR128, memopv2i64, i128mem,
3776 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
3777 defm VPMINUB : PDI_binop_rm_int<0xDA, "vpminub", int_x86_sse2_pminu_b,
3778 VR128, memopv2i64, i128mem,
3779 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
3780 defm VPMINSW : PDI_binop_rm_int<0xEA, "vpminsw", int_x86_sse2_pmins_w,
3781 VR128, memopv2i64, i128mem,
3782 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
3783 defm VPMAXUB : PDI_binop_rm_int<0xDE, "vpmaxub", int_x86_sse2_pmaxu_b,
3784 VR128, memopv2i64, i128mem,
3785 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
3786 defm VPMAXSW : PDI_binop_rm_int<0xEE, "vpmaxsw", int_x86_sse2_pmaxs_w,
3787 VR128, memopv2i64, i128mem,
3788 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
3789 defm VPSADBW : PDI_binop_rm_int<0xF6, "vpsadbw", int_x86_sse2_psad_bw,
3790 VR128, memopv2i64, i128mem,
3791 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
3794 let Predicates = [HasAVX2] in {
3795 defm VPADDBY : PDI_binop_rm<0xFC, "vpaddb", add, v32i8, VR256, memopv4i64,
3796 i256mem, SSE_INTALU_ITINS_P, 1, 0>, VEX_4V, VEX_L;
3797 defm VPADDWY : PDI_binop_rm<0xFD, "vpaddw", add, v16i16, VR256, memopv4i64,
3798 i256mem, SSE_INTALU_ITINS_P, 1, 0>, VEX_4V, VEX_L;
3799 defm VPADDDY : PDI_binop_rm<0xFE, "vpaddd", add, v8i32, VR256, memopv4i64,
3800 i256mem, SSE_INTALU_ITINS_P, 1, 0>, VEX_4V, VEX_L;
3801 defm VPADDQY : PDI_binop_rm<0xD4, "vpaddq", add, v4i64, VR256, memopv4i64,
3802 i256mem, SSE_INTALUQ_ITINS_P, 1, 0>, VEX_4V, VEX_L;
3803 defm VPMULLWY : PDI_binop_rm<0xD5, "vpmullw", mul, v16i16, VR256, memopv4i64,
3804 i256mem, SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V, VEX_L;
3805 defm VPSUBBY : PDI_binop_rm<0xF8, "vpsubb", sub, v32i8, VR256, memopv4i64,
3806 i256mem, SSE_INTALU_ITINS_P, 0, 0>, VEX_4V, VEX_L;
3807 defm VPSUBWY : PDI_binop_rm<0xF9, "vpsubw", sub, v16i16,VR256, memopv4i64,
3808 i256mem, SSE_INTALU_ITINS_P, 0, 0>, VEX_4V, VEX_L;
3809 defm VPSUBDY : PDI_binop_rm<0xFA, "vpsubd", sub, v8i32, VR256, memopv4i64,
3810 i256mem, SSE_INTALU_ITINS_P, 0, 0>, VEX_4V, VEX_L;
3811 defm VPSUBQY : PDI_binop_rm<0xFB, "vpsubq", sub, v4i64, VR256, memopv4i64,
3812 i256mem, SSE_INTALUQ_ITINS_P, 0, 0>, VEX_4V, VEX_L;
3813 defm VPMULUDQY : PDI_binop_rm2<0xF4, "vpmuludq", X86pmuludq, v4i64, v8i32,
3814 VR256, memopv4i64, i256mem,
3815 SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V, VEX_L;
3818 defm VPSUBSBY : PDI_binop_rm_int<0xE8, "vpsubsb" , int_x86_avx2_psubs_b,
3819 VR256, memopv4i64, i256mem,
3820 SSE_INTALU_ITINS_P, 0, 0>, VEX_4V, VEX_L;
3821 defm VPSUBSWY : PDI_binop_rm_int<0xE9, "vpsubsw" , int_x86_avx2_psubs_w,
3822 VR256, memopv4i64, i256mem,
3823 SSE_INTALU_ITINS_P, 0, 0>, VEX_4V, VEX_L;
3824 defm VPSUBUSBY : PDI_binop_rm_int<0xD8, "vpsubusb", int_x86_avx2_psubus_b,
3825 VR256, memopv4i64, i256mem,
3826 SSE_INTALU_ITINS_P, 0, 0>, VEX_4V, VEX_L;
3827 defm VPSUBUSWY : PDI_binop_rm_int<0xD9, "vpsubusw", int_x86_avx2_psubus_w,
3828 VR256, memopv4i64, i256mem,
3829 SSE_INTALU_ITINS_P, 0, 0>, VEX_4V, VEX_L;
3830 defm VPADDSBY : PDI_binop_rm_int<0xEC, "vpaddsb" , int_x86_avx2_padds_b,
3831 VR256, memopv4i64, i256mem,
3832 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V, VEX_L;
3833 defm VPADDSWY : PDI_binop_rm_int<0xED, "vpaddsw" , int_x86_avx2_padds_w,
3834 VR256, memopv4i64, i256mem,
3835 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V, VEX_L;
3836 defm VPADDUSBY : PDI_binop_rm_int<0xDC, "vpaddusb", int_x86_avx2_paddus_b,
3837 VR256, memopv4i64, i256mem,
3838 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V, VEX_L;
3839 defm VPADDUSWY : PDI_binop_rm_int<0xDD, "vpaddusw", int_x86_avx2_paddus_w,
3840 VR256, memopv4i64, i256mem,
3841 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V, VEX_L;
3842 defm VPMULHUWY : PDI_binop_rm_int<0xE4, "vpmulhuw", int_x86_avx2_pmulhu_w,
3843 VR256, memopv4i64, i256mem,
3844 SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V, VEX_L;
3845 defm VPMULHWY : PDI_binop_rm_int<0xE5, "vpmulhw" , int_x86_avx2_pmulh_w,
3846 VR256, memopv4i64, i256mem,
3847 SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V, VEX_L;
3848 defm VPMADDWDY : PDI_binop_rm_int<0xF5, "vpmaddwd", int_x86_avx2_pmadd_wd,
3849 VR256, memopv4i64, i256mem,
3850 SSE_PMADD, 1, 0>, VEX_4V, VEX_L;
3851 defm VPAVGBY : PDI_binop_rm_int<0xE0, "vpavgb", int_x86_avx2_pavg_b,
3852 VR256, memopv4i64, i256mem,
3853 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V, VEX_L;
3854 defm VPAVGWY : PDI_binop_rm_int<0xE3, "vpavgw", int_x86_avx2_pavg_w,
3855 VR256, memopv4i64, i256mem,
3856 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V, VEX_L;
3857 defm VPMINUBY : PDI_binop_rm_int<0xDA, "vpminub", int_x86_avx2_pminu_b,
3858 VR256, memopv4i64, i256mem,
3859 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V, VEX_L;
3860 defm VPMINSWY : PDI_binop_rm_int<0xEA, "vpminsw", int_x86_avx2_pmins_w,
3861 VR256, memopv4i64, i256mem,
3862 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V, VEX_L;
3863 defm VPMAXUBY : PDI_binop_rm_int<0xDE, "vpmaxub", int_x86_avx2_pmaxu_b,
3864 VR256, memopv4i64, i256mem,
3865 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V, VEX_L;
3866 defm VPMAXSWY : PDI_binop_rm_int<0xEE, "vpmaxsw", int_x86_avx2_pmaxs_w,
3867 VR256, memopv4i64, i256mem,
3868 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V, VEX_L;
3869 defm VPSADBWY : PDI_binop_rm_int<0xF6, "vpsadbw", int_x86_avx2_psad_bw,
3870 VR256, memopv4i64, i256mem,
3871 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V, VEX_L;
3874 let Constraints = "$src1 = $dst" in {
3875 defm PADDB : PDI_binop_rm<0xFC, "paddb", add, v16i8, VR128, memopv2i64,
3876 i128mem, SSE_INTALU_ITINS_P, 1>;
3877 defm PADDW : PDI_binop_rm<0xFD, "paddw", add, v8i16, VR128, memopv2i64,
3878 i128mem, SSE_INTALU_ITINS_P, 1>;
3879 defm PADDD : PDI_binop_rm<0xFE, "paddd", add, v4i32, VR128, memopv2i64,
3880 i128mem, SSE_INTALU_ITINS_P, 1>;
3881 defm PADDQ : PDI_binop_rm<0xD4, "paddq", add, v2i64, VR128, memopv2i64,
3882 i128mem, SSE_INTALUQ_ITINS_P, 1>;
3883 defm PMULLW : PDI_binop_rm<0xD5, "pmullw", mul, v8i16, VR128, memopv2i64,
3884 i128mem, SSE_INTMUL_ITINS_P, 1>;
3885 defm PSUBB : PDI_binop_rm<0xF8, "psubb", sub, v16i8, VR128, memopv2i64,
3886 i128mem, SSE_INTALU_ITINS_P>;
3887 defm PSUBW : PDI_binop_rm<0xF9, "psubw", sub, v8i16, VR128, memopv2i64,
3888 i128mem, SSE_INTALU_ITINS_P>;
3889 defm PSUBD : PDI_binop_rm<0xFA, "psubd", sub, v4i32, VR128, memopv2i64,
3890 i128mem, SSE_INTALU_ITINS_P>;
3891 defm PSUBQ : PDI_binop_rm<0xFB, "psubq", sub, v2i64, VR128, memopv2i64,
3892 i128mem, SSE_INTALUQ_ITINS_P>;
3893 defm PMULUDQ : PDI_binop_rm2<0xF4, "pmuludq", X86pmuludq, v2i64, v4i32, VR128,
3894 memopv2i64, i128mem, SSE_INTMUL_ITINS_P, 1>;
3897 defm PSUBSB : PDI_binop_rm_int<0xE8, "psubsb" , int_x86_sse2_psubs_b,
3898 VR128, memopv2i64, i128mem,
3899 SSE_INTALU_ITINS_P>;
3900 defm PSUBSW : PDI_binop_rm_int<0xE9, "psubsw" , int_x86_sse2_psubs_w,
3901 VR128, memopv2i64, i128mem,
3902 SSE_INTALU_ITINS_P>;
3903 defm PSUBUSB : PDI_binop_rm_int<0xD8, "psubusb", int_x86_sse2_psubus_b,
3904 VR128, memopv2i64, i128mem,
3905 SSE_INTALU_ITINS_P>;
3906 defm PSUBUSW : PDI_binop_rm_int<0xD9, "psubusw", int_x86_sse2_psubus_w,
3907 VR128, memopv2i64, i128mem,
3908 SSE_INTALU_ITINS_P>;
3909 defm PADDSB : PDI_binop_rm_int<0xEC, "paddsb" , int_x86_sse2_padds_b,
3910 VR128, memopv2i64, i128mem,
3911 SSE_INTALU_ITINS_P, 1>;
3912 defm PADDSW : PDI_binop_rm_int<0xED, "paddsw" , int_x86_sse2_padds_w,
3913 VR128, memopv2i64, i128mem,
3914 SSE_INTALU_ITINS_P, 1>;
3915 defm PADDUSB : PDI_binop_rm_int<0xDC, "paddusb", int_x86_sse2_paddus_b,
3916 VR128, memopv2i64, i128mem,
3917 SSE_INTALU_ITINS_P, 1>;
3918 defm PADDUSW : PDI_binop_rm_int<0xDD, "paddusw", int_x86_sse2_paddus_w,
3919 VR128, memopv2i64, i128mem,
3920 SSE_INTALU_ITINS_P, 1>;
3921 defm PMULHUW : PDI_binop_rm_int<0xE4, "pmulhuw", int_x86_sse2_pmulhu_w,
3922 VR128, memopv2i64, i128mem,
3923 SSE_INTMUL_ITINS_P, 1>;
3924 defm PMULHW : PDI_binop_rm_int<0xE5, "pmulhw" , int_x86_sse2_pmulh_w,
3925 VR128, memopv2i64, i128mem,
3926 SSE_INTMUL_ITINS_P, 1>;
3927 defm PMADDWD : PDI_binop_rm_int<0xF5, "pmaddwd", int_x86_sse2_pmadd_wd,
3928 VR128, memopv2i64, i128mem,
3930 defm PAVGB : PDI_binop_rm_int<0xE0, "pavgb", int_x86_sse2_pavg_b,
3931 VR128, memopv2i64, i128mem,
3932 SSE_INTALU_ITINS_P, 1>;
3933 defm PAVGW : PDI_binop_rm_int<0xE3, "pavgw", int_x86_sse2_pavg_w,
3934 VR128, memopv2i64, i128mem,
3935 SSE_INTALU_ITINS_P, 1>;
3936 defm PMINUB : PDI_binop_rm_int<0xDA, "pminub", int_x86_sse2_pminu_b,
3937 VR128, memopv2i64, i128mem,
3938 SSE_INTALU_ITINS_P, 1>;
3939 defm PMINSW : PDI_binop_rm_int<0xEA, "pminsw", int_x86_sse2_pmins_w,
3940 VR128, memopv2i64, i128mem,
3941 SSE_INTALU_ITINS_P, 1>;
3942 defm PMAXUB : PDI_binop_rm_int<0xDE, "pmaxub", int_x86_sse2_pmaxu_b,
3943 VR128, memopv2i64, i128mem,
3944 SSE_INTALU_ITINS_P, 1>;
3945 defm PMAXSW : PDI_binop_rm_int<0xEE, "pmaxsw", int_x86_sse2_pmaxs_w,
3946 VR128, memopv2i64, i128mem,
3947 SSE_INTALU_ITINS_P, 1>;
3948 defm PSADBW : PDI_binop_rm_int<0xF6, "psadbw", int_x86_sse2_psad_bw,
3949 VR128, memopv2i64, i128mem,
3950 SSE_INTALU_ITINS_P, 1>;
3952 } // Constraints = "$src1 = $dst"
3954 //===---------------------------------------------------------------------===//
3955 // SSE2 - Packed Integer Logical Instructions
3956 //===---------------------------------------------------------------------===//
3958 let Predicates = [HasAVX] in {
3959 defm VPSLLW : PDI_binop_rmi<0xF1, 0x71, MRM6r, "vpsllw", X86vshl, X86vshli,
3960 VR128, v8i16, v8i16, bc_v8i16,
3961 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
3962 defm VPSLLD : PDI_binop_rmi<0xF2, 0x72, MRM6r, "vpslld", X86vshl, X86vshli,
3963 VR128, v4i32, v4i32, bc_v4i32,
3964 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
3965 defm VPSLLQ : PDI_binop_rmi<0xF3, 0x73, MRM6r, "vpsllq", X86vshl, X86vshli,
3966 VR128, v2i64, v2i64, bc_v2i64,
3967 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
3969 defm VPSRLW : PDI_binop_rmi<0xD1, 0x71, MRM2r, "vpsrlw", X86vsrl, X86vsrli,
3970 VR128, v8i16, v8i16, bc_v8i16,
3971 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
3972 defm VPSRLD : PDI_binop_rmi<0xD2, 0x72, MRM2r, "vpsrld", X86vsrl, X86vsrli,
3973 VR128, v4i32, v4i32, bc_v4i32,
3974 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
3975 defm VPSRLQ : PDI_binop_rmi<0xD3, 0x73, MRM2r, "vpsrlq", X86vsrl, X86vsrli,
3976 VR128, v2i64, v2i64, bc_v2i64,
3977 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
3979 defm VPSRAW : PDI_binop_rmi<0xE1, 0x71, MRM4r, "vpsraw", X86vsra, X86vsrai,
3980 VR128, v8i16, v8i16, bc_v8i16,
3981 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
3982 defm VPSRAD : PDI_binop_rmi<0xE2, 0x72, MRM4r, "vpsrad", X86vsra, X86vsrai,
3983 VR128, v4i32, v4i32, bc_v4i32,
3984 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
3986 let ExeDomain = SSEPackedInt in {
3987 // 128-bit logical shifts.
3988 def VPSLLDQri : PDIi8<0x73, MRM7r,
3989 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
3990 "vpslldq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
3992 (int_x86_sse2_psll_dq_bs VR128:$src1, imm:$src2))]>,
3994 def VPSRLDQri : PDIi8<0x73, MRM3r,
3995 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
3996 "vpsrldq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
3998 (int_x86_sse2_psrl_dq_bs VR128:$src1, imm:$src2))]>,
4000 // PSRADQri doesn't exist in SSE[1-3].
4002 } // Predicates = [HasAVX]
4004 let Predicates = [HasAVX2] in {
4005 defm VPSLLWY : PDI_binop_rmi<0xF1, 0x71, MRM6r, "vpsllw", X86vshl, X86vshli,
4006 VR256, v16i16, v8i16, bc_v8i16,
4007 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
4008 defm VPSLLDY : PDI_binop_rmi<0xF2, 0x72, MRM6r, "vpslld", X86vshl, X86vshli,
4009 VR256, v8i32, v4i32, bc_v4i32,
4010 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
4011 defm VPSLLQY : PDI_binop_rmi<0xF3, 0x73, MRM6r, "vpsllq", X86vshl, X86vshli,
4012 VR256, v4i64, v2i64, bc_v2i64,
4013 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
4015 defm VPSRLWY : PDI_binop_rmi<0xD1, 0x71, MRM2r, "vpsrlw", X86vsrl, X86vsrli,
4016 VR256, v16i16, v8i16, bc_v8i16,
4017 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
4018 defm VPSRLDY : PDI_binop_rmi<0xD2, 0x72, MRM2r, "vpsrld", X86vsrl, X86vsrli,
4019 VR256, v8i32, v4i32, bc_v4i32,
4020 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
4021 defm VPSRLQY : PDI_binop_rmi<0xD3, 0x73, MRM2r, "vpsrlq", X86vsrl, X86vsrli,
4022 VR256, v4i64, v2i64, bc_v2i64,
4023 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
4025 defm VPSRAWY : PDI_binop_rmi<0xE1, 0x71, MRM4r, "vpsraw", X86vsra, X86vsrai,
4026 VR256, v16i16, v8i16, bc_v8i16,
4027 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
4028 defm VPSRADY : PDI_binop_rmi<0xE2, 0x72, MRM4r, "vpsrad", X86vsra, X86vsrai,
4029 VR256, v8i32, v4i32, bc_v4i32,
4030 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
4032 let ExeDomain = SSEPackedInt in {
4033 // 256-bit logical shifts.
4034 def VPSLLDQYri : PDIi8<0x73, MRM7r,
4035 (outs VR256:$dst), (ins VR256:$src1, i32i8imm:$src2),
4036 "vpslldq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
4038 (int_x86_avx2_psll_dq_bs VR256:$src1, imm:$src2))]>,
4040 def VPSRLDQYri : PDIi8<0x73, MRM3r,
4041 (outs VR256:$dst), (ins VR256:$src1, i32i8imm:$src2),
4042 "vpsrldq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
4044 (int_x86_avx2_psrl_dq_bs VR256:$src1, imm:$src2))]>,
4046 // PSRADQYri doesn't exist in SSE[1-3].
4048 } // Predicates = [HasAVX2]
4050 let Constraints = "$src1 = $dst" in {
4051 defm PSLLW : PDI_binop_rmi<0xF1, 0x71, MRM6r, "psllw", X86vshl, X86vshli,
4052 VR128, v8i16, v8i16, bc_v8i16,
4053 SSE_INTSHIFT_ITINS_P>;
4054 defm PSLLD : PDI_binop_rmi<0xF2, 0x72, MRM6r, "pslld", X86vshl, X86vshli,
4055 VR128, v4i32, v4i32, bc_v4i32,
4056 SSE_INTSHIFT_ITINS_P>;
4057 defm PSLLQ : PDI_binop_rmi<0xF3, 0x73, MRM6r, "psllq", X86vshl, X86vshli,
4058 VR128, v2i64, v2i64, bc_v2i64,
4059 SSE_INTSHIFT_ITINS_P>;
4061 defm PSRLW : PDI_binop_rmi<0xD1, 0x71, MRM2r, "psrlw", X86vsrl, X86vsrli,
4062 VR128, v8i16, v8i16, bc_v8i16,
4063 SSE_INTSHIFT_ITINS_P>;
4064 defm PSRLD : PDI_binop_rmi<0xD2, 0x72, MRM2r, "psrld", X86vsrl, X86vsrli,
4065 VR128, v4i32, v4i32, bc_v4i32,
4066 SSE_INTSHIFT_ITINS_P>;
4067 defm PSRLQ : PDI_binop_rmi<0xD3, 0x73, MRM2r, "psrlq", X86vsrl, X86vsrli,
4068 VR128, v2i64, v2i64, bc_v2i64,
4069 SSE_INTSHIFT_ITINS_P>;
4071 defm PSRAW : PDI_binop_rmi<0xE1, 0x71, MRM4r, "psraw", X86vsra, X86vsrai,
4072 VR128, v8i16, v8i16, bc_v8i16,
4073 SSE_INTSHIFT_ITINS_P>;
4074 defm PSRAD : PDI_binop_rmi<0xE2, 0x72, MRM4r, "psrad", X86vsra, X86vsrai,
4075 VR128, v4i32, v4i32, bc_v4i32,
4076 SSE_INTSHIFT_ITINS_P>;
4078 let ExeDomain = SSEPackedInt in {
4079 // 128-bit logical shifts.
4080 def PSLLDQri : PDIi8<0x73, MRM7r,
4081 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
4082 "pslldq\t{$src2, $dst|$dst, $src2}",
4083 [(set VR128:$dst,
4084 (int_x86_sse2_psll_dq_bs VR128:$src1, imm:$src2))]>;
4085 def PSRLDQri : PDIi8<0x73, MRM3r,
4086 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
4087 "psrldq\t{$src2, $dst|$dst, $src2}",
4088 [(set VR128:$dst,
4089 (int_x86_sse2_psrl_dq_bs VR128:$src1, imm:$src2))]>;
4090 // PSRADQri doesn't exist in SSE[1-3].
4091 }
4092 } // Constraints = "$src1 = $dst"
4094 let Predicates = [HasAVX] in {
4095 def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
4096 (VPSLLDQri VR128:$src1, (BYTE_imm imm:$src2))>;
4097 def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
4098 (VPSRLDQri VR128:$src1, (BYTE_imm imm:$src2))>;
4099 def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
4100 (VPSRLDQri VR128:$src1, (BYTE_imm imm:$src2))>;
4102 // Shift up / down and insert zeros.
4103 def : Pat<(v2i64 (X86vshldq VR128:$src, (i8 imm:$amt))),
4104 (VPSLLDQri VR128:$src, (BYTE_imm imm:$amt))>;
4105 def : Pat<(v2i64 (X86vshrdq VR128:$src, (i8 imm:$amt))),
4106 (VPSRLDQri VR128:$src, (BYTE_imm imm:$amt))>;
4107 }
4109 let Predicates = [HasAVX2] in {
4110 def : Pat<(int_x86_avx2_psll_dq VR256:$src1, imm:$src2),
4111 (VPSLLDQYri VR256:$src1, (BYTE_imm imm:$src2))>;
4112 def : Pat<(int_x86_avx2_psrl_dq VR256:$src1, imm:$src2),
4113 (VPSRLDQYri VR256:$src1, (BYTE_imm imm:$src2))>;
4114 }
4116 let Predicates = [UseSSE2] in {
4117 def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
4118 (PSLLDQri VR128:$src1, (BYTE_imm imm:$src2))>;
4119 def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
4120 (PSRLDQri VR128:$src1, (BYTE_imm imm:$src2))>;
4121 def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
4122 (PSRLDQri VR128:$src1, (BYTE_imm imm:$src2))>;
4124 // Shift up / down and insert zeros.
4125 def : Pat<(v2i64 (X86vshldq VR128:$src, (i8 imm:$amt))),
4126 (PSLLDQri VR128:$src, (BYTE_imm imm:$amt))>;
4127 def : Pat<(v2i64 (X86vshrdq VR128:$src, (i8 imm:$amt))),
4128 (PSRLDQri VR128:$src, (BYTE_imm imm:$amt))>;
4129 }
4131 //===---------------------------------------------------------------------===//
4132 // SSE2 - Packed Integer Comparison Instructions
4133 //===---------------------------------------------------------------------===//
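// Note: pcmpeq*/pcmpgt* set each destination element to all-ones when the
// compare is true and to all-zeros otherwise; pcmpgt* compares are signed.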
4135 let Predicates = [HasAVX] in {
4136 defm VPCMPEQB : PDI_binop_rm<0x74, "vpcmpeqb", X86pcmpeq, v16i8,
4137 VR128, memopv2i64, i128mem,
4138 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
4139 defm VPCMPEQW : PDI_binop_rm<0x75, "vpcmpeqw", X86pcmpeq, v8i16,
4140 VR128, memopv2i64, i128mem,
4141 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
4142 defm VPCMPEQD : PDI_binop_rm<0x76, "vpcmpeqd", X86pcmpeq, v4i32,
4143 VR128, memopv2i64, i128mem,
4144 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
4145 defm VPCMPGTB : PDI_binop_rm<0x64, "vpcmpgtb", X86pcmpgt, v16i8,
4146 VR128, memopv2i64, i128mem,
4147 SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
4148 defm VPCMPGTW : PDI_binop_rm<0x65, "vpcmpgtw", X86pcmpgt, v8i16,
4149 VR128, memopv2i64, i128mem,
4150 SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
4151 defm VPCMPGTD : PDI_binop_rm<0x66, "vpcmpgtd", X86pcmpgt, v4i32,
4152 VR128, memopv2i64, i128mem,
4153 SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
4154 }
4156 let Predicates = [HasAVX2] in {
4157 defm VPCMPEQBY : PDI_binop_rm<0x74, "vpcmpeqb", X86pcmpeq, v32i8,
4158 VR256, memopv4i64, i256mem,
4159 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V, VEX_L;
4160 defm VPCMPEQWY : PDI_binop_rm<0x75, "vpcmpeqw", X86pcmpeq, v16i16,
4161 VR256, memopv4i64, i256mem,
4162 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V, VEX_L;
4163 defm VPCMPEQDY : PDI_binop_rm<0x76, "vpcmpeqd", X86pcmpeq, v8i32,
4164 VR256, memopv4i64, i256mem,
4165 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V, VEX_L;
4166 defm VPCMPGTBY : PDI_binop_rm<0x64, "vpcmpgtb", X86pcmpgt, v32i8,
4167 VR256, memopv4i64, i256mem,
4168 SSE_INTALU_ITINS_P, 0, 0>, VEX_4V, VEX_L;
4169 defm VPCMPGTWY : PDI_binop_rm<0x65, "vpcmpgtw", X86pcmpgt, v16i16,
4170 VR256, memopv4i64, i256mem,
4171 SSE_INTALU_ITINS_P, 0, 0>, VEX_4V, VEX_L;
4172 defm VPCMPGTDY : PDI_binop_rm<0x66, "vpcmpgtd", X86pcmpgt, v8i32,
4173 VR256, memopv4i64, i256mem,
4174 SSE_INTALU_ITINS_P, 0, 0>, VEX_4V, VEX_L;
4175 }
4177 let Constraints = "$src1 = $dst" in {
4178 defm PCMPEQB : PDI_binop_rm<0x74, "pcmpeqb", X86pcmpeq, v16i8,
4179 VR128, memopv2i64, i128mem,
4180 SSE_INTALU_ITINS_P, 1>;
4181 defm PCMPEQW : PDI_binop_rm<0x75, "pcmpeqw", X86pcmpeq, v8i16,
4182 VR128, memopv2i64, i128mem,
4183 SSE_INTALU_ITINS_P, 1>;
4184 defm PCMPEQD : PDI_binop_rm<0x76, "pcmpeqd", X86pcmpeq, v4i32,
4185 VR128, memopv2i64, i128mem,
4186 SSE_INTALU_ITINS_P, 1>;
4187 defm PCMPGTB : PDI_binop_rm<0x64, "pcmpgtb", X86pcmpgt, v16i8,
4188 VR128, memopv2i64, i128mem,
4189 SSE_INTALU_ITINS_P>;
4190 defm PCMPGTW : PDI_binop_rm<0x65, "pcmpgtw", X86pcmpgt, v8i16,
4191 VR128, memopv2i64, i128mem,
4192 SSE_INTALU_ITINS_P>;
4193 defm PCMPGTD : PDI_binop_rm<0x66, "pcmpgtd", X86pcmpgt, v4i32,
4194 VR128, memopv2i64, i128mem,
4195 SSE_INTALU_ITINS_P>;
4196 } // Constraints = "$src1 = $dst"
4198 //===---------------------------------------------------------------------===//
4199 // SSE2 - Packed Integer Pack Instructions
4200 //===---------------------------------------------------------------------===//
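// Note: packss*/packus* narrow each source element to half its width,
// clamping with signed (packss) or unsigned (packus) saturation.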
4202 let Predicates = [HasAVX] in {
4203 defm VPACKSSWB : PDI_binop_rm_int<0x63, "vpacksswb", int_x86_sse2_packsswb_128,
4204 VR128, memopv2i64, i128mem,
4205 SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
4206 defm VPACKSSDW : PDI_binop_rm_int<0x6B, "vpackssdw", int_x86_sse2_packssdw_128,
4207 VR128, memopv2i64, i128mem,
4208 SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
4209 defm VPACKUSWB : PDI_binop_rm_int<0x67, "vpackuswb", int_x86_sse2_packuswb_128,
4210 VR128, memopv2i64, i128mem,
4211 SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
4212 }
4214 let Predicates = [HasAVX2] in {
4215 defm VPACKSSWBY : PDI_binop_rm_int<0x63, "vpacksswb", int_x86_avx2_packsswb,
4216 VR256, memopv4i64, i256mem,
4217 SSE_INTALU_ITINS_P, 0, 0>, VEX_4V, VEX_L;
4218 defm VPACKSSDWY : PDI_binop_rm_int<0x6B, "vpackssdw", int_x86_avx2_packssdw,
4219 VR256, memopv4i64, i256mem,
4220 SSE_INTALU_ITINS_P, 0, 0>, VEX_4V, VEX_L;
4221 defm VPACKUSWBY : PDI_binop_rm_int<0x67, "vpackuswb", int_x86_avx2_packuswb,
4222 VR256, memopv4i64, i256mem,
4223 SSE_INTALU_ITINS_P, 0, 0>, VEX_4V, VEX_L;
4224 }
4226 let Constraints = "$src1 = $dst" in {
4227 defm PACKSSWB : PDI_binop_rm_int<0x63, "packsswb", int_x86_sse2_packsswb_128,
4228 VR128, memopv2i64, i128mem,
4229 SSE_INTALU_ITINS_P>;
4230 defm PACKSSDW : PDI_binop_rm_int<0x6B, "packssdw", int_x86_sse2_packssdw_128,
4231 VR128, memopv2i64, i128mem,
4232 SSE_INTALU_ITINS_P>;
4233 defm PACKUSWB : PDI_binop_rm_int<0x67, "packuswb", int_x86_sse2_packuswb_128,
4234 VR128, memopv2i64, i128mem,
4235 SSE_INTALU_ITINS_P>;
4236 } // Constraints = "$src1 = $dst"
4238 //===---------------------------------------------------------------------===//
4239 // SSE2 - Packed Integer Shuffle Instructions
4240 //===---------------------------------------------------------------------===//
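// Note: the imm8 operand packs one 2-bit source-element index per destination
// element; pshufhw/pshuflw shuffle only the high/low four words and copy the
// other half unchanged.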
4242 let ExeDomain = SSEPackedInt in {
4243 multiclass sse2_pshuffle<string OpcodeStr, ValueType vt, SDNode OpNode> {
4244 def ri : Ii8<0x70, MRMSrcReg,
4245 (outs VR128:$dst), (ins VR128:$src1, i8imm:$src2),
4246 !strconcat(OpcodeStr,
4247 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4248 [(set VR128:$dst, (vt (OpNode VR128:$src1, (i8 imm:$src2))))],
4249 IIC_SSE_PSHUF>;
4250 def mi : Ii8<0x70, MRMSrcMem,
4251 (outs VR128:$dst), (ins i128mem:$src1, i8imm:$src2),
4252 !strconcat(OpcodeStr,
4253 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4255 (vt (OpNode (bitconvert (memopv2i64 addr:$src1)),
4256 (i8 imm:$src2))))],
4257 IIC_SSE_PSHUF>;
4258 }
4260 multiclass sse2_pshuffle_y<string OpcodeStr, ValueType vt, SDNode OpNode> {
4261 def Yri : Ii8<0x70, MRMSrcReg,
4262 (outs VR256:$dst), (ins VR256:$src1, i8imm:$src2),
4263 !strconcat(OpcodeStr,
4264 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4265 [(set VR256:$dst, (vt (OpNode VR256:$src1, (i8 imm:$src2))))]>;
4266 def Ymi : Ii8<0x70, MRMSrcMem,
4267 (outs VR256:$dst), (ins i256mem:$src1, i8imm:$src2),
4268 !strconcat(OpcodeStr,
4269 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4270 [(set VR256:$dst,
4271 (vt (OpNode (bitconvert (memopv4i64 addr:$src1)),
4272 (i8 imm:$src2))))]>;
4273 }
4274 } // ExeDomain = SSEPackedInt
4276 let Predicates = [HasAVX] in {
4277 let AddedComplexity = 5 in
4278 defm VPSHUFD : sse2_pshuffle<"vpshufd", v4i32, X86PShufd>, TB, OpSize, VEX;
4280 // SSE2 with ImmT == Imm8 and XS prefix.
4281 defm VPSHUFHW : sse2_pshuffle<"vpshufhw", v8i16, X86PShufhw>, XS, VEX;
4283 // SSE2 with ImmT == Imm8 and XD prefix.
4284 defm VPSHUFLW : sse2_pshuffle<"vpshuflw", v8i16, X86PShuflw>, XD, VEX;
4286 def : Pat<(v4f32 (X86PShufd (memopv4f32 addr:$src1), (i8 imm:$imm))),
4287 (VPSHUFDmi addr:$src1, imm:$imm)>;
4288 def : Pat<(v4f32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
4289 (VPSHUFDri VR128:$src1, imm:$imm)>;
4290 }
4292 let Predicates = [HasAVX2] in {
4293 defm VPSHUFD : sse2_pshuffle_y<"vpshufd", v8i32, X86PShufd>,
4294 TB, OpSize, VEX, VEX_L;
4295 defm VPSHUFHW : sse2_pshuffle_y<"vpshufhw", v16i16, X86PShufhw>,
4296 XS, VEX, VEX_L;
4297 defm VPSHUFLW : sse2_pshuffle_y<"vpshuflw", v16i16, X86PShuflw>,
4298 XD, VEX, VEX_L;
4299 }
4301 let Predicates = [UseSSE2] in {
4302 let AddedComplexity = 5 in
4303 defm PSHUFD : sse2_pshuffle<"pshufd", v4i32, X86PShufd>, TB, OpSize;
4305 // SSE2 with ImmT == Imm8 and XS prefix.
4306 defm PSHUFHW : sse2_pshuffle<"pshufhw", v8i16, X86PShufhw>, XS;
4308 // SSE2 with ImmT == Imm8 and XD prefix.
4309 defm PSHUFLW : sse2_pshuffle<"pshuflw", v8i16, X86PShuflw>, XD;
4311 def : Pat<(v4f32 (X86PShufd (memopv4f32 addr:$src1), (i8 imm:$imm))),
4312 (PSHUFDmi addr:$src1, imm:$imm)>;
4313 def : Pat<(v4f32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
4314 (PSHUFDri VR128:$src1, imm:$imm)>;
4315 }
4317 //===---------------------------------------------------------------------===//
4318 // SSE2 - Packed Integer Unpack Instructions
4319 //===---------------------------------------------------------------------===//
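// Note: punpckl* interleave elements taken from the low halves of the two
// sources, punpckh* from the high halves.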
4321 let ExeDomain = SSEPackedInt in {
4322 multiclass sse2_unpack<bits<8> opc, string OpcodeStr, ValueType vt,
4323 SDNode OpNode, PatFrag bc_frag, bit Is2Addr = 1> {
4324 def rr : PDI<opc, MRMSrcReg,
4325 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
4326 !if(Is2Addr,
4327 !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
4328 !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4329 [(set VR128:$dst, (vt (OpNode VR128:$src1, VR128:$src2)))],
4330 IIC_SSE_UNPCK>;
4331 def rm : PDI<opc, MRMSrcMem,
4332 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
4333 !if(Is2Addr,
4334 !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
4335 !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4336 [(set VR128:$dst, (OpNode VR128:$src1,
4337 (bc_frag (memopv2i64
4338 addr:$src2))))],
4339 IIC_SSE_UNPCK>;
4340 }
4342 multiclass sse2_unpack_y<bits<8> opc, string OpcodeStr, ValueType vt,
4343 SDNode OpNode, PatFrag bc_frag> {
4344 def Yrr : PDI<opc, MRMSrcReg,
4345 (outs VR256:$dst), (ins VR256:$src1, VR256:$src2),
4346 !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4347 [(set VR256:$dst, (vt (OpNode VR256:$src1, VR256:$src2)))]>;
4348 def Yrm : PDI<opc, MRMSrcMem,
4349 (outs VR256:$dst), (ins VR256:$src1, i256mem:$src2),
4350 !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4351 [(set VR256:$dst, (OpNode VR256:$src1,
4352 (bc_frag (memopv4i64 addr:$src2))))]>;
4353 }
4355 let Predicates = [HasAVX] in {
4356 defm VPUNPCKLBW : sse2_unpack<0x60, "vpunpcklbw", v16i8, X86Unpckl,
4357 bc_v16i8, 0>, VEX_4V;
4358 defm VPUNPCKLWD : sse2_unpack<0x61, "vpunpcklwd", v8i16, X86Unpckl,
4359 bc_v8i16, 0>, VEX_4V;
4360 defm VPUNPCKLDQ : sse2_unpack<0x62, "vpunpckldq", v4i32, X86Unpckl,
4361 bc_v4i32, 0>, VEX_4V;
4362 defm VPUNPCKLQDQ : sse2_unpack<0x6C, "vpunpcklqdq", v2i64, X86Unpckl,
4363 bc_v2i64, 0>, VEX_4V;
4365 defm VPUNPCKHBW : sse2_unpack<0x68, "vpunpckhbw", v16i8, X86Unpckh,
4366 bc_v16i8, 0>, VEX_4V;
4367 defm VPUNPCKHWD : sse2_unpack<0x69, "vpunpckhwd", v8i16, X86Unpckh,
4368 bc_v8i16, 0>, VEX_4V;
4369 defm VPUNPCKHDQ : sse2_unpack<0x6A, "vpunpckhdq", v4i32, X86Unpckh,
4370 bc_v4i32, 0>, VEX_4V;
4371 defm VPUNPCKHQDQ : sse2_unpack<0x6D, "vpunpckhqdq", v2i64, X86Unpckh,
4372 bc_v2i64, 0>, VEX_4V;
4373 }
4375 let Predicates = [HasAVX2] in {
4376 defm VPUNPCKLBW : sse2_unpack_y<0x60, "vpunpcklbw", v32i8, X86Unpckl,
4377 bc_v32i8>, VEX_4V, VEX_L;
4378 defm VPUNPCKLWD : sse2_unpack_y<0x61, "vpunpcklwd", v16i16, X86Unpckl,
4379 bc_v16i16>, VEX_4V, VEX_L;
4380 defm VPUNPCKLDQ : sse2_unpack_y<0x62, "vpunpckldq", v8i32, X86Unpckl,
4381 bc_v8i32>, VEX_4V, VEX_L;
4382 defm VPUNPCKLQDQ : sse2_unpack_y<0x6C, "vpunpcklqdq", v4i64, X86Unpckl,
4383 bc_v4i64>, VEX_4V, VEX_L;
4385 defm VPUNPCKHBW : sse2_unpack_y<0x68, "vpunpckhbw", v32i8, X86Unpckh,
4386 bc_v32i8>, VEX_4V, VEX_L;
4387 defm VPUNPCKHWD : sse2_unpack_y<0x69, "vpunpckhwd", v16i16, X86Unpckh,
4388 bc_v16i16>, VEX_4V, VEX_L;
4389 defm VPUNPCKHDQ : sse2_unpack_y<0x6A, "vpunpckhdq", v8i32, X86Unpckh,
4390 bc_v8i32>, VEX_4V, VEX_L;
4391 defm VPUNPCKHQDQ : sse2_unpack_y<0x6D, "vpunpckhqdq", v4i64, X86Unpckh,
4392 bc_v4i64>, VEX_4V, VEX_L;
4393 }
4395 let Constraints = "$src1 = $dst" in {
4396 defm PUNPCKLBW : sse2_unpack<0x60, "punpcklbw", v16i8, X86Unpckl,
4397 bc_v16i8>;
4398 defm PUNPCKLWD : sse2_unpack<0x61, "punpcklwd", v8i16, X86Unpckl,
4399 bc_v8i16>;
4400 defm PUNPCKLDQ : sse2_unpack<0x62, "punpckldq", v4i32, X86Unpckl,
4401 bc_v4i32>;
4402 defm PUNPCKLQDQ : sse2_unpack<0x6C, "punpcklqdq", v2i64, X86Unpckl,
4403 bc_v2i64>;
4405 defm PUNPCKHBW : sse2_unpack<0x68, "punpckhbw", v16i8, X86Unpckh,
4406 bc_v16i8>;
4407 defm PUNPCKHWD : sse2_unpack<0x69, "punpckhwd", v8i16, X86Unpckh,
4408 bc_v8i16>;
4409 defm PUNPCKHDQ : sse2_unpack<0x6A, "punpckhdq", v4i32, X86Unpckh,
4410 bc_v4i32>;
4411 defm PUNPCKHQDQ : sse2_unpack<0x6D, "punpckhqdq", v2i64, X86Unpckh,
4412 bc_v2i64>;
4413 }
4414 } // ExeDomain = SSEPackedInt
4416 //===---------------------------------------------------------------------===//
4417 // SSE2 - Packed Integer Extract and Insert
4418 //===---------------------------------------------------------------------===//
4420 let ExeDomain = SSEPackedInt in {
4421 multiclass sse2_pinsrw<bit Is2Addr = 1> {
4422 def rri : Ii8<0xC4, MRMSrcReg,
4423 (outs VR128:$dst), (ins VR128:$src1,
4424 GR32:$src2, i32i8imm:$src3),
4425 !if(Is2Addr,
4426 "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
4427 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4428 [(set VR128:$dst,
4429 (X86pinsrw VR128:$src1, GR32:$src2, imm:$src3))], IIC_SSE_PINSRW>;
4430 def rmi : Ii8<0xC4, MRMSrcMem,
4431 (outs VR128:$dst), (ins VR128:$src1,
4432 i16mem:$src2, i32i8imm:$src3),
4433 !if(Is2Addr,
4434 "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
4435 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4436 [(set VR128:$dst,
4437 (X86pinsrw VR128:$src1, (extloadi16 addr:$src2),
4438 imm:$src3))], IIC_SSE_PINSRW>;
4439 }
4441 // Extract
4442 let Predicates = [HasAVX] in
4443 def VPEXTRWri : Ii8<0xC5, MRMSrcReg,
4444 (outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
4445 "vpextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
4446 [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
4447 imm:$src2))]>, TB, OpSize, VEX;
4448 def PEXTRWri : PDIi8<0xC5, MRMSrcReg,
4449 (outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
4450 "pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
4451 [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
4452 imm:$src2))], IIC_SSE_PEXTRW>;
4454 // Insert
4455 let Predicates = [HasAVX] in {
4456 defm VPINSRW : sse2_pinsrw<0>, TB, OpSize, VEX_4V;
4457 def VPINSRWrr64i : Ii8<0xC4, MRMSrcReg, (outs VR128:$dst),
4458 (ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
4459 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
4460 []>, TB, OpSize, VEX_4V;
4461 }
4463 let Constraints = "$src1 = $dst" in
4464 defm PINSRW : sse2_pinsrw, TB, OpSize, Requires<[UseSSE2]>;
4466 } // ExeDomain = SSEPackedInt
4468 //===---------------------------------------------------------------------===//
4469 // SSE2 - Packed Mask Creation
4470 //===---------------------------------------------------------------------===//
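// Note: pmovmskb gathers the most significant bit of each byte of the source
// into the low bits of the GPR destination and zeroes the remaining bits.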
4472 let ExeDomain = SSEPackedInt in {
4474 def VPMOVMSKBrr : VPDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
4475 "pmovmskb\t{$src, $dst|$dst, $src}",
4476 [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))],
4477 IIC_SSE_MOVMSK>, VEX;
4478 def VPMOVMSKBr64r : VPDI<0xD7, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
4479 "pmovmskb\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVMSK>, VEX;
4481 let Predicates = [HasAVX2] in {
4482 def VPMOVMSKBYrr : VPDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR256:$src),
4483 "pmovmskb\t{$src, $dst|$dst, $src}",
4484 [(set GR32:$dst, (int_x86_avx2_pmovmskb VR256:$src))]>, VEX, VEX_L;
4485 def VPMOVMSKBYr64r : VPDI<0xD7, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
4486 "pmovmskb\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
4489 def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
4490 "pmovmskb\t{$src, $dst|$dst, $src}",
4491 [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))],
4492 IIC_SSE_MOVMSK>;
4494 } // ExeDomain = SSEPackedInt
4496 //===---------------------------------------------------------------------===//
4497 // SSE2 - Conditional Store
4498 //===---------------------------------------------------------------------===//
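// Note: maskmovdqu stores only the bytes whose corresponding mask byte has its
// most significant bit set, to the address in EDI/RDI, with a non-temporal
// hint; hence the implicit EDI/RDI uses below.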
4500 let ExeDomain = SSEPackedInt in {
4502 let Uses = [EDI] in
4503 def VMASKMOVDQU : VPDI<0xF7, MRMSrcReg, (outs),
4504 (ins VR128:$src, VR128:$mask),
4505 "maskmovdqu\t{$mask, $src|$src, $mask}",
4506 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)],
4507 IIC_SSE_MASKMOV>, VEX;
4508 let Uses = [RDI] in
4509 def VMASKMOVDQU64 : VPDI<0xF7, MRMSrcReg, (outs),
4510 (ins VR128:$src, VR128:$mask),
4511 "maskmovdqu\t{$mask, $src|$src, $mask}",
4512 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)],
4513 IIC_SSE_MASKMOV>, VEX;
4515 let Uses = [EDI] in
4516 def MASKMOVDQU : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
4517 "maskmovdqu\t{$mask, $src|$src, $mask}",
4518 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)],
4519 IIC_SSE_MASKMOV>;
4520 let Uses = [RDI] in
4521 def MASKMOVDQU64 : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
4522 "maskmovdqu\t{$mask, $src|$src, $mask}",
4523 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)],
4524 IIC_SSE_MASKMOV>;
4526 } // ExeDomain = SSEPackedInt
4528 //===---------------------------------------------------------------------===//
4529 // SSE2 - Move Doubleword
4530 //===---------------------------------------------------------------------===//
4532 //===---------------------------------------------------------------------===//
4533 // Move Int Doubleword to Packed Double Int
4534 //===---------------------------------------------------------------------===//
4535 def VMOVDI2PDIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
4536 "movd\t{$src, $dst|$dst, $src}",
4538 (v4i32 (scalar_to_vector GR32:$src)))], IIC_SSE_MOVDQ>,
4539 VEX;
4540 def VMOVDI2PDIrm : VPDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
4541 "movd\t{$src, $dst|$dst, $src}",
4543 (v4i32 (scalar_to_vector (loadi32 addr:$src))))],
4544 IIC_SSE_MOVDQ>,
4545 VEX;
4546 def VMOV64toPQIrr : VRPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
4547 "mov{d|q}\t{$src, $dst|$dst, $src}",
4549 (v2i64 (scalar_to_vector GR64:$src)))],
4550 IIC_SSE_MOVDQ>, VEX;
4551 def VMOV64toSDrr : VRPDI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
4552 "mov{d|q}\t{$src, $dst|$dst, $src}",
4553 [(set FR64:$dst, (bitconvert GR64:$src))],
4554 IIC_SSE_MOVDQ>, VEX;
4556 def MOVDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
4557 "movd\t{$src, $dst|$dst, $src}",
4559 (v4i32 (scalar_to_vector GR32:$src)))], IIC_SSE_MOVDQ>;
4560 def MOVDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
4561 "movd\t{$src, $dst|$dst, $src}",
4563 (v4i32 (scalar_to_vector (loadi32 addr:$src))))],
4564 IIC_SSE_MOVDQ>;
4565 def MOV64toPQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
4566 "mov{d|q}\t{$src, $dst|$dst, $src}",
4568 (v2i64 (scalar_to_vector GR64:$src)))],
4569 IIC_SSE_MOVDQ>;
4570 def MOV64toSDrr : RPDI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
4571 "mov{d|q}\t{$src, $dst|$dst, $src}",
4572 [(set FR64:$dst, (bitconvert GR64:$src))],
4573 IIC_SSE_MOVDQ>;
4575 //===---------------------------------------------------------------------===//
4576 // Move Int Doubleword to Single Scalar
4577 //===---------------------------------------------------------------------===//
4578 def VMOVDI2SSrr : VPDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
4579 "movd\t{$src, $dst|$dst, $src}",
4580 [(set FR32:$dst, (bitconvert GR32:$src))],
4581 IIC_SSE_MOVDQ>, VEX;
4583 def VMOVDI2SSrm : VPDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
4584 "movd\t{$src, $dst|$dst, $src}",
4585 [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))],
4586 IIC_SSE_MOVDQ>,
4587 VEX;
4588 def MOVDI2SSrr : PDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
4589 "movd\t{$src, $dst|$dst, $src}",
4590 [(set FR32:$dst, (bitconvert GR32:$src))],
4591 IIC_SSE_MOVDQ>;
4593 def MOVDI2SSrm : PDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
4594 "movd\t{$src, $dst|$dst, $src}",
4595 [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))],
4596 IIC_SSE_MOVDQ>;
4598 //===---------------------------------------------------------------------===//
4599 // Move Packed Doubleword Int to Packed Double Int
4600 //===---------------------------------------------------------------------===//
4601 def VMOVPDI2DIrr : VPDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
4602 "movd\t{$src, $dst|$dst, $src}",
4603 [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
4604 (iPTR 0)))], IIC_SSE_MOVD_ToGP>, VEX;
4605 def VMOVPDI2DImr : VPDI<0x7E, MRMDestMem, (outs),
4606 (ins i32mem:$dst, VR128:$src),
4607 "movd\t{$src, $dst|$dst, $src}",
4608 [(store (i32 (vector_extract (v4i32 VR128:$src),
4609 (iPTR 0))), addr:$dst)], IIC_SSE_MOVDQ>,
4610 VEX;
4611 def MOVPDI2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
4612 "movd\t{$src, $dst|$dst, $src}",
4613 [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
4614 (iPTR 0)))], IIC_SSE_MOVD_ToGP>;
4615 def MOVPDI2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR128:$src),
4616 "movd\t{$src, $dst|$dst, $src}",
4617 [(store (i32 (vector_extract (v4i32 VR128:$src),
4618 (iPTR 0))), addr:$dst)],
4619 IIC_SSE_MOVDQ>;
4621 //===---------------------------------------------------------------------===//
4622 // Move Packed Doubleword Int first element to Doubleword Int
4623 //===---------------------------------------------------------------------===//
4624 def VMOVPQIto64rr : I<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
4625 "vmov{d|q}\t{$src, $dst|$dst, $src}",
4626 [(set GR64:$dst, (vector_extract (v2i64 VR128:$src),
4627 (iPTR 0)))],
4628 IIC_SSE_MOVD_ToGP>,
4629 TB, OpSize, VEX, VEX_W, Requires<[HasAVX, In64BitMode]>;
4631 def MOVPQIto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
4632 "mov{d|q}\t{$src, $dst|$dst, $src}",
4633 [(set GR64:$dst, (vector_extract (v2i64 VR128:$src),
4634 (iPTR 0)))],
4635 IIC_SSE_MOVD_ToGP>;
4637 //===---------------------------------------------------------------------===//
4638 // Bitcast FR64 <-> GR64
4639 //===---------------------------------------------------------------------===//
4640 let Predicates = [HasAVX] in
4641 def VMOV64toSDrm : S2SI<0x7E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
4642 "vmovq\t{$src, $dst|$dst, $src}",
4643 [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))]>,
4644 VEX;
4645 def VMOVSDto64rr : VRPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
4646 "mov{d|q}\t{$src, $dst|$dst, $src}",
4647 [(set GR64:$dst, (bitconvert FR64:$src))],
4648 IIC_SSE_MOVDQ>, VEX;
4649 def VMOVSDto64mr : VRPDI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
4650 "movq\t{$src, $dst|$dst, $src}",
4651 [(store (i64 (bitconvert FR64:$src)), addr:$dst)],
4652 IIC_SSE_MOVDQ>, VEX;
4654 def MOV64toSDrm : S2SI<0x7E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
4655 "movq\t{$src, $dst|$dst, $src}",
4656 [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))],
4657 IIC_SSE_MOVDQ>;
4658 def MOVSDto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
4659 "mov{d|q}\t{$src, $dst|$dst, $src}",
4660 [(set GR64:$dst, (bitconvert FR64:$src))],
4661 IIC_SSE_MOVDQ>;
4662 def MOVSDto64mr : RPDI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
4663 "movq\t{$src, $dst|$dst, $src}",
4664 [(store (i64 (bitconvert FR64:$src)), addr:$dst)],
4665 IIC_SSE_MOVDQ>;
4667 //===---------------------------------------------------------------------===//
4668 // Move Scalar Single to Double Int
4669 //===---------------------------------------------------------------------===//
4670 def VMOVSS2DIrr : VPDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
4671 "movd\t{$src, $dst|$dst, $src}",
4672 [(set GR32:$dst, (bitconvert FR32:$src))],
4673 IIC_SSE_MOVD_ToGP>, VEX;
4674 def VMOVSS2DImr : VPDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
4675 "movd\t{$src, $dst|$dst, $src}",
4676 [(store (i32 (bitconvert FR32:$src)), addr:$dst)],
4677 IIC_SSE_MOVDQ>, VEX;
4678 def MOVSS2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
4679 "movd\t{$src, $dst|$dst, $src}",
4680 [(set GR32:$dst, (bitconvert FR32:$src))],
4681 IIC_SSE_MOVD_ToGP>;
4682 def MOVSS2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
4683 "movd\t{$src, $dst|$dst, $src}",
4684 [(store (i32 (bitconvert FR32:$src)), addr:$dst)],
4685 IIC_SSE_MOVDQ>;
4687 //===---------------------------------------------------------------------===//
4688 // Patterns and instructions to describe movd/movq to XMM register zero-extends
4689 //===---------------------------------------------------------------------===//
4690 let AddedComplexity = 15 in {
4691 def VMOVZDI2PDIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
4692 "movd\t{$src, $dst|$dst, $src}",
4693 [(set VR128:$dst, (v4i32 (X86vzmovl
4694 (v4i32 (scalar_to_vector GR32:$src)))))],
4695 IIC_SSE_MOVDQ>, VEX;
4696 def VMOVZQI2PQIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
4697 "mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only
4698 [(set VR128:$dst, (v2i64 (X86vzmovl
4699 (v2i64 (scalar_to_vector GR64:$src)))))],
4700 IIC_SSE_MOVDQ>,
4701 VEX, VEX_W;
4702 }
4703 let AddedComplexity = 15 in {
4704 def MOVZDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
4705 "movd\t{$src, $dst|$dst, $src}",
4706 [(set VR128:$dst, (v4i32 (X86vzmovl
4707 (v4i32 (scalar_to_vector GR32:$src)))))],
4708 IIC_SSE_MOVDQ>;
4709 def MOVZQI2PQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
4710 "mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only
4711 [(set VR128:$dst, (v2i64 (X86vzmovl
4712 (v2i64 (scalar_to_vector GR64:$src)))))],
4713 IIC_SSE_MOVDQ>;
4714 }
4716 let AddedComplexity = 20 in {
4717 def VMOVZDI2PDIrm : VPDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
4718 "movd\t{$src, $dst|$dst, $src}",
4720 (v4i32 (X86vzmovl (v4i32 (scalar_to_vector
4721 (loadi32 addr:$src))))))],
4722 IIC_SSE_MOVDQ>, VEX;
4723 def MOVZDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
4724 "movd\t{$src, $dst|$dst, $src}",
4726 (v4i32 (X86vzmovl (v4i32 (scalar_to_vector
4727 (loadi32 addr:$src))))))],
4728 IIC_SSE_MOVDQ>;
4729 }
4731 let Predicates = [HasAVX] in {
4732 // AVX 128-bit movd/movq instruction write zeros in the high 128-bit part.
4733 let AddedComplexity = 20 in {
4734 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
4735 (VMOVZDI2PDIrm addr:$src)>;
4736 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
4737 (VMOVZDI2PDIrm addr:$src)>;
4738 }
4739 // Use regular 128-bit instructions to match 256-bit scalar_to_vec+zext.
4740 def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
4741 (v4i32 (scalar_to_vector GR32:$src)),(iPTR 0)))),
4742 (SUBREG_TO_REG (i32 0), (VMOVZDI2PDIrr GR32:$src), sub_xmm)>;
4743 def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
4744 (v2i64 (scalar_to_vector GR64:$src)),(iPTR 0)))),
4745 (SUBREG_TO_REG (i64 0), (VMOVZQI2PQIrr GR64:$src), sub_xmm)>;
4746 }
4748 let Predicates = [UseSSE2], AddedComplexity = 20 in {
4749 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
4750 (MOVZDI2PDIrm addr:$src)>;
4751 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
4752 (MOVZDI2PDIrm addr:$src)>;
4753 }
4755 // These are the correct encodings of the instructions so that we know how to
4756 // read correct assembly, even though we continue to emit the wrong ones for
4757 // compatibility with Darwin's buggy assembler.
4758 def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
4759 (MOV64toPQIrr VR128:$dst, GR64:$src), 0>;
4760 def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
4761 (MOV64toSDrr FR64:$dst, GR64:$src), 0>;
4762 def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
4763 (MOVPQIto64rr GR64:$dst, VR128:$src), 0>;
4764 def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
4765 (MOVSDto64rr GR64:$dst, FR64:$src), 0>;
4766 def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
4767 (VMOVZQI2PQIrr VR128:$dst, GR64:$src), 0>;
4768 def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
4769 (MOVZQI2PQIrr VR128:$dst, GR64:$src), 0>;
4771 //===---------------------------------------------------------------------===//
4772 // SSE2 - Move Quadword
4773 //===---------------------------------------------------------------------===//
4775 //===---------------------------------------------------------------------===//
4776 // Move Quadword Int to Packed Quadword Int
4777 //===---------------------------------------------------------------------===//
4778 def VMOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
4779 "vmovq\t{$src, $dst|$dst, $src}",
4781 (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
4782 VEX, Requires<[HasAVX]>;
4783 def MOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
4784 "movq\t{$src, $dst|$dst, $src}",
4786 (v2i64 (scalar_to_vector (loadi64 addr:$src))))],
4787 IIC_SSE_MOVDQ>, XS,
4788 Requires<[UseSSE2]>; // SSE2 instruction with XS Prefix
4790 //===---------------------------------------------------------------------===//
4791 // Move Packed Quadword Int to Quadword Int
4792 //===---------------------------------------------------------------------===//
4793 def VMOVPQI2QImr : VPDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
4794 "movq\t{$src, $dst|$dst, $src}",
4795 [(store (i64 (vector_extract (v2i64 VR128:$src),
4796 (iPTR 0))), addr:$dst)],
4797 IIC_SSE_MOVDQ>, VEX;
4798 def MOVPQI2QImr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
4799 "movq\t{$src, $dst|$dst, $src}",
4800 [(store (i64 (vector_extract (v2i64 VR128:$src),
4801 (iPTR 0))), addr:$dst)],
4802 IIC_SSE_MOVDQ>;
4804 //===---------------------------------------------------------------------===//
4805 // Store / copy lower 64-bits of a XMM register.
4806 //===---------------------------------------------------------------------===//
4807 def VMOVLQ128mr : VPDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
4808 "movq\t{$src, $dst|$dst, $src}",
4809 [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>, VEX;
4810 def MOVLQ128mr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
4811 "movq\t{$src, $dst|$dst, $src}",
4812 [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)],
4813 IIC_SSE_MOVDQ>;
4815 let AddedComplexity = 20 in
4816 def VMOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
4817 "vmovq\t{$src, $dst|$dst, $src}",
4819 (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
4820 (loadi64 addr:$src))))))],
4821 IIC_SSE_MOVDQ>,
4822 XS, VEX, Requires<[HasAVX]>;
4824 let AddedComplexity = 20 in
4825 def MOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
4826 "movq\t{$src, $dst|$dst, $src}",
4828 (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
4829 (loadi64 addr:$src))))))],
4830 IIC_SSE_MOVDQ>,
4831 XS, Requires<[UseSSE2]>;
4833 let Predicates = [HasAVX], AddedComplexity = 20 in {
4834 def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
4835 (VMOVZQI2PQIrm addr:$src)>;
4836 def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4f32 addr:$src)))),
4837 (VMOVZQI2PQIrm addr:$src)>;
4838 def : Pat<(v2i64 (X86vzload addr:$src)),
4839 (VMOVZQI2PQIrm addr:$src)>;
4840 }
4842 let Predicates = [UseSSE2], AddedComplexity = 20 in {
4843 def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
4844 (MOVZQI2PQIrm addr:$src)>;
4845 def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4f32 addr:$src)))),
4846 (MOVZQI2PQIrm addr:$src)>;
4847 def : Pat<(v2i64 (X86vzload addr:$src)), (MOVZQI2PQIrm addr:$src)>;
4848 }
4850 let Predicates = [HasAVX] in {
4851 def : Pat<(v4i64 (alignedX86vzload addr:$src)),
4852 (SUBREG_TO_REG (i32 0), (VMOVAPSrm addr:$src), sub_xmm)>;
4853 def : Pat<(v4i64 (X86vzload addr:$src)),
4854 (SUBREG_TO_REG (i32 0), (VMOVUPSrm addr:$src), sub_xmm)>;
4855 }
4857 //===---------------------------------------------------------------------===//
4858 // Moving from XMM to XMM and clearing the upper 64 bits. Note: there is a bug
4859 // in the IA-32 documentation; movq xmm1, xmm2 does clear the high bits.
4860 //===---------------------------------------------------------------------===//
4861 let AddedComplexity = 15 in
4862 def VMOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
4863 "vmovq\t{$src, $dst|$dst, $src}",
4864 [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))],
4865 IIC_SSE_MOVQ_RR>,
4866 XS, VEX, Requires<[HasAVX]>;
4867 let AddedComplexity = 15 in
4868 def MOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
4869 "movq\t{$src, $dst|$dst, $src}",
4870 [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))],
4871 IIC_SSE_MOVQ_RR>,
4872 XS, Requires<[UseSSE2]>;
4874 let AddedComplexity = 20 in
4875 def VMOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
4876 "vmovq\t{$src, $dst|$dst, $src}",
4877 [(set VR128:$dst, (v2i64 (X86vzmovl
4878 (loadv2i64 addr:$src))))],
4879 IIC_SSE_MOVDQ>,
4880 XS, VEX, Requires<[HasAVX]>;
4881 let AddedComplexity = 20 in {
4882 def MOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
4883 "movq\t{$src, $dst|$dst, $src}",
4884 [(set VR128:$dst, (v2i64 (X86vzmovl
4885 (loadv2i64 addr:$src))))],
4886 IIC_SSE_MOVDQ>,
4887 XS, Requires<[UseSSE2]>;
4888 }
4890 let AddedComplexity = 20 in {
4891 let Predicates = [HasAVX] in {
4892 def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
4893 (VMOVZPQILo2PQIrm addr:$src)>;
4894 def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
4895 (VMOVZPQILo2PQIrr VR128:$src)>;
4896 }
4897 let Predicates = [UseSSE2] in {
4898 def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
4899 (MOVZPQILo2PQIrm addr:$src)>;
4900 def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
4901 (MOVZPQILo2PQIrr VR128:$src)>;
4902 }
4903 }
4905 // Instructions to match in the assembler
4906 def VMOVQs64rr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
4907 "movq\t{$src, $dst|$dst, $src}", [],
4908 IIC_SSE_MOVDQ>, VEX, VEX_W;
4909 def VMOVQd64rr : VPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
4910 "movq\t{$src, $dst|$dst, $src}", [],
4911 IIC_SSE_MOVDQ>, VEX, VEX_W;
4912 // Recognize "movd" with GR64 destination, but encode as a "movq"
4913 def VMOVQd64rr_alt : VPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
4914 "movd\t{$src, $dst|$dst, $src}", [],
4915 IIC_SSE_MOVDQ>, VEX, VEX_W;
4917 // Instructions for the disassembler
4918 // xr = XMM register
4919 // xm = mem64
4921 let Predicates = [HasAVX] in
4922 def VMOVQxrxr: I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
4923 "vmovq\t{$src, $dst|$dst, $src}", []>, VEX, XS;
4924 def MOVQxrxr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
4925 "movq\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVQ_RR>, XS;
4927 //===---------------------------------------------------------------------===//
4928 // SSE3 - Replicate Single FP - MOVSHDUP and MOVSLDUP
4929 //===---------------------------------------------------------------------===//
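// Note: movshdup replicates elements 1 and 3 of the source into each pair of
// destination elements; movsldup likewise replicates elements 0 and 2.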
4930 multiclass sse3_replicate_sfp<bits<8> op, SDNode OpNode, string OpcodeStr,
4931 ValueType vt, RegisterClass RC, PatFrag mem_frag,
4932 X86MemOperand x86memop> {
4933 def rr : S3SI<op, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
4934 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4935 [(set RC:$dst, (vt (OpNode RC:$src)))],
4936 IIC_SSE_MOV_LH>;
4937 def rm : S3SI<op, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
4938 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4939 [(set RC:$dst, (OpNode (mem_frag addr:$src)))],
4940 IIC_SSE_MOV_LH>;
4941 }
4943 let Predicates = [HasAVX] in {
4944 defm VMOVSHDUP : sse3_replicate_sfp<0x16, X86Movshdup, "vmovshdup",
4945 v4f32, VR128, memopv4f32, f128mem>, VEX;
4946 defm VMOVSLDUP : sse3_replicate_sfp<0x12, X86Movsldup, "vmovsldup",
4947 v4f32, VR128, memopv4f32, f128mem>, VEX;
4948 defm VMOVSHDUPY : sse3_replicate_sfp<0x16, X86Movshdup, "vmovshdup",
4949 v8f32, VR256, memopv8f32, f256mem>, VEX, VEX_L;
4950 defm VMOVSLDUPY : sse3_replicate_sfp<0x12, X86Movsldup, "vmovsldup",
4951 v8f32, VR256, memopv8f32, f256mem>, VEX, VEX_L;
4952 }
4953 defm MOVSHDUP : sse3_replicate_sfp<0x16, X86Movshdup, "movshdup", v4f32, VR128,
4954 memopv4f32, f128mem>;
4955 defm MOVSLDUP : sse3_replicate_sfp<0x12, X86Movsldup, "movsldup", v4f32, VR128,
4956 memopv4f32, f128mem>;
4958 let Predicates = [HasAVX] in {
4959 def : Pat<(v4i32 (X86Movshdup VR128:$src)),
4960 (VMOVSHDUPrr VR128:$src)>;
4961 def : Pat<(v4i32 (X86Movshdup (bc_v4i32 (memopv2i64 addr:$src)))),
4962 (VMOVSHDUPrm addr:$src)>;
4963 def : Pat<(v4i32 (X86Movsldup VR128:$src)),
4964 (VMOVSLDUPrr VR128:$src)>;
4965 def : Pat<(v4i32 (X86Movsldup (bc_v4i32 (memopv2i64 addr:$src)))),
4966 (VMOVSLDUPrm addr:$src)>;
4967 def : Pat<(v8i32 (X86Movshdup VR256:$src)),
4968 (VMOVSHDUPYrr VR256:$src)>;
4969 def : Pat<(v8i32 (X86Movshdup (bc_v8i32 (memopv4i64 addr:$src)))),
4970 (VMOVSHDUPYrm addr:$src)>;
4971 def : Pat<(v8i32 (X86Movsldup VR256:$src)),
4972 (VMOVSLDUPYrr VR256:$src)>;
4973 def : Pat<(v8i32 (X86Movsldup (bc_v8i32 (memopv4i64 addr:$src)))),
4974 (VMOVSLDUPYrm addr:$src)>;
4975 }
4977 let Predicates = [UseSSE3] in {
4978 def : Pat<(v4i32 (X86Movshdup VR128:$src)),
4979 (MOVSHDUPrr VR128:$src)>;
4980 def : Pat<(v4i32 (X86Movshdup (bc_v4i32 (memopv2i64 addr:$src)))),
4981 (MOVSHDUPrm addr:$src)>;
4982 def : Pat<(v4i32 (X86Movsldup VR128:$src)),
4983 (MOVSLDUPrr VR128:$src)>;
4984 def : Pat<(v4i32 (X86Movsldup (bc_v4i32 (memopv2i64 addr:$src)))),
4985 (MOVSLDUPrm addr:$src)>;
4986 }
4988 //===---------------------------------------------------------------------===//
4989 // SSE3 - Replicate Double FP - MOVDDUP
4990 //===---------------------------------------------------------------------===//
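// Note: movddup broadcasts the low double-precision element of the source into
// both 64-bit halves of the destination.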
4992 multiclass sse3_replicate_dfp<string OpcodeStr> {
4993 let neverHasSideEffects = 1 in
4994 def rr : S3DI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
4995 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4996 [], IIC_SSE_MOV_LH>;
4997 def rm : S3DI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
4998 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4999 [(set VR128:$dst,
5000 (v2f64 (X86Movddup
5001 (scalar_to_vector (loadf64 addr:$src)))))],
5002 IIC_SSE_MOV_LH>;
5003 }
5005 // FIXME: Merge with the above class when there are patterns for the ymm version.
5006 multiclass sse3_replicate_dfp_y<string OpcodeStr> {
5007 def rr : S3DI<0x12, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
5008 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5009 [(set VR256:$dst, (v4f64 (X86Movddup VR256:$src)))]>;
5010 def rm : S3DI<0x12, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
5011 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5012 [(set VR256:$dst,
5013 (v4f64 (X86Movddup
5014 (scalar_to_vector (loadf64 addr:$src)))))]>;
5015 }
5017 let Predicates = [HasAVX] in {
5018 defm VMOVDDUP : sse3_replicate_dfp<"vmovddup">, VEX;
5019 defm VMOVDDUPY : sse3_replicate_dfp_y<"vmovddup">, VEX, VEX_L;
5020 }
5022 defm MOVDDUP : sse3_replicate_dfp<"movddup">;
5024 let Predicates = [HasAVX] in {
5025 def : Pat<(X86Movddup (memopv2f64 addr:$src)),
5026 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
5027 def : Pat<(X86Movddup (bc_v2f64 (memopv4f32 addr:$src))),
5028 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
5029 def : Pat<(X86Movddup (bc_v2f64 (memopv2i64 addr:$src))),
5030 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
5031 def : Pat<(X86Movddup (bc_v2f64
5032 (v2i64 (scalar_to_vector (loadi64 addr:$src))))),
5033 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
5035 // 256-bit version
5036 def : Pat<(X86Movddup (memopv4f64 addr:$src)),
5037 (VMOVDDUPYrm addr:$src)>;
5038 def : Pat<(X86Movddup (memopv4i64 addr:$src)),
5039 (VMOVDDUPYrm addr:$src)>;
5040 def : Pat<(X86Movddup (v4i64 (scalar_to_vector (loadi64 addr:$src)))),
5041 (VMOVDDUPYrm addr:$src)>;
5042 def : Pat<(X86Movddup (v4i64 VR256:$src)),
5043 (VMOVDDUPYrr VR256:$src)>;
5044 }
5046 let Predicates = [UseSSE3] in {
5047 def : Pat<(X86Movddup (memopv2f64 addr:$src)),
5048 (MOVDDUPrm addr:$src)>;
5049 def : Pat<(X86Movddup (bc_v2f64 (memopv4f32 addr:$src))),
5050 (MOVDDUPrm addr:$src)>;
5051 def : Pat<(X86Movddup (bc_v2f64 (memopv2i64 addr:$src))),
5052 (MOVDDUPrm addr:$src)>;
5053 def : Pat<(X86Movddup (bc_v2f64
5054 (v2i64 (scalar_to_vector (loadi64 addr:$src))))),
5055 (MOVDDUPrm addr:$src)>;
5056 }
5058 //===---------------------------------------------------------------------===//
5059 // SSE3 - Move Unaligned Integer
5060 //===---------------------------------------------------------------------===//
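// Note: lddqu is an unaligned 128-bit integer load; unlike movdqu it may read
// the whole enclosing aligned region to avoid cache-line-split penalties.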
5062 let Predicates = [HasAVX] in {
5063 def VLDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
5064 "vlddqu\t{$src, $dst|$dst, $src}",
5065 [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>, VEX;
5066 def VLDDQUYrm : S3DI<0xF0, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
5067 "vlddqu\t{$src, $dst|$dst, $src}",
5068 [(set VR256:$dst, (int_x86_avx_ldu_dq_256 addr:$src))]>,
5069 VEX, VEX_L;
5070 }
5071 def LDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
5072 "lddqu\t{$src, $dst|$dst, $src}",
5073 [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))],
5074 IIC_SSE_LDDQU>;
5076 //===---------------------------------------------------------------------===//
5077 // SSE3 - Arithmetic
5078 //===---------------------------------------------------------------------===//
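// Note: addsubps/addsubpd subtract in the even-numbered lanes and add in the
// odd-numbered lanes of the two sources.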
5080 multiclass sse3_addsub<Intrinsic Int, string OpcodeStr, RegisterClass RC,
5081 X86MemOperand x86memop, OpndItins itins,
5082 bit Is2Addr = 1> {
5083 def rr : I<0xD0, MRMSrcReg,
5084 (outs RC:$dst), (ins RC:$src1, RC:$src2),
5085 !if(Is2Addr,
5086 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5087 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5088 [(set RC:$dst, (Int RC:$src1, RC:$src2))], itins.rr>;
5089 def rm : I<0xD0, MRMSrcMem,
5090 (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
5091 !if(Is2Addr,
5092 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5093 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5094 [(set RC:$dst, (Int RC:$src1, (memop addr:$src2)))], itins.rm>;
5095 }
5097 let Predicates = [HasAVX] in {
5098 let ExeDomain = SSEPackedSingle in {
5099 defm VADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "vaddsubps", VR128,
5100 f128mem, SSE_ALU_F32P, 0>, TB, XD, VEX_4V;
5101 defm VADDSUBPSY : sse3_addsub<int_x86_avx_addsub_ps_256, "vaddsubps", VR256,
5102 f256mem, SSE_ALU_F32P, 0>, TB, XD, VEX_4V, VEX_L;
5103 }
5104 let ExeDomain = SSEPackedDouble in {
5105 defm VADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "vaddsubpd", VR128,
5106 f128mem, SSE_ALU_F64P, 0>, TB, OpSize, VEX_4V;
5107 defm VADDSUBPDY : sse3_addsub<int_x86_avx_addsub_pd_256, "vaddsubpd", VR256,
5108 f256mem, SSE_ALU_F64P, 0>, TB, OpSize, VEX_4V, VEX_L;
5109 }
5110 }
5111 let Constraints = "$src1 = $dst", Predicates = [UseSSE3] in {
5112 let ExeDomain = SSEPackedSingle in
5113 defm ADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "addsubps", VR128,
5114 f128mem, SSE_ALU_F32P>, TB, XD;
5115 let ExeDomain = SSEPackedDouble in
5116 defm ADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "addsubpd", VR128,
5117 f128mem, SSE_ALU_F64P>, TB, OpSize;
5118 }
5120 //===---------------------------------------------------------------------===//
5121 // SSE3 Instructions
5122 //===---------------------------------------------------------------------===//
5124 // Horizontal ops
5125 multiclass S3D_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
5126 X86MemOperand x86memop, SDNode OpNode, bit Is2Addr = 1> {
5127 def rr : S3DI<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
5128 !if(Is2Addr,
5129 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5130 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5131 [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], IIC_SSE_HADDSUB_RR>;
5133 def rm : S3DI<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
5134 !if(Is2Addr,
5135 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5136 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5137 [(set RC:$dst, (vt (OpNode RC:$src1, (memop addr:$src2))))],
5138 IIC_SSE_HADDSUB_RM>;
5139 }
5140 multiclass S3_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
5141 X86MemOperand x86memop, SDNode OpNode, bit Is2Addr = 1> {
5142 def rr : S3I<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
5143 !if(Is2Addr,
5144 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5145 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5146 [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], IIC_SSE_HADDSUB_RR>;
5148 def rm : S3I<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
5149 !if(Is2Addr,
5150 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5151 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5152 [(set RC:$dst, (vt (OpNode RC:$src1, (memop addr:$src2))))],
5153 IIC_SSE_HADDSUB_RM>;
5154 }
5156 let Predicates = [HasAVX] in {
5157 let ExeDomain = SSEPackedSingle in {
5158 defm VHADDPS : S3D_Int<0x7C, "vhaddps", v4f32, VR128, f128mem,
5159 X86fhadd, 0>, VEX_4V;
5160 defm VHSUBPS : S3D_Int<0x7D, "vhsubps", v4f32, VR128, f128mem,
5161 X86fhsub, 0>, VEX_4V;
5162 defm VHADDPSY : S3D_Int<0x7C, "vhaddps", v8f32, VR256, f256mem,
5163 X86fhadd, 0>, VEX_4V, VEX_L;
5164 defm VHSUBPSY : S3D_Int<0x7D, "vhsubps", v8f32, VR256, f256mem,
5165 X86fhsub, 0>, VEX_4V, VEX_L;
5166 }
5167 let ExeDomain = SSEPackedDouble in {
5168 defm VHADDPD : S3_Int <0x7C, "vhaddpd", v2f64, VR128, f128mem,
5169 X86fhadd, 0>, VEX_4V;
5170 defm VHSUBPD : S3_Int <0x7D, "vhsubpd", v2f64, VR128, f128mem,
5171 X86fhsub, 0>, VEX_4V;
5172 defm VHADDPDY : S3_Int <0x7C, "vhaddpd", v4f64, VR256, f256mem,
5173 X86fhadd, 0>, VEX_4V, VEX_L;
5174 defm VHSUBPDY : S3_Int <0x7D, "vhsubpd", v4f64, VR256, f256mem,
5175 X86fhsub, 0>, VEX_4V, VEX_L;
5176 }
5177 }
5179 let Constraints = "$src1 = $dst" in {
5180 let ExeDomain = SSEPackedSingle in {
5181 defm HADDPS : S3D_Int<0x7C, "haddps", v4f32, VR128, f128mem, X86fhadd>;
5182 defm HSUBPS : S3D_Int<0x7D, "hsubps", v4f32, VR128, f128mem, X86fhsub>;
5183 }
5184 let ExeDomain = SSEPackedDouble in {
5185 defm HADDPD : S3_Int<0x7C, "haddpd", v2f64, VR128, f128mem, X86fhadd>;
5186 defm HSUBPD : S3_Int<0x7D, "hsubpd", v2f64, VR128, f128mem, X86fhsub>;
5187 }
5188 }
5190 //===---------------------------------------------------------------------===//
5191 // SSSE3 - Packed Absolute Instructions
5192 //===---------------------------------------------------------------------===//
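// Note: pabs{b,w,d} write the per-element absolute value of their source; they
// are unary, so even the non-VEX forms need no $src1 = $dst constraint.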
5195 /// SS3I_unop_rm_int - Simple SSSE3 unary op whose type can be v*{i8,i16,i32}.
5196 multiclass SS3I_unop_rm_int<bits<8> opc, string OpcodeStr,
5197 Intrinsic IntId128> {
5198 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
5199 (ins VR128:$src),
5200 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5201 [(set VR128:$dst, (IntId128 VR128:$src))], IIC_SSE_PABS_RR>,
5202 OpSize;
5204 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
5205 (ins i128mem:$src),
5206 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5207 [(set VR128:$dst,
5208 (IntId128
5209 (bitconvert (memopv2i64 addr:$src))))], IIC_SSE_PABS_RM>,
5210 OpSize;
5211 }
5213 /// SS3I_unop_rm_int_y - Simple SSSE3 unary op whose type can be v*{i8,i16,i32}.
5214 multiclass SS3I_unop_rm_int_y<bits<8> opc, string OpcodeStr,
5215 Intrinsic IntId256> {
5216 def rr256 : SS38I<opc, MRMSrcReg, (outs VR256:$dst),
5217 (ins VR256:$src),
5218 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5219 [(set VR256:$dst, (IntId256 VR256:$src))]>,
5220 OpSize;
5222 def rm256 : SS38I<opc, MRMSrcMem, (outs VR256:$dst),
5223 (ins i256mem:$src),
5224 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5225 [(set VR256:$dst,
5226 (IntId256
5227 (bitconvert (memopv4i64 addr:$src))))]>, OpSize;
5228 }
5230 let Predicates = [HasAVX] in {
5231 defm VPABSB : SS3I_unop_rm_int<0x1C, "vpabsb",
5232 int_x86_ssse3_pabs_b_128>, VEX;
5233 defm VPABSW : SS3I_unop_rm_int<0x1D, "vpabsw",
5234 int_x86_ssse3_pabs_w_128>, VEX;
5235 defm VPABSD : SS3I_unop_rm_int<0x1E, "vpabsd",
5236 int_x86_ssse3_pabs_d_128>, VEX;
5237 }
5239 let Predicates = [HasAVX2] in {
5240 defm VPABSB : SS3I_unop_rm_int_y<0x1C, "vpabsb",
5241 int_x86_avx2_pabs_b>, VEX, VEX_L;
5242 defm VPABSW : SS3I_unop_rm_int_y<0x1D, "vpabsw",
5243 int_x86_avx2_pabs_w>, VEX, VEX_L;
5244 defm VPABSD : SS3I_unop_rm_int_y<0x1E, "vpabsd",
5245 int_x86_avx2_pabs_d>, VEX, VEX_L;
5246 }
5248 defm PABSB : SS3I_unop_rm_int<0x1C, "pabsb",
5249 int_x86_ssse3_pabs_b_128>;
5250 defm PABSW : SS3I_unop_rm_int<0x1D, "pabsw",
5251 int_x86_ssse3_pabs_w_128>;
5252 defm PABSD : SS3I_unop_rm_int<0x1E, "pabsd",
5253 int_x86_ssse3_pabs_d_128>;
5255 //===---------------------------------------------------------------------===//
5256 // SSSE3 - Packed Binary Operator Instructions
5257 //===---------------------------------------------------------------------===//
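// Note: phadd*/phsub* operate horizontally: results for adjacent element pairs
// of $src1 land in the low half of the destination and those of $src2 in the
// high half.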
5259 def SSE_PHADDSUBD : OpndItins<
5260 IIC_SSE_PHADDSUBD_RR, IIC_SSE_PHADDSUBD_RM
5261 >;
5262 def SSE_PHADDSUBSW : OpndItins<
5263 IIC_SSE_PHADDSUBSW_RR, IIC_SSE_PHADDSUBSW_RM
5264 >;
5265 def SSE_PHADDSUBW : OpndItins<
5266 IIC_SSE_PHADDSUBW_RR, IIC_SSE_PHADDSUBW_RM
5267 >;
5268 def SSE_PSHUFB : OpndItins<
5269 IIC_SSE_PSHUFB_RR, IIC_SSE_PSHUFB_RM
5270 >;
5271 def SSE_PSIGN : OpndItins<
5272 IIC_SSE_PSIGN_RR, IIC_SSE_PSIGN_RM
5273 >;
5274 def SSE_PMULHRSW : OpndItins<
5275 IIC_SSE_PMULHRSW, IIC_SSE_PMULHRSW
5276 >;
5278 /// SS3I_binop_rm - Simple SSSE3 bin op
5279 multiclass SS3I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
5280 ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
5281 X86MemOperand x86memop, OpndItins itins,
5282 bit Is2Addr = 1> {
5283 let isCommutable = 1 in
5284 def rr : SS38I<opc, MRMSrcReg, (outs RC:$dst),
5285 (ins RC:$src1, RC:$src2),
5286 !if(Is2Addr,
5287 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5288 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5289 [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2)))], itins.rr>,
5290 OpSize;
5291 def rm : SS38I<opc, MRMSrcMem, (outs RC:$dst),
5292 (ins RC:$src1, x86memop:$src2),
5293 !if(Is2Addr,
5294 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5295 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5296 [(set RC:$dst,
5297 (OpVT (OpNode RC:$src1,
5298 (bitconvert (memop_frag addr:$src2)))))], itins.rm>, OpSize;
5299 }
5301 /// SS3I_binop_rm_int - Simple SSSE3 bin op whose type can be v*{i8,i16,i32}.
5302 multiclass SS3I_binop_rm_int<bits<8> opc, string OpcodeStr,
5303 Intrinsic IntId128, OpndItins itins,
5304 bit Is2Addr = 1> {
5305 let isCommutable = 1 in
5306 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
5307 (ins VR128:$src1, VR128:$src2),
5308 !if(Is2Addr,
5309 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5310 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5311 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
5312 OpSize;
5313 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
5314 (ins VR128:$src1, i128mem:$src2),
5315 !if(Is2Addr,
5316 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5317 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5318 [(set VR128:$dst,
5319 (IntId128 VR128:$src1,
5320 (bitconvert (memopv2i64 addr:$src2))))]>, OpSize;
5321 }
5323 multiclass SS3I_binop_rm_int_y<bits<8> opc, string OpcodeStr,
5324 Intrinsic IntId256> {
5325 let isCommutable = 1 in
5326 def rr256 : SS38I<opc, MRMSrcReg, (outs VR256:$dst),
5327 (ins VR256:$src1, VR256:$src2),
5328 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5329 [(set VR256:$dst, (IntId256 VR256:$src1, VR256:$src2))]>,
5330 OpSize;
5331 def rm256 : SS38I<opc, MRMSrcMem, (outs VR256:$dst),
5332 (ins VR256:$src1, i256mem:$src2),
5333 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5334 [(set VR256:$dst,
5335 (IntId256 VR256:$src1,
5336 (bitconvert (memopv4i64 addr:$src2))))]>, OpSize;
5337 }
5339 let ImmT = NoImm, Predicates = [HasAVX] in {
5340 let isCommutable = 0 in {
5341 defm VPHADDW : SS3I_binop_rm<0x01, "vphaddw", X86hadd, v8i16, VR128,
5342 memopv2i64, i128mem,
5343 SSE_PHADDSUBW, 0>, VEX_4V;
5344 defm VPHADDD : SS3I_binop_rm<0x02, "vphaddd", X86hadd, v4i32, VR128,
5345 memopv2i64, i128mem,
5346 SSE_PHADDSUBD, 0>, VEX_4V;
5347 defm VPHSUBW : SS3I_binop_rm<0x05, "vphsubw", X86hsub, v8i16, VR128,
5348 memopv2i64, i128mem,
5349 SSE_PHADDSUBW, 0>, VEX_4V;
5350 defm VPHSUBD : SS3I_binop_rm<0x06, "vphsubd", X86hsub, v4i32, VR128,
5351 memopv2i64, i128mem,
5352 SSE_PHADDSUBD, 0>, VEX_4V;
5353 defm VPSIGNB : SS3I_binop_rm<0x08, "vpsignb", X86psign, v16i8, VR128,
5354 memopv2i64, i128mem,
5355 SSE_PSIGN, 0>, VEX_4V;
5356 defm VPSIGNW : SS3I_binop_rm<0x09, "vpsignw", X86psign, v8i16, VR128,
5357 memopv2i64, i128mem,
5358 SSE_PSIGN, 0>, VEX_4V;
5359 defm VPSIGND : SS3I_binop_rm<0x0A, "vpsignd", X86psign, v4i32, VR128,
5360 memopv2i64, i128mem,
5361 SSE_PSIGN, 0>, VEX_4V;
5362 defm VPSHUFB : SS3I_binop_rm<0x00, "vpshufb", X86pshufb, v16i8, VR128,
5363 memopv2i64, i128mem,
5364 SSE_PSHUFB, 0>, VEX_4V;
5365 defm VPHADDSW : SS3I_binop_rm_int<0x03, "vphaddsw",
5366 int_x86_ssse3_phadd_sw_128,
5367 SSE_PHADDSUBSW, 0>, VEX_4V;
5368 defm VPHSUBSW : SS3I_binop_rm_int<0x07, "vphsubsw",
5369 int_x86_ssse3_phsub_sw_128,
5370 SSE_PHADDSUBSW, 0>, VEX_4V;
5371 defm VPMADDUBSW : SS3I_binop_rm_int<0x04, "vpmaddubsw",
5372 int_x86_ssse3_pmadd_ub_sw_128,
5373 SSE_PMADD, 0>, VEX_4V;
5374 }
5375 defm VPMULHRSW : SS3I_binop_rm_int<0x0B, "vpmulhrsw",
5376 int_x86_ssse3_pmul_hr_sw_128,
5377 SSE_PMULHRSW, 0>, VEX_4V;
5378 }
5380 let ImmT = NoImm, Predicates = [HasAVX2] in {
5381 let isCommutable = 0 in {
5382 defm VPHADDWY : SS3I_binop_rm<0x01, "vphaddw", X86hadd, v16i16, VR256,
5383 memopv4i64, i256mem,
5384 SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
5385 defm VPHADDDY : SS3I_binop_rm<0x02, "vphaddd", X86hadd, v8i32, VR256,
5386 memopv4i64, i256mem,
5387 SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
5388 defm VPHSUBWY : SS3I_binop_rm<0x05, "vphsubw", X86hsub, v16i16, VR256,
5389 memopv4i64, i256mem,
5390 SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
5391 defm VPHSUBDY : SS3I_binop_rm<0x06, "vphsubd", X86hsub, v8i32, VR256,
5392 memopv4i64, i256mem,
5393 SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
5394 defm VPSIGNBY : SS3I_binop_rm<0x08, "vpsignb", X86psign, v32i8, VR256,
5395 memopv4i64, i256mem,
5396 SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
5397 defm VPSIGNWY : SS3I_binop_rm<0x09, "vpsignw", X86psign, v16i16, VR256,
5398 memopv4i64, i256mem,
5399 SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
5400 defm VPSIGNDY : SS3I_binop_rm<0x0A, "vpsignd", X86psign, v8i32, VR256,
5401 memopv4i64, i256mem,
5402 SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
5403 defm VPSHUFBY : SS3I_binop_rm<0x00, "vpshufb", X86pshufb, v32i8, VR256,
5404 memopv4i64, i256mem,
5405 SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
5406 defm VPHADDSW : SS3I_binop_rm_int_y<0x03, "vphaddsw",
5407 int_x86_avx2_phadd_sw>, VEX_4V, VEX_L;
5408 defm VPHSUBSW : SS3I_binop_rm_int_y<0x07, "vphsubsw",
5409 int_x86_avx2_phsub_sw>, VEX_4V, VEX_L;
5410 defm VPMADDUBSW : SS3I_binop_rm_int_y<0x04, "vpmaddubsw",
5411 int_x86_avx2_pmadd_ub_sw>, VEX_4V, VEX_L;
5412 }
5413 defm VPMULHRSW : SS3I_binop_rm_int_y<0x0B, "vpmulhrsw",
5414 int_x86_avx2_pmul_hr_sw>, VEX_4V, VEX_L;
5415 }
5417 // None of these have i8 immediate fields.
5418 let ImmT = NoImm, Constraints = "$src1 = $dst" in {
5419 let isCommutable = 0 in {
5420 defm PHADDW : SS3I_binop_rm<0x01, "phaddw", X86hadd, v8i16, VR128,
5421 memopv2i64, i128mem, SSE_PHADDSUBW>;
5422 defm PHADDD : SS3I_binop_rm<0x02, "phaddd", X86hadd, v4i32, VR128,
5423 memopv2i64, i128mem, SSE_PHADDSUBD>;
5424 defm PHSUBW : SS3I_binop_rm<0x05, "phsubw", X86hsub, v8i16, VR128,
5425 memopv2i64, i128mem, SSE_PHADDSUBW>;
5426 defm PHSUBD : SS3I_binop_rm<0x06, "phsubd", X86hsub, v4i32, VR128,
5427 memopv2i64, i128mem, SSE_PHADDSUBD>;
5428 defm PSIGNB : SS3I_binop_rm<0x08, "psignb", X86psign, v16i8, VR128,
5429 memopv2i64, i128mem, SSE_PSIGN>;
5430 defm PSIGNW : SS3I_binop_rm<0x09, "psignw", X86psign, v8i16, VR128,
5431 memopv2i64, i128mem, SSE_PSIGN>;
5432 defm PSIGND : SS3I_binop_rm<0x0A, "psignd", X86psign, v4i32, VR128,
5433 memopv2i64, i128mem, SSE_PSIGN>;
5434 defm PSHUFB : SS3I_binop_rm<0x00, "pshufb", X86pshufb, v16i8, VR128,
5435 memopv2i64, i128mem, SSE_PSHUFB>;
5436 defm PHADDSW : SS3I_binop_rm_int<0x03, "phaddsw",
5437 int_x86_ssse3_phadd_sw_128,
5438 SSE_PHADDSUBSW>;
5439 defm PHSUBSW : SS3I_binop_rm_int<0x07, "phsubsw",
5440 int_x86_ssse3_phsub_sw_128,
5441 SSE_PHADDSUBSW>;
5442 defm PMADDUBSW : SS3I_binop_rm_int<0x04, "pmaddubsw",
5443 int_x86_ssse3_pmadd_ub_sw_128, SSE_PMADD>;
5444 }
5445 defm PMULHRSW : SS3I_binop_rm_int<0x0B, "pmulhrsw",
5446 int_x86_ssse3_pmul_hr_sw_128,
5447 SSE_PMULHRSW>;
5448 }
5450 //===---------------------------------------------------------------------===//
5451 // SSSE3 - Packed Align Instruction Patterns
5452 //===---------------------------------------------------------------------===//
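// Note: palignr concatenates the two sources ($src1 in the high half) and
// extracts a byte-aligned 128-bit window, which is why the X86PAlign patterns
// below pass the operands swapped.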
5454 multiclass ssse3_palign<string asm, bit Is2Addr = 1> {
5455 let neverHasSideEffects = 1 in {
5456 def R128rr : SS3AI<0x0F, MRMSrcReg, (outs VR128:$dst),
5457 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
5458 !if(Is2Addr,
5459 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
5460 !strconcat(asm,
5461 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
5462 [], IIC_SSE_PALIGNR>, OpSize;
5464 def R128rm : SS3AI<0x0F, MRMSrcMem, (outs VR128:$dst),
5465 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
5466 !if(Is2Addr,
5467 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
5468 !strconcat(asm,
5469 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
5470 [], IIC_SSE_PALIGNR>, OpSize;
5471 }
5472 }
5474 multiclass ssse3_palign_y<string asm, bit Is2Addr = 1> {
5475 let neverHasSideEffects = 1 in {
5476 def R256rr : SS3AI<0x0F, MRMSrcReg, (outs VR256:$dst),
5477 (ins VR256:$src1, VR256:$src2, i8imm:$src3),
5478 !strconcat(asm,
5479 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
5482 def R256rm : SS3AI<0x0F, MRMSrcMem, (outs VR256:$dst),
5483 (ins VR256:$src1, i256mem:$src2, i8imm:$src3),
5484 !strconcat(asm,
5485 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
5490 let Predicates = [HasAVX] in
5491 defm VPALIGN : ssse3_palign<"vpalignr", 0>, VEX_4V;
5492 let Predicates = [HasAVX2] in
5493 defm VPALIGN : ssse3_palign_y<"vpalignr", 0>, VEX_4V, VEX_L;
5494 let Constraints = "$src1 = $dst", Predicates = [UseSSSE3] in
5495 defm PALIGN : ssse3_palign<"palignr">;
5497 let Predicates = [HasAVX2] in {
5498 def : Pat<(v8i32 (X86PAlign VR256:$src1, VR256:$src2, (i8 imm:$imm))),
5499 (VPALIGNR256rr VR256:$src2, VR256:$src1, imm:$imm)>;
5500 def : Pat<(v8f32 (X86PAlign VR256:$src1, VR256:$src2, (i8 imm:$imm))),
5501 (VPALIGNR256rr VR256:$src2, VR256:$src1, imm:$imm)>;
5502 def : Pat<(v16i16 (X86PAlign VR256:$src1, VR256:$src2, (i8 imm:$imm))),
5503 (VPALIGNR256rr VR256:$src2, VR256:$src1, imm:$imm)>;
5504 def : Pat<(v32i8 (X86PAlign VR256:$src1, VR256:$src2, (i8 imm:$imm))),
5505 (VPALIGNR256rr VR256:$src2, VR256:$src1, imm:$imm)>;
5506 }
5508 let Predicates = [HasAVX] in {
5509 def : Pat<(v4i32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5510 (VPALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
5511 def : Pat<(v4f32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5512 (VPALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
5513 def : Pat<(v8i16 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5514 (VPALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
5515 def : Pat<(v16i8 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5516 (VPALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
5517 }
5519 let Predicates = [UseSSSE3] in {
5520 def : Pat<(v4i32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5521 (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
5522 def : Pat<(v4f32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5523 (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
5524 def : Pat<(v8i16 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5525 (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
5526 def : Pat<(v16i8 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5527 (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
5528 }
5530 //===---------------------------------------------------------------------===//
5531 // SSE3 - Thread synchronization
5532 //===---------------------------------------------------------------------===//
5534 let usesCustomInserter = 1 in {
5535 def MONITOR : PseudoI<(outs), (ins i32mem:$src1, GR32:$src2, GR32:$src3),
5536 [(int_x86_sse3_monitor addr:$src1, GR32:$src2, GR32:$src3)]>,
5537 Requires<[HasSSE3]>;
5538 }
5540 let Uses = [EAX, ECX, EDX] in
5541 def MONITORrrr : I<0x01, MRM_C8, (outs), (ins), "monitor", [], IIC_SSE_MONITOR>,
5542 TB, Requires<[HasSSE3]>;
5543 let Uses = [ECX, EAX] in
5544 def MWAITrr : I<0x01, MRM_C9, (outs), (ins), "mwait",
5545 [(int_x86_sse3_mwait ECX, EAX)], IIC_SSE_MWAIT>,
5546 TB, Requires<[HasSSE3]>;
5548 def : InstAlias<"mwait %eax, %ecx", (MWAITrr)>, Requires<[In32BitMode]>;
5549 def : InstAlias<"mwait %rax, %rcx", (MWAITrr)>, Requires<[In64BitMode]>;
5551 def : InstAlias<"monitor %eax, %ecx, %edx", (MONITORrrr)>,
5552 Requires<[In32BitMode]>;
5553 def : InstAlias<"monitor %rax, %rcx, %rdx", (MONITORrrr)>,
5554 Requires<[In64BitMode]>;
5556 //===----------------------------------------------------------------------===//
5557 // SSE4.1 - Packed Move with Sign/Zero Extend
5558 //===----------------------------------------------------------------------===//
5560 multiclass SS41I_binop_rm_int8<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
5561 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
5562 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5563 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
5565 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
5566 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5567 [(set VR128:$dst,
5568 (IntId (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))]>,
5569 OpSize;
5570 }
5572 multiclass SS41I_binop_rm_int16_y<bits<8> opc, string OpcodeStr,
5573 Intrinsic IntId> {
5574 def Yrr : SS48I<opc, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
5575 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5576 [(set VR256:$dst, (IntId VR128:$src))]>, OpSize;
5578 def Yrm : SS48I<opc, MRMSrcMem, (outs VR256:$dst), (ins i128mem:$src),
5579 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5580 [(set VR256:$dst, (IntId (load addr:$src)))]>, OpSize;
5581 }
5583 let Predicates = [HasAVX] in {
5584 defm VPMOVSXBW : SS41I_binop_rm_int8<0x20, "vpmovsxbw", int_x86_sse41_pmovsxbw>,
5585 VEX;
5586 defm VPMOVSXWD : SS41I_binop_rm_int8<0x23, "vpmovsxwd", int_x86_sse41_pmovsxwd>,
5587 VEX;
5588 defm VPMOVSXDQ : SS41I_binop_rm_int8<0x25, "vpmovsxdq", int_x86_sse41_pmovsxdq>,
5589 VEX;
5590 defm VPMOVZXBW : SS41I_binop_rm_int8<0x30, "vpmovzxbw", int_x86_sse41_pmovzxbw>,
5591 VEX;
5592 defm VPMOVZXWD : SS41I_binop_rm_int8<0x33, "vpmovzxwd", int_x86_sse41_pmovzxwd>,
5593 VEX;
5594 defm VPMOVZXDQ : SS41I_binop_rm_int8<0x35, "vpmovzxdq", int_x86_sse41_pmovzxdq>,
5595 VEX;
5596 }
5598 let Predicates = [HasAVX2] in {
5599 defm VPMOVSXBW : SS41I_binop_rm_int16_y<0x20, "vpmovsxbw",
5600 int_x86_avx2_pmovsxbw>, VEX, VEX_L;
5601 defm VPMOVSXWD : SS41I_binop_rm_int16_y<0x23, "vpmovsxwd",
5602 int_x86_avx2_pmovsxwd>, VEX, VEX_L;
5603 defm VPMOVSXDQ : SS41I_binop_rm_int16_y<0x25, "vpmovsxdq",
5604 int_x86_avx2_pmovsxdq>, VEX, VEX_L;
5605 defm VPMOVZXBW : SS41I_binop_rm_int16_y<0x30, "vpmovzxbw",
5606 int_x86_avx2_pmovzxbw>, VEX, VEX_L;
5607 defm VPMOVZXWD : SS41I_binop_rm_int16_y<0x33, "vpmovzxwd",
5608 int_x86_avx2_pmovzxwd>, VEX, VEX_L;
5609 defm VPMOVZXDQ : SS41I_binop_rm_int16_y<0x35, "vpmovzxdq",
5610 int_x86_avx2_pmovzxdq>, VEX, VEX_L;
5611 }
5613 defm PMOVSXBW : SS41I_binop_rm_int8<0x20, "pmovsxbw", int_x86_sse41_pmovsxbw>;
5614 defm PMOVSXWD : SS41I_binop_rm_int8<0x23, "pmovsxwd", int_x86_sse41_pmovsxwd>;
5615 defm PMOVSXDQ : SS41I_binop_rm_int8<0x25, "pmovsxdq", int_x86_sse41_pmovsxdq>;
5616 defm PMOVZXBW : SS41I_binop_rm_int8<0x30, "pmovzxbw", int_x86_sse41_pmovzxbw>;
5617 defm PMOVZXWD : SS41I_binop_rm_int8<0x33, "pmovzxwd", int_x86_sse41_pmovzxwd>;
5618 defm PMOVZXDQ : SS41I_binop_rm_int8<0x35, "pmovzxdq", int_x86_sse41_pmovzxdq>;
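// For orientation (per the Intel SDM): pmovzxbw zero-extends the low eight
// bytes of the source to eight words, so the byte 0xFF becomes 0x00FF, while
// the pmovsx forms sign-extend it to 0xFFFF. The patterns below fold the
// common scalar-load idioms into the 'rm' forms defined above.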
5620 let Predicates = [HasAVX] in {
5621 // Common patterns involving scalar load.
5622 def : Pat<(int_x86_sse41_pmovsxbw (vzmovl_v2i64 addr:$src)),
5623 (VPMOVSXBWrm addr:$src)>;
5624 def : Pat<(int_x86_sse41_pmovsxbw (vzload_v2i64 addr:$src)),
5625 (VPMOVSXBWrm addr:$src)>;
5626 def : Pat<(int_x86_sse41_pmovsxbw (bc_v16i8 (loadv2i64 addr:$src))),
5627 (VPMOVSXBWrm addr:$src)>;
5629 def : Pat<(int_x86_sse41_pmovsxwd (vzmovl_v2i64 addr:$src)),
5630 (VPMOVSXWDrm addr:$src)>;
5631 def : Pat<(int_x86_sse41_pmovsxwd (vzload_v2i64 addr:$src)),
5632 (VPMOVSXWDrm addr:$src)>;
5633 def : Pat<(int_x86_sse41_pmovsxwd (bc_v8i16 (loadv2i64 addr:$src))),
5634 (VPMOVSXWDrm addr:$src)>;
5636 def : Pat<(int_x86_sse41_pmovsxdq (vzmovl_v2i64 addr:$src)),
5637 (VPMOVSXDQrm addr:$src)>;
5638 def : Pat<(int_x86_sse41_pmovsxdq (vzload_v2i64 addr:$src)),
5639 (VPMOVSXDQrm addr:$src)>;
5640 def : Pat<(int_x86_sse41_pmovsxdq (bc_v4i32 (loadv2i64 addr:$src))),
5641 (VPMOVSXDQrm addr:$src)>;
5643 def : Pat<(int_x86_sse41_pmovzxbw (vzmovl_v2i64 addr:$src)),
5644 (VPMOVZXBWrm addr:$src)>;
5645 def : Pat<(int_x86_sse41_pmovzxbw (vzload_v2i64 addr:$src)),
5646 (VPMOVZXBWrm addr:$src)>;
5647 def : Pat<(int_x86_sse41_pmovzxbw (bc_v16i8 (loadv2i64 addr:$src))),
5648 (VPMOVZXBWrm addr:$src)>;
5650 def : Pat<(int_x86_sse41_pmovzxwd (vzmovl_v2i64 addr:$src)),
5651 (VPMOVZXWDrm addr:$src)>;
5652 def : Pat<(int_x86_sse41_pmovzxwd (vzload_v2i64 addr:$src)),
5653 (VPMOVZXWDrm addr:$src)>;
5654 def : Pat<(int_x86_sse41_pmovzxwd (bc_v8i16 (loadv2i64 addr:$src))),
5655 (VPMOVZXWDrm addr:$src)>;
5657 def : Pat<(int_x86_sse41_pmovzxdq (vzmovl_v2i64 addr:$src)),
5658 (VPMOVZXDQrm addr:$src)>;
5659 def : Pat<(int_x86_sse41_pmovzxdq (vzload_v2i64 addr:$src)),
5660 (VPMOVZXDQrm addr:$src)>;
5661 def : Pat<(int_x86_sse41_pmovzxdq (bc_v4i32 (loadv2i64 addr:$src))),
5662 (VPMOVZXDQrm addr:$src)>;
5663 }
5665 let Predicates = [UseSSE41] in {
5666 // Common patterns involving scalar load.
5667 def : Pat<(int_x86_sse41_pmovsxbw (vzmovl_v2i64 addr:$src)),
5668 (PMOVSXBWrm addr:$src)>;
5669 def : Pat<(int_x86_sse41_pmovsxbw (vzload_v2i64 addr:$src)),
5670 (PMOVSXBWrm addr:$src)>;
5671 def : Pat<(int_x86_sse41_pmovsxbw (bc_v16i8 (loadv2i64 addr:$src))),
5672 (PMOVSXBWrm addr:$src)>;
5674 def : Pat<(int_x86_sse41_pmovsxwd (vzmovl_v2i64 addr:$src)),
5675 (PMOVSXWDrm addr:$src)>;
5676 def : Pat<(int_x86_sse41_pmovsxwd (vzload_v2i64 addr:$src)),
5677 (PMOVSXWDrm addr:$src)>;
5678 def : Pat<(int_x86_sse41_pmovsxwd (bc_v8i16 (loadv2i64 addr:$src))),
5679 (PMOVSXWDrm addr:$src)>;
5681 def : Pat<(int_x86_sse41_pmovsxdq (vzmovl_v2i64 addr:$src)),
5682 (PMOVSXDQrm addr:$src)>;
5683 def : Pat<(int_x86_sse41_pmovsxdq (vzload_v2i64 addr:$src)),
5684 (PMOVSXDQrm addr:$src)>;
5685 def : Pat<(int_x86_sse41_pmovsxdq (bc_v4i32 (loadv2i64 addr:$src))),
5686 (PMOVSXDQrm addr:$src)>;
5688 def : Pat<(int_x86_sse41_pmovzxbw (vzmovl_v2i64 addr:$src)),
5689 (PMOVZXBWrm addr:$src)>;
5690 def : Pat<(int_x86_sse41_pmovzxbw (vzload_v2i64 addr:$src)),
5691 (PMOVZXBWrm addr:$src)>;
5692 def : Pat<(int_x86_sse41_pmovzxbw (bc_v16i8 (loadv2i64 addr:$src))),
5693 (PMOVZXBWrm addr:$src)>;
5695 def : Pat<(int_x86_sse41_pmovzxwd (vzmovl_v2i64 addr:$src)),
5696 (PMOVZXWDrm addr:$src)>;
5697 def : Pat<(int_x86_sse41_pmovzxwd (vzload_v2i64 addr:$src)),
5698 (PMOVZXWDrm addr:$src)>;
5699 def : Pat<(int_x86_sse41_pmovzxwd (bc_v8i16 (loadv2i64 addr:$src))),
5700 (PMOVZXWDrm addr:$src)>;
5702 def : Pat<(int_x86_sse41_pmovzxdq (vzmovl_v2i64 addr:$src)),
5703 (PMOVZXDQrm addr:$src)>;
5704 def : Pat<(int_x86_sse41_pmovzxdq (vzload_v2i64 addr:$src)),
5705 (PMOVZXDQrm addr:$src)>;
5706 def : Pat<(int_x86_sse41_pmovzxdq (bc_v4i32 (loadv2i64 addr:$src))),
5707 (PMOVZXDQrm addr:$src)>;
5708 }
5710 let Predicates = [HasAVX2] in {
5711 let AddedComplexity = 15 in {
5712 def : Pat<(v4i64 (X86vzmovly (v4i32 VR128:$src))),
5713 (VPMOVZXDQYrr VR128:$src)>;
5714 def : Pat<(v8i32 (X86vzmovly (v8i16 VR128:$src))),
5715 (VPMOVZXWDYrr VR128:$src)>;
5716 }
5718 def : Pat<(v4i64 (X86vsmovl (v4i32 VR128:$src))), (VPMOVSXDQYrr VR128:$src)>;
5719 def : Pat<(v8i32 (X86vsmovl (v8i16 VR128:$src))), (VPMOVSXWDYrr VR128:$src)>;
5720 }
5722 let Predicates = [HasAVX] in {
5723 def : Pat<(v2i64 (X86vsmovl (v4i32 VR128:$src))), (VPMOVSXDQrr VR128:$src)>;
5724 def : Pat<(v4i32 (X86vsmovl (v8i16 VR128:$src))), (VPMOVSXWDrr VR128:$src)>;
5725 }
5727 let Predicates = [UseSSE41] in {
5728 def : Pat<(v2i64 (X86vsmovl (v4i32 VR128:$src))), (PMOVSXDQrr VR128:$src)>;
5729 def : Pat<(v4i32 (X86vsmovl (v8i16 VR128:$src))), (PMOVSXWDrr VR128:$src)>;
5730 }
5733 multiclass SS41I_binop_rm_int4<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
5734 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
5735 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5736 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
5738 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
5739 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5740 [(set VR128:$dst,
5741 (IntId (bitconvert (v4i32 (scalar_to_vector (loadi32 addr:$src))))))]>,
5742 OpSize;
5743 }
5745 multiclass SS41I_binop_rm_int8_y<bits<8> opc, string OpcodeStr,
5746 Intrinsic IntId> {
5747 def Yrr : SS48I<opc, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
5748 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5749 [(set VR256:$dst, (IntId VR128:$src))]>, OpSize;
5751 def Yrm : SS48I<opc, MRMSrcMem, (outs VR256:$dst), (ins i32mem:$src),
5752 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5753 [(set VR256:$dst,
5754 (IntId (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))]>,
5755 OpSize;
5756 }
5758 let Predicates = [HasAVX] in {
5759 defm VPMOVSXBD : SS41I_binop_rm_int4<0x21, "vpmovsxbd", int_x86_sse41_pmovsxbd>,
5760 VEX;
5761 defm VPMOVSXWQ : SS41I_binop_rm_int4<0x24, "vpmovsxwq", int_x86_sse41_pmovsxwq>,
5762 VEX;
5763 defm VPMOVZXBD : SS41I_binop_rm_int4<0x31, "vpmovzxbd", int_x86_sse41_pmovzxbd>,
5764 VEX;
5765 defm VPMOVZXWQ : SS41I_binop_rm_int4<0x34, "vpmovzxwq", int_x86_sse41_pmovzxwq>,
5766 VEX;
5767 }
5769 let Predicates = [HasAVX2] in {
5770 defm VPMOVSXBD : SS41I_binop_rm_int8_y<0x21, "vpmovsxbd",
5771 int_x86_avx2_pmovsxbd>, VEX, VEX_L;
5772 defm VPMOVSXWQ : SS41I_binop_rm_int8_y<0x24, "vpmovsxwq",
5773 int_x86_avx2_pmovsxwq>, VEX, VEX_L;
5774 defm VPMOVZXBD : SS41I_binop_rm_int8_y<0x31, "vpmovzxbd",
5775 int_x86_avx2_pmovzxbd>, VEX, VEX_L;
5776 defm VPMOVZXWQ : SS41I_binop_rm_int8_y<0x34, "vpmovzxwq",
5777 int_x86_avx2_pmovzxwq>, VEX, VEX_L;
5778 }
5780 defm PMOVSXBD : SS41I_binop_rm_int4<0x21, "pmovsxbd", int_x86_sse41_pmovsxbd>;
5781 defm PMOVSXWQ : SS41I_binop_rm_int4<0x24, "pmovsxwq", int_x86_sse41_pmovsxwq>;
5782 defm PMOVZXBD : SS41I_binop_rm_int4<0x31, "pmovzxbd", int_x86_sse41_pmovzxbd>;
5783 defm PMOVZXWQ : SS41I_binop_rm_int4<0x34, "pmovzxwq", int_x86_sse41_pmovzxwq>;
5785 let Predicates = [HasAVX] in {
5786 // Common patterns involving scalar load
5787 def : Pat<(int_x86_sse41_pmovsxbd (vzmovl_v4i32 addr:$src)),
5788 (VPMOVSXBDrm addr:$src)>;
5789 def : Pat<(int_x86_sse41_pmovsxwq (vzmovl_v4i32 addr:$src)),
5790 (VPMOVSXWQrm addr:$src)>;
5792 def : Pat<(int_x86_sse41_pmovzxbd (vzmovl_v4i32 addr:$src)),
5793 (VPMOVZXBDrm addr:$src)>;
5794 def : Pat<(int_x86_sse41_pmovzxwq (vzmovl_v4i32 addr:$src)),
5795 (VPMOVZXWQrm addr:$src)>;
5796 }
5798 let Predicates = [UseSSE41] in {
5799 // Common patterns involving scalar load
5800 def : Pat<(int_x86_sse41_pmovsxbd (vzmovl_v4i32 addr:$src)),
5801 (PMOVSXBDrm addr:$src)>;
5802 def : Pat<(int_x86_sse41_pmovsxwq (vzmovl_v4i32 addr:$src)),
5803 (PMOVSXWQrm addr:$src)>;
5805 def : Pat<(int_x86_sse41_pmovzxbd (vzmovl_v4i32 addr:$src)),
5806 (PMOVZXBDrm addr:$src)>;
5807 def : Pat<(int_x86_sse41_pmovzxwq (vzmovl_v4i32 addr:$src)),
5808 (PMOVZXWQrm addr:$src)>;
5809 }
5811 multiclass SS41I_binop_rm_int2<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
5812 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
5813 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5814 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
5816 // Expecting an i16 load any-extended to an i32 value.
5817 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i16mem:$src),
5818 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5819 [(set VR128:$dst, (IntId (bitconvert
5820 (v4i32 (scalar_to_vector (loadi16_anyext addr:$src))))))]>,
5821 OpSize;
5822 }
5824 multiclass SS41I_binop_rm_int4_y<bits<8> opc, string OpcodeStr,
5825 Intrinsic IntId> {
5826 def Yrr : SS48I<opc, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
5827 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5828 [(set VR256:$dst, (IntId VR128:$src))]>, OpSize;
5830 // Expecting an i16 load any-extended to an i32 value.
5831 def Yrm : SS48I<opc, MRMSrcMem, (outs VR256:$dst), (ins i16mem:$src),
5832 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5833 [(set VR256:$dst, (IntId (bitconvert
5834 (v4i32 (scalar_to_vector (loadi32 addr:$src))))))]>,
5835 OpSize;
5836 }
5838 let Predicates = [HasAVX] in {
5839 defm VPMOVSXBQ : SS41I_binop_rm_int2<0x22, "vpmovsxbq", int_x86_sse41_pmovsxbq>,
5840 VEX;
5841 defm VPMOVZXBQ : SS41I_binop_rm_int2<0x32, "vpmovzxbq", int_x86_sse41_pmovzxbq>,
5842 VEX;
5843 }
5844 let Predicates = [HasAVX2] in {
5845 defm VPMOVSXBQ : SS41I_binop_rm_int4_y<0x22, "vpmovsxbq",
5846 int_x86_avx2_pmovsxbq>, VEX, VEX_L;
5847 defm VPMOVZXBQ : SS41I_binop_rm_int4_y<0x32, "vpmovzxbq",
5848 int_x86_avx2_pmovzxbq>, VEX, VEX_L;
5849 }
5850 defm PMOVSXBQ : SS41I_binop_rm_int2<0x22, "pmovsxbq", int_x86_sse41_pmovsxbq>;
5851 defm PMOVZXBQ : SS41I_binop_rm_int2<0x32, "pmovzxbq", int_x86_sse41_pmovzxbq>;
5853 let Predicates = [HasAVX] in {
5854 // Common patterns involving scalar load
5855 def : Pat<(int_x86_sse41_pmovsxbq
5856 (bitconvert (v4i32 (X86vzmovl
5857 (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
5858 (VPMOVSXBQrm addr:$src)>;
5860 def : Pat<(int_x86_sse41_pmovzxbq
5861 (bitconvert (v4i32 (X86vzmovl
5862 (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
5863 (VPMOVZXBQrm addr:$src)>;
5864 }
5866 let Predicates = [UseSSE41] in {
5867 // Common patterns involving scalar load
5868 def : Pat<(int_x86_sse41_pmovsxbq
5869 (bitconvert (v4i32 (X86vzmovl
5870 (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
5871 (PMOVSXBQrm addr:$src)>;
5873 def : Pat<(int_x86_sse41_pmovzxbq
5874 (bitconvert (v4i32 (X86vzmovl
5875 (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
5876 (PMOVZXBQrm addr:$src)>;
5877 }
5879 let Predicates = [HasAVX2] in {
5880 def : Pat<(v16i16 (X86vzext (v16i8 VR128:$src))), (VPMOVZXBWYrr VR128:$src)>;
5881 def : Pat<(v8i32 (X86vzext (v16i8 VR128:$src))), (VPMOVZXBDYrr VR128:$src)>;
5882 def : Pat<(v4i64 (X86vzext (v16i8 VR128:$src))), (VPMOVZXBQYrr VR128:$src)>;
5884 def : Pat<(v8i32 (X86vzext (v8i16 VR128:$src))), (VPMOVZXWDYrr VR128:$src)>;
5885 def : Pat<(v4i64 (X86vzext (v8i16 VR128:$src))), (VPMOVZXWQYrr VR128:$src)>;
5887 def : Pat<(v4i64 (X86vzext (v4i32 VR128:$src))), (VPMOVZXDQYrr VR128:$src)>;
5889 def : Pat<(v16i16 (X86vzext (v32i8 VR256:$src))),
5890 (VPMOVZXBWYrr (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
5891 def : Pat<(v8i32 (X86vzext (v32i8 VR256:$src))),
5892 (VPMOVZXBDYrr (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
5893 def : Pat<(v4i64 (X86vzext (v32i8 VR256:$src))),
5894 (VPMOVZXBQYrr (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
5896 def : Pat<(v8i32 (X86vzext (v16i16 VR256:$src))),
5897 (VPMOVZXWDYrr (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
5898 def : Pat<(v4i64 (X86vzext (v16i16 VR256:$src))),
5899 (VPMOVZXWQYrr (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
5901 def : Pat<(v4i64 (X86vzext (v8i32 VR256:$src))),
5902 (VPMOVZXDQYrr (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
5903 }
5905 let Predicates = [HasAVX] in {
5906 def : Pat<(v8i16 (X86vzext (v16i8 VR128:$src))), (VPMOVZXBWrr VR128:$src)>;
5907 def : Pat<(v4i32 (X86vzext (v16i8 VR128:$src))), (VPMOVZXBDrr VR128:$src)>;
5908 def : Pat<(v2i64 (X86vzext (v16i8 VR128:$src))), (VPMOVZXBQrr VR128:$src)>;
5910 def : Pat<(v4i32 (X86vzext (v8i16 VR128:$src))), (VPMOVZXWDrr VR128:$src)>;
5911 def : Pat<(v2i64 (X86vzext (v8i16 VR128:$src))), (VPMOVZXWQrr VR128:$src)>;
5913 def : Pat<(v2i64 (X86vzext (v4i32 VR128:$src))), (VPMOVZXDQrr VR128:$src)>;
5915 def : Pat<(v8i16 (X86vzext (v16i8 (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))),
5916 (VPMOVZXBWrm addr:$src)>;
5917 def : Pat<(v8i16 (X86vzext (v16i8 (bitconvert (v2f64 (scalar_to_vector (loadf64 addr:$src))))))),
5918 (VPMOVZXBWrm addr:$src)>;
5919 def : Pat<(v4i32 (X86vzext (v16i8 (bitconvert (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
5920 (VPMOVZXBDrm addr:$src)>;
5921 def : Pat<(v2i64 (X86vzext (v16i8 (bitconvert (v4i32 (scalar_to_vector (loadi16_anyext addr:$src))))))),
5922 (VPMOVZXBQrm addr:$src)>;
5924 def : Pat<(v4i32 (X86vzext (v8i16 (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))),
5925 (VPMOVZXWDrm addr:$src)>;
5926 def : Pat<(v4i32 (X86vzext (v8i16 (bitconvert (v2f64 (scalar_to_vector (loadf64 addr:$src))))))),
5927 (VPMOVZXWDrm addr:$src)>;
5928 def : Pat<(v2i64 (X86vzext (v8i16 (bitconvert (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
5929 (VPMOVZXWQrm addr:$src)>;
5931 def : Pat<(v2i64 (X86vzext (v4i32 (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))),
5932 (VPMOVZXDQrm addr:$src)>;
5933 def : Pat<(v2i64 (X86vzext (v4i32 (bitconvert (v2f64 (scalar_to_vector (loadf64 addr:$src))))))),
5934 (VPMOVZXDQrm addr:$src)>;
5935 def : Pat<(v2i64 (X86vzext (v4i32 (bitconvert (v2i64 (X86vzload addr:$src)))))),
5936 (VPMOVZXDQrm addr:$src)>;
5937 }
5939 let Predicates = [UseSSE41] in {
5940 def : Pat<(v8i16 (X86vzext (v16i8 VR128:$src))), (PMOVZXBWrr VR128:$src)>;
5941 def : Pat<(v4i32 (X86vzext (v16i8 VR128:$src))), (PMOVZXBDrr VR128:$src)>;
5942 def : Pat<(v2i64 (X86vzext (v16i8 VR128:$src))), (PMOVZXBQrr VR128:$src)>;
5944 def : Pat<(v4i32 (X86vzext (v8i16 VR128:$src))), (PMOVZXWDrr VR128:$src)>;
5945 def : Pat<(v2i64 (X86vzext (v8i16 VR128:$src))), (PMOVZXWQrr VR128:$src)>;
5947 def : Pat<(v2i64 (X86vzext (v4i32 VR128:$src))), (PMOVZXDQrr VR128:$src)>;
5949 def : Pat<(v8i16 (X86vzext (v16i8 (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))),
5950 (PMOVZXBWrm addr:$src)>;
5951 def : Pat<(v8i16 (X86vzext (v16i8 (bitconvert (v2f64 (scalar_to_vector (loadf64 addr:$src))))))),
5952 (PMOVZXBWrm addr:$src)>;
5953 def : Pat<(v4i32 (X86vzext (v16i8 (bitconvert (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
5954 (PMOVZXBDrm addr:$src)>;
5955 def : Pat<(v2i64 (X86vzext (v16i8 (bitconvert (v4i32 (scalar_to_vector (loadi16_anyext addr:$src))))))),
5956 (PMOVZXBQrm addr:$src)>;
5958 def : Pat<(v4i32 (X86vzext (v8i16 (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))),
5959 (PMOVZXWDrm addr:$src)>;
5960 def : Pat<(v4i32 (X86vzext (v8i16 (bitconvert (v2f64 (scalar_to_vector (loadf64 addr:$src))))))),
5961 (PMOVZXWDrm addr:$src)>;
5962 def : Pat<(v2i64 (X86vzext (v8i16 (bitconvert (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
5963 (PMOVZXWQrm addr:$src)>;
5965 def : Pat<(v2i64 (X86vzext (v4i32 (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))),
5966 (PMOVZXDQrm addr:$src)>;
5967 def : Pat<(v2i64 (X86vzext (v4i32 (bitconvert (v2f64 (scalar_to_vector (loadf64 addr:$src))))))),
5968 (PMOVZXDQrm addr:$src)>;
5969 def : Pat<(v2i64 (X86vzext (v4i32 (bitconvert (v2i64 (X86vzload addr:$src)))))),
5970 (PMOVZXDQrm addr:$src)>;
5971 }
5973 //===----------------------------------------------------------------------===//
5974 // SSE4.1 - Extract Instructions
5975 //===----------------------------------------------------------------------===//
5977 /// SS41I_extract8 - SSE 4.1 extract 8 bits to 32-bit reg or 8-bit mem
5978 multiclass SS41I_extract8<bits<8> opc, string OpcodeStr> {
5979 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
5980 (ins VR128:$src1, i32i8imm:$src2),
5981 !strconcat(OpcodeStr,
5982 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5983 [(set GR32:$dst, (X86pextrb (v16i8 VR128:$src1), imm:$src2))]>,
5984 OpSize;
5985 let neverHasSideEffects = 1, mayStore = 1 in
5986 def mr : SS4AIi8<opc, MRMDestMem, (outs),
5987 (ins i8mem:$dst, VR128:$src1, i32i8imm:$src2),
5988 !strconcat(OpcodeStr,
5989 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5990 []>, OpSize;
5992 // There's an AssertZext in the way of writing the store pattern
5993 // (store (i8 (trunc (X86pextrb (v16i8 VR128:$src1), imm:$src2))), addr:$dst)
5994 }
5996 let Predicates = [HasAVX] in {
5997 defm VPEXTRB : SS41I_extract8<0x14, "vpextrb">, VEX;
5998 def VPEXTRBrr64 : SS4AIi8<0x14, MRMDestReg, (outs GR64:$dst),
5999 (ins VR128:$src1, i32i8imm:$src2),
6000 "vpextrb\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>, OpSize, VEX;
6001 }
6003 defm PEXTRB : SS41I_extract8<0x14, "pextrb">;
6006 /// SS41I_extract16 - SSE 4.1 extract 16 bits to memory destination
6007 multiclass SS41I_extract16<bits<8> opc, string OpcodeStr> {
6008 let neverHasSideEffects = 1, mayStore = 1 in
6009 def mr : SS4AIi8<opc, MRMDestMem, (outs),
6010 (ins i16mem:$dst, VR128:$src1, i32i8imm:$src2),
6011 !strconcat(OpcodeStr,
6012 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6013 []>, OpSize;
6015 // There's an AssertZext in the way of writing the store pattern
6016 // (store (i16 (trunc (X86pextrw (v16i8 VR128:$src1), imm:$src2))), addr:$dst)
6017 }
6019 let Predicates = [HasAVX] in
6020 defm VPEXTRW : SS41I_extract16<0x15, "vpextrw">, VEX;
6022 defm PEXTRW : SS41I_extract16<0x15, "pextrw">;
6025 /// SS41I_extract32 - SSE 4.1 extract 32 bits to int reg or memory destination
6026 multiclass SS41I_extract32<bits<8> opc, string OpcodeStr> {
6027 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
6028 (ins VR128:$src1, i32i8imm:$src2),
6029 !strconcat(OpcodeStr,
6030 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6031 [(set GR32:$dst,
6032 (extractelt (v4i32 VR128:$src1), imm:$src2))]>, OpSize;
6033 def mr : SS4AIi8<opc, MRMDestMem, (outs),
6034 (ins i32mem:$dst, VR128:$src1, i32i8imm:$src2),
6035 !strconcat(OpcodeStr,
6036 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6037 [(store (extractelt (v4i32 VR128:$src1), imm:$src2),
6038 addr:$dst)]>, OpSize;
6039 }
6041 let Predicates = [HasAVX] in
6042 defm VPEXTRD : SS41I_extract32<0x16, "vpextrd">, VEX;
6044 defm PEXTRD : SS41I_extract32<0x16, "pextrd">;
6046 /// SS41I_extract64 - SSE 4.1 extract 64 bits to int reg or memory destination
6047 multiclass SS41I_extract64<bits<8> opc, string OpcodeStr> {
6048 def rr : SS4AIi8<opc, MRMDestReg, (outs GR64:$dst),
6049 (ins VR128:$src1, i32i8imm:$src2),
6050 !strconcat(OpcodeStr,
6051 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6052 [(set GR64:$dst,
6053 (extractelt (v2i64 VR128:$src1), imm:$src2))]>, OpSize, REX_W;
6054 def mr : SS4AIi8<opc, MRMDestMem, (outs),
6055 (ins i64mem:$dst, VR128:$src1, i32i8imm:$src2),
6056 !strconcat(OpcodeStr,
6057 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6058 [(store (extractelt (v2i64 VR128:$src1), imm:$src2),
6059 addr:$dst)]>, OpSize, REX_W;
6060 }
6062 let Predicates = [HasAVX] in
6063 defm VPEXTRQ : SS41I_extract64<0x16, "vpextrq">, VEX, VEX_W;
6065 defm PEXTRQ : SS41I_extract64<0x16, "pextrq">;
6067 /// SS41I_extractf32 - SSE 4.1 extract a 32-bit fp value to int reg or memory
6068 /// destination
6069 multiclass SS41I_extractf32<bits<8> opc, string OpcodeStr> {
6070 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
6071 (ins VR128:$src1, i32i8imm:$src2),
6072 !strconcat(OpcodeStr,
6073 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6074 [(set GR32:$dst,
6075 (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2))]>,
6076 OpSize;
6077 def mr : SS4AIi8<opc, MRMDestMem, (outs),
6078 (ins f32mem:$dst, VR128:$src1, i32i8imm:$src2),
6079 !strconcat(OpcodeStr,
6080 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6081 [(store (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2),
6082 addr:$dst)]>, OpSize;
6083 }
6085 let ExeDomain = SSEPackedSingle in {
6086 let Predicates = [HasAVX] in {
6087 defm VEXTRACTPS : SS41I_extractf32<0x17, "vextractps">, VEX;
6088 def VEXTRACTPSrr64 : SS4AIi8<0x17, MRMDestReg, (outs GR64:$dst),
6089 (ins VR128:$src1, i32i8imm:$src2),
6090 "vextractps \t{$src2, $src1, $dst|$dst, $src1, $src2}",
6091 []>, OpSize, VEX;
6092 }
6093 defm EXTRACTPS : SS41I_extractf32<0x17, "extractps">;
6094 }
6096 // Also match an EXTRACTPS store when the store is done as f32 instead of i32.
6097 def : Pat<(store (f32 (bitconvert (extractelt (bc_v4i32 (v4f32 VR128:$src1)),
6098 imm:$src2))),
6099 addr:$dst),
6100 (VEXTRACTPSmr addr:$dst, VR128:$src1, imm:$src2)>,
6101 Requires<[HasAVX]>;
6102 def : Pat<(store (f32 (bitconvert (extractelt (bc_v4i32 (v4f32 VR128:$src1)),
6103 imm:$src2))),
6104 addr:$dst),
6105 (EXTRACTPSmr addr:$dst, VR128:$src1, imm:$src2)>,
6106 Requires<[UseSSE41]>;
6108 //===----------------------------------------------------------------------===//
6109 // SSE4.1 - Insert Instructions
6110 //===----------------------------------------------------------------------===//
6112 multiclass SS41I_insert8<bits<8> opc, string asm, bit Is2Addr = 1> {
6113 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
6114 (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
6115 !if(Is2Addr,
6116 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6117 !strconcat(asm,
6118 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6119 [(set VR128:$dst,
6120 (X86pinsrb VR128:$src1, GR32:$src2, imm:$src3))]>, OpSize;
6121 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
6122 (ins VR128:$src1, i8mem:$src2, i32i8imm:$src3),
6123 !if(Is2Addr,
6124 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6125 !strconcat(asm,
6126 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6127 [(set VR128:$dst,
6128 (X86pinsrb VR128:$src1, (extloadi8 addr:$src2),
6129 imm:$src3))]>, OpSize;
6130 }
6132 let Predicates = [HasAVX] in
6133 defm VPINSRB : SS41I_insert8<0x20, "vpinsrb", 0>, VEX_4V;
6134 let Constraints = "$src1 = $dst" in
6135 defm PINSRB : SS41I_insert8<0x20, "pinsrb">;
6137 multiclass SS41I_insert32<bits<8> opc, string asm, bit Is2Addr = 1> {
6138 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
6139 (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
6140 !if(Is2Addr,
6141 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6142 !strconcat(asm,
6143 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6144 [(set VR128:$dst,
6145 (v4i32 (insertelt VR128:$src1, GR32:$src2, imm:$src3)))]>,
6146 OpSize;
6147 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
6148 (ins VR128:$src1, i32mem:$src2, i32i8imm:$src3),
6149 !if(Is2Addr,
6150 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6151 !strconcat(asm,
6152 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6153 [(set VR128:$dst,
6154 (v4i32 (insertelt VR128:$src1, (loadi32 addr:$src2),
6155 imm:$src3)))]>, OpSize;
6156 }
6158 let Predicates = [HasAVX] in
6159 defm VPINSRD : SS41I_insert32<0x22, "vpinsrd", 0>, VEX_4V;
6160 let Constraints = "$src1 = $dst" in
6161 defm PINSRD : SS41I_insert32<0x22, "pinsrd">;
6163 multiclass SS41I_insert64<bits<8> opc, string asm, bit Is2Addr = 1> {
6164 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
6165 (ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
6166 !if(Is2Addr,
6167 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6168 !strconcat(asm,
6169 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6170 [(set VR128:$dst,
6171 (v2i64 (insertelt VR128:$src1, GR64:$src2, imm:$src3)))]>,
6172 OpSize, REX_W;
6173 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
6174 (ins VR128:$src1, i64mem:$src2, i32i8imm:$src3),
6175 !if(Is2Addr,
6176 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6177 !strconcat(asm,
6178 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6179 [(set VR128:$dst,
6180 (v2i64 (insertelt VR128:$src1, (loadi64 addr:$src2),
6181 imm:$src3)))]>, OpSize, REX_W;
6182 }
6184 let Predicates = [HasAVX] in
6185 defm VPINSRQ : SS41I_insert64<0x22, "vpinsrq", 0>, VEX_4V, VEX_W;
6186 let Constraints = "$src1 = $dst" in
6187 defm PINSRQ : SS41I_insert64<0x22, "pinsrq">, REX_W;
6189 // insertps has a few different modes; the first two below are optimized
6190 // inserts that won't zero arbitrary elements in the destination vector.
6191 // The next one matches the intrinsic and can zero arbitrary elements in
6192 // the target vector.
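// A worked example of the encoding (per the Intel SDM): the insertps
// immediate is laid out as [7:6] = COUNT_S (source element), [5:4] = COUNT_D
// (destination element), [3:0] = ZMASK. So 0x1D (00 01 1101) copies element 0
// of the source into element 1 of the destination and zeroes destination
// elements 0, 2 and 3.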
6193 multiclass SS41I_insertf32<bits<8> opc, string asm, bit Is2Addr = 1> {
6194 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
6195 (ins VR128:$src1, VR128:$src2, u32u8imm:$src3),
6196 !if(Is2Addr,
6197 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6198 !strconcat(asm,
6199 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6200 [(set VR128:$dst,
6201 (X86insrtps VR128:$src1, VR128:$src2, imm:$src3))]>,
6202 OpSize;
6203 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
6204 (ins VR128:$src1, f32mem:$src2, u32u8imm:$src3),
6205 !if(Is2Addr,
6206 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6207 !strconcat(asm,
6208 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6209 [(set VR128:$dst,
6210 (X86insrtps VR128:$src1,
6211 (v4f32 (scalar_to_vector (loadf32 addr:$src2))),
6212 imm:$src3))]>, OpSize;
6213 }
6215 let ExeDomain = SSEPackedSingle in {
6216 let Predicates = [HasAVX] in
6217 defm VINSERTPS : SS41I_insertf32<0x21, "vinsertps", 0>, VEX_4V;
6218 let Constraints = "$src1 = $dst" in
6219 defm INSERTPS : SS41I_insertf32<0x21, "insertps">;
6220 }
6222 //===----------------------------------------------------------------------===//
6223 // SSE4.1 - Round Instructions
6224 //===----------------------------------------------------------------------===//
6226 multiclass sse41_fp_unop_rm<bits<8> opcps, bits<8> opcpd, string OpcodeStr,
6227 X86MemOperand x86memop, RegisterClass RC,
6228 PatFrag mem_frag32, PatFrag mem_frag64,
6229 Intrinsic V4F32Int, Intrinsic V2F64Int> {
6230 let ExeDomain = SSEPackedSingle in {
6232 // Vector intrinsic operation, reg
6233 def PSr : SS4AIi8<opcps, MRMSrcReg,
6234 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
6235 !strconcat(OpcodeStr,
6236 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6237 [(set RC:$dst, (V4F32Int RC:$src1, imm:$src2))]>,
6238 OpSize;
6240 // Vector intrinsic operation, mem
6241 def PSm : SS4AIi8<opcps, MRMSrcMem,
6242 (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
6243 !strconcat(OpcodeStr,
6244 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6245 [(set RC:$dst,
6246 (V4F32Int (mem_frag32 addr:$src1), imm:$src2))]>,
6247 OpSize;
6248 } // ExeDomain = SSEPackedSingle
6250 let ExeDomain = SSEPackedDouble in {
6251 // Vector intrinsic operation, reg
6252 def PDr : SS4AIi8<opcpd, MRMSrcReg,
6253 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
6254 !strconcat(OpcodeStr,
6255 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6256 [(set RC:$dst, (V2F64Int RC:$src1, imm:$src2))]>,
6257 OpSize;
6259 // Vector intrinsic operation, mem
6260 def PDm : SS4AIi8<opcpd, MRMSrcMem,
6261 (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
6262 !strconcat(OpcodeStr,
6263 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6264 [(set RC:$dst,
6265 (V2F64Int (mem_frag64 addr:$src1), imm:$src2))]>,
6266 OpSize;
6267 } // ExeDomain = SSEPackedDouble
6268 }
6270 multiclass sse41_fp_binop_rm<bits<8> opcss, bits<8> opcsd,
6271 string OpcodeStr,
6272 Intrinsic F32Int,
6273 Intrinsic F64Int, bit Is2Addr = 1> {
6274 let ExeDomain = GenericDomain in {
6276 def SSr : SS4AIi8<opcss, MRMSrcReg,
6277 (outs FR32:$dst), (ins FR32:$src1, FR32:$src2, i32i8imm:$src3),
6278 !if(Is2Addr,
6279 !strconcat(OpcodeStr,
6280 "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6281 !strconcat(OpcodeStr,
6282 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6283 []>, OpSize;
6285 // Intrinsic operation, reg.
6286 def SSr_Int : SS4AIi8<opcss, MRMSrcReg,
6287 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
6288 !if(Is2Addr,
6289 !strconcat(OpcodeStr,
6290 "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6291 !strconcat(OpcodeStr,
6292 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6293 [(set VR128:$dst, (F32Int VR128:$src1, VR128:$src2, imm:$src3))]>,
6294 OpSize;
6296 // Intrinsic operation, mem.
6297 def SSm : SS4AIi8<opcss, MRMSrcMem,
6298 (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2, i32i8imm:$src3),
6299 !if(Is2Addr,
6300 !strconcat(OpcodeStr,
6301 "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6302 !strconcat(OpcodeStr,
6303 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6304 [(set VR128:$dst,
6305 (F32Int VR128:$src1, sse_load_f32:$src2, imm:$src3))]>,
6306 OpSize;
6309 def SDr : SS4AIi8<opcsd, MRMSrcReg,
6310 (outs FR64:$dst), (ins FR64:$src1, FR64:$src2, i32i8imm:$src3),
6311 !if(Is2Addr,
6312 !strconcat(OpcodeStr,
6313 "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6314 !strconcat(OpcodeStr,
6315 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6316 []>, OpSize;
6318 // Intrinsic operation, reg.
6319 def SDr_Int : SS4AIi8<opcsd, MRMSrcReg,
6320 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
6321 !if(Is2Addr,
6322 !strconcat(OpcodeStr,
6323 "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6324 !strconcat(OpcodeStr,
6325 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6326 [(set VR128:$dst, (F64Int VR128:$src1, VR128:$src2, imm:$src3))]>,
6327 OpSize;
6329 // Intrinsic operation, mem.
6330 def SDm : SS4AIi8<opcsd, MRMSrcMem,
6331 (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2, i32i8imm:$src3),
6332 !if(Is2Addr,
6333 !strconcat(OpcodeStr,
6334 "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6335 !strconcat(OpcodeStr,
6336 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6337 [(set VR128:$dst,
6338 (F64Int VR128:$src1, sse_load_f64:$src2, imm:$src3))]>,
6339 OpSize;
6340 } // ExeDomain = GenericDomain
6341 }
6343 // FP round - roundss, roundps, roundsd, roundpd
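// Note on the immediates used below (per the Intel SDM): imm[1:0] selects
// the rounding mode (00 nearest, 01 down, 10 up, 11 truncate), imm[2] set
// means "use MXCSR.RC instead", and imm[3] suppresses the precision
// (inexact) exception. Hence 0x1 = floor, 0x2 = ceil, 0x3 = trunc,
// 0x4 = rint (current mode, may raise inexact), and 0xC = nearbyint
// (current mode, inexact suppressed).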
6344 let Predicates = [HasAVX] in {
6346 defm VROUND : sse41_fp_unop_rm<0x08, 0x09, "vround", f128mem, VR128,
6347 memopv4f32, memopv2f64,
6348 int_x86_sse41_round_ps,
6349 int_x86_sse41_round_pd>, VEX;
6350 defm VROUNDY : sse41_fp_unop_rm<0x08, 0x09, "vround", f256mem, VR256,
6351 memopv8f32, memopv4f64,
6352 int_x86_avx_round_ps_256,
6353 int_x86_avx_round_pd_256>, VEX, VEX_L;
6354 defm VROUND : sse41_fp_binop_rm<0x0A, 0x0B, "vround",
6355 int_x86_sse41_round_ss,
6356 int_x86_sse41_round_sd, 0>, VEX_4V, VEX_LIG;
6358 def : Pat<(ffloor FR32:$src),
6359 (VROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x1))>;
6360 def : Pat<(f64 (ffloor FR64:$src)),
6361 (VROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x1))>;
6362 def : Pat<(f32 (fnearbyint FR32:$src)),
6363 (VROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0xC))>;
6364 def : Pat<(f64 (fnearbyint FR64:$src)),
6365 (VROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0xC))>;
6366 def : Pat<(f32 (fceil FR32:$src)),
6367 (VROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x2))>;
6368 def : Pat<(f64 (fceil FR64:$src)),
6369 (VROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x2))>;
6370 def : Pat<(f32 (frint FR32:$src)),
6371 (VROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x4))>;
6372 def : Pat<(f64 (frint FR64:$src)),
6373 (VROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x4))>;
6374 def : Pat<(f32 (ftrunc FR32:$src)),
6375 (VROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x3))>;
6376 def : Pat<(f64 (ftrunc FR64:$src)),
6377 (VROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x3))>;
6379 def : Pat<(v4f32 (ffloor VR128:$src)),
6380 (VROUNDPSr VR128:$src, (i32 0x1))>;
6381 def : Pat<(v2f64 (ffloor VR128:$src)),
6382 (VROUNDPDr VR128:$src, (i32 0x1))>;
6383 def : Pat<(v8f32 (ffloor VR256:$src)),
6384 (VROUNDYPSr VR256:$src, (i32 0x1))>;
6385 def : Pat<(v4f64 (ffloor VR256:$src)),
6386 (VROUNDYPDr VR256:$src, (i32 0x1))>;
6387 }
6389 defm ROUND : sse41_fp_unop_rm<0x08, 0x09, "round", f128mem, VR128,
6390 memopv4f32, memopv2f64,
6391 int_x86_sse41_round_ps, int_x86_sse41_round_pd>;
6392 let Constraints = "$src1 = $dst" in
6393 defm ROUND : sse41_fp_binop_rm<0x0A, 0x0B, "round",
6394 int_x86_sse41_round_ss, int_x86_sse41_round_sd>;
6396 let Predicates = [UseSSE41] in {
6397 def : Pat<(ffloor FR32:$src),
6398 (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x1))>;
6399 def : Pat<(f64 (ffloor FR64:$src)),
6400 (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x1))>;
6401 def : Pat<(f32 (fnearbyint FR32:$src)),
6402 (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0xC))>;
6403 def : Pat<(f64 (fnearbyint FR64:$src)),
6404 (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0xC))>;
6405 def : Pat<(f32 (fceil FR32:$src)),
6406 (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x2))>;
6407 def : Pat<(f64 (fceil FR64:$src)),
6408 (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x2))>;
6409 def : Pat<(f32 (frint FR32:$src)),
6410 (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x4))>;
6411 def : Pat<(f64 (frint FR64:$src)),
6412 (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x4))>;
6413 def : Pat<(f32 (ftrunc FR32:$src)),
6414 (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x3))>;
6415 def : Pat<(f64 (ftrunc FR64:$src)),
6416 (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x3))>;
6418 def : Pat<(v4f32 (ffloor VR128:$src)),
6419 (ROUNDPSr VR128:$src, (i32 0x1))>;
6420 def : Pat<(v2f64 (ffloor VR128:$src)),
6421 (ROUNDPDr VR128:$src, (i32 0x1))>;
6422 }
6424 //===----------------------------------------------------------------------===//
6425 // SSE4.1 - Packed Bit Test
6426 //===----------------------------------------------------------------------===//
6428 // ptest: we lower to this in X86ISelLowering, primarily from the Intel
6429 // intrinsic that corresponds to it.
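// For reference (per the Intel SDM): ptest sets ZF when src2 AND src1 is
// all zeroes, and CF when src2 AND NOT src1 is all zeroes; EFLAGS is
// therefore the only result modeled here.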
6430 let Defs = [EFLAGS], Predicates = [HasAVX] in {
6431 def VPTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
6432 "vptest\t{$src2, $src1|$src1, $src2}",
6433 [(set EFLAGS, (X86ptest VR128:$src1, (v2i64 VR128:$src2)))]>,
6434 OpSize, VEX;
6435 def VPTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
6436 "vptest\t{$src2, $src1|$src1, $src2}",
6437 [(set EFLAGS,(X86ptest VR128:$src1, (memopv2i64 addr:$src2)))]>,
6438 OpSize, VEX;
6440 def VPTESTYrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR256:$src1, VR256:$src2),
6441 "vptest\t{$src2, $src1|$src1, $src2}",
6442 [(set EFLAGS, (X86ptest VR256:$src1, (v4i64 VR256:$src2)))]>,
6443 OpSize, VEX, VEX_L;
6444 def VPTESTYrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR256:$src1, i256mem:$src2),
6445 "vptest\t{$src2, $src1|$src1, $src2}",
6446 [(set EFLAGS,(X86ptest VR256:$src1, (memopv4i64 addr:$src2)))]>,
6447 OpSize, VEX, VEX_L;
6448 }
6450 let Defs = [EFLAGS] in {
6451 def PTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
6452 "ptest\t{$src2, $src1|$src1, $src2}",
6453 [(set EFLAGS, (X86ptest VR128:$src1, (v2i64 VR128:$src2)))]>,
6454 OpSize;
6455 def PTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
6456 "ptest\t{$src2, $src1|$src1, $src2}",
6457 [(set EFLAGS, (X86ptest VR128:$src1, (memopv2i64 addr:$src2)))]>,
6458 OpSize;
6459 }
6461 // The bit test instructions below are AVX only
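// vtestps/vtestpd perform the same AND/ANDN tests, but only on the sign bit
// of each single/double-precision element (per the AVX spec), again
// producing only EFLAGS.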
6462 multiclass avx_bittest<bits<8> opc, string OpcodeStr, RegisterClass RC,
6463 X86MemOperand x86memop, PatFrag mem_frag, ValueType vt> {
6464 def rr : SS48I<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
6465 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
6466 [(set EFLAGS, (X86testp RC:$src1, (vt RC:$src2)))]>, OpSize, VEX;
6467 def rm : SS48I<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
6468 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
6469 [(set EFLAGS, (X86testp RC:$src1, (mem_frag addr:$src2)))]>,
6470 OpSize, VEX;
6471 }
6473 let Defs = [EFLAGS], Predicates = [HasAVX] in {
6474 let ExeDomain = SSEPackedSingle in {
6475 defm VTESTPS : avx_bittest<0x0E, "vtestps", VR128, f128mem, memopv4f32, v4f32>;
6476 defm VTESTPSY : avx_bittest<0x0E, "vtestps", VR256, f256mem, memopv8f32, v8f32>,
6477 VEX_L;
6478 }
6479 let ExeDomain = SSEPackedDouble in {
6480 defm VTESTPD : avx_bittest<0x0F, "vtestpd", VR128, f128mem, memopv2f64, v2f64>;
6481 defm VTESTPDY : avx_bittest<0x0F, "vtestpd", VR256, f256mem, memopv4f64, v4f64>,
6482 VEX_L;
6483 }
6484 }
6486 //===----------------------------------------------------------------------===//
6487 // SSE4.1 - Misc Instructions
6488 //===----------------------------------------------------------------------===//
6490 let Defs = [EFLAGS], Predicates = [HasPOPCNT] in {
6491 def POPCNT16rr : I<0xB8, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
6492 "popcnt{w}\t{$src, $dst|$dst, $src}",
6493 [(set GR16:$dst, (ctpop GR16:$src)), (implicit EFLAGS)]>,
6494 OpSize, XS;
6495 def POPCNT16rm : I<0xB8, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
6496 "popcnt{w}\t{$src, $dst|$dst, $src}",
6497 [(set GR16:$dst, (ctpop (loadi16 addr:$src))),
6498 (implicit EFLAGS)]>, OpSize, XS;
6500 def POPCNT32rr : I<0xB8, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
6501 "popcnt{l}\t{$src, $dst|$dst, $src}",
6502 [(set GR32:$dst, (ctpop GR32:$src)), (implicit EFLAGS)]>,
6503 XS;
6504 def POPCNT32rm : I<0xB8, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
6505 "popcnt{l}\t{$src, $dst|$dst, $src}",
6506 [(set GR32:$dst, (ctpop (loadi32 addr:$src))),
6507 (implicit EFLAGS)]>, XS;
6509 def POPCNT64rr : RI<0xB8, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
6510 "popcnt{q}\t{$src, $dst|$dst, $src}",
6511 [(set GR64:$dst, (ctpop GR64:$src)), (implicit EFLAGS)]>,
6512 XS;
6513 def POPCNT64rm : RI<0xB8, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
6514 "popcnt{q}\t{$src, $dst|$dst, $src}",
6515 [(set GR64:$dst, (ctpop (loadi64 addr:$src))),
6516 (implicit EFLAGS)]>, XS;
6517 }
6521 // SS41I_unop_rm_int_v16 - SSE 4.1 unary operator whose type is v8i16.
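// phminposuw, the only user below, returns the smallest of the eight
// unsigned word elements of the source in result[15:0] and its index in
// result[18:16], zeroing the remaining bits (per the Intel SDM).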
6522 multiclass SS41I_unop_rm_int_v16<bits<8> opc, string OpcodeStr,
6523 Intrinsic IntId128> {
6524 def rr128 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
6525 (ins VR128:$src),
6526 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
6527 [(set VR128:$dst, (IntId128 VR128:$src))]>, OpSize;
6528 def rm128 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
6529 (ins i128mem:$src),
6530 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
6531 [(set VR128:$dst,
6532 (IntId128
6533 (bitconvert (memopv2i64 addr:$src))))]>, OpSize;
6534 }
6536 let Predicates = [HasAVX] in
6537 defm VPHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "vphminposuw",
6538 int_x86_sse41_phminposuw>, VEX;
6539 defm PHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "phminposuw",
6540 int_x86_sse41_phminposuw>;
6542 /// SS41I_binop_rm_int - Simple SSE 4.1 binary operator
6543 multiclass SS41I_binop_rm_int<bits<8> opc, string OpcodeStr,
6544 Intrinsic IntId128, bit Is2Addr = 1> {
6545 let isCommutable = 1 in
6546 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
6547 (ins VR128:$src1, VR128:$src2),
6548 !if(Is2Addr,
6549 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
6550 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
6551 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>, OpSize;
6552 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
6553 (ins VR128:$src1, i128mem:$src2),
6554 !if(Is2Addr,
6555 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
6556 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
6557 [(set VR128:$dst,
6558 (IntId128 VR128:$src1,
6559 (bitconvert (memopv2i64 addr:$src2))))]>, OpSize;
6560 }
6562 /// SS41I_binop_rm_int_y - Simple SSE 4.1 binary operator
6563 multiclass SS41I_binop_rm_int_y<bits<8> opc, string OpcodeStr,
6564 Intrinsic IntId256> {
6565 let isCommutable = 1 in
6566 def Yrr : SS48I<opc, MRMSrcReg, (outs VR256:$dst),
6567 (ins VR256:$src1, VR256:$src2),
6568 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6569 [(set VR256:$dst, (IntId256 VR256:$src1, VR256:$src2))]>, OpSize;
6570 def Yrm : SS48I<opc, MRMSrcMem, (outs VR256:$dst),
6571 (ins VR256:$src1, i256mem:$src2),
6572 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6573 [(set VR256:$dst,
6574 (IntId256 VR256:$src1,
6575 (bitconvert (memopv4i64 addr:$src2))))]>, OpSize;
6576 }
6578 let Predicates = [HasAVX] in {
6579 let isCommutable = 0 in
6580 defm VPACKUSDW : SS41I_binop_rm_int<0x2B, "vpackusdw", int_x86_sse41_packusdw,
6581 0>, VEX_4V;
6582 defm VPMINSB : SS41I_binop_rm_int<0x38, "vpminsb", int_x86_sse41_pminsb,
6583 0>, VEX_4V;
6584 defm VPMINSD : SS41I_binop_rm_int<0x39, "vpminsd", int_x86_sse41_pminsd,
6585 0>, VEX_4V;
6586 defm VPMINUD : SS41I_binop_rm_int<0x3B, "vpminud", int_x86_sse41_pminud,
6587 0>, VEX_4V;
6588 defm VPMINUW : SS41I_binop_rm_int<0x3A, "vpminuw", int_x86_sse41_pminuw,
6589 0>, VEX_4V;
6590 defm VPMAXSB : SS41I_binop_rm_int<0x3C, "vpmaxsb", int_x86_sse41_pmaxsb,
6591 0>, VEX_4V;
6592 defm VPMAXSD : SS41I_binop_rm_int<0x3D, "vpmaxsd", int_x86_sse41_pmaxsd,
6593 0>, VEX_4V;
6594 defm VPMAXUD : SS41I_binop_rm_int<0x3F, "vpmaxud", int_x86_sse41_pmaxud,
6595 0>, VEX_4V;
6596 defm VPMAXUW : SS41I_binop_rm_int<0x3E, "vpmaxuw", int_x86_sse41_pmaxuw,
6597 0>, VEX_4V;
6598 defm VPMULDQ : SS41I_binop_rm_int<0x28, "vpmuldq", int_x86_sse41_pmuldq,
6599 0>, VEX_4V;
6600 }
6602 let Predicates = [HasAVX2] in {
6603 let isCommutable = 0 in
6604 defm VPACKUSDW : SS41I_binop_rm_int_y<0x2B, "vpackusdw",
6605 int_x86_avx2_packusdw>, VEX_4V, VEX_L;
6606 defm VPMINSB : SS41I_binop_rm_int_y<0x38, "vpminsb",
6607 int_x86_avx2_pmins_b>, VEX_4V, VEX_L;
6608 defm VPMINSD : SS41I_binop_rm_int_y<0x39, "vpminsd",
6609 int_x86_avx2_pmins_d>, VEX_4V, VEX_L;
6610 defm VPMINUD : SS41I_binop_rm_int_y<0x3B, "vpminud",
6611 int_x86_avx2_pminu_d>, VEX_4V, VEX_L;
6612 defm VPMINUW : SS41I_binop_rm_int_y<0x3A, "vpminuw",
6613 int_x86_avx2_pminu_w>, VEX_4V, VEX_L;
6614 defm VPMAXSB : SS41I_binop_rm_int_y<0x3C, "vpmaxsb",
6615 int_x86_avx2_pmaxs_b>, VEX_4V, VEX_L;
6616 defm VPMAXSD : SS41I_binop_rm_int_y<0x3D, "vpmaxsd",
6617 int_x86_avx2_pmaxs_d>, VEX_4V, VEX_L;
6618 defm VPMAXUD : SS41I_binop_rm_int_y<0x3F, "vpmaxud",
6619 int_x86_avx2_pmaxu_d>, VEX_4V, VEX_L;
6620 defm VPMAXUW : SS41I_binop_rm_int_y<0x3E, "vpmaxuw",
6621 int_x86_avx2_pmaxu_w>, VEX_4V, VEX_L;
6622 defm VPMULDQ : SS41I_binop_rm_int_y<0x28, "vpmuldq",
6623 int_x86_avx2_pmul_dq>, VEX_4V, VEX_L;
6624 }
6626 let Constraints = "$src1 = $dst" in {
6627 let isCommutable = 0 in
6628 defm PACKUSDW : SS41I_binop_rm_int<0x2B, "packusdw", int_x86_sse41_packusdw>;
6629 defm PMINSB : SS41I_binop_rm_int<0x38, "pminsb", int_x86_sse41_pminsb>;
6630 defm PMINSD : SS41I_binop_rm_int<0x39, "pminsd", int_x86_sse41_pminsd>;
6631 defm PMINUD : SS41I_binop_rm_int<0x3B, "pminud", int_x86_sse41_pminud>;
6632 defm PMINUW : SS41I_binop_rm_int<0x3A, "pminuw", int_x86_sse41_pminuw>;
6633 defm PMAXSB : SS41I_binop_rm_int<0x3C, "pmaxsb", int_x86_sse41_pmaxsb>;
6634 defm PMAXSD : SS41I_binop_rm_int<0x3D, "pmaxsd", int_x86_sse41_pmaxsd>;
6635 defm PMAXUD : SS41I_binop_rm_int<0x3F, "pmaxud", int_x86_sse41_pmaxud>;
6636 defm PMAXUW : SS41I_binop_rm_int<0x3E, "pmaxuw", int_x86_sse41_pmaxuw>;
6637 defm PMULDQ : SS41I_binop_rm_int<0x28, "pmuldq", int_x86_sse41_pmuldq>;
6638 }
6640 /// SS48I_binop_rm - Simple SSE41 binary operator.
6641 multiclass SS48I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
6642 ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
6643 X86MemOperand x86memop, bit Is2Addr = 1> {
6644 let isCommutable = 1 in
6645 def rr : SS48I<opc, MRMSrcReg, (outs RC:$dst),
6646 (ins RC:$src1, RC:$src2),
6647 !if(Is2Addr,
6648 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
6649 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
6650 [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2)))]>, OpSize;
6651 def rm : SS48I<opc, MRMSrcMem, (outs RC:$dst),
6652 (ins RC:$src1, x86memop:$src2),
6653 !if(Is2Addr,
6654 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
6655 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
6656 [(set RC:$dst,
6657 (OpVT (OpNode RC:$src1,
6658 (bitconvert (memop_frag addr:$src2)))))]>, OpSize;
6659 }
6661 let Predicates = [HasAVX] in {
6662 defm VPMULLD : SS48I_binop_rm<0x40, "vpmulld", mul, v4i32, VR128,
6663 memopv2i64, i128mem, 0>, VEX_4V;
6664 defm VPCMPEQQ : SS48I_binop_rm<0x29, "vpcmpeqq", X86pcmpeq, v2i64, VR128,
6665 memopv2i64, i128mem, 0>, VEX_4V;
6666 }
6667 let Predicates = [HasAVX2] in {
6668 defm VPMULLDY : SS48I_binop_rm<0x40, "vpmulld", mul, v8i32, VR256,
6669 memopv4i64, i256mem, 0>, VEX_4V, VEX_L;
6670 defm VPCMPEQQY : SS48I_binop_rm<0x29, "vpcmpeqq", X86pcmpeq, v4i64, VR256,
6671 memopv4i64, i256mem, 0>, VEX_4V, VEX_L;
6672 }
6674 let Constraints = "$src1 = $dst" in {
6675 defm PMULLD : SS48I_binop_rm<0x40, "pmulld", mul, v4i32, VR128,
6676 memopv2i64, i128mem>;
6677 defm PCMPEQQ : SS48I_binop_rm<0x29, "pcmpeqq", X86pcmpeq, v2i64, VR128,
6678 memopv2i64, i128mem>;
6679 }
6681 /// SS41I_binop_rmi_int - SSE 4.1 binary operator with 8-bit immediate
6682 multiclass SS41I_binop_rmi_int<bits<8> opc, string OpcodeStr,
6683 Intrinsic IntId, RegisterClass RC, PatFrag memop_frag,
6684 X86MemOperand x86memop, bit Is2Addr = 1> {
6685 let isCommutable = 1 in
6686 def rri : SS4AIi8<opc, MRMSrcReg, (outs RC:$dst),
6687 (ins RC:$src1, RC:$src2, u32u8imm:$src3),
6688 !if(Is2Addr,
6689 !strconcat(OpcodeStr,
6690 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6691 !strconcat(OpcodeStr,
6692 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6693 [(set RC:$dst, (IntId RC:$src1, RC:$src2, imm:$src3))]>,
6694 OpSize;
6695 def rmi : SS4AIi8<opc, MRMSrcMem, (outs RC:$dst),
6696 (ins RC:$src1, x86memop:$src2, u32u8imm:$src3),
6697 !if(Is2Addr,
6698 !strconcat(OpcodeStr,
6699 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6700 !strconcat(OpcodeStr,
6701 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6702 [(set RC:$dst,
6703 (IntId RC:$src1,
6704 (bitconvert (memop_frag addr:$src2)), imm:$src3))]>,
6705 OpSize;
6706 }
6708 let Predicates = [HasAVX] in {
6709 let isCommutable = 0 in {
6710 let ExeDomain = SSEPackedSingle in {
6711 defm VBLENDPS : SS41I_binop_rmi_int<0x0C, "vblendps", int_x86_sse41_blendps,
6712 VR128, memopv4f32, f128mem, 0>, VEX_4V;
6713 defm VBLENDPSY : SS41I_binop_rmi_int<0x0C, "vblendps",
6714 int_x86_avx_blend_ps_256, VR256, memopv8f32,
6715 f256mem, 0>, VEX_4V, VEX_L;
6716 }
6717 let ExeDomain = SSEPackedDouble in {
6718 defm VBLENDPD : SS41I_binop_rmi_int<0x0D, "vblendpd", int_x86_sse41_blendpd,
6719 VR128, memopv2f64, f128mem, 0>, VEX_4V;
6720 defm VBLENDPDY : SS41I_binop_rmi_int<0x0D, "vblendpd",
6721 int_x86_avx_blend_pd_256,VR256, memopv4f64,
6722 f256mem, 0>, VEX_4V, VEX_L;
6723 }
6724 defm VPBLENDW : SS41I_binop_rmi_int<0x0E, "vpblendw", int_x86_sse41_pblendw,
6725 VR128, memopv2i64, i128mem, 0>, VEX_4V;
6726 defm VMPSADBW : SS41I_binop_rmi_int<0x42, "vmpsadbw", int_x86_sse41_mpsadbw,
6727 VR128, memopv2i64, i128mem, 0>, VEX_4V;
6728 }
6729 let ExeDomain = SSEPackedSingle in
6730 defm VDPPS : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_sse41_dpps,
6731 VR128, memopv4f32, f128mem, 0>, VEX_4V;
6732 let ExeDomain = SSEPackedDouble in
6733 defm VDPPD : SS41I_binop_rmi_int<0x41, "vdppd", int_x86_sse41_dppd,
6734 VR128, memopv2f64, f128mem, 0>, VEX_4V;
6735 let ExeDomain = SSEPackedSingle in
6736 defm VDPPSY : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_avx_dp_ps_256,
6737 VR256, memopv8f32, i256mem, 0>, VEX_4V, VEX_L;
6738 }
6740 let Predicates = [HasAVX2] in {
6741 let isCommutable = 0 in {
6742 defm VPBLENDWY : SS41I_binop_rmi_int<0x0E, "vpblendw", int_x86_avx2_pblendw,
6743 VR256, memopv4i64, i256mem, 0>, VEX_4V, VEX_L;
6744 defm VMPSADBWY : SS41I_binop_rmi_int<0x42, "vmpsadbw", int_x86_avx2_mpsadbw,
6745 VR256, memopv4i64, i256mem, 0>, VEX_4V, VEX_L;
6746 }
6747 }
6749 let Constraints = "$src1 = $dst" in {
6750 let isCommutable = 0 in {
6751 let ExeDomain = SSEPackedSingle in
6752 defm BLENDPS : SS41I_binop_rmi_int<0x0C, "blendps", int_x86_sse41_blendps,
6753 VR128, memopv4f32, f128mem>;
6754 let ExeDomain = SSEPackedDouble in
6755 defm BLENDPD : SS41I_binop_rmi_int<0x0D, "blendpd", int_x86_sse41_blendpd,
6756 VR128, memopv2f64, f128mem>;
6757 defm PBLENDW : SS41I_binop_rmi_int<0x0E, "pblendw", int_x86_sse41_pblendw,
6758 VR128, memopv2i64, i128mem>;
6759 defm MPSADBW : SS41I_binop_rmi_int<0x42, "mpsadbw", int_x86_sse41_mpsadbw,
6760 VR128, memopv2i64, i128mem>;
6761 }
6762 let ExeDomain = SSEPackedSingle in
6763 defm DPPS : SS41I_binop_rmi_int<0x40, "dpps", int_x86_sse41_dpps,
6764 VR128, memopv4f32, f128mem>;
6765 let ExeDomain = SSEPackedDouble in
6766 defm DPPD : SS41I_binop_rmi_int<0x41, "dppd", int_x86_sse41_dppd,
6767 VR128, memopv2f64, f128mem>;
6768 }
6770 /// SS41I_quaternary_int_avx - AVX SSE 4.1 with 4 operands
6771 multiclass SS41I_quaternary_int_avx<bits<8> opc, string OpcodeStr,
6772 RegisterClass RC, X86MemOperand x86memop,
6773 PatFrag mem_frag, Intrinsic IntId> {
6774 def rr : Ii8<opc, MRMSrcReg, (outs RC:$dst),
6775 (ins RC:$src1, RC:$src2, RC:$src3),
6776 !strconcat(OpcodeStr,
6777 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
6778 [(set RC:$dst, (IntId RC:$src1, RC:$src2, RC:$src3))],
6779 IIC_DEFAULT, SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM;
6781 def rm : Ii8<opc, MRMSrcMem, (outs RC:$dst),
6782 (ins RC:$src1, x86memop:$src2, RC:$src3),
6783 !strconcat(OpcodeStr,
6784 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
6785 [(set RC:$dst,
6786 (IntId RC:$src1, (bitconvert (mem_frag addr:$src2)),
6787 RC:$src3))],
6788 IIC_DEFAULT, SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM;
6789 }
6791 let Predicates = [HasAVX] in {
6792 let ExeDomain = SSEPackedDouble in {
6793 defm VBLENDVPD : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR128, f128mem,
6794 memopv2f64, int_x86_sse41_blendvpd>;
6795 defm VBLENDVPDY : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR256, f256mem,
6796 memopv4f64, int_x86_avx_blendv_pd_256>, VEX_L;
6797 } // ExeDomain = SSEPackedDouble
6798 let ExeDomain = SSEPackedSingle in {
6799 defm VBLENDVPS : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR128, f128mem,
6800 memopv4f32, int_x86_sse41_blendvps>;
6801 defm VBLENDVPSY : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR256, f256mem,
6802 memopv8f32, int_x86_avx_blendv_ps_256>, VEX_L;
6803 } // ExeDomain = SSEPackedSingle
6804 defm VPBLENDVB : SS41I_quaternary_int_avx<0x4C, "vpblendvb", VR128, i128mem,
6805 memopv2i64, int_x86_sse41_pblendvb>;
6806 }
6808 let Predicates = [HasAVX2] in {
6809 defm VPBLENDVBY : SS41I_quaternary_int_avx<0x4C, "vpblendvb", VR256, i256mem,
6810 memopv4i64, int_x86_avx2_pblendvb>, VEX_L;
6811 }
6813 let Predicates = [HasAVX] in {
6814 def : Pat<(v16i8 (vselect (v16i8 VR128:$mask), (v16i8 VR128:$src1),
6815 (v16i8 VR128:$src2))),
6816 (VPBLENDVBrr VR128:$src2, VR128:$src1, VR128:$mask)>;
6817 def : Pat<(v4i32 (vselect (v4i32 VR128:$mask), (v4i32 VR128:$src1),
6818 (v4i32 VR128:$src2))),
6819 (VBLENDVPSrr VR128:$src2, VR128:$src1, VR128:$mask)>;
6820 def : Pat<(v4f32 (vselect (v4i32 VR128:$mask), (v4f32 VR128:$src1),
6821 (v4f32 VR128:$src2))),
6822 (VBLENDVPSrr VR128:$src2, VR128:$src1, VR128:$mask)>;
6823 def : Pat<(v2i64 (vselect (v2i64 VR128:$mask), (v2i64 VR128:$src1),
6824 (v2i64 VR128:$src2))),
6825 (VBLENDVPDrr VR128:$src2, VR128:$src1, VR128:$mask)>;
6826 def : Pat<(v2f64 (vselect (v2i64 VR128:$mask), (v2f64 VR128:$src1),
6827 (v2f64 VR128:$src2))),
6828 (VBLENDVPDrr VR128:$src2, VR128:$src1, VR128:$mask)>;
6829 def : Pat<(v8i32 (vselect (v8i32 VR256:$mask), (v8i32 VR256:$src1),
6830 (v8i32 VR256:$src2))),
6831 (VBLENDVPSYrr VR256:$src2, VR256:$src1, VR256:$mask)>;
6832 def : Pat<(v8f32 (vselect (v8i32 VR256:$mask), (v8f32 VR256:$src1),
6833 (v8f32 VR256:$src2))),
6834 (VBLENDVPSYrr VR256:$src2, VR256:$src1, VR256:$mask)>;
6835 def : Pat<(v4i64 (vselect (v4i64 VR256:$mask), (v4i64 VR256:$src1),
6836 (v4i64 VR256:$src2))),
6837 (VBLENDVPDYrr VR256:$src2, VR256:$src1, VR256:$mask)>;
6838 def : Pat<(v4f64 (vselect (v4i64 VR256:$mask), (v4f64 VR256:$src1),
6839 (v4f64 VR256:$src2))),
6840 (VBLENDVPDYrr VR256:$src2, VR256:$src1, VR256:$mask)>;
6842 def : Pat<(v8f32 (X86Blendps (v8f32 VR256:$src1), (v8f32 VR256:$src2),
6843 (imm:$mask))),
6844 (VBLENDPSYrri VR256:$src2, VR256:$src1, imm:$mask)>;
6845 def : Pat<(v4f64 (X86Blendpd (v4f64 VR256:$src1), (v4f64 VR256:$src2),
6846 (imm:$mask))),
6847 (VBLENDPDYrri VR256:$src2, VR256:$src1, imm:$mask)>;
6849 def : Pat<(v8i16 (X86Blendpw (v8i16 VR128:$src1), (v8i16 VR128:$src2),
6850 (imm:$mask))),
6851 (VPBLENDWrri VR128:$src2, VR128:$src1, imm:$mask)>;
6852 def : Pat<(v4f32 (X86Blendps (v4f32 VR128:$src1), (v4f32 VR128:$src2),
6853 (imm:$mask))),
6854 (VBLENDPSrri VR128:$src2, VR128:$src1, imm:$mask)>;
6855 def : Pat<(v2f64 (X86Blendpd (v2f64 VR128:$src1), (v2f64 VR128:$src2),
6856 (imm:$mask))),
6857 (VBLENDPDrri VR128:$src2, VR128:$src1, imm:$mask)>;
6858 }
6860 let Predicates = [HasAVX2] in {
6861 def : Pat<(v32i8 (vselect (v32i8 VR256:$mask), (v32i8 VR256:$src1),
6862 (v32i8 VR256:$src2))),
6863 (VPBLENDVBYrr VR256:$src2, VR256:$src1, VR256:$mask)>;
6864 def : Pat<(v16i16 (X86Blendpw (v16i16 VR256:$src1), (v16i16 VR256:$src2),
6865 (imm:$mask))),
6866 (VPBLENDWYrri VR256:$src2, VR256:$src1, imm:$mask)>;
6867 }
6869 /// SS41I_ternary_int - SSE 4.1 ternary operator
6870 let Uses = [XMM0], Constraints = "$src1 = $dst" in {
6871 multiclass SS41I_ternary_int<bits<8> opc, string OpcodeStr, PatFrag mem_frag,
6872 X86MemOperand x86memop, Intrinsic IntId> {
6873 def rr0 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
6874 (ins VR128:$src1, VR128:$src2),
6875 !strconcat(OpcodeStr,
6876 "\t{$src2, $dst|$dst, $src2}"),
6877 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2, XMM0))]>,
6878 OpSize;
6880 def rm0 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
6881 (ins VR128:$src1, x86memop:$src2),
6882 !strconcat(OpcodeStr,
6883 "\t{$src2, $dst|$dst, $src2}"),
6884 [(set VR128:$dst,
6885 (IntId VR128:$src1,
6886 (bitconvert (mem_frag addr:$src2)), XMM0))]>, OpSize;
6887 }
6888 }
6890 let ExeDomain = SSEPackedDouble in
6891 defm BLENDVPD : SS41I_ternary_int<0x15, "blendvpd", memopv2f64, f128mem,
6892 int_x86_sse41_blendvpd>;
6893 let ExeDomain = SSEPackedSingle in
6894 defm BLENDVPS : SS41I_ternary_int<0x14, "blendvps", memopv4f32, f128mem,
6895 int_x86_sse41_blendvps>;
6896 defm PBLENDVB : SS41I_ternary_int<0x10, "pblendvb", memopv2i64, i128mem,
6897 int_x86_sse41_pblendvb>;

// Aliases with the implicit xmm0 argument
def : InstAlias<"blendvpd\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}",
                (BLENDVPDrr0 VR128:$dst, VR128:$src2)>;
def : InstAlias<"blendvpd\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}",
                (BLENDVPDrm0 VR128:$dst, f128mem:$src2)>;
def : InstAlias<"blendvps\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}",
                (BLENDVPSrr0 VR128:$dst, VR128:$src2)>;
def : InstAlias<"blendvps\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}",
                (BLENDVPSrm0 VR128:$dst, f128mem:$src2)>;
def : InstAlias<"pblendvb\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}",
                (PBLENDVBrr0 VR128:$dst, VR128:$src2)>;
def : InstAlias<"pblendvb\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}",
                (PBLENDVBrm0 VR128:$dst, i128mem:$src2)>;
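
// For example, "blendvps %xmm0, %xmm2, %xmm1" and the canonical two-operand
// spelling "blendvps %xmm2, %xmm1" assemble to the same encoding: the mask
// operand of the non-VEX forms is always implicitly xmm0, and the aliases
// above merely accept the explicit-xmm0 spelling.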

let Predicates = [UseSSE41] in {
  def : Pat<(v16i8 (vselect (v16i8 XMM0), (v16i8 VR128:$src1),
                            (v16i8 VR128:$src2))),
            (PBLENDVBrr0 VR128:$src2, VR128:$src1)>;
  def : Pat<(v4i32 (vselect (v4i32 XMM0), (v4i32 VR128:$src1),
                            (v4i32 VR128:$src2))),
            (BLENDVPSrr0 VR128:$src2, VR128:$src1)>;
  def : Pat<(v4f32 (vselect (v4i32 XMM0), (v4f32 VR128:$src1),
                            (v4f32 VR128:$src2))),
            (BLENDVPSrr0 VR128:$src2, VR128:$src1)>;
  def : Pat<(v2i64 (vselect (v2i64 XMM0), (v2i64 VR128:$src1),
                            (v2i64 VR128:$src2))),
            (BLENDVPDrr0 VR128:$src2, VR128:$src1)>;
  def : Pat<(v2f64 (vselect (v2i64 XMM0), (v2f64 VR128:$src1),
                            (v2f64 VR128:$src2))),
            (BLENDVPDrr0 VR128:$src2, VR128:$src1)>;

  def : Pat<(v8i16 (X86Blendpw (v8i16 VR128:$src1), (v8i16 VR128:$src2),
                               (imm:$mask))),
            (PBLENDWrri VR128:$src2, VR128:$src1, imm:$mask)>;
  def : Pat<(v4f32 (X86Blendps (v4f32 VR128:$src1), (v4f32 VR128:$src2),
                               (imm:$mask))),
            (BLENDPSrri VR128:$src2, VR128:$src1, imm:$mask)>;
  def : Pat<(v2f64 (X86Blendpd (v2f64 VR128:$src1), (v2f64 VR128:$src2),
                               (imm:$mask))),
            (BLENDPDrri VR128:$src2, VR128:$src1, imm:$mask)>;
}

let Predicates = [HasAVX] in
def VMOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                        "vmovntdqa\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
                        OpSize, VEX;
let Predicates = [HasAVX2] in
def VMOVNTDQAYrm : SS48I<0x2A, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
                         "vmovntdqa\t{$src, $dst|$dst, $src}",
                         [(set VR256:$dst, (int_x86_avx2_movntdqa addr:$src))]>,
                         OpSize, VEX, VEX_L;
def MOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                       "movntdqa\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
                       OpSize;
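
// Usage note (illustrative): "movntdqa (%rax), %xmm0" is an aligned 16-byte
// load with a non-temporal hint, intended for streaming from write-combining
// memory. Only the intrinsic selects it, never a plain vector load, so the
// hint is emitted exactly where the programmer asked for it.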

//===----------------------------------------------------------------------===//
// SSE4.2 - Compare Instructions
//===----------------------------------------------------------------------===//

/// SS42I_binop_rm - Simple SSE 4.2 binary operator
multiclass SS42I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
                          ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
                          X86MemOperand x86memop, bit Is2Addr = 1> {
  def rr : SS428I<opc, MRMSrcReg, (outs RC:$dst),
       (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2)))]>,
       OpSize;
  def rm : SS428I<opc, MRMSrcMem, (outs RC:$dst),
       (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst,
         (OpVT (OpNode RC:$src1, (memop_frag addr:$src2))))]>, OpSize;
}

let Predicates = [HasAVX] in
  defm VPCMPGTQ : SS42I_binop_rm<0x37, "vpcmpgtq", X86pcmpgt, v2i64, VR128,
                                 memopv2i64, i128mem, 0>, VEX_4V;

let Predicates = [HasAVX2] in
  defm VPCMPGTQY : SS42I_binop_rm<0x37, "vpcmpgtq", X86pcmpgt, v4i64, VR256,
                                  memopv4i64, i256mem, 0>, VEX_4V, VEX_L;

let Constraints = "$src1 = $dst" in
  defm PCMPGTQ : SS42I_binop_rm<0x37, "pcmpgtq", X86pcmpgt, v2i64, VR128,
                                memopv2i64, i128mem>;
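
// Example: "pcmpgtq %xmm1, %xmm0" compares the two signed 64-bit lanes and
// writes all-ones into each destination lane where dst > src and all-zeros
// otherwise; X86pcmpgt models exactly this mask-producing compare.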

//===----------------------------------------------------------------------===//
// SSE4.2 - String/text Processing Instructions
//===----------------------------------------------------------------------===//

// Packed Compare Implicit Length Strings, Return Mask
multiclass pseudo_pcmpistrm<string asm> {
  def REG : PseudoI<(outs VR128:$dst),
                    (ins VR128:$src1, VR128:$src2, i8imm:$src3),
    [(set VR128:$dst, (int_x86_sse42_pcmpistrm128 VR128:$src1, VR128:$src2,
                                                  imm:$src3))]>;
  def MEM : PseudoI<(outs VR128:$dst),
                    (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
    [(set VR128:$dst, (int_x86_sse42_pcmpistrm128 VR128:$src1,
                       (bc_v16i8 (memopv2i64 addr:$src2)), imm:$src3))]>;
}

let Defs = [EFLAGS], usesCustomInserter = 1 in {
  defm VPCMPISTRM128 : pseudo_pcmpistrm<"#VPCMPISTRM128">, Requires<[HasAVX]>;
  defm PCMPISTRM128 : pseudo_pcmpistrm<"#PCMPISTRM128">, Requires<[UseSSE42]>;
}

multiclass pcmpistrm_SS42AI<string asm> {
  def rr : SS42AI<0x62, MRMSrcReg, (outs),
    (ins VR128:$src1, VR128:$src2, i8imm:$src3),
    !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
    []>, OpSize;
  def rm : SS42AI<0x62, MRMSrcMem, (outs),
    (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
    !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
    []>, OpSize;
}

let Defs = [XMM0, EFLAGS], neverHasSideEffects = 1 in {
  let Predicates = [HasAVX] in
  defm VPCMPISTRM128 : pcmpistrm_SS42AI<"vpcmpistrm">, VEX;
  defm PCMPISTRM128 : pcmpistrm_SS42AI<"pcmpistrm">;
}

// Packed Compare Explicit Length Strings, Return Mask
multiclass pseudo_pcmpestrm<string asm> {
  def REG : PseudoI<(outs VR128:$dst),
                    (ins VR128:$src1, VR128:$src3, i8imm:$src5),
    [(set VR128:$dst, (int_x86_sse42_pcmpestrm128
                       VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5))]>;
  def MEM : PseudoI<(outs VR128:$dst),
                    (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
    [(set VR128:$dst, (int_x86_sse42_pcmpestrm128 VR128:$src1, EAX,
                       (bc_v16i8 (memopv2i64 addr:$src3)), EDX, imm:$src5))]>;
}

let Defs = [EFLAGS], Uses = [EAX, EDX], usesCustomInserter = 1 in {
  defm VPCMPESTRM128 : pseudo_pcmpestrm<"#VPCMPESTRM128">, Requires<[HasAVX]>;
  defm PCMPESTRM128 : pseudo_pcmpestrm<"#PCMPESTRM128">, Requires<[UseSSE42]>;
}

multiclass SS42AI_pcmpestrm<string asm> {
  def rr : SS42AI<0x60, MRMSrcReg, (outs),
    (ins VR128:$src1, VR128:$src3, i8imm:$src5),
    !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
    []>, OpSize;
  def rm : SS42AI<0x60, MRMSrcMem, (outs),
    (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
    !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
    []>, OpSize;
}

let Defs = [XMM0, EFLAGS], Uses = [EAX, EDX], neverHasSideEffects = 1 in {
  let Predicates = [HasAVX] in
  defm VPCMPESTRM128 : SS42AI_pcmpestrm<"vpcmpestrm">, VEX;
  defm PCMPESTRM128 : SS42AI_pcmpestrm<"pcmpestrm">;
}

// Packed Compare Implicit Length Strings, Return Index
multiclass pseudo_pcmpistri<string asm> {
  def REG : PseudoI<(outs GR32:$dst),
                    (ins VR128:$src1, VR128:$src2, i8imm:$src3),
    [(set GR32:$dst, EFLAGS,
      (X86pcmpistri VR128:$src1, VR128:$src2, imm:$src3))]>;
  def MEM : PseudoI<(outs GR32:$dst),
                    (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
    [(set GR32:$dst, EFLAGS, (X86pcmpistri VR128:$src1,
      (bc_v16i8 (memopv2i64 addr:$src2)), imm:$src3))]>;
}

let Defs = [EFLAGS], usesCustomInserter = 1 in {
  defm VPCMPISTRI : pseudo_pcmpistri<"#VPCMPISTRI">, Requires<[HasAVX]>;
  defm PCMPISTRI : pseudo_pcmpistri<"#PCMPISTRI">, Requires<[UseSSE42]>;
}

multiclass SS42AI_pcmpistri<string asm> {
  def rr : SS42AI<0x63, MRMSrcReg, (outs),
    (ins VR128:$src1, VR128:$src2, i8imm:$src3),
    !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
    []>, OpSize;
  def rm : SS42AI<0x63, MRMSrcMem, (outs),
    (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
    !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
    []>, OpSize;
}

let Defs = [ECX, EFLAGS], neverHasSideEffects = 1 in {
  let Predicates = [HasAVX] in
  defm VPCMPISTRI : SS42AI_pcmpistri<"vpcmpistri">, VEX;
  defm PCMPISTRI : SS42AI_pcmpistri<"pcmpistri">;
}

// Packed Compare Explicit Length Strings, Return Index
multiclass pseudo_pcmpestri<string asm> {
  def REG : PseudoI<(outs GR32:$dst),
                    (ins VR128:$src1, VR128:$src3, i8imm:$src5),
    [(set GR32:$dst, EFLAGS,
      (X86pcmpestri VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5))]>;
  def MEM : PseudoI<(outs GR32:$dst),
                    (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
    [(set GR32:$dst, EFLAGS,
      (X86pcmpestri VR128:$src1, EAX, (bc_v16i8 (memopv2i64 addr:$src3)), EDX,
       imm:$src5))]>;
}

let Defs = [EFLAGS], Uses = [EAX, EDX], usesCustomInserter = 1 in {
  defm VPCMPESTRI : pseudo_pcmpestri<"#VPCMPESTRI">, Requires<[HasAVX]>;
  defm PCMPESTRI : pseudo_pcmpestri<"#PCMPESTRI">, Requires<[UseSSE42]>;
}

multiclass SS42AI_pcmpestri<string asm> {
  def rr : SS42AI<0x61, MRMSrcReg, (outs),
    (ins VR128:$src1, VR128:$src3, i8imm:$src5),
    !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
    []>, OpSize;
  def rm : SS42AI<0x61, MRMSrcMem, (outs),
    (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
    !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
    []>, OpSize;
}

let Defs = [ECX, EFLAGS], Uses = [EAX, EDX], neverHasSideEffects = 1 in {
  let Predicates = [HasAVX] in
  defm VPCMPESTRI : SS42AI_pcmpestri<"vpcmpestri">, VEX;
  defm PCMPESTRI : SS42AI_pcmpestri<"pcmpestri">;
}
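
// Usage sketch for the string instructions above: "pcmpistri $0x8, %xmm1,
// %xmm0" runs an equal-each comparison (mode chosen by the immediate) over
// implicit-length strings, leaving the match index in ECX and the result
// flags in EFLAGS. That is why the real defs list ECX/XMM0 and EFLAGS in
// Defs, while the pseudos above exist only so the custom inserter has a
// pattern to expand for the intrinsic forms.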

//===----------------------------------------------------------------------===//
// SSE4.2 - CRC Instructions
//===----------------------------------------------------------------------===//

// No CRC instructions have AVX equivalents

// crc intrinsic instruction
// This set of instructions is rr/rm-only; the variants differ only in the
// size of the r and m operands.
let Constraints = "$src1 = $dst" in {
  def CRC32r32m8 : SS42FI<0xF0, MRMSrcMem, (outs GR32:$dst),
                      (ins GR32:$src1, i8mem:$src2),
                      "crc32{b} \t{$src2, $src1|$src1, $src2}",
                      [(set GR32:$dst,
                        (int_x86_sse42_crc32_32_8 GR32:$src1,
                         (load addr:$src2)))]>;
  def CRC32r32r8 : SS42FI<0xF0, MRMSrcReg, (outs GR32:$dst),
                      (ins GR32:$src1, GR8:$src2),
                      "crc32{b} \t{$src2, $src1|$src1, $src2}",
                      [(set GR32:$dst,
                        (int_x86_sse42_crc32_32_8 GR32:$src1, GR8:$src2))]>;
  def CRC32r32m16 : SS42FI<0xF1, MRMSrcMem, (outs GR32:$dst),
                      (ins GR32:$src1, i16mem:$src2),
                      "crc32{w} \t{$src2, $src1|$src1, $src2}",
                      [(set GR32:$dst,
                        (int_x86_sse42_crc32_32_16 GR32:$src1,
                         (load addr:$src2)))]>,
                      OpSize;
  def CRC32r32r16 : SS42FI<0xF1, MRMSrcReg, (outs GR32:$dst),
                      (ins GR32:$src1, GR16:$src2),
                      "crc32{w} \t{$src2, $src1|$src1, $src2}",
                      [(set GR32:$dst,
                        (int_x86_sse42_crc32_32_16 GR32:$src1, GR16:$src2))]>,
                      OpSize;
  def CRC32r32m32 : SS42FI<0xF1, MRMSrcMem, (outs GR32:$dst),
                      (ins GR32:$src1, i32mem:$src2),
                      "crc32{l} \t{$src2, $src1|$src1, $src2}",
                      [(set GR32:$dst,
                        (int_x86_sse42_crc32_32_32 GR32:$src1,
                         (load addr:$src2)))]>;
  def CRC32r32r32 : SS42FI<0xF1, MRMSrcReg, (outs GR32:$dst),
                      (ins GR32:$src1, GR32:$src2),
                      "crc32{l} \t{$src2, $src1|$src1, $src2}",
                      [(set GR32:$dst,
                        (int_x86_sse42_crc32_32_32 GR32:$src1, GR32:$src2))]>;
  def CRC32r64m8 : SS42FI<0xF0, MRMSrcMem, (outs GR64:$dst),
                      (ins GR64:$src1, i8mem:$src2),
                      "crc32{b} \t{$src2, $src1|$src1, $src2}",
                      [(set GR64:$dst,
                        (int_x86_sse42_crc32_64_8 GR64:$src1,
                         (load addr:$src2)))]>,
                      REX_W;
  def CRC32r64r8 : SS42FI<0xF0, MRMSrcReg, (outs GR64:$dst),
                      (ins GR64:$src1, GR8:$src2),
                      "crc32{b} \t{$src2, $src1|$src1, $src2}",
                      [(set GR64:$dst,
                        (int_x86_sse42_crc32_64_8 GR64:$src1, GR8:$src2))]>,
                      REX_W;
  def CRC32r64m64 : SS42FI<0xF1, MRMSrcMem, (outs GR64:$dst),
                      (ins GR64:$src1, i64mem:$src2),
                      "crc32{q} \t{$src2, $src1|$src1, $src2}",
                      [(set GR64:$dst,
                        (int_x86_sse42_crc32_64_64 GR64:$src1,
                         (load addr:$src2)))]>,
                      REX_W;
  def CRC32r64r64 : SS42FI<0xF1, MRMSrcReg, (outs GR64:$dst),
                      (ins GR64:$src1, GR64:$src2),
                      "crc32{q} \t{$src2, $src1|$src1, $src2}",
                      [(set GR64:$dst,
                        (int_x86_sse42_crc32_64_64 GR64:$src1, GR64:$src2))]>,
                      REX_W;
}
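
// Example: "crc32b %cl, %eax" folds one byte into the running CRC held in
// EAX using the CRC-32C (Castagnoli) polynomial 0x11EDC6F41. The r64 forms
// differ only in REX.W and still produce a 32-bit CRC in the low bits of
// the destination.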

//===----------------------------------------------------------------------===//
// AES-NI Instructions
//===----------------------------------------------------------------------===//

multiclass AESI_binop_rm_int<bits<8> opc, string OpcodeStr,
                             Intrinsic IntId128, bit Is2Addr = 1> {
  def rr : AES8I<opc, MRMSrcReg, (outs VR128:$dst),
       (ins VR128:$src1, VR128:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
       OpSize;
  def rm : AES8I<opc, MRMSrcMem, (outs VR128:$dst),
       (ins VR128:$src1, i128mem:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst,
         (IntId128 VR128:$src1, (memopv2i64 addr:$src2)))]>, OpSize;
}

// Perform One Round of an AES Encryption/Decryption Flow
let Predicates = [HasAVX, HasAES] in {
  defm VAESENC     : AESI_binop_rm_int<0xDC, "vaesenc",
                                       int_x86_aesni_aesenc, 0>, VEX_4V;
  defm VAESENCLAST : AESI_binop_rm_int<0xDD, "vaesenclast",
                                       int_x86_aesni_aesenclast, 0>, VEX_4V;
  defm VAESDEC     : AESI_binop_rm_int<0xDE, "vaesdec",
                                       int_x86_aesni_aesdec, 0>, VEX_4V;
  defm VAESDECLAST : AESI_binop_rm_int<0xDF, "vaesdeclast",
                                       int_x86_aesni_aesdeclast, 0>, VEX_4V;
}

let Constraints = "$src1 = $dst" in {
  defm AESENC     : AESI_binop_rm_int<0xDC, "aesenc",
                                      int_x86_aesni_aesenc>;
  defm AESENCLAST : AESI_binop_rm_int<0xDD, "aesenclast",
                                      int_x86_aesni_aesenclast>;
  defm AESDEC     : AESI_binop_rm_int<0xDE, "aesdec",
                                      int_x86_aesni_aesdec>;
  defm AESDECLAST : AESI_binop_rm_int<0xDF, "aesdeclast",
                                      int_x86_aesni_aesdeclast>;
}

// Perform the AES InvMixColumn Transformation
let Predicates = [HasAVX, HasAES] in {
  def VAESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
      (ins VR128:$src1),
      "vaesimc\t{$src1, $dst|$dst, $src1}",
      [(set VR128:$dst,
        (int_x86_aesni_aesimc VR128:$src1))]>,
      OpSize, VEX;
  def VAESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
      (ins i128mem:$src1),
      "vaesimc\t{$src1, $dst|$dst, $src1}",
      [(set VR128:$dst, (int_x86_aesni_aesimc (memopv2i64 addr:$src1)))]>,
      OpSize, VEX;
}
def AESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
    (ins VR128:$src1),
    "aesimc\t{$src1, $dst|$dst, $src1}",
    [(set VR128:$dst,
      (int_x86_aesni_aesimc VR128:$src1))]>,
    OpSize;
def AESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
    (ins i128mem:$src1),
    "aesimc\t{$src1, $dst|$dst, $src1}",
    [(set VR128:$dst, (int_x86_aesni_aesimc (memopv2i64 addr:$src1)))]>,
    OpSize;

// AES Round Key Generation Assist
let Predicates = [HasAVX, HasAES] in {
  def VAESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
      (ins VR128:$src1, i8imm:$src2),
      "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
      [(set VR128:$dst,
        (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
      OpSize, VEX;
  def VAESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
      (ins i128mem:$src1, i8imm:$src2),
      "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
      [(set VR128:$dst,
        (int_x86_aesni_aeskeygenassist (memopv2i64 addr:$src1), imm:$src2))]>,
      OpSize, VEX;
}
def AESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
    (ins VR128:$src1, i8imm:$src2),
    "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
    [(set VR128:$dst,
      (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
    OpSize;
def AESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
    (ins i128mem:$src1, i8imm:$src2),
    "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
    [(set VR128:$dst,
      (int_x86_aesni_aeskeygenassist (memopv2i64 addr:$src1), imm:$src2))]>,
    OpSize;
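
// Semantics reminder: "aesenc %xmm1, %xmm0" performs one full encryption
// round, i.e. ShiftRows, SubBytes and MixColumns on the state in xmm0,
// followed by an XOR with the round key in xmm1. aesenclast omits the
// MixColumns step, and the aesdec forms implement the inverse flow.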

//===----------------------------------------------------------------------===//
// PCLMUL Instructions
//===----------------------------------------------------------------------===//

// AVX carry-less Multiplication instructions
def VPCLMULQDQrr : AVXPCLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
           (ins VR128:$src1, VR128:$src2, i8imm:$src3),
           "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
           [(set VR128:$dst,
             (int_x86_pclmulqdq VR128:$src1, VR128:$src2, imm:$src3))]>;

def VPCLMULQDQrm : AVXPCLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
           (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
           "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
           [(set VR128:$dst, (int_x86_pclmulqdq VR128:$src1,
                              (memopv2i64 addr:$src2), imm:$src3))]>;

// Carry-less Multiplication instructions
let Constraints = "$src1 = $dst" in {
def PCLMULQDQrr : PCLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
           (ins VR128:$src1, VR128:$src2, i8imm:$src3),
           "pclmulqdq\t{$src3, $src2, $dst|$dst, $src2, $src3}",
           [(set VR128:$dst,
             (int_x86_pclmulqdq VR128:$src1, VR128:$src2, imm:$src3))]>;

def PCLMULQDQrm : PCLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
           (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
           "pclmulqdq\t{$src3, $src2, $dst|$dst, $src2, $src3}",
           [(set VR128:$dst, (int_x86_pclmulqdq VR128:$src1,
                              (memopv2i64 addr:$src2), imm:$src3))]>;
} // Constraints = "$src1 = $dst"

multiclass pclmul_alias<string asm, int immop> {
  def : InstAlias<!strconcat("pclmul", asm, "dq {$src, $dst|$dst, $src}"),
                  (PCLMULQDQrr VR128:$dst, VR128:$src, immop)>;

  def : InstAlias<!strconcat("pclmul", asm, "dq {$src, $dst|$dst, $src}"),
                  (PCLMULQDQrm VR128:$dst, i128mem:$src, immop)>;

  def : InstAlias<!strconcat("vpclmul", asm,
                             "dq {$src2, $src1, $dst|$dst, $src1, $src2}"),
                  (VPCLMULQDQrr VR128:$dst, VR128:$src1, VR128:$src2, immop)>;

  def : InstAlias<!strconcat("vpclmul", asm,
                             "dq {$src2, $src1, $dst|$dst, $src1, $src2}"),
                  (VPCLMULQDQrm VR128:$dst, VR128:$src1, i128mem:$src2, immop)>;
}
defm : pclmul_alias<"hqhq", 0x11>;
defm : pclmul_alias<"hqlq", 0x01>;
defm : pclmul_alias<"lqhq", 0x10>;
defm : pclmul_alias<"lqlq", 0x00>;
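
// The immediate picks which quadword of each source enters the 64x64->128
// carry-less multiply: bit 0 selects the high (1) or low (0) qword of the
// first source, and bit 4 does the same for the second source. So, for
// example, "pclmullqhqdq" is simply "pclmulqdq $0x10".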

//===----------------------------------------------------------------------===//
// SSE4A Instructions
//===----------------------------------------------------------------------===//

let Predicates = [HasSSE4A] in {

let Constraints = "$src = $dst" in {
def EXTRQI : Ii8<0x78, MRM0r, (outs VR128:$dst),
                 (ins VR128:$src, i8imm:$len, i8imm:$idx),
                 "extrq\t{$idx, $len, $src|$src, $len, $idx}",
                 [(set VR128:$dst, (int_x86_sse4a_extrqi VR128:$src, imm:$len,
                                    imm:$idx))]>, TB, OpSize;
def EXTRQ  : I<0x79, MRMSrcReg, (outs VR128:$dst),
               (ins VR128:$src, VR128:$mask),
               "extrq\t{$mask, $src|$src, $mask}",
               [(set VR128:$dst, (int_x86_sse4a_extrq VR128:$src,
                                  VR128:$mask))]>, TB, OpSize;

def INSERTQI : Ii8<0x78, MRMSrcReg, (outs VR128:$dst),
                   (ins VR128:$src, VR128:$src2, i8imm:$len, i8imm:$idx),
                   "insertq\t{$idx, $len, $src2, $src|$src, $src2, $len, $idx}",
                   [(set VR128:$dst, (int_x86_sse4a_insertqi VR128:$src,
                                      VR128:$src2, imm:$len, imm:$idx))]>, XD;
def INSERTQ  : I<0x79, MRMSrcReg, (outs VR128:$dst),
                 (ins VR128:$src, VR128:$mask),
                 "insertq\t{$mask, $src|$src, $mask}",
                 [(set VR128:$dst, (int_x86_sse4a_insertq VR128:$src,
                                    VR128:$mask))]>, XD;
}

def MOVNTSS : I<0x2B, MRMDestMem, (outs), (ins f32mem:$dst, VR128:$src),
                "movntss\t{$src, $dst|$dst, $src}",
                [(int_x86_sse4a_movnt_ss addr:$dst, VR128:$src)]>, XS;

def MOVNTSD : I<0x2B, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                "movntsd\t{$src, $dst|$dst, $src}",
                [(int_x86_sse4a_movnt_sd addr:$dst, VR128:$src)]>, XD;
}
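
// Illustrative semantics: EXTRQI pulls a $len-bit field starting at bit $idx
// out of the low quadword of the source, zero-filling the rest, while
// INSERTQI deposits the low $len bits of $src2 at bit $idx of $src.
// MOVNTSS/MOVNTSD are scalar non-temporal stores of the low element.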

//===----------------------------------------------------------------------===//
// AVX Instructions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// VBROADCAST - Load from memory and broadcast to all elements of the
//              destination operand
//
class avx_broadcast<bits<8> opc, string OpcodeStr, RegisterClass RC,
                    X86MemOperand x86memop, Intrinsic Int> :
  AVX8I<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
        !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
        [(set RC:$dst, (Int addr:$src))]>, VEX;

// AVX2 adds register forms
class avx2_broadcast_reg<bits<8> opc, string OpcodeStr, RegisterClass RC,
                         Intrinsic Int> :
  AVX28I<opc, MRMSrcReg, (outs RC:$dst), (ins VR128:$src),
         !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
         [(set RC:$dst, (Int VR128:$src))]>, VEX;

let ExeDomain = SSEPackedSingle in {
  def VBROADCASTSSrm  : avx_broadcast<0x18, "vbroadcastss", VR128, f32mem,
                                      int_x86_avx_vbroadcast_ss>;
  def VBROADCASTSSYrm : avx_broadcast<0x18, "vbroadcastss", VR256, f32mem,
                                      int_x86_avx_vbroadcast_ss_256>, VEX_L;
}
let ExeDomain = SSEPackedDouble in
def VBROADCASTSDYrm : avx_broadcast<0x19, "vbroadcastsd", VR256, f64mem,
                                    int_x86_avx_vbroadcast_sd_256>, VEX_L;
def VBROADCASTF128 : avx_broadcast<0x1A, "vbroadcastf128", VR256, f128mem,
                                   int_x86_avx_vbroadcastf128_pd_256>, VEX_L;

let ExeDomain = SSEPackedSingle in {
  def VBROADCASTSSrr  : avx2_broadcast_reg<0x18, "vbroadcastss", VR128,
                                           int_x86_avx2_vbroadcast_ss_ps>;
  def VBROADCASTSSYrr : avx2_broadcast_reg<0x18, "vbroadcastss", VR256,
                                           int_x86_avx2_vbroadcast_ss_ps_256>,
                                           VEX_L;
}
let ExeDomain = SSEPackedDouble in
def VBROADCASTSDYrr : avx2_broadcast_reg<0x19, "vbroadcastsd", VR256,
                                         int_x86_avx2_vbroadcast_sd_pd_256>,
                                         VEX_L;

let Predicates = [HasAVX2] in
def VBROADCASTI128 : avx_broadcast<0x5A, "vbroadcasti128", VR256, i128mem,
                                   int_x86_avx2_vbroadcasti128>, VEX_L;

let Predicates = [HasAVX] in
def : Pat<(int_x86_avx_vbroadcastf128_ps_256 addr:$src),
          (VBROADCASTF128 addr:$src)>;
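
// For example, "vbroadcastss (%rax), %ymm0" loads a single f32 and
// replicates it into all eight lanes of ymm0. Before AVX2 the instruction
// exists only with a memory source, which is why avx_broadcast is load-only
// and the register forms above are AVX2 instructions.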

//===----------------------------------------------------------------------===//
// VINSERTF128 - Insert packed floating-point values
//
let neverHasSideEffects = 1, ExeDomain = SSEPackedSingle in {
def VINSERTF128rr : AVXAIi8<0x18, MRMSrcReg, (outs VR256:$dst),
          (ins VR256:$src1, VR128:$src2, i8imm:$src3),
          "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, VEX_4V, VEX_L;
let mayLoad = 1 in
def VINSERTF128rm : AVXAIi8<0x18, MRMSrcMem, (outs VR256:$dst),
          (ins VR256:$src1, f128mem:$src2, i8imm:$src3),
          "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, VEX_4V, VEX_L;
}

let Predicates = [HasAVX] in {
def : Pat<(vinsertf128_insert:$ins (v8f32 VR256:$src1), (v4f32 VR128:$src2),
                                   (iPTR imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v4f64 VR256:$src1), (v2f64 VR128:$src2),
                                   (iPTR imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;

def : Pat<(vinsertf128_insert:$ins (v8f32 VR256:$src1), (memopv4f32 addr:$src2),
                                   (iPTR imm)),
          (VINSERTF128rm VR256:$src1, addr:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v4f64 VR256:$src1), (memopv2f64 addr:$src2),
                                   (iPTR imm)),
          (VINSERTF128rm VR256:$src1, addr:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
}

let Predicates = [HasAVX1Only] in {
def : Pat<(vinsertf128_insert:$ins (v4i64 VR256:$src1), (v2i64 VR128:$src2),
                                   (iPTR imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v8i32 VR256:$src1), (v4i32 VR128:$src2),
                                   (iPTR imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v32i8 VR256:$src1), (v16i8 VR128:$src2),
                                   (iPTR imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v16i16 VR256:$src1), (v8i16 VR128:$src2),
                                   (iPTR imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;

def : Pat<(vinsertf128_insert:$ins (v4i64 VR256:$src1), (memopv2i64 addr:$src2),
                                   (iPTR imm)),
          (VINSERTF128rm VR256:$src1, addr:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v8i32 VR256:$src1),
                                   (bc_v4i32 (memopv2i64 addr:$src2)),
                                   (iPTR imm)),
          (VINSERTF128rm VR256:$src1, addr:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v32i8 VR256:$src1),
                                   (bc_v16i8 (memopv2i64 addr:$src2)),
                                   (iPTR imm)),
          (VINSERTF128rm VR256:$src1, addr:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v16i16 VR256:$src1),
                                   (bc_v8i16 (memopv2i64 addr:$src2)),
                                   (iPTR imm)),
          (VINSERTF128rm VR256:$src1, addr:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
}

//===----------------------------------------------------------------------===//
// VEXTRACTF128 - Extract packed floating-point values
//
let neverHasSideEffects = 1, ExeDomain = SSEPackedSingle in {
def VEXTRACTF128rr : AVXAIi8<0x19, MRMDestReg, (outs VR128:$dst),
          (ins VR256:$src1, i8imm:$src2),
          "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
          []>, VEX, VEX_L;
let mayStore = 1 in
def VEXTRACTF128mr : AVXAIi8<0x19, MRMDestMem, (outs),
          (ins f128mem:$dst, VR256:$src1, i8imm:$src2),
          "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
          []>, VEX, VEX_L;
}

// AVX1 patterns
let Predicates = [HasAVX] in {
def : Pat<(vextractf128_extract:$ext VR256:$src1, (iPTR imm)),
          (v4f32 (VEXTRACTF128rr
                    (v8f32 VR256:$src1),
                    (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
def : Pat<(vextractf128_extract:$ext VR256:$src1, (iPTR imm)),
          (v2f64 (VEXTRACTF128rr
                    (v4f64 VR256:$src1),
                    (EXTRACT_get_vextractf128_imm VR128:$ext)))>;

def : Pat<(alignedstore (v4f32 (vextractf128_extract:$ext (v8f32 VR256:$src1),
                                (iPTR imm))), addr:$dst),
          (VEXTRACTF128mr addr:$dst, VR256:$src1,
                          (EXTRACT_get_vextractf128_imm VR128:$ext))>;
def : Pat<(alignedstore (v2f64 (vextractf128_extract:$ext (v4f64 VR256:$src1),
                                (iPTR imm))), addr:$dst),
          (VEXTRACTF128mr addr:$dst, VR256:$src1,
                          (EXTRACT_get_vextractf128_imm VR128:$ext))>;
}

let Predicates = [HasAVX1Only] in {
def : Pat<(vextractf128_extract:$ext VR256:$src1, (iPTR imm)),
          (v2i64 (VEXTRACTF128rr
                    (v4i64 VR256:$src1),
                    (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
def : Pat<(vextractf128_extract:$ext VR256:$src1, (iPTR imm)),
          (v4i32 (VEXTRACTF128rr
                    (v8i32 VR256:$src1),
                    (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
def : Pat<(vextractf128_extract:$ext VR256:$src1, (iPTR imm)),
          (v8i16 (VEXTRACTF128rr
                    (v16i16 VR256:$src1),
                    (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
def : Pat<(vextractf128_extract:$ext VR256:$src1, (iPTR imm)),
          (v16i8 (VEXTRACTF128rr
                    (v32i8 VR256:$src1),
                    (EXTRACT_get_vextractf128_imm VR128:$ext)))>;

def : Pat<(alignedstore (v2i64 (vextractf128_extract:$ext (v4i64 VR256:$src1),
                                (iPTR imm))), addr:$dst),
          (VEXTRACTF128mr addr:$dst, VR256:$src1,
                          (EXTRACT_get_vextractf128_imm VR128:$ext))>;
def : Pat<(alignedstore (v4i32 (vextractf128_extract:$ext (v8i32 VR256:$src1),
                                (iPTR imm))), addr:$dst),
          (VEXTRACTF128mr addr:$dst, VR256:$src1,
                          (EXTRACT_get_vextractf128_imm VR128:$ext))>;
def : Pat<(alignedstore (v8i16 (vextractf128_extract:$ext (v16i16 VR256:$src1),
                                (iPTR imm))), addr:$dst),
          (VEXTRACTF128mr addr:$dst, VR256:$src1,
                          (EXTRACT_get_vextractf128_imm VR128:$ext))>;
def : Pat<(alignedstore (v16i8 (vextractf128_extract:$ext (v32i8 VR256:$src1),
                                (iPTR imm))), addr:$dst),
          (VEXTRACTF128mr addr:$dst, VR256:$src1,
                          (EXTRACT_get_vextractf128_imm VR128:$ext))>;
}

//===----------------------------------------------------------------------===//
// VMASKMOV - Conditional SIMD Packed Loads and Stores
//
multiclass avx_movmask_rm<bits<8> opc_rm, bits<8> opc_mr, string OpcodeStr,
                          Intrinsic IntLd, Intrinsic IntLd256,
                          Intrinsic IntSt, Intrinsic IntSt256> {
  def rm  : AVX8I<opc_rm, MRMSrcMem, (outs VR128:$dst),
             (ins VR128:$src1, f128mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR128:$dst, (IntLd addr:$src2, VR128:$src1))]>,
             VEX_4V;
  def Yrm : AVX8I<opc_rm, MRMSrcMem, (outs VR256:$dst),
             (ins VR256:$src1, f256mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR256:$dst, (IntLd256 addr:$src2, VR256:$src1))]>,
             VEX_4V, VEX_L;
  def mr  : AVX8I<opc_mr, MRMDestMem, (outs),
             (ins f128mem:$dst, VR128:$src1, VR128:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(IntSt addr:$dst, VR128:$src1, VR128:$src2)]>, VEX_4V;
  def Ymr : AVX8I<opc_mr, MRMDestMem, (outs),
             (ins f256mem:$dst, VR256:$src1, VR256:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(IntSt256 addr:$dst, VR256:$src1, VR256:$src2)]>, VEX_4V, VEX_L;
}

let ExeDomain = SSEPackedSingle in
defm VMASKMOVPS : avx_movmask_rm<0x2C, 0x2E, "vmaskmovps",
                                 int_x86_avx_maskload_ps,
                                 int_x86_avx_maskload_ps_256,
                                 int_x86_avx_maskstore_ps,
                                 int_x86_avx_maskstore_ps_256>;
let ExeDomain = SSEPackedDouble in
defm VMASKMOVPD : avx_movmask_rm<0x2D, 0x2F, "vmaskmovpd",
                                 int_x86_avx_maskload_pd,
                                 int_x86_avx_maskload_pd_256,
                                 int_x86_avx_maskstore_pd,
                                 int_x86_avx_maskstore_pd_256>;
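
// A useful property of these conditional moves: lanes whose mask sign bit is
// clear are neither loaded nor stored, and memory faults on such masked-off
// lanes are suppressed, so they are safe for loop epilogues that would
// otherwise read or write past the end of an array.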

//===----------------------------------------------------------------------===//
// VPERMIL - Permute Single and Double Floating-Point Values
//
multiclass avx_permil<bits<8> opc_rm, bits<8> opc_rmi, string OpcodeStr,
                      RegisterClass RC, X86MemOperand x86memop_f,
                      X86MemOperand x86memop_i, PatFrag i_frag,
                      Intrinsic IntVar, ValueType vt> {
  def rr : AVX8I<opc_rm, MRMSrcReg, (outs RC:$dst),
             (ins RC:$src1, RC:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst, (IntVar RC:$src1, RC:$src2))]>, VEX_4V;
  def rm : AVX8I<opc_rm, MRMSrcMem, (outs RC:$dst),
             (ins RC:$src1, x86memop_i:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst, (IntVar RC:$src1,
                             (bitconvert (i_frag addr:$src2))))]>, VEX_4V;

  def ri : AVXAIi8<opc_rmi, MRMSrcReg, (outs RC:$dst),
             (ins RC:$src1, i8imm:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst, (vt (X86VPermilp RC:$src1, (i8 imm:$src2))))]>, VEX;
  def mi : AVXAIi8<opc_rmi, MRMSrcMem, (outs RC:$dst),
             (ins x86memop_f:$src1, i8imm:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst,
               (vt (X86VPermilp (memop addr:$src1), (i8 imm:$src2))))]>, VEX;
}

let ExeDomain = SSEPackedSingle in {
  defm VPERMILPS  : avx_permil<0x0C, 0x04, "vpermilps", VR128, f128mem, i128mem,
                               memopv2i64, int_x86_avx_vpermilvar_ps, v4f32>;
  defm VPERMILPSY : avx_permil<0x0C, 0x04, "vpermilps", VR256, f256mem, i256mem,
                       memopv4i64, int_x86_avx_vpermilvar_ps_256, v8f32>, VEX_L;
}
let ExeDomain = SSEPackedDouble in {
  defm VPERMILPD  : avx_permil<0x0D, 0x05, "vpermilpd", VR128, f128mem, i128mem,
                               memopv2i64, int_x86_avx_vpermilvar_pd, v2f64>;
  defm VPERMILPDY : avx_permil<0x0D, 0x05, "vpermilpd", VR256, f256mem, i256mem,
                       memopv4i64, int_x86_avx_vpermilvar_pd_256, v4f64>, VEX_L;
}

let Predicates = [HasAVX] in {
def : Pat<(v8i32 (X86VPermilp VR256:$src1, (i8 imm:$imm))),
          (VPERMILPSYri VR256:$src1, imm:$imm)>;
def : Pat<(v4i64 (X86VPermilp VR256:$src1, (i8 imm:$imm))),
          (VPERMILPDYri VR256:$src1, imm:$imm)>;
def : Pat<(v8i32 (X86VPermilp (bc_v8i32 (memopv4i64 addr:$src1)),
                              (i8 imm:$imm))),
          (VPERMILPSYmi addr:$src1, imm:$imm)>;
def : Pat<(v4i64 (X86VPermilp (memopv4i64 addr:$src1), (i8 imm:$imm))),
          (VPERMILPDYmi addr:$src1, imm:$imm)>;

def : Pat<(v2i64 (X86VPermilp VR128:$src1, (i8 imm:$imm))),
          (VPERMILPDri VR128:$src1, imm:$imm)>;
def : Pat<(v2i64 (X86VPermilp (memopv2i64 addr:$src1), (i8 imm:$imm))),
          (VPERMILPDmi addr:$src1, imm:$imm)>;
}

//===----------------------------------------------------------------------===//
// VPERM2F128 - Permute Floating-Point Values in 128-bit chunks
//
let ExeDomain = SSEPackedSingle in {
def VPERM2F128rr : AVXAIi8<0x06, MRMSrcReg, (outs VR256:$dst),
          (ins VR256:$src1, VR256:$src2, i8imm:$src3),
          "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          [(set VR256:$dst, (v8f32 (X86VPerm2x128 VR256:$src1, VR256:$src2,
                             (i8 imm:$src3))))]>, VEX_4V, VEX_L;
def VPERM2F128rm : AVXAIi8<0x06, MRMSrcMem, (outs VR256:$dst),
          (ins VR256:$src1, f256mem:$src2, i8imm:$src3),
          "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          [(set VR256:$dst, (X86VPerm2x128 VR256:$src1, (memopv8f32 addr:$src2),
                             (i8 imm:$src3)))]>, VEX_4V, VEX_L;
}

let Predicates = [HasAVX] in {
def : Pat<(v4f64 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v4f64 (X86VPerm2x128 VR256:$src1,
                  (memopv4f64 addr:$src2), (i8 imm:$imm))),
          (VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>;
}

let Predicates = [HasAVX1Only] in {
def : Pat<(v8i32 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v4i64 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v32i8 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v16i16 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;

def : Pat<(v8i32 (X86VPerm2x128 VR256:$src1,
                  (bc_v8i32 (memopv4i64 addr:$src2)), (i8 imm:$imm))),
          (VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>;
def : Pat<(v4i64 (X86VPerm2x128 VR256:$src1,
                  (memopv4i64 addr:$src2), (i8 imm:$imm))),
          (VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>;
def : Pat<(v32i8 (X86VPerm2x128 VR256:$src1,
                  (bc_v32i8 (memopv4i64 addr:$src2)), (i8 imm:$imm))),
          (VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>;
def : Pat<(v16i16 (X86VPerm2x128 VR256:$src1,
                   (bc_v16i16 (memopv4i64 addr:$src2)), (i8 imm:$imm))),
          (VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>;
}
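
// Immediate encoding example: each nibble of $src3 picks one 128-bit half
// (0/1 select halves of $src1, 2/3 halves of $src2, and bit 3 of a nibble
// zeroes that half), so imm 0x20 concatenates the low halves of the two
// sources and imm 0x31 concatenates their high halves.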

//===----------------------------------------------------------------------===//
// VZERO - Zero YMM registers
//
let Defs = [YMM0, YMM1, YMM2, YMM3, YMM4, YMM5, YMM6, YMM7,
            YMM8, YMM9, YMM10, YMM11, YMM12, YMM13, YMM14, YMM15] in {
  // Zero All YMM registers
  def VZEROALL : I<0x77, RawFrm, (outs), (ins), "vzeroall",
                   [(int_x86_avx_vzeroall)]>, TB, VEX, VEX_L, Requires<[HasAVX]>;

  // Zero Upper bits of YMM registers
  def VZEROUPPER : I<0x77, RawFrm, (outs), (ins), "vzeroupper",
                     [(int_x86_avx_vzeroupper)]>, TB, VEX, Requires<[HasAVX]>;
}
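
// Compilers emit VZEROUPPER before transitions into legacy SSE code:
// clearing the upper YMM bits avoids the costly state-transition penalty
// that mixing 256-bit AVX with non-VEX SSE instructions would otherwise
// incur on current implementations.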

//===----------------------------------------------------------------------===//
// Half precision conversion instructions
//===----------------------------------------------------------------------===//
multiclass f16c_ph2ps<RegisterClass RC, X86MemOperand x86memop, Intrinsic Int> {
  def rr : I<0x13, MRMSrcReg, (outs RC:$dst), (ins VR128:$src),
             "vcvtph2ps\t{$src, $dst|$dst, $src}",
             [(set RC:$dst, (Int VR128:$src))]>,
             T8, OpSize, VEX;
  let neverHasSideEffects = 1, mayLoad = 1 in
  def rm : I<0x13, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
             "vcvtph2ps\t{$src, $dst|$dst, $src}", []>, T8, OpSize, VEX;
}

multiclass f16c_ps2ph<RegisterClass RC, X86MemOperand x86memop, Intrinsic Int> {
  def rr : Ii8<0x1D, MRMDestReg, (outs VR128:$dst),
               (ins RC:$src1, i32i8imm:$src2),
               "vcvtps2ph\t{$src2, $src1, $dst|$dst, $src1, $src2}",
               [(set VR128:$dst, (Int RC:$src1, imm:$src2))]>,
               TA, OpSize, VEX;
  let neverHasSideEffects = 1, mayStore = 1 in
  def mr : Ii8<0x1D, MRMDestMem, (outs),
               (ins x86memop:$dst, RC:$src1, i32i8imm:$src2),
               "vcvtps2ph\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
               TA, OpSize, VEX;
}

let Predicates = [HasAVX, HasF16C] in {
  defm VCVTPH2PS  : f16c_ph2ps<VR128, f64mem, int_x86_vcvtph2ps_128>;
  defm VCVTPH2PSY : f16c_ph2ps<VR256, f128mem, int_x86_vcvtph2ps_256>, VEX_L;
  defm VCVTPS2PH  : f16c_ps2ph<VR128, f64mem, int_x86_vcvtps2ph_128>;
  defm VCVTPS2PHY : f16c_ps2ph<VR256, f128mem, int_x86_vcvtps2ph_256>, VEX_L;
}
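
// Example: "vcvtps2ph $0, %xmm1, %xmm0" packs four f32 values into four f16
// values in the low 64 bits of xmm0 (hence the f64mem operand on the 128-bit
// forms); the immediate selects the rounding mode, with 0 meaning
// round-to-nearest-even.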

//===----------------------------------------------------------------------===//
// AVX2 Instructions
//===----------------------------------------------------------------------===//

/// AVX2_binop_rmi_int - AVX2 binary operator with 8-bit immediate
multiclass AVX2_binop_rmi_int<bits<8> opc, string OpcodeStr,
                 Intrinsic IntId, RegisterClass RC, PatFrag memop_frag,
                 X86MemOperand x86memop> {
  let isCommutable = 1 in
  def rri : AVX2AIi8<opc, MRMSrcReg, (outs RC:$dst),
        (ins RC:$src1, RC:$src2, u32u8imm:$src3),
        !strconcat(OpcodeStr,
            "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
        [(set RC:$dst, (IntId RC:$src1, RC:$src2, imm:$src3))]>,
        VEX_4V;
  def rmi : AVX2AIi8<opc, MRMSrcMem, (outs RC:$dst),
        (ins RC:$src1, x86memop:$src2, u32u8imm:$src3),
        !strconcat(OpcodeStr,
            "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
        [(set RC:$dst,
          (IntId RC:$src1,
           (bitconvert (memop_frag addr:$src2)), imm:$src3))]>,
        VEX_4V;
}

let isCommutable = 0 in {
defm VPBLENDD  : AVX2_binop_rmi_int<0x02, "vpblendd", int_x86_avx2_pblendd_128,
                                    VR128, memopv2i64, i128mem>;
defm VPBLENDDY : AVX2_binop_rmi_int<0x02, "vpblendd", int_x86_avx2_pblendd_256,
                                    VR256, memopv4i64, i256mem>, VEX_L;
}

//===----------------------------------------------------------------------===//
// VPBROADCAST - Load from memory and broadcast to all elements of the
//               destination operand
//
multiclass avx2_broadcast<bits<8> opc, string OpcodeStr,
                          X86MemOperand x86memop, PatFrag ld_frag,
                          Intrinsic Int128, Intrinsic Int256> {
  def rr : AVX28I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                  !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                  [(set VR128:$dst, (Int128 VR128:$src))]>, VEX;
  def rm : AVX28I<opc, MRMSrcMem, (outs VR128:$dst), (ins x86memop:$src),
                  !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                  [(set VR128:$dst,
                    (Int128 (scalar_to_vector (ld_frag addr:$src))))]>, VEX;
  def Yrr : AVX28I<opc, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
                   !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                   [(set VR256:$dst, (Int256 VR128:$src))]>, VEX, VEX_L;
  def Yrm : AVX28I<opc, MRMSrcMem, (outs VR256:$dst), (ins x86memop:$src),
                   !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                   [(set VR256:$dst,
                     (Int256 (scalar_to_vector (ld_frag addr:$src))))]>,
                   VEX, VEX_L;
}

defm VPBROADCASTB : avx2_broadcast<0x78, "vpbroadcastb", i8mem, loadi8,
                                   int_x86_avx2_pbroadcastb_128,
                                   int_x86_avx2_pbroadcastb_256>;
defm VPBROADCASTW : avx2_broadcast<0x79, "vpbroadcastw", i16mem, loadi16,
                                   int_x86_avx2_pbroadcastw_128,
                                   int_x86_avx2_pbroadcastw_256>;
defm VPBROADCASTD : avx2_broadcast<0x58, "vpbroadcastd", i32mem, loadi32,
                                   int_x86_avx2_pbroadcastd_128,
                                   int_x86_avx2_pbroadcastd_256>;
defm VPBROADCASTQ : avx2_broadcast<0x59, "vpbroadcastq", i64mem, loadi64,
                                   int_x86_avx2_pbroadcastq_128,
                                   int_x86_avx2_pbroadcastq_256>;

let Predicates = [HasAVX2] in {
  def : Pat<(v16i8 (X86VBroadcast (loadi8 addr:$src))),
            (VPBROADCASTBrm addr:$src)>;
  def : Pat<(v32i8 (X86VBroadcast (loadi8 addr:$src))),
            (VPBROADCASTBYrm addr:$src)>;
  def : Pat<(v8i16 (X86VBroadcast (loadi16 addr:$src))),
            (VPBROADCASTWrm addr:$src)>;
  def : Pat<(v16i16 (X86VBroadcast (loadi16 addr:$src))),
            (VPBROADCASTWYrm addr:$src)>;
  def : Pat<(v4i32 (X86VBroadcast (loadi32 addr:$src))),
            (VPBROADCASTDrm addr:$src)>;
  def : Pat<(v8i32 (X86VBroadcast (loadi32 addr:$src))),
            (VPBROADCASTDYrm addr:$src)>;
  def : Pat<(v2i64 (X86VBroadcast (loadi64 addr:$src))),
            (VPBROADCASTQrm addr:$src)>;
  def : Pat<(v4i64 (X86VBroadcast (loadi64 addr:$src))),
            (VPBROADCASTQYrm addr:$src)>;

  def : Pat<(v16i8 (X86VBroadcast (v16i8 VR128:$src))),
            (VPBROADCASTBrr VR128:$src)>;
  def : Pat<(v32i8 (X86VBroadcast (v16i8 VR128:$src))),
            (VPBROADCASTBYrr VR128:$src)>;
  def : Pat<(v8i16 (X86VBroadcast (v8i16 VR128:$src))),
            (VPBROADCASTWrr VR128:$src)>;
  def : Pat<(v16i16 (X86VBroadcast (v8i16 VR128:$src))),
            (VPBROADCASTWYrr VR128:$src)>;
  def : Pat<(v4i32 (X86VBroadcast (v4i32 VR128:$src))),
            (VPBROADCASTDrr VR128:$src)>;
  def : Pat<(v8i32 (X86VBroadcast (v4i32 VR128:$src))),
            (VPBROADCASTDYrr VR128:$src)>;
  def : Pat<(v2i64 (X86VBroadcast (v2i64 VR128:$src))),
            (VPBROADCASTQrr VR128:$src)>;
  def : Pat<(v4i64 (X86VBroadcast (v2i64 VR128:$src))),
            (VPBROADCASTQYrr VR128:$src)>;
  def : Pat<(v4f32 (X86VBroadcast (v4f32 VR128:$src))),
            (VBROADCASTSSrr VR128:$src)>;
  def : Pat<(v8f32 (X86VBroadcast (v4f32 VR128:$src))),
            (VBROADCASTSSYrr VR128:$src)>;
  def : Pat<(v2f64 (X86VBroadcast (v2f64 VR128:$src))),
            (VPBROADCASTQrr VR128:$src)>;
  def : Pat<(v4f64 (X86VBroadcast (v2f64 VR128:$src))),
            (VBROADCASTSDYrr VR128:$src)>;

  // Provide a fallback in case the load node used by the patterns above has
  // additional uses, which would prevent those patterns from being selected.
  let AddedComplexity = 20 in {
    def : Pat<(v4f32 (X86VBroadcast FR32:$src)),
              (VBROADCASTSSrr (COPY_TO_REGCLASS FR32:$src, VR128))>;
    def : Pat<(v8f32 (X86VBroadcast FR32:$src)),
              (VBROADCASTSSYrr (COPY_TO_REGCLASS FR32:$src, VR128))>;
    def : Pat<(v4f64 (X86VBroadcast FR64:$src)),
              (VBROADCASTSDYrr (COPY_TO_REGCLASS FR64:$src, VR128))>;

    def : Pat<(v4i32 (X86VBroadcast GR32:$src)),
              (VBROADCASTSSrr (COPY_TO_REGCLASS GR32:$src, VR128))>;
    def : Pat<(v8i32 (X86VBroadcast GR32:$src)),
              (VBROADCASTSSYrr (COPY_TO_REGCLASS GR32:$src, VR128))>;
    def : Pat<(v4i64 (X86VBroadcast GR64:$src)),
              (VBROADCASTSDYrr (COPY_TO_REGCLASS GR64:$src, VR128))>;
  }
}

// AVX1 broadcast patterns
let Predicates = [HasAVX1Only] in {
def : Pat<(v8i32 (X86VBroadcast (loadi32 addr:$src))),
          (VBROADCASTSSYrm addr:$src)>;
def : Pat<(v4i64 (X86VBroadcast (loadi64 addr:$src))),
          (VBROADCASTSDYrm addr:$src)>;
def : Pat<(v4i32 (X86VBroadcast (loadi32 addr:$src))),
          (VBROADCASTSSrm addr:$src)>;
}

let Predicates = [HasAVX] in {
def : Pat<(v8f32 (X86VBroadcast (loadf32 addr:$src))),
          (VBROADCASTSSYrm addr:$src)>;
def : Pat<(v4f64 (X86VBroadcast (loadf64 addr:$src))),
          (VBROADCASTSDYrm addr:$src)>;
def : Pat<(v4f32 (X86VBroadcast (loadf32 addr:$src))),
          (VBROADCASTSSrm addr:$src)>;

  // Provide a fallback in case the load node used by the patterns above has
  // additional uses, which would prevent those patterns from being selected.
  let AddedComplexity = 20 in {
  // 128-bit broadcasts:
  def : Pat<(v4f32 (X86VBroadcast FR32:$src)),
            (VPSHUFDri (COPY_TO_REGCLASS FR32:$src, VR128), 0)>;
  def : Pat<(v8f32 (X86VBroadcast FR32:$src)),
            (VINSERTF128rr (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)),
              (VPSHUFDri (COPY_TO_REGCLASS FR32:$src, VR128), 0), sub_xmm),
              (VPSHUFDri (COPY_TO_REGCLASS FR32:$src, VR128), 0), 1)>;
  def : Pat<(v4f64 (X86VBroadcast FR64:$src)),
            (VINSERTF128rr (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)),
              (VPSHUFDri (COPY_TO_REGCLASS FR64:$src, VR128), 0x44), sub_xmm),
              (VPSHUFDri (COPY_TO_REGCLASS FR64:$src, VR128), 0x44), 1)>;

  def : Pat<(v4i32 (X86VBroadcast GR32:$src)),
            (VPSHUFDri (COPY_TO_REGCLASS GR32:$src, VR128), 0)>;
  def : Pat<(v8i32 (X86VBroadcast GR32:$src)),
            (VINSERTF128rr (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)),
              (VPSHUFDri (COPY_TO_REGCLASS GR32:$src, VR128), 0), sub_xmm),
              (VPSHUFDri (COPY_TO_REGCLASS GR32:$src, VR128), 0), 1)>;
  def : Pat<(v4i64 (X86VBroadcast GR64:$src)),
            (VINSERTF128rr (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)),
              (VPSHUFDri (COPY_TO_REGCLASS GR64:$src, VR128), 0x44), sub_xmm),
              (VPSHUFDri (COPY_TO_REGCLASS GR64:$src, VR128), 0x44), 1)>;
  }
}

//===----------------------------------------------------------------------===//
// VPERM - Permute instructions
//
multiclass avx2_perm<bits<8> opc, string OpcodeStr, PatFrag mem_frag,
                     ValueType OpVT> {
  def Yrr : AVX28I<opc, MRMSrcReg, (outs VR256:$dst),
                   (ins VR256:$src1, VR256:$src2),
                   !strconcat(OpcodeStr,
                       "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   [(set VR256:$dst,
                     (OpVT (X86VPermv VR256:$src1, VR256:$src2)))]>,
                   VEX_4V, VEX_L;
  def Yrm : AVX28I<opc, MRMSrcMem, (outs VR256:$dst),
                   (ins VR256:$src1, i256mem:$src2),
                   !strconcat(OpcodeStr,
                       "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   [(set VR256:$dst,
                     (OpVT (X86VPermv VR256:$src1,
                            (bitconvert (mem_frag addr:$src2)))))]>,
                   VEX_4V, VEX_L;
}

defm VPERMD : avx2_perm<0x36, "vpermd", memopv4i64, v8i32>;
let ExeDomain = SSEPackedSingle in
defm VPERMPS : avx2_perm<0x16, "vpermps", memopv8f32, v8f32>;

multiclass avx2_perm_imm<bits<8> opc, string OpcodeStr, PatFrag mem_frag,
                         ValueType OpVT> {
  def Yri : AVX2AIi8<opc, MRMSrcReg, (outs VR256:$dst),
                     (ins VR256:$src1, i8imm:$src2),
                     !strconcat(OpcodeStr,
                         "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                     [(set VR256:$dst,
                       (OpVT (X86VPermi VR256:$src1, (i8 imm:$src2))))]>,
                     VEX, VEX_L;
  def Ymi : AVX2AIi8<opc, MRMSrcMem, (outs VR256:$dst),
                     (ins i256mem:$src1, i8imm:$src2),
                     !strconcat(OpcodeStr,
                         "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                     [(set VR256:$dst,
                       (OpVT (X86VPermi (mem_frag addr:$src1),
                              (i8 imm:$src2))))]>, VEX, VEX_L;
}

defm VPERMQ : avx2_perm_imm<0x00, "vpermq", memopv4i64, v4i64>, VEX_W;
let ExeDomain = SSEPackedDouble in
defm VPERMPD : avx2_perm_imm<0x01, "vpermpd", memopv4f64, v4f64>, VEX_W;

//===----------------------------------------------------------------------===//
// VPERM2I128 - Permute Integer Values in 128-bit chunks
//
def VPERM2I128rr : AVX2AIi8<0x46, MRMSrcReg, (outs VR256:$dst),
          (ins VR256:$src1, VR256:$src2, i8imm:$src3),
          "vperm2i128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          [(set VR256:$dst, (v4i64 (X86VPerm2x128 VR256:$src1, VR256:$src2,
                            (i8 imm:$src3))))]>, VEX_4V, VEX_L;
def VPERM2I128rm : AVX2AIi8<0x46, MRMSrcMem, (outs VR256:$dst),
          (ins VR256:$src1, f256mem:$src2, i8imm:$src3),
          "vperm2i128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          [(set VR256:$dst, (X86VPerm2x128 VR256:$src1, (memopv4i64 addr:$src2),
                             (i8 imm:$src3)))]>, VEX_4V, VEX_L;

let Predicates = [HasAVX2] in {
def : Pat<(v8i32 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPERM2I128rr VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v32i8 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPERM2I128rr VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v16i16 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPERM2I128rr VR256:$src1, VR256:$src2, imm:$imm)>;

def : Pat<(v32i8 (X86VPerm2x128 VR256:$src1, (bc_v32i8 (memopv4i64 addr:$src2)),
                  (i8 imm:$imm))),
          (VPERM2I128rm VR256:$src1, addr:$src2, imm:$imm)>;
def : Pat<(v16i16 (X86VPerm2x128 VR256:$src1,
                   (bc_v16i16 (memopv4i64 addr:$src2)), (i8 imm:$imm))),
          (VPERM2I128rm VR256:$src1, addr:$src2, imm:$imm)>;
def : Pat<(v8i32 (X86VPerm2x128 VR256:$src1, (bc_v8i32 (memopv4i64 addr:$src2)),
                  (i8 imm:$imm))),
          (VPERM2I128rm VR256:$src1, addr:$src2, imm:$imm)>;
}

//===----------------------------------------------------------------------===//
// VINSERTI128 - Insert packed integer values
//
let neverHasSideEffects = 1 in {
def VINSERTI128rr : AVX2AIi8<0x38, MRMSrcReg, (outs VR256:$dst),
          (ins VR256:$src1, VR128:$src2, i8imm:$src3),
          "vinserti128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, VEX_4V, VEX_L;
let mayLoad = 1 in
def VINSERTI128rm : AVX2AIi8<0x38, MRMSrcMem, (outs VR256:$dst),
          (ins VR256:$src1, i128mem:$src2, i8imm:$src3),
          "vinserti128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, VEX_4V, VEX_L;
}

let Predicates = [HasAVX2] in {
def : Pat<(vinsertf128_insert:$ins (v4i64 VR256:$src1), (v2i64 VR128:$src2),
                                   (iPTR imm)),
          (VINSERTI128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v8i32 VR256:$src1), (v4i32 VR128:$src2),
                                   (iPTR imm)),
          (VINSERTI128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v32i8 VR256:$src1), (v16i8 VR128:$src2),
                                   (iPTR imm)),
          (VINSERTI128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v16i16 VR256:$src1), (v8i16 VR128:$src2),
                                   (iPTR imm)),
          (VINSERTI128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;

def : Pat<(vinsertf128_insert:$ins (v4i64 VR256:$src1), (memopv2i64 addr:$src2),
                                   (iPTR imm)),
          (VINSERTI128rm VR256:$src1, addr:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v8i32 VR256:$src1),
                                   (bc_v4i32 (memopv2i64 addr:$src2)),
                                   (iPTR imm)),
          (VINSERTI128rm VR256:$src1, addr:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v32i8 VR256:$src1),
                                   (bc_v16i8 (memopv2i64 addr:$src2)),
                                   (iPTR imm)),
          (VINSERTI128rm VR256:$src1, addr:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v16i16 VR256:$src1),
                                   (bc_v8i16 (memopv2i64 addr:$src2)),
                                   (iPTR imm)),
          (VINSERTI128rm VR256:$src1, addr:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
}

//===----------------------------------------------------------------------===//
// VEXTRACTI128 - Extract packed integer values
//
def VEXTRACTI128rr : AVX2AIi8<0x39, MRMDestReg, (outs VR128:$dst),
          (ins VR256:$src1, i8imm:$src2),
          "vextracti128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
          [(set VR128:$dst,
            (int_x86_avx2_vextracti128 VR256:$src1, imm:$src2))]>,
          VEX, VEX_L;
let neverHasSideEffects = 1, mayStore = 1 in
def VEXTRACTI128mr : AVX2AIi8<0x39, MRMDestMem, (outs),
          (ins i128mem:$dst, VR256:$src1, i8imm:$src2),
          "vextracti128\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
          VEX, VEX_L;

let Predicates = [HasAVX2] in {
def : Pat<(vextractf128_extract:$ext VR256:$src1, (iPTR imm)),
          (v2i64 (VEXTRACTI128rr
                    (v4i64 VR256:$src1),
                    (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
def : Pat<(vextractf128_extract:$ext VR256:$src1, (iPTR imm)),
          (v4i32 (VEXTRACTI128rr
                    (v8i32 VR256:$src1),
                    (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
def : Pat<(vextractf128_extract:$ext VR256:$src1, (iPTR imm)),
          (v8i16 (VEXTRACTI128rr
                    (v16i16 VR256:$src1),
                    (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
def : Pat<(vextractf128_extract:$ext VR256:$src1, (iPTR imm)),
          (v16i8 (VEXTRACTI128rr
                    (v32i8 VR256:$src1),
                    (EXTRACT_get_vextractf128_imm VR128:$ext)))>;

def : Pat<(alignedstore (v2i64 (vextractf128_extract:$ext (v4i64 VR256:$src1),
                                (iPTR imm))), addr:$dst),
          (VEXTRACTI128mr addr:$dst, VR256:$src1,
                          (EXTRACT_get_vextractf128_imm VR128:$ext))>;
def : Pat<(alignedstore (v4i32 (vextractf128_extract:$ext (v8i32 VR256:$src1),
                                (iPTR imm))), addr:$dst),
          (VEXTRACTI128mr addr:$dst, VR256:$src1,
                          (EXTRACT_get_vextractf128_imm VR128:$ext))>;
def : Pat<(alignedstore (v8i16 (vextractf128_extract:$ext (v16i16 VR256:$src1),
                                (iPTR imm))), addr:$dst),
          (VEXTRACTI128mr addr:$dst, VR256:$src1,
                          (EXTRACT_get_vextractf128_imm VR128:$ext))>;
def : Pat<(alignedstore (v16i8 (vextractf128_extract:$ext (v32i8 VR256:$src1),
                                (iPTR imm))), addr:$dst),
          (VEXTRACTI128mr addr:$dst, VR256:$src1,
                          (EXTRACT_get_vextractf128_imm VR128:$ext))>;
}

//===----------------------------------------------------------------------===//
// VPMASKMOV - Conditional SIMD Integer Packed Loads and Stores
//
multiclass avx2_pmovmask<string OpcodeStr,
                         Intrinsic IntLd128, Intrinsic IntLd256,
                         Intrinsic IntSt128, Intrinsic IntSt256> {
  def rm  : AVX28I<0x8c, MRMSrcMem, (outs VR128:$dst),
             (ins VR128:$src1, i128mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR128:$dst, (IntLd128 addr:$src2, VR128:$src1))]>, VEX_4V;
  def Yrm : AVX28I<0x8c, MRMSrcMem, (outs VR256:$dst),
             (ins VR256:$src1, i256mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR256:$dst, (IntLd256 addr:$src2, VR256:$src1))]>,
             VEX_4V, VEX_L;
  def mr  : AVX28I<0x8e, MRMDestMem, (outs),
             (ins i128mem:$dst, VR128:$src1, VR128:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(IntSt128 addr:$dst, VR128:$src1, VR128:$src2)]>, VEX_4V;
  def Ymr : AVX28I<0x8e, MRMDestMem, (outs),
             (ins i256mem:$dst, VR256:$src1, VR256:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(IntSt256 addr:$dst, VR256:$src1, VR256:$src2)]>, VEX_4V, VEX_L;
}

defm VPMASKMOVD : avx2_pmovmask<"vpmaskmovd",
                                int_x86_avx2_maskload_d,
                                int_x86_avx2_maskload_d_256,
                                int_x86_avx2_maskstore_d,
                                int_x86_avx2_maskstore_d_256>;
defm VPMASKMOVQ : avx2_pmovmask<"vpmaskmovq",
                                int_x86_avx2_maskload_q,
                                int_x86_avx2_maskload_q_256,
                                int_x86_avx2_maskstore_q,
                                int_x86_avx2_maskstore_q_256>, VEX_W;

//===----------------------------------------------------------------------===//
// Variable Bit Shifts
//
multiclass avx2_var_shift<bits<8> opc, string OpcodeStr, SDNode OpNode,
                          ValueType vt128, ValueType vt256> {
  def rr  : AVX28I<opc, MRMSrcReg, (outs VR128:$dst),
             (ins VR128:$src1, VR128:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR128:$dst,
               (vt128 (OpNode VR128:$src1, (vt128 VR128:$src2))))]>,
             VEX_4V;
  def rm  : AVX28I<opc, MRMSrcMem, (outs VR128:$dst),
             (ins VR128:$src1, i128mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR128:$dst,
               (vt128 (OpNode VR128:$src1,
                       (vt128 (bitconvert (memopv2i64 addr:$src2))))))]>,
             VEX_4V;
  def Yrr : AVX28I<opc, MRMSrcReg, (outs VR256:$dst),
             (ins VR256:$src1, VR256:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR256:$dst,
               (vt256 (OpNode VR256:$src1, (vt256 VR256:$src2))))]>,
             VEX_4V, VEX_L;
  def Yrm : AVX28I<opc, MRMSrcMem, (outs VR256:$dst),
             (ins VR256:$src1, i256mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR256:$dst,
               (vt256 (OpNode VR256:$src1,
                       (vt256 (bitconvert (memopv4i64 addr:$src2))))))]>,
             VEX_4V, VEX_L;
}

defm VPSLLVD : avx2_var_shift<0x47, "vpsllvd", shl, v4i32, v8i32>;
defm VPSLLVQ : avx2_var_shift<0x47, "vpsllvq", shl, v2i64, v4i64>, VEX_W;
defm VPSRLVD : avx2_var_shift<0x45, "vpsrlvd", srl, v4i32, v8i32>;
defm VPSRLVQ : avx2_var_shift<0x45, "vpsrlvq", srl, v2i64, v4i64>, VEX_W;
defm VPSRAVD : avx2_var_shift<0x46, "vpsravd", sra, v4i32, v8i32>;
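
// Unlike the legacy PSLL/PSRL family, each lane here is shifted by its own
// count: "vpsllvd %xmm2, %xmm1, %xmm0" computes xmm0[i] = xmm1[i] << xmm2[i].
// Counts at or above the element width yield 0 (or, for vpsravd, the
// sign-filled value), so these match the shl/srl/sra SDNode semantics only
// for in-range counts, which is all the nodes guarantee anyway.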

//===----------------------------------------------------------------------===//
// VGATHER - GATHER Operations
multiclass avx2_gather<bits<8> opc, string OpcodeStr, RegisterClass RC256,
                       X86MemOperand memop128, X86MemOperand memop256> {
  def rm  : AVX28I<opc, MRMSrcMem, (outs VR128:$dst, VR128:$mask_wb),
            (ins VR128:$src1, memop128:$src2, VR128:$mask),
            !strconcat(OpcodeStr,
              "\t{$mask, $src2, $dst|$dst, $src2, $mask}"),
            []>, VEX_4VOp3;
  def Yrm : AVX28I<opc, MRMSrcMem, (outs RC256:$dst, RC256:$mask_wb),
            (ins RC256:$src1, memop256:$src2, RC256:$mask),
            !strconcat(OpcodeStr,
              "\t{$mask, $src2, $dst|$dst, $src2, $mask}"),
            []>, VEX_4VOp3, VEX_L;
}

let mayLoad = 1, Constraints = "$src1 = $dst, $mask = $mask_wb" in {
  defm VGATHERDPD : avx2_gather<0x92, "vgatherdpd", VR256, vx64mem, vx64mem>, VEX_W;
  defm VGATHERQPD : avx2_gather<0x93, "vgatherqpd", VR256, vx64mem, vy64mem>, VEX_W;
  defm VGATHERDPS : avx2_gather<0x92, "vgatherdps", VR256, vx32mem, vy32mem>;
  defm VGATHERQPS : avx2_gather<0x93, "vgatherqps", VR128, vx32mem, vy32mem>;
  defm VPGATHERDQ : avx2_gather<0x90, "vpgatherdq", VR256, vx64mem, vx64mem>, VEX_W;
  defm VPGATHERQQ : avx2_gather<0x91, "vpgatherqq", VR256, vx64mem, vy64mem>, VEX_W;
  defm VPGATHERDD : avx2_gather<0x90, "vpgatherdd", VR256, vx32mem, vy32mem>;
  defm VPGATHERQD : avx2_gather<0x91, "vpgatherqd", VR128, vx32mem, vy32mem>;
}
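
// Operationally (illustrative): "vpgatherdd %ymm2, (%rax,%ymm1,4), %ymm0"
// loads each element whose mask sign bit in ymm2 is set from base plus the
// scaled per-lane index, clearing the consumed mask bits as it proceeds;
// the tied "$mask = $mask_wb" constraint above models that destructive
// mask write-back.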