//===-- X86InstrSSE.td - SSE Instruction Set ---------------*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the X86 SSE instruction set, defining the instructions,
// and properties of the instructions which are needed for code generation,
// machine code emission, and analysis.
//
//===----------------------------------------------------------------------===//
class OpndItins<InstrItinClass arg_rr, InstrItinClass arg_rm> {
  InstrItinClass rr = arg_rr;
  InstrItinClass rm = arg_rm;
}

class SizeItins<OpndItins arg_s, OpndItins arg_d> {
  OpndItins s = arg_s;
  OpndItins d = arg_d;
}

class ShiftOpndItins<InstrItinClass arg_rr, InstrItinClass arg_rm,
                     InstrItinClass arg_ri> {
  InstrItinClass rr = arg_rr;
  InstrItinClass rm = arg_rm;
  InstrItinClass ri = arg_ri;
}
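
// A quick orientation for the classes above: OpndItins bundles the
// itineraries for the register-register (rr) and register-memory (rm) forms
// of one operation, ShiftOpndItins adds the register-immediate (ri) form used
// by the vector shifts, and SizeItins pairs the f32 (s) and f64 (d) flavors
// so a multiclass can pick itins.s or itins.d by element size.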

def SSE_ALU_F32S : OpndItins<
  IIC_SSE_ALU_F32S_RR, IIC_SSE_ALU_F32S_RM
>;

def SSE_ALU_F64S : OpndItins<
  IIC_SSE_ALU_F64S_RR, IIC_SSE_ALU_F64S_RM
>;

def SSE_ALU_ITINS_S : SizeItins<
  SSE_ALU_F32S, SSE_ALU_F64S
>;

def SSE_MUL_F32S : OpndItins<
  IIC_SSE_MUL_F32S_RR, IIC_SSE_MUL_F32S_RM
>;

def SSE_MUL_F64S : OpndItins<
  IIC_SSE_MUL_F64S_RR, IIC_SSE_MUL_F64S_RM
>;

def SSE_MUL_ITINS_S : SizeItins<
  SSE_MUL_F32S, SSE_MUL_F64S
>;

def SSE_DIV_F32S : OpndItins<
  IIC_SSE_DIV_F32S_RR, IIC_SSE_DIV_F32S_RM
>;

def SSE_DIV_F64S : OpndItins<
  IIC_SSE_DIV_F64S_RR, IIC_SSE_DIV_F64S_RM
>;

def SSE_DIV_ITINS_S : SizeItins<
  SSE_DIV_F32S, SSE_DIV_F64S
>;

def SSE_ALU_F32P : OpndItins<
  IIC_SSE_ALU_F32P_RR, IIC_SSE_ALU_F32P_RM
>;

def SSE_ALU_F64P : OpndItins<
  IIC_SSE_ALU_F64P_RR, IIC_SSE_ALU_F64P_RM
>;

def SSE_ALU_ITINS_P : SizeItins<
  SSE_ALU_F32P, SSE_ALU_F64P
>;

def SSE_MUL_F32P : OpndItins<
  IIC_SSE_MUL_F32P_RR, IIC_SSE_MUL_F32P_RM
>;

def SSE_MUL_F64P : OpndItins<
  IIC_SSE_MUL_F64P_RR, IIC_SSE_MUL_F64P_RM
>;

def SSE_MUL_ITINS_P : SizeItins<
  SSE_MUL_F32P, SSE_MUL_F64P
>;

def SSE_DIV_F32P : OpndItins<
  IIC_SSE_DIV_F32P_RR, IIC_SSE_DIV_F32P_RM
>;

def SSE_DIV_F64P : OpndItins<
  IIC_SSE_DIV_F64P_RR, IIC_SSE_DIV_F64P_RM
>;

def SSE_DIV_ITINS_P : SizeItins<
  SSE_DIV_F32P, SSE_DIV_F64P
>;

def SSE_BIT_ITINS_P : OpndItins<
  IIC_SSE_BIT_P_RR, IIC_SSE_BIT_P_RM
>;

def SSE_INTALU_ITINS_P : OpndItins<
  IIC_SSE_INTALU_P_RR, IIC_SSE_INTALU_P_RM
>;

def SSE_INTALUQ_ITINS_P : OpndItins<
  IIC_SSE_INTALUQ_P_RR, IIC_SSE_INTALUQ_P_RM
>;

def SSE_INTMUL_ITINS_P : OpndItins<
  IIC_SSE_INTMUL_P_RR, IIC_SSE_INTMUL_P_RM
>;

def SSE_INTSHIFT_ITINS_P : ShiftOpndItins<
  IIC_SSE_INTSH_P_RR, IIC_SSE_INTSH_P_RM, IIC_SSE_INTSH_P_RI
>;

def SSE_MOVA_ITINS : OpndItins<
  IIC_SSE_MOVA_P_RR, IIC_SSE_MOVA_P_RM
>;

def SSE_MOVU_ITINS : OpndItins<
  IIC_SSE_MOVU_P_RR, IIC_SSE_MOVU_P_RM
>;

//===----------------------------------------------------------------------===//
// SSE 1 & 2 Instructions Classes
//===----------------------------------------------------------------------===//

/// sse12_fp_scalar - SSE 1 & 2 scalar instructions class
multiclass sse12_fp_scalar<bits<8> opc, string OpcodeStr, SDNode OpNode,
                           RegisterClass RC, X86MemOperand x86memop,
                           OpndItins itins,
                           bit Is2Addr = 1> {
  let isCommutable = 1 in {
    def rr : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
                !if(Is2Addr,
                    !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                    !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
                [(set RC:$dst, (OpNode RC:$src1, RC:$src2))], itins.rr>;
  }
  def rm : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
              !if(Is2Addr,
                  !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                  !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
              [(set RC:$dst, (OpNode RC:$src1, (load addr:$src2)))], itins.rm>;
}
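
// The !if(Is2Addr, ...) selection above chooses between the two-operand SSE
// assembly syntax, where $src1 is tied to $dst by a "$src1 = $dst" constraint
// at the instantiation site, and the three-operand AVX syntax with a separate
// destination register. The same idiom recurs throughout this file.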

/// sse12_fp_scalar_int - SSE 1 & 2 scalar instructions intrinsics class
multiclass sse12_fp_scalar_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
                               string asm, string SSEVer, string FPSizeStr,
                               Operand memopr, ComplexPattern mem_cpat,
                               OpndItins itins, bit Is2Addr = 1> {
  def rr_Int : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (!cast<Intrinsic>(
                 !strconcat("int_x86_sse", SSEVer, "_", OpcodeStr, FPSizeStr))
             RC:$src1, RC:$src2))], itins.rr>;
  def rm_Int : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, memopr:$src2),
       !if(Is2Addr,
           !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (!cast<Intrinsic>(!strconcat("int_x86_sse",
                                          SSEVer, "_", OpcodeStr, FPSizeStr))
             RC:$src1, mem_cpat:$src2))], itins.rm>;
}
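
// !strconcat and !cast<Intrinsic> assemble the intrinsic's record name from
// the multiclass arguments at TableGen time; for example, SSEVer = "",
// OpcodeStr = "add" and FPSizeStr = "ss" resolve to int_x86_sse_add_ss,
// i.e. the llvm.x86.sse.add.ss intrinsic.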

/// sse12_fp_packed - SSE 1 & 2 packed instructions class
multiclass sse12_fp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
                           RegisterClass RC, ValueType vt,
                           X86MemOperand x86memop, PatFrag mem_frag,
                           Domain d, OpndItins itins, bit Is2Addr = 1> {
  let isCommutable = 1 in
    def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], itins.rr, d>;

  def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (OpNode RC:$src1, (mem_frag addr:$src2)))],
       itins.rm, d>;
}

/// sse12_fp_packed_logical_rm - SSE 1 & 2 packed instructions class
multiclass sse12_fp_packed_logical_rm<bits<8> opc, RegisterClass RC, Domain d,
                                      string OpcodeStr, X86MemOperand x86memop,
                                      list<dag> pat_rr, list<dag> pat_rm,
                                      bit Is2Addr = 1,
                                      bit rr_hasSideEffects = 0> {
  let isCommutable = 1, neverHasSideEffects = rr_hasSideEffects in
    def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       pat_rr, IIC_DEFAULT, d>;
  def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       pat_rm, IIC_DEFAULT, d>;
}

/// sse12_fp_packed_int - SSE 1 & 2 packed instructions intrinsics class
multiclass sse12_fp_packed_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
                               string asm, string SSEVer, string FPSizeStr,
                               X86MemOperand x86memop, PatFrag mem_frag,
                               Domain d, OpndItins itins, bit Is2Addr = 1> {
  def rr_Int : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (!cast<Intrinsic>(
                     !strconcat("int_x86_", SSEVer, "_", OpcodeStr, FPSizeStr))
             RC:$src1, RC:$src2))], IIC_DEFAULT, d>;
  def rm_Int : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
           !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (!cast<Intrinsic>(
                     !strconcat("int_x86_", SSEVer, "_", OpcodeStr, FPSizeStr))
             RC:$src1, (mem_frag addr:$src2)))], IIC_DEFAULT, d>;
}

//===----------------------------------------------------------------------===//
// Non-instruction patterns
//===----------------------------------------------------------------------===//

// A vector extract of the first f32/f64 position is a subregister copy
def : Pat<(f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
          (COPY_TO_REGCLASS (v4f32 VR128:$src), FR32)>;
def : Pat<(f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
          (COPY_TO_REGCLASS (v2f64 VR128:$src), FR64)>;
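
// COPY_TO_REGCLASS lowers to a plain COPY machine instruction; FR32 and FR64
// are just the XMM registers viewed as scalars, so the register allocator can
// usually coalesce that copy away entirely.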

// A 128-bit subvector extract from the first 256-bit vector position
// is a subregister copy that needs no instruction.
def : Pat<(v4i32 (extract_subvector (v8i32 VR256:$src), (iPTR 0))),
          (v4i32 (EXTRACT_SUBREG (v8i32 VR256:$src), sub_xmm))>;
def : Pat<(v4f32 (extract_subvector (v8f32 VR256:$src), (iPTR 0))),
          (v4f32 (EXTRACT_SUBREG (v8f32 VR256:$src), sub_xmm))>;

def : Pat<(v2i64 (extract_subvector (v4i64 VR256:$src), (iPTR 0))),
          (v2i64 (EXTRACT_SUBREG (v4i64 VR256:$src), sub_xmm))>;
def : Pat<(v2f64 (extract_subvector (v4f64 VR256:$src), (iPTR 0))),
          (v2f64 (EXTRACT_SUBREG (v4f64 VR256:$src), sub_xmm))>;

def : Pat<(v8i16 (extract_subvector (v16i16 VR256:$src), (iPTR 0))),
          (v8i16 (EXTRACT_SUBREG (v16i16 VR256:$src), sub_xmm))>;
def : Pat<(v16i8 (extract_subvector (v32i8 VR256:$src), (iPTR 0))),
          (v16i8 (EXTRACT_SUBREG (v32i8 VR256:$src), sub_xmm))>;

// A 128-bit subvector insert to the first 256-bit vector position
// is a subregister copy that needs no instruction.
let AddedComplexity = 25 in { // to give priority over vinsertf128rm
def : Pat<(insert_subvector undef, (v2i64 VR128:$src), (iPTR 0)),
          (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
def : Pat<(insert_subvector undef, (v2f64 VR128:$src), (iPTR 0)),
          (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
def : Pat<(insert_subvector undef, (v4i32 VR128:$src), (iPTR 0)),
          (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
def : Pat<(insert_subvector undef, (v4f32 VR128:$src), (iPTR 0)),
          (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
def : Pat<(insert_subvector undef, (v8i16 VR128:$src), (iPTR 0)),
          (INSERT_SUBREG (v16i16 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
def : Pat<(insert_subvector undef, (v16i8 VR128:$src), (iPTR 0)),
          (INSERT_SUBREG (v32i8 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
}
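
// In the patterns above, IMPLICIT_DEF supplies an undefined 256-bit value
// whose low half is then overwritten by INSERT_SUBREG, so the whole insert
// becomes a plain subregister copy; the upper 128 bits stay undefined, which
// is exactly what inserting into an undef vector permits.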

// Implicitly promote a 32-bit scalar to a vector.
def : Pat<(v4f32 (scalar_to_vector FR32:$src)),
          (COPY_TO_REGCLASS FR32:$src, VR128)>;
def : Pat<(v8f32 (scalar_to_vector FR32:$src)),
          (COPY_TO_REGCLASS FR32:$src, VR128)>;
// Implicitly promote a 64-bit scalar to a vector.
def : Pat<(v2f64 (scalar_to_vector FR64:$src)),
          (COPY_TO_REGCLASS FR64:$src, VR128)>;
def : Pat<(v4f64 (scalar_to_vector FR64:$src)),
          (COPY_TO_REGCLASS FR64:$src, VR128)>;

// Bitcasts between 128-bit vector types. Return the original type since
// no instruction is needed for the conversion
let Predicates = [HasSSE2] in {
  def : Pat<(v2i64 (bitconvert (v4i32 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v2i64 (bitconvert (v8i16 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v2i64 (bitconvert (v16i8 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v2i64 (bitconvert (v2f64 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v2i64 (bitconvert (v4f32 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v2i64 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v8i16 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v16i8 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v2f64 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v4f32 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v2i64 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v4i32 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v16i8 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v2f64 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v4f32 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v2i64 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v4i32 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v8i16 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v2f64 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v4f32 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v2i64 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v4i32 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v8i16 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v16i8 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v2f64 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v2i64 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v4i32 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v8i16 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v16i8 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v4f32 VR128:$src))), (v2f64 VR128:$src)>;
}
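
// The block above spells out all 30 ordered pairs of the six 128-bit vector
// types; each direction has to be listed explicitly because bitconvert
// patterns are not implicitly symmetric or transitive.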

// Bitcasts between 256-bit vector types. Return the original type since
// no instruction is needed for the conversion
let Predicates = [HasAVX] in {
  def : Pat<(v4f64  (bitconvert (v8f32 VR256:$src))),  (v4f64 VR256:$src)>;
  def : Pat<(v4f64  (bitconvert (v8i32 VR256:$src))),  (v4f64 VR256:$src)>;
  def : Pat<(v4f64  (bitconvert (v4i64 VR256:$src))),  (v4f64 VR256:$src)>;
  def : Pat<(v4f64  (bitconvert (v16i16 VR256:$src))), (v4f64 VR256:$src)>;
  def : Pat<(v4f64  (bitconvert (v32i8 VR256:$src))),  (v4f64 VR256:$src)>;
  def : Pat<(v8f32  (bitconvert (v8i32 VR256:$src))),  (v8f32 VR256:$src)>;
  def : Pat<(v8f32  (bitconvert (v4i64 VR256:$src))),  (v8f32 VR256:$src)>;
  def : Pat<(v8f32  (bitconvert (v4f64 VR256:$src))),  (v8f32 VR256:$src)>;
  def : Pat<(v8f32  (bitconvert (v32i8 VR256:$src))),  (v8f32 VR256:$src)>;
  def : Pat<(v8f32  (bitconvert (v16i16 VR256:$src))), (v8f32 VR256:$src)>;
  def : Pat<(v4i64  (bitconvert (v8f32 VR256:$src))),  (v4i64 VR256:$src)>;
  def : Pat<(v4i64  (bitconvert (v8i32 VR256:$src))),  (v4i64 VR256:$src)>;
  def : Pat<(v4i64  (bitconvert (v4f64 VR256:$src))),  (v4i64 VR256:$src)>;
  def : Pat<(v4i64  (bitconvert (v32i8 VR256:$src))),  (v4i64 VR256:$src)>;
  def : Pat<(v4i64  (bitconvert (v16i16 VR256:$src))), (v4i64 VR256:$src)>;
  def : Pat<(v32i8  (bitconvert (v4f64 VR256:$src))),  (v32i8 VR256:$src)>;
  def : Pat<(v32i8  (bitconvert (v4i64 VR256:$src))),  (v32i8 VR256:$src)>;
  def : Pat<(v32i8  (bitconvert (v8f32 VR256:$src))),  (v32i8 VR256:$src)>;
  def : Pat<(v32i8  (bitconvert (v8i32 VR256:$src))),  (v32i8 VR256:$src)>;
  def : Pat<(v32i8  (bitconvert (v16i16 VR256:$src))), (v32i8 VR256:$src)>;
  def : Pat<(v8i32  (bitconvert (v32i8 VR256:$src))),  (v8i32 VR256:$src)>;
  def : Pat<(v8i32  (bitconvert (v16i16 VR256:$src))), (v8i32 VR256:$src)>;
  def : Pat<(v8i32  (bitconvert (v8f32 VR256:$src))),  (v8i32 VR256:$src)>;
  def : Pat<(v8i32  (bitconvert (v4i64 VR256:$src))),  (v8i32 VR256:$src)>;
  def : Pat<(v8i32  (bitconvert (v4f64 VR256:$src))),  (v8i32 VR256:$src)>;
  def : Pat<(v16i16 (bitconvert (v8f32 VR256:$src))),  (v16i16 VR256:$src)>;
  def : Pat<(v16i16 (bitconvert (v8i32 VR256:$src))),  (v16i16 VR256:$src)>;
  def : Pat<(v16i16 (bitconvert (v4i64 VR256:$src))),  (v16i16 VR256:$src)>;
  def : Pat<(v16i16 (bitconvert (v4f64 VR256:$src))),  (v16i16 VR256:$src)>;
  def : Pat<(v16i16 (bitconvert (v32i8 VR256:$src))),  (v16i16 VR256:$src)>;
}

// Alias instructions that map fld0 to xorps for sse or vxorps for avx.
// This is expanded by ExpandPostRAPseudos.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
    isPseudo = 1 in {
  def FsFLD0SS : I<0, Pseudo, (outs FR32:$dst), (ins), "",
                   [(set FR32:$dst, fp32imm0)]>, Requires<[HasSSE1]>;
  def FsFLD0SD : I<0, Pseudo, (outs FR64:$dst), (ins), "",
                   [(set FR64:$dst, fpimm0)]>, Requires<[HasSSE2]>;
}

//===----------------------------------------------------------------------===//
// AVX & SSE - Zero/One Vectors
//===----------------------------------------------------------------------===//

// Alias instruction that maps zero vector to pxor / xorp* for sse.
// This is expanded by ExpandPostRAPseudos to an xorps / vxorps, and then
// swizzled by ExecutionDepsFix to pxor.
// We set canFoldAsLoad because this can be converted to a constant-pool
// load of an all-zeros value if folding it would be beneficial.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
    isPseudo = 1 in {
def V_SET0 : I<0, Pseudo, (outs VR128:$dst), (ins), "",
               [(set VR128:$dst, (v4f32 immAllZerosV))]>;
}

def : Pat<(v2f64 immAllZerosV), (V_SET0)>;
def : Pat<(v4i32 immAllZerosV), (V_SET0)>;
def : Pat<(v2i64 immAllZerosV), (V_SET0)>;
def : Pat<(v8i16 immAllZerosV), (V_SET0)>;
def : Pat<(v16i8 immAllZerosV), (V_SET0)>;

// The same as done above but for AVX.  The 256-bit AVX1 ISA doesn't support
// packed integer (PI) operations, and doesn't need them because on Sandy
// Bridge the register is set to zero at the rename stage without using any
// execution unit, so SET0PSY and SET0PDY can be used for vector int
// instructions without penalty.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
    isPseudo = 1, Predicates = [HasAVX] in {
def AVX_SET0 : I<0, Pseudo, (outs VR256:$dst), (ins), "",
                 [(set VR256:$dst, (v8f32 immAllZerosV))]>;
}

let Predicates = [HasAVX] in
  def : Pat<(v4f64 immAllZerosV), (AVX_SET0)>;

let Predicates = [HasAVX2] in {
  def : Pat<(v4i64 immAllZerosV), (AVX_SET0)>;
  def : Pat<(v8i32 immAllZerosV), (AVX_SET0)>;
  def : Pat<(v16i16 immAllZerosV), (AVX_SET0)>;
  def : Pat<(v32i8 immAllZerosV), (AVX_SET0)>;
}

// AVX1 has no support for 256-bit integer instructions, but since the 128-bit
// VPXOR instruction writes zero to its upper part, it's safe to build zeros.
let Predicates = [HasAVX1Only] in {
def : Pat<(v32i8 immAllZerosV), (SUBREG_TO_REG (i8 0), (V_SET0), sub_xmm)>;
def : Pat<(bc_v32i8 (v8f32 immAllZerosV)),
          (SUBREG_TO_REG (i8 0), (V_SET0), sub_xmm)>;

def : Pat<(v16i16 immAllZerosV), (SUBREG_TO_REG (i16 0), (V_SET0), sub_xmm)>;
def : Pat<(bc_v16i16 (v8f32 immAllZerosV)),
          (SUBREG_TO_REG (i16 0), (V_SET0), sub_xmm)>;

def : Pat<(v8i32 immAllZerosV), (SUBREG_TO_REG (i32 0), (V_SET0), sub_xmm)>;
def : Pat<(bc_v8i32 (v8f32 immAllZerosV)),
          (SUBREG_TO_REG (i32 0), (V_SET0), sub_xmm)>;

def : Pat<(v4i64 immAllZerosV), (SUBREG_TO_REG (i64 0), (V_SET0), sub_xmm)>;
def : Pat<(bc_v4i64 (v8f32 immAllZerosV)),
          (SUBREG_TO_REG (i64 0), (V_SET0), sub_xmm)>;
}
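
// SUBREG_TO_REG here asserts that the 256-bit value whose sub_xmm half is
// V_SET0's result has zero upper bits, without emitting any instruction for
// the widening; the leading immediate operand is only a marker, not data.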

// We set canFoldAsLoad because this can be converted to a constant-pool
// load of an all-ones value if folding it would be beneficial.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
    isPseudo = 1 in {
  def V_SETALLONES : I<0, Pseudo, (outs VR128:$dst), (ins), "",
                       [(set VR128:$dst, (v4i32 immAllOnesV))]>;
  let Predicates = [HasAVX2] in
  def AVX2_SETALLONES : I<0, Pseudo, (outs VR256:$dst), (ins), "",
                          [(set VR256:$dst, (v8i32 immAllOnesV))]>;
}
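
// These pseudos are expanded after register allocation to a pcmpeqd (or
// vpcmpeqd for the 256-bit AVX2 form) of the register with itself, which
// sets every bit of the destination.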

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move FP Scalar Instructions
//
// Move Instructions. Register-to-register movss/movsd is not used for FR32/64
// register copies because it's a partial register update; FsMOVAPSrr/FsMOVAPDrr
// is used instead. Register-to-register movss/movsd is not modeled as an
// INSERT_SUBREG because INSERT_SUBREG requires that the insert be implementable
// in terms of a copy, and, as just mentioned, we don't use movss/movsd for
// copies.
//===----------------------------------------------------------------------===//

class sse12_move_rr<RegisterClass RC, SDNode OpNode, ValueType vt, string asm> :
      SI<0x10, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, RC:$src2), asm,
      [(set VR128:$dst, (vt (OpNode VR128:$src1,
                             (scalar_to_vector RC:$src2))))],
      IIC_SSE_MOV_S_RR>;

// Loading from memory automatically zeroing upper bits.
class sse12_move_rm<RegisterClass RC, X86MemOperand x86memop,
                    PatFrag mem_pat, string OpcodeStr> :
      SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
         !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
         [(set RC:$dst, (mem_pat addr:$src))],
         IIC_SSE_MOV_S_RM>;

// AVX
def VMOVSSrr : sse12_move_rr<FR32, X86Movss, v4f32,
                "movss\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XS, VEX_4V,
                VEX_LIG;
def VMOVSDrr : sse12_move_rr<FR64, X86Movsd, v2f64,
                "movsd\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XD, VEX_4V,
                VEX_LIG;

// For the disassembler
let isCodeGenOnly = 1 in {
  def VMOVSSrr_REV : SI<0x11, MRMDestReg, (outs VR128:$dst),
                        (ins VR128:$src1, FR32:$src2),
                        "movss\t{$src2, $src1, $dst|$dst, $src1, $src2}", [],
                        IIC_SSE_MOV_S_RR>,
                        XS, VEX_4V, VEX_LIG;
  def VMOVSDrr_REV : SI<0x11, MRMDestReg, (outs VR128:$dst),
                        (ins VR128:$src1, FR64:$src2),
                        "movsd\t{$src2, $src1, $dst|$dst, $src1, $src2}", [],
                        IIC_SSE_MOV_S_RR>,
                        XD, VEX_4V, VEX_LIG;
}

let canFoldAsLoad = 1, isReMaterializable = 1 in {
  def VMOVSSrm : sse12_move_rm<FR32, f32mem, loadf32, "movss">, XS, VEX,
                 VEX_LIG;
  let AddedComplexity = 20 in
    def VMOVSDrm : sse12_move_rm<FR64, f64mem, loadf64, "movsd">, XD, VEX,
                   VEX_LIG;
}

def VMOVSSmr : SI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
                  "movss\t{$src, $dst|$dst, $src}",
                  [(store FR32:$src, addr:$dst)], IIC_SSE_MOV_S_MR>,
                  XS, VEX, VEX_LIG;
def VMOVSDmr : SI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
                  "movsd\t{$src, $dst|$dst, $src}",
                  [(store FR64:$src, addr:$dst)], IIC_SSE_MOV_S_MR>,
                  XD, VEX, VEX_LIG;

let Constraints = "$src1 = $dst" in {
  def MOVSSrr : sse12_move_rr<FR32, X86Movss, v4f32,
                          "movss\t{$src2, $dst|$dst, $src2}">, XS;
  def MOVSDrr : sse12_move_rr<FR64, X86Movsd, v2f64,
                          "movsd\t{$src2, $dst|$dst, $src2}">, XD;

  // For the disassembler
  let isCodeGenOnly = 1 in {
    def MOVSSrr_REV : SI<0x11, MRMDestReg, (outs VR128:$dst),
                         (ins VR128:$src1, FR32:$src2),
                         "movss\t{$src2, $dst|$dst, $src2}", [],
                         IIC_SSE_MOV_S_RR>, XS;
    def MOVSDrr_REV : SI<0x11, MRMDestReg, (outs VR128:$dst),
                         (ins VR128:$src1, FR64:$src2),
                         "movsd\t{$src2, $dst|$dst, $src2}", [],
                         IIC_SSE_MOV_S_RR>, XD;
  }
}

let canFoldAsLoad = 1, isReMaterializable = 1 in {
  def MOVSSrm : sse12_move_rm<FR32, f32mem, loadf32, "movss">, XS;

  let AddedComplexity = 20 in
    def MOVSDrm : sse12_move_rm<FR64, f64mem, loadf64, "movsd">, XD;
}

def MOVSSmr : SSI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
                  "movss\t{$src, $dst|$dst, $src}",
                  [(store FR32:$src, addr:$dst)], IIC_SSE_MOV_S_MR>;
def MOVSDmr : SDI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
                  "movsd\t{$src, $dst|$dst, $src}",
                  [(store FR64:$src, addr:$dst)], IIC_SSE_MOV_S_MR>;

let Predicates = [HasAVX] in {
  let AddedComplexity = 15 in {
  // Move scalar to XMM zero-extended, zeroing a VR128 then do a
  // MOVS{S,D} to the lower bits.
  def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32:$src)))),
            (VMOVSSrr (v4f32 (V_SET0)), FR32:$src)>;
  def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
            (VMOVSSrr (v4f32 (V_SET0)), (COPY_TO_REGCLASS VR128:$src, FR32))>;
  def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
            (VMOVSSrr (v4i32 (V_SET0)), (COPY_TO_REGCLASS VR128:$src, FR32))>;
  def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64:$src)))),
            (VMOVSDrr (v2f64 (V_SET0)), FR64:$src)>;

  // Move low f32 and clear high bits.
  def : Pat<(v8f32 (X86vzmovl (v8f32 VR256:$src))),
            (SUBREG_TO_REG (i32 0),
             (VMOVSSrr (v4f32 (V_SET0)),
                       (EXTRACT_SUBREG (v8f32 VR256:$src), sub_xmm)), sub_xmm)>;
  def : Pat<(v8i32 (X86vzmovl (v8i32 VR256:$src))),
            (SUBREG_TO_REG (i32 0),
             (VMOVSSrr (v4i32 (V_SET0)),
                       (EXTRACT_SUBREG (v8i32 VR256:$src), sub_xmm)), sub_xmm)>;
  }

  let AddedComplexity = 20 in {
  // MOVSSrm zeros the high parts of the register; represent this
  // with SUBREG_TO_REG. The AVX versions also write: DST[255:128] <- 0
  def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
            (COPY_TO_REGCLASS (VMOVSSrm addr:$src), VR128)>;
  def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
            (COPY_TO_REGCLASS (VMOVSSrm addr:$src), VR128)>;
  def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
            (COPY_TO_REGCLASS (VMOVSSrm addr:$src), VR128)>;

  // MOVSDrm zeros the high parts of the register; represent this
  // with SUBREG_TO_REG. The AVX versions also write: DST[255:128] <- 0
  def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
            (COPY_TO_REGCLASS (VMOVSDrm addr:$src), VR128)>;
  def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
            (COPY_TO_REGCLASS (VMOVSDrm addr:$src), VR128)>;
  def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
            (COPY_TO_REGCLASS (VMOVSDrm addr:$src), VR128)>;
  def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
            (COPY_TO_REGCLASS (VMOVSDrm addr:$src), VR128)>;
  def : Pat<(v2f64 (X86vzload addr:$src)),
            (COPY_TO_REGCLASS (VMOVSDrm addr:$src), VR128)>;

  // Represent the same patterns above but in the form they appear for
  // 256-bit types
  def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
                   (v4i32 (scalar_to_vector (loadi32 addr:$src))), (iPTR 0)))),
            (SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_xmm)>;
  def : Pat<(v8f32 (X86vzmovl (insert_subvector undef,
                   (v4f32 (scalar_to_vector (loadf32 addr:$src))), (iPTR 0)))),
            (SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_xmm)>;
  def : Pat<(v4f64 (X86vzmovl (insert_subvector undef,
                   (v2f64 (scalar_to_vector (loadf64 addr:$src))), (iPTR 0)))),
            (SUBREG_TO_REG (i32 0), (VMOVSDrm addr:$src), sub_xmm)>;

  def : Pat<(v8f32 (X86vzmovl (insert_subvector undef,
                   (v4f32 (scalar_to_vector FR32:$src)), (iPTR 0)))),
            (SUBREG_TO_REG (i32 0),
                           (v4f32 (VMOVSSrr (v4f32 (V_SET0)), FR32:$src)),
                           sub_xmm)>;
  def : Pat<(v4f64 (X86vzmovl (insert_subvector undef,
                   (v2f64 (scalar_to_vector FR64:$src)), (iPTR 0)))),
            (SUBREG_TO_REG (i64 0),
                           (v2f64 (VMOVSDrr (v2f64 (V_SET0)), FR64:$src)),
                           sub_xmm)>;
  def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
                   (v2i64 (scalar_to_vector (loadi64 addr:$src))), (iPTR 0)))),
            (SUBREG_TO_REG (i64 0), (VMOVSDrm addr:$src), sub_xmm)>;

  // Move low f64 and clear high bits.
  def : Pat<(v4f64 (X86vzmovl (v4f64 VR256:$src))),
            (SUBREG_TO_REG (i32 0),
             (VMOVSDrr (v2f64 (V_SET0)),
                       (EXTRACT_SUBREG (v4f64 VR256:$src), sub_xmm)), sub_xmm)>;

  def : Pat<(v4i64 (X86vzmovl (v4i64 VR256:$src))),
            (SUBREG_TO_REG (i32 0),
             (VMOVSDrr (v2i64 (V_SET0)),
                       (EXTRACT_SUBREG (v4i64 VR256:$src), sub_xmm)), sub_xmm)>;
  }
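
  // In the 256-bit X86vzmovl patterns above, the scalar move is done on the
  // low xmm half and SUBREG_TO_REG re-asserts that the upper half is zero;
  // this is sound because VEX-encoded 128-bit instructions such as VMOVSS
  // and VMOVSD always clear bits 255:128 of the destination ymm register.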

  // Extract and store.
  def : Pat<(store (f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
                   addr:$dst),
            (VMOVSSmr addr:$dst, (COPY_TO_REGCLASS (v4f32 VR128:$src), FR32))>;
  def : Pat<(store (f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
                   addr:$dst),
            (VMOVSDmr addr:$dst, (COPY_TO_REGCLASS (v2f64 VR128:$src), FR64))>;

  // Shuffle with VMOVSS
  def : Pat<(v4i32 (X86Movss VR128:$src1, VR128:$src2)),
            (VMOVSSrr (v4i32 VR128:$src1),
                      (COPY_TO_REGCLASS (v4i32 VR128:$src2), FR32))>;
  def : Pat<(v4f32 (X86Movss VR128:$src1, VR128:$src2)),
            (VMOVSSrr (v4f32 VR128:$src1),
                      (COPY_TO_REGCLASS (v4f32 VR128:$src2), FR32))>;

  // 256-bit variants
  def : Pat<(v8i32 (X86Movss VR256:$src1, VR256:$src2)),
            (SUBREG_TO_REG (i32 0),
              (VMOVSSrr (EXTRACT_SUBREG (v8i32 VR256:$src1), sub_xmm),
                        (EXTRACT_SUBREG (v8i32 VR256:$src2), sub_xmm)),
              sub_xmm)>;
  def : Pat<(v8f32 (X86Movss VR256:$src1, VR256:$src2)),
            (SUBREG_TO_REG (i32 0),
              (VMOVSSrr (EXTRACT_SUBREG (v8f32 VR256:$src1), sub_xmm),
                        (EXTRACT_SUBREG (v8f32 VR256:$src2), sub_xmm)),
              sub_xmm)>;

  // Shuffle with VMOVSD
  def : Pat<(v2i64 (X86Movsd VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v2f64 (X86Movsd VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v4f32 (X86Movsd VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v4i32 (X86Movsd VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;

  // 256-bit variants
  def : Pat<(v4i64 (X86Movsd VR256:$src1, VR256:$src2)),
            (SUBREG_TO_REG (i32 0),
              (VMOVSDrr (EXTRACT_SUBREG (v4i64 VR256:$src1), sub_xmm),
                        (EXTRACT_SUBREG (v4i64 VR256:$src2), sub_xmm)),
              sub_xmm)>;
  def : Pat<(v4f64 (X86Movsd VR256:$src1, VR256:$src2)),
            (SUBREG_TO_REG (i32 0),
              (VMOVSDrr (EXTRACT_SUBREG (v4f64 VR256:$src1), sub_xmm),
                        (EXTRACT_SUBREG (v4f64 VR256:$src2), sub_xmm)),
              sub_xmm)>;

  // FIXME: Instead of a X86Movlps there should be a X86Movsd here, the problem
  // is during lowering, where it's not possible to recognize the fold because
  // it has two uses through a bitcast. One use disappears at isel time and the
  // fold opportunity reappears.
  def : Pat<(v2f64 (X86Movlpd VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v2i64 (X86Movlpd VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v4f32 (X86Movlps VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v4i32 (X86Movlps VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
}

let Predicates = [UseSSE1] in {
  let AddedComplexity = 15 in {
  // Move scalar to XMM zero-extended, zeroing a VR128 then do a
  // MOVSS to the lower bits.
  def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32:$src)))),
            (MOVSSrr (v4f32 (V_SET0)), FR32:$src)>;
  def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
            (MOVSSrr (v4f32 (V_SET0)), (COPY_TO_REGCLASS VR128:$src, FR32))>;
  def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
            (MOVSSrr (v4i32 (V_SET0)), (COPY_TO_REGCLASS VR128:$src, FR32))>;
  }

  let AddedComplexity = 20 in {
  // MOVSSrm already zeros the high parts of the register.
  def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
            (COPY_TO_REGCLASS (MOVSSrm addr:$src), VR128)>;
  def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
            (COPY_TO_REGCLASS (MOVSSrm addr:$src), VR128)>;
  def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
            (COPY_TO_REGCLASS (MOVSSrm addr:$src), VR128)>;
  }

  // Extract and store.
  def : Pat<(store (f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
                   addr:$dst),
            (MOVSSmr addr:$dst, (COPY_TO_REGCLASS VR128:$src, FR32))>;

  // Shuffle with MOVSS
  def : Pat<(v4i32 (X86Movss VR128:$src1, VR128:$src2)),
            (MOVSSrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR32))>;
  def : Pat<(v4f32 (X86Movss VR128:$src1, VR128:$src2)),
            (MOVSSrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR32))>;
}

let Predicates = [UseSSE2] in {
  let AddedComplexity = 15 in {
  // Move scalar to XMM zero-extended, zeroing a VR128 then do a
  // MOVSD to the lower bits.
  def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64:$src)))),
            (MOVSDrr (v2f64 (V_SET0)), FR64:$src)>;
  }

  let AddedComplexity = 20 in {
  // MOVSDrm already zeros the high parts of the register.
  def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
            (COPY_TO_REGCLASS (MOVSDrm addr:$src), VR128)>;
  def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
            (COPY_TO_REGCLASS (MOVSDrm addr:$src), VR128)>;
  def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
            (COPY_TO_REGCLASS (MOVSDrm addr:$src), VR128)>;
  def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
            (COPY_TO_REGCLASS (MOVSDrm addr:$src), VR128)>;
  def : Pat<(v2f64 (X86vzload addr:$src)),
            (COPY_TO_REGCLASS (MOVSDrm addr:$src), VR128)>;
  }

  // Extract and store.
  def : Pat<(store (f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
                   addr:$dst),
            (MOVSDmr addr:$dst, (COPY_TO_REGCLASS VR128:$src, FR64))>;

  // Shuffle with MOVSD
  def : Pat<(v2i64 (X86Movsd VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v2f64 (X86Movsd VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v4f32 (X86Movsd VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v4i32 (X86Movsd VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;

  // FIXME: Instead of a X86Movlps there should be a X86Movsd here, the problem
  // is during lowering, where it's not possible to recognize the fold because
  // it has two uses through a bitcast. One use disappears at isel time and the
  // fold opportunity reappears.
  def : Pat<(v2f64 (X86Movlpd VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v2i64 (X86Movlpd VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v4f32 (X86Movlps VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v4i32 (X86Movlps VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move Aligned/Unaligned FP Instructions
//===----------------------------------------------------------------------===//

multiclass sse12_mov_packed<bits<8> opc, RegisterClass RC,
                            X86MemOperand x86memop, PatFrag ld_frag,
                            string asm, Domain d,
                            OpndItins itins,
                            bit IsReMaterializable = 1> {
let neverHasSideEffects = 1 in
  def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
              !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [], itins.rr, d>;
let canFoldAsLoad = 1, isReMaterializable = IsReMaterializable in
  def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
              !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
              [(set RC:$dst, (ld_frag addr:$src))], itins.rm, d>;
}
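
// Note that this multiclass only produces the register-to-register move and
// the load; the corresponding stores are defined as standalone MRMDestMem
// instructions below, because a store pattern is rooted at the store node
// rather than at (set RC:$dst, ...).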

defm VMOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
                              "movaps", SSEPackedSingle, SSE_MOVA_ITINS>,
                              TB, VEX;
defm VMOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
                              "movapd", SSEPackedDouble, SSE_MOVA_ITINS>,
                              TB, OpSize, VEX;
defm VMOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
                              "movups", SSEPackedSingle, SSE_MOVU_ITINS>,
                              TB, VEX;
defm VMOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
                              "movupd", SSEPackedDouble, SSE_MOVU_ITINS, 0>,
                              TB, OpSize, VEX;

defm VMOVAPSY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv8f32,
                              "movaps", SSEPackedSingle, SSE_MOVA_ITINS>,
                              TB, VEX, VEX_L;
defm VMOVAPDY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv4f64,
                              "movapd", SSEPackedDouble, SSE_MOVA_ITINS>,
                              TB, OpSize, VEX, VEX_L;
defm VMOVUPSY : sse12_mov_packed<0x10, VR256, f256mem, loadv8f32,
                              "movups", SSEPackedSingle, SSE_MOVU_ITINS>,
                              TB, VEX, VEX_L;
defm VMOVUPDY : sse12_mov_packed<0x10, VR256, f256mem, loadv4f64,
                              "movupd", SSEPackedDouble, SSE_MOVU_ITINS, 0>,
                              TB, OpSize, VEX, VEX_L;
defm MOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
                              "movaps", SSEPackedSingle, SSE_MOVA_ITINS>,
                              TB;
defm MOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
                              "movapd", SSEPackedDouble, SSE_MOVA_ITINS>,
                              TB, OpSize;
defm MOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
                              "movups", SSEPackedSingle, SSE_MOVU_ITINS>,
                              TB;
defm MOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
                              "movupd", SSEPackedDouble, SSE_MOVU_ITINS, 0>,
                              TB, OpSize;

def VMOVAPSmr : VPSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movaps\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v4f32 VR128:$src), addr:$dst)],
                   IIC_SSE_MOVA_P_MR>, VEX;
def VMOVAPDmr : VPDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movapd\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v2f64 VR128:$src), addr:$dst)],
                   IIC_SSE_MOVA_P_MR>, VEX;
def VMOVUPSmr : VPSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movups\t{$src, $dst|$dst, $src}",
                   [(store (v4f32 VR128:$src), addr:$dst)],
                   IIC_SSE_MOVU_P_MR>, VEX;
def VMOVUPDmr : VPDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movupd\t{$src, $dst|$dst, $src}",
                   [(store (v2f64 VR128:$src), addr:$dst)],
                   IIC_SSE_MOVU_P_MR>, VEX;
def VMOVAPSYmr : VPSI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                   "movaps\t{$src, $dst|$dst, $src}",
                   [(alignedstore256 (v8f32 VR256:$src), addr:$dst)],
                   IIC_SSE_MOVA_P_MR>, VEX, VEX_L;
def VMOVAPDYmr : VPDI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                   "movapd\t{$src, $dst|$dst, $src}",
                   [(alignedstore256 (v4f64 VR256:$src), addr:$dst)],
                   IIC_SSE_MOVA_P_MR>, VEX, VEX_L;
def VMOVUPSYmr : VPSI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                   "movups\t{$src, $dst|$dst, $src}",
                   [(store (v8f32 VR256:$src), addr:$dst)],
                   IIC_SSE_MOVU_P_MR>, VEX, VEX_L;
def VMOVUPDYmr : VPDI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                   "movupd\t{$src, $dst|$dst, $src}",
                   [(store (v4f64 VR256:$src), addr:$dst)],
                   IIC_SSE_MOVU_P_MR>, VEX, VEX_L;

// For the disassembler
let isCodeGenOnly = 1 in {
  def VMOVAPSrr_REV : VPSI<0x29, MRMDestReg, (outs VR128:$dst),
                           (ins VR128:$src),
                           "movaps\t{$src, $dst|$dst, $src}", [],
                           IIC_SSE_MOVA_P_RR>, VEX;
  def VMOVAPDrr_REV : VPDI<0x29, MRMDestReg, (outs VR128:$dst),
                           (ins VR128:$src),
                           "movapd\t{$src, $dst|$dst, $src}", [],
                           IIC_SSE_MOVA_P_RR>, VEX;
  def VMOVUPSrr_REV : VPSI<0x11, MRMDestReg, (outs VR128:$dst),
                           (ins VR128:$src),
                           "movups\t{$src, $dst|$dst, $src}", [],
                           IIC_SSE_MOVU_P_RR>, VEX;
  def VMOVUPDrr_REV : VPDI<0x11, MRMDestReg, (outs VR128:$dst),
                           (ins VR128:$src),
                           "movupd\t{$src, $dst|$dst, $src}", [],
                           IIC_SSE_MOVU_P_RR>, VEX;
  def VMOVAPSYrr_REV : VPSI<0x29, MRMDestReg, (outs VR256:$dst),
                            (ins VR256:$src),
                            "movaps\t{$src, $dst|$dst, $src}", [],
                            IIC_SSE_MOVA_P_RR>, VEX, VEX_L;
  def VMOVAPDYrr_REV : VPDI<0x29, MRMDestReg, (outs VR256:$dst),
                            (ins VR256:$src),
                            "movapd\t{$src, $dst|$dst, $src}", [],
                            IIC_SSE_MOVA_P_RR>, VEX, VEX_L;
  def VMOVUPSYrr_REV : VPSI<0x11, MRMDestReg, (outs VR256:$dst),
                            (ins VR256:$src),
                            "movups\t{$src, $dst|$dst, $src}", [],
                            IIC_SSE_MOVU_P_RR>, VEX, VEX_L;
  def VMOVUPDYrr_REV : VPDI<0x11, MRMDestReg, (outs VR256:$dst),
                            (ins VR256:$src),
                            "movupd\t{$src, $dst|$dst, $src}", [],
                            IIC_SSE_MOVU_P_RR>, VEX, VEX_L;
}

let Predicates = [HasAVX] in {
def : Pat<(v8i32 (X86vzmovl
                  (insert_subvector undef, (v4i32 VR128:$src), (iPTR 0)))),
          (SUBREG_TO_REG (i32 0), (VMOVAPSrr VR128:$src), sub_xmm)>;
def : Pat<(v4i64 (X86vzmovl
                  (insert_subvector undef, (v2i64 VR128:$src), (iPTR 0)))),
          (SUBREG_TO_REG (i32 0), (VMOVAPSrr VR128:$src), sub_xmm)>;
def : Pat<(v8f32 (X86vzmovl
                  (insert_subvector undef, (v4f32 VR128:$src), (iPTR 0)))),
          (SUBREG_TO_REG (i32 0), (VMOVAPSrr VR128:$src), sub_xmm)>;
def : Pat<(v4f64 (X86vzmovl
                  (insert_subvector undef, (v2f64 VR128:$src), (iPTR 0)))),
          (SUBREG_TO_REG (i32 0), (VMOVAPSrr VR128:$src), sub_xmm)>;
}

def : Pat<(int_x86_avx_storeu_ps_256 addr:$dst, VR256:$src),
          (VMOVUPSYmr addr:$dst, VR256:$src)>;
def : Pat<(int_x86_avx_storeu_pd_256 addr:$dst, VR256:$src),
          (VMOVUPDYmr addr:$dst, VR256:$src)>;

def MOVAPSmr : PSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movaps\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v4f32 VR128:$src), addr:$dst)],
                   IIC_SSE_MOVA_P_MR>;
def MOVAPDmr : PDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movapd\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v2f64 VR128:$src), addr:$dst)],
                   IIC_SSE_MOVA_P_MR>;
def MOVUPSmr : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movups\t{$src, $dst|$dst, $src}",
                   [(store (v4f32 VR128:$src), addr:$dst)],
                   IIC_SSE_MOVU_P_MR>;
def MOVUPDmr : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movupd\t{$src, $dst|$dst, $src}",
                   [(store (v2f64 VR128:$src), addr:$dst)],
                   IIC_SSE_MOVU_P_MR>;

// For the disassembler
let isCodeGenOnly = 1 in {
  def MOVAPSrr_REV : PSI<0x29, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
                         "movaps\t{$src, $dst|$dst, $src}", [],
                         IIC_SSE_MOVA_P_RR>;
  def MOVAPDrr_REV : PDI<0x29, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
                         "movapd\t{$src, $dst|$dst, $src}", [],
                         IIC_SSE_MOVA_P_RR>;
  def MOVUPSrr_REV : PSI<0x11, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
                         "movups\t{$src, $dst|$dst, $src}", [],
                         IIC_SSE_MOVU_P_RR>;
  def MOVUPDrr_REV : PDI<0x11, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
                         "movupd\t{$src, $dst|$dst, $src}", [],
                         IIC_SSE_MOVU_P_RR>;
}

let Predicates = [HasAVX] in {
  def : Pat<(int_x86_sse_storeu_ps addr:$dst, VR128:$src),
            (VMOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(int_x86_sse2_storeu_pd addr:$dst, VR128:$src),
            (VMOVUPDmr addr:$dst, VR128:$src)>;
}

let Predicates = [UseSSE1] in
  def : Pat<(int_x86_sse_storeu_ps addr:$dst, VR128:$src),
            (MOVUPSmr addr:$dst, VR128:$src)>;
let Predicates = [UseSSE2] in
  def : Pat<(int_x86_sse2_storeu_pd addr:$dst, VR128:$src),
            (MOVUPDmr addr:$dst, VR128:$src)>;
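
// The llvm.x86.sse.storeu.ps / llvm.x86.sse2.storeu.pd intrinsics carry no
// alignment guarantee, so they map directly onto the unaligned movups/movupd
// stores in both the VEX and legacy encodings.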

// Use vmovaps/vmovups for AVX integer load/store.
let Predicates = [HasAVX] in {
  // 128-bit load/store
  def : Pat<(alignedloadv2i64 addr:$src),
            (VMOVAPSrm addr:$src)>;
  def : Pat<(loadv2i64 addr:$src),
            (VMOVUPSrm addr:$src)>;

  def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
            (VMOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
            (VMOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
            (VMOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
            (VMOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v2i64 VR128:$src), addr:$dst),
            (VMOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v4i32 VR128:$src), addr:$dst),
            (VMOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v8i16 VR128:$src), addr:$dst),
            (VMOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v16i8 VR128:$src), addr:$dst),
            (VMOVUPSmr addr:$dst, VR128:$src)>;

  // 256-bit load/store
  def : Pat<(alignedloadv4i64 addr:$src),
            (VMOVAPSYrm addr:$src)>;
  def : Pat<(loadv4i64 addr:$src),
            (VMOVUPSYrm addr:$src)>;
  def : Pat<(alignedstore256 (v4i64 VR256:$src), addr:$dst),
            (VMOVAPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(alignedstore256 (v8i32 VR256:$src), addr:$dst),
            (VMOVAPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(alignedstore256 (v16i16 VR256:$src), addr:$dst),
            (VMOVAPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(alignedstore256 (v32i8 VR256:$src), addr:$dst),
            (VMOVAPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(store (v4i64 VR256:$src), addr:$dst),
            (VMOVUPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(store (v8i32 VR256:$src), addr:$dst),
            (VMOVUPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(store (v16i16 VR256:$src), addr:$dst),
            (VMOVUPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(store (v32i8 VR256:$src), addr:$dst),
            (VMOVUPSYmr addr:$dst, VR256:$src)>;

  // Special patterns for storing subvector extracts of lower 128-bits
  // It's cheaper to just use VMOVAPS/VMOVUPS instead of VEXTRACTF128mr
  def : Pat<(alignedstore (v2f64 (extract_subvector
                                  (v4f64 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVAPDmr addr:$dst, (v2f64 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
  def : Pat<(alignedstore (v4f32 (extract_subvector
                                  (v8f32 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVAPSmr addr:$dst, (v4f32 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
  def : Pat<(alignedstore (v2i64 (extract_subvector
                                  (v4i64 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVAPDmr addr:$dst, (v2i64 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
  def : Pat<(alignedstore (v4i32 (extract_subvector
                                  (v8i32 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVAPSmr addr:$dst, (v4i32 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
  def : Pat<(alignedstore (v8i16 (extract_subvector
                                  (v16i16 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVAPSmr addr:$dst, (v8i16 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
  def : Pat<(alignedstore (v16i8 (extract_subvector
                                  (v32i8 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVAPSmr addr:$dst, (v16i8 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;

  def : Pat<(store (v2f64 (extract_subvector
                           (v4f64 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVUPDmr addr:$dst, (v2f64 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
  def : Pat<(store (v4f32 (extract_subvector
                           (v8f32 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVUPSmr addr:$dst, (v4f32 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
  def : Pat<(store (v2i64 (extract_subvector
                           (v4i64 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVUPDmr addr:$dst, (v2i64 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
  def : Pat<(store (v4i32 (extract_subvector
                           (v8i32 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVUPSmr addr:$dst, (v4i32 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
  def : Pat<(store (v8i16 (extract_subvector
                           (v16i16 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVUPSmr addr:$dst, (v8i16 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
  def : Pat<(store (v16i8 (extract_subvector
                           (v32i8 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVUPSmr addr:$dst, (v16i8 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
}

// Use movaps / movups for SSE integer load / store (one byte shorter).
// The instructions selected below are then converted to MOVDQA/MOVDQU
// during the SSE domain pass.
let Predicates = [UseSSE1] in {
  def : Pat<(alignedloadv2i64 addr:$src),
            (MOVAPSrm addr:$src)>;
  def : Pat<(loadv2i64 addr:$src),
            (MOVUPSrm addr:$src)>;

  def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
            (MOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
            (MOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
            (MOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
            (MOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v2i64 VR128:$src), addr:$dst),
            (MOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v4i32 VR128:$src), addr:$dst),
            (MOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v8i16 VR128:$src), addr:$dst),
            (MOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v16i8 VR128:$src), addr:$dst),
            (MOVUPSmr addr:$dst, VR128:$src)>;
}

// Alias instruction to do FR32 or FR64 reg-to-reg copy using movaps. Upper
// bits are disregarded. FIXME: Set encoding to pseudo!
let neverHasSideEffects = 1 in {
def FsVMOVAPSrr : VPSI<0x28, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
                       "movaps\t{$src, $dst|$dst, $src}", [],
                       IIC_SSE_MOVA_P_RR>, VEX;
def FsVMOVAPDrr : VPDI<0x28, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
                       "movapd\t{$src, $dst|$dst, $src}", [],
                       IIC_SSE_MOVA_P_RR>, VEX;
def FsMOVAPSrr : PSI<0x28, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
                     "movaps\t{$src, $dst|$dst, $src}", [],
                     IIC_SSE_MOVA_P_RR>;
def FsMOVAPDrr : PDI<0x28, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
                     "movapd\t{$src, $dst|$dst, $src}", [],
                     IIC_SSE_MOVA_P_RR>;
}

// Alias instruction to load FR32 or FR64 from f128mem using movaps. Upper
// bits are disregarded. FIXME: Set encoding to pseudo!
let canFoldAsLoad = 1, isReMaterializable = 1 in {
let isCodeGenOnly = 1 in {
  def FsVMOVAPSrm : VPSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
                         "movaps\t{$src, $dst|$dst, $src}",
                         [(set FR32:$dst, (alignedloadfsf32 addr:$src))],
                         IIC_SSE_MOVA_P_RM>, VEX;
  def FsVMOVAPDrm : VPDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
                         "movapd\t{$src, $dst|$dst, $src}",
                         [(set FR64:$dst, (alignedloadfsf64 addr:$src))],
                         IIC_SSE_MOVA_P_RM>, VEX;
}
def FsMOVAPSrm : PSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
                     "movaps\t{$src, $dst|$dst, $src}",
                     [(set FR32:$dst, (alignedloadfsf32 addr:$src))],
                     IIC_SSE_MOVA_P_RM>;
def FsMOVAPDrm : PDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
                     "movapd\t{$src, $dst|$dst, $src}",
                     [(set FR64:$dst, (alignedloadfsf64 addr:$src))],
                     IIC_SSE_MOVA_P_RM>;
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move Low packed FP Instructions
//===----------------------------------------------------------------------===//

multiclass sse12_mov_hilo_packed<bits<8> opc, RegisterClass RC,
                                 SDNode psnode, SDNode pdnode, string base_opc,
                                 string asm_opr, InstrItinClass itin> {
  def PSrm : PI<opc, MRMSrcMem,
         (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
         !strconcat(base_opc, "s", asm_opr),
     [(set VR128:$dst,
       (psnode VR128:$src1,
        (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))))],
              itin, SSEPackedSingle>, TB;

  def PDrm : PI<opc, MRMSrcMem,
         (outs RC:$dst), (ins RC:$src1, f64mem:$src2),
         !strconcat(base_opc, "d", asm_opr),
     [(set RC:$dst, (v2f64 (pdnode RC:$src1,
                            (scalar_to_vector (loadf64 addr:$src2)))))],
              itin, SSEPackedDouble>, TB, OpSize;
}

let AddedComplexity = 20 in {
  defm VMOVL : sse12_mov_hilo_packed<0x12, VR128, X86Movlps, X86Movlpd, "movlp",
                     "\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                     IIC_SSE_MOV_LH>, VEX_4V;
}
let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
  defm MOVL : sse12_mov_hilo_packed<0x12, VR128, X86Movlps, X86Movlpd, "movlp",
                                   "\t{$src2, $dst|$dst, $src2}",
                                   IIC_SSE_MOV_LH>;
}

def VMOVLPSmr : VPSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
                                 (iPTR 0))), addr:$dst)],
                   IIC_SSE_MOV_LH>, VEX;
def VMOVLPDmr : VPDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlpd\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract (v2f64 VR128:$src),
                                 (iPTR 0))), addr:$dst)],
                   IIC_SSE_MOV_LH>, VEX;
def MOVLPSmr : PSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
                                 (iPTR 0))), addr:$dst)],
                   IIC_SSE_MOV_LH>;
def MOVLPDmr : PDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlpd\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract (v2f64 VR128:$src),
                                 (iPTR 0))), addr:$dst)],
                   IIC_SSE_MOV_LH>;

let Predicates = [HasAVX] in {
  // Shuffle with VMOVLPS
  def : Pat<(v4f32 (X86Movlps VR128:$src1, (load addr:$src2))),
            (VMOVLPSrm VR128:$src1, addr:$src2)>;
  def : Pat<(v4i32 (X86Movlps VR128:$src1, (load addr:$src2))),
            (VMOVLPSrm VR128:$src1, addr:$src2)>;

  // Shuffle with VMOVLPD
  def : Pat<(v2f64 (X86Movlpd VR128:$src1, (load addr:$src2))),
            (VMOVLPDrm VR128:$src1, addr:$src2)>;
  def : Pat<(v2i64 (X86Movlpd VR128:$src1, (load addr:$src2))),
            (VMOVLPDrm VR128:$src1, addr:$src2)>;

  // Store patterns
  def : Pat<(store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)),
                   addr:$src1),
            (VMOVLPSmr addr:$src1, VR128:$src2)>;
  def : Pat<(store (v4i32 (X86Movlps
                   (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2)), addr:$src1),
            (VMOVLPSmr addr:$src1, VR128:$src2)>;
  def : Pat<(store (v2f64 (X86Movlpd (load addr:$src1), VR128:$src2)),
                   addr:$src1),
            (VMOVLPDmr addr:$src1, VR128:$src2)>;
  def : Pat<(store (v2i64 (X86Movlpd (load addr:$src1), VR128:$src2)),
                   addr:$src1),
            (VMOVLPDmr addr:$src1, VR128:$src2)>;
}

let Predicates = [UseSSE1] in {
  // (store (vector_shuffle (load addr), v2, <4, 5, 2, 3>), addr) using MOVLPS
  def : Pat<(store (i64 (vector_extract (bc_v2i64 (v4f32 VR128:$src2)),
                                 (iPTR 0))), addr:$src1),
            (MOVLPSmr addr:$src1, VR128:$src2)>;

  // Shuffle with MOVLPS
  def : Pat<(v4f32 (X86Movlps VR128:$src1, (load addr:$src2))),
            (MOVLPSrm VR128:$src1, addr:$src2)>;
  def : Pat<(v4i32 (X86Movlps VR128:$src1, (load addr:$src2))),
            (MOVLPSrm VR128:$src1, addr:$src2)>;
  def : Pat<(X86Movlps VR128:$src1,
                      (bc_v4f32 (v2i64 (scalar_to_vector (loadi64 addr:$src2))))),
            (MOVLPSrm VR128:$src1, addr:$src2)>;

  // Store patterns
  def : Pat<(store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)),
                   addr:$src1),
            (MOVLPSmr addr:$src1, VR128:$src2)>;
  def : Pat<(store (v4i32 (X86Movlps
                   (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2)),
                   addr:$src1),
            (MOVLPSmr addr:$src1, VR128:$src2)>;
}

let Predicates = [UseSSE2] in {
  // Shuffle with MOVLPD
  def : Pat<(v2f64 (X86Movlpd VR128:$src1, (load addr:$src2))),
            (MOVLPDrm VR128:$src1, addr:$src2)>;
  def : Pat<(v2i64 (X86Movlpd VR128:$src1, (load addr:$src2))),
            (MOVLPDrm VR128:$src1, addr:$src2)>;

  // Store patterns
  def : Pat<(store (v2f64 (X86Movlpd (load addr:$src1), VR128:$src2)),
                   addr:$src1),
            (MOVLPDmr addr:$src1, VR128:$src2)>;
  def : Pat<(store (v2i64 (X86Movlpd (load addr:$src1), VR128:$src2)),
                   addr:$src1),
            (MOVLPDmr addr:$src1, VR128:$src2)>;
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move Hi packed FP Instructions
//===----------------------------------------------------------------------===//

let AddedComplexity = 20 in {
  defm VMOVH : sse12_mov_hilo_packed<0x16, VR128, X86Movlhps, X86Movlhpd, "movhp",
                     "\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                     IIC_SSE_MOV_LH>, VEX_4V;
}
let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
  defm MOVH : sse12_mov_hilo_packed<0x16, VR128, X86Movlhps, X86Movlhpd, "movhp",
                                   "\t{$src2, $dst|$dst, $src2}",
                                   IIC_SSE_MOV_LH>;
}

// v2f64 extract element 1 is always custom lowered to unpack high to low
// and extract element 0 so the non-store version isn't too horrible.
def VMOVHPSmr : VPSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract
                                 (X86Unpckh (bc_v2f64 (v4f32 VR128:$src)),
                                            (bc_v2f64 (v4f32 VR128:$src))),
                                 (iPTR 0))), addr:$dst)], IIC_SSE_MOV_LH>, VEX;
def VMOVHPDmr : VPDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhpd\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract
                                 (v2f64 (X86Unpckh VR128:$src, VR128:$src)),
                                 (iPTR 0))), addr:$dst)], IIC_SSE_MOV_LH>, VEX;
def MOVHPSmr : PSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract
                                 (X86Unpckh (bc_v2f64 (v4f32 VR128:$src)),
                                            (bc_v2f64 (v4f32 VR128:$src))),
                                 (iPTR 0))), addr:$dst)], IIC_SSE_MOV_LH>;
def MOVHPDmr : PDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhpd\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract
                                 (v2f64 (X86Unpckh VR128:$src, VR128:$src)),
                                 (iPTR 0))), addr:$dst)], IIC_SSE_MOV_LH>;

let Predicates = [HasAVX] in {
  // VMOVHPS patterns
  def : Pat<(X86Movlhps VR128:$src1,
                 (bc_v4f32 (v2i64 (scalar_to_vector (loadi64 addr:$src2))))),
            (VMOVHPSrm VR128:$src1, addr:$src2)>;
  def : Pat<(X86Movlhps VR128:$src1,
                 (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
            (VMOVHPSrm VR128:$src1, addr:$src2)>;

  // FIXME: Instead of X86Unpckl, there should be a X86Movlhpd here, the problem
  // is during lowering, where it's not possible to recognize the load fold
  // because it has two uses through a bitcast. One use disappears at isel time
  // and the fold opportunity reappears.
  def : Pat<(v2f64 (X86Unpckl VR128:$src1,
                     (scalar_to_vector (loadf64 addr:$src2)))),
            (VMOVHPDrm VR128:$src1, addr:$src2)>;
}

let Predicates = [UseSSE1] in {
  // MOVHPS patterns
  def : Pat<(X86Movlhps VR128:$src1,
                 (bc_v4f32 (v2i64 (scalar_to_vector (loadi64 addr:$src2))))),
            (MOVHPSrm VR128:$src1, addr:$src2)>;
  def : Pat<(X86Movlhps VR128:$src1,
                 (bc_v4f32 (v2i64 (X86vzload addr:$src2)))),
            (MOVHPSrm VR128:$src1, addr:$src2)>;
}

let Predicates = [UseSSE2] in {
  // FIXME: Instead of X86Unpckl, there should be a X86Movlhpd here, the problem
  // is during lowering, where it's not possible to recognize the load fold
  // because it has two uses through a bitcast. One use disappears at isel time
  // and the fold opportunity reappears.
  def : Pat<(v2f64 (X86Unpckl VR128:$src1,
                     (scalar_to_vector (loadf64 addr:$src2)))),
            (MOVHPDrm VR128:$src1, addr:$src2)>;
}
1333 //===----------------------------------------------------------------------===//
1334 // SSE 1 & 2 - Move Low to High and High to Low packed FP Instructions
1335 //===----------------------------------------------------------------------===//
1337 let AddedComplexity = 20 in {
1338 def VMOVLHPSrr : VPSI<0x16, MRMSrcReg, (outs VR128:$dst),
1339 (ins VR128:$src1, VR128:$src2),
1340 "movlhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1342 (v4f32 (X86Movlhps VR128:$src1, VR128:$src2)))],
1345 def VMOVHLPSrr : VPSI<0x12, MRMSrcReg, (outs VR128:$dst),
1346 (ins VR128:$src1, VR128:$src2),
1347 "movhlps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1349 (v4f32 (X86Movhlps VR128:$src1, VR128:$src2)))],
let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
  def MOVLHPSrr : PSI<0x16, MRMSrcReg, (outs VR128:$dst),
                      (ins VR128:$src1, VR128:$src2),
                      "movlhps\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                        (v4f32 (X86Movlhps VR128:$src1, VR128:$src2)))],
                      IIC_SSE_MOV_LH>;
  def MOVHLPSrr : PSI<0x12, MRMSrcReg, (outs VR128:$dst),
                      (ins VR128:$src1, VR128:$src2),
                      "movhlps\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                        (v4f32 (X86Movhlps VR128:$src1, VR128:$src2)))],
                      IIC_SSE_MOV_LH>;
}
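// As an illustration of the semantics selected above, with f32 lanes:
//   movlhps: dst = [ src1[0], src1[1], src2[0], src2[1] ]
//   movhlps: dst = [ src2[2], src2[3], src1[2], src1[3] ]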
1368 let Predicates = [HasAVX] in {
1370 def : Pat<(v4i32 (X86Movlhps VR128:$src1, VR128:$src2)),
1371 (VMOVLHPSrr VR128:$src1, VR128:$src2)>;
1372 def : Pat<(v2i64 (X86Movlhps VR128:$src1, VR128:$src2)),
1373 (VMOVLHPSrr (v2i64 VR128:$src1), VR128:$src2)>;
1376 def : Pat<(v4i32 (X86Movhlps VR128:$src1, VR128:$src2)),
            (VMOVHLPSrr VR128:$src1, VR128:$src2)>;
}
1380 let Predicates = [UseSSE1] in {
1382 def : Pat<(v4i32 (X86Movlhps VR128:$src1, VR128:$src2)),
1383 (MOVLHPSrr VR128:$src1, VR128:$src2)>;
1384 def : Pat<(v2i64 (X86Movlhps VR128:$src1, VR128:$src2)),
1385 (MOVLHPSrr (v2i64 VR128:$src1), VR128:$src2)>;
1388 def : Pat<(v4i32 (X86Movhlps VR128:$src1, VR128:$src2)),
            (MOVHLPSrr VR128:$src1, VR128:$src2)>;
}
1392 //===----------------------------------------------------------------------===//
1393 // SSE 1 & 2 - Conversion Instructions
1394 //===----------------------------------------------------------------------===//
def SSE_CVT_PD : OpndItins<
  IIC_SSE_CVT_PD_RR, IIC_SSE_CVT_PD_RM
>;

def SSE_CVT_PS : OpndItins<
  IIC_SSE_CVT_PS_RR, IIC_SSE_CVT_PS_RM
>;

def SSE_CVT_Scalar : OpndItins<
  IIC_SSE_CVT_Scalar_RR, IIC_SSE_CVT_Scalar_RM
>;

def SSE_CVT_SS2SI_32 : OpndItins<
  IIC_SSE_CVT_SS2SI32_RR, IIC_SSE_CVT_SS2SI32_RM
>;

def SSE_CVT_SS2SI_64 : OpndItins<
  IIC_SSE_CVT_SS2SI64_RR, IIC_SSE_CVT_SS2SI64_RM
>;

def SSE_CVT_SD2SI : OpndItins<
  IIC_SSE_CVT_SD2SI_RR, IIC_SSE_CVT_SD2SI_RM
>;
multiclass sse12_cvt_s<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                     SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
                     string asm, OpndItins itins> {
  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
              [(set DstRC:$dst, (OpNode SrcRC:$src))],
              itins.rr>;
  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
              [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))],
              itins.rm>;
}
multiclass sse12_cvt_p<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                       X86MemOperand x86memop, string asm, Domain d,
                       OpndItins itins> {
let neverHasSideEffects = 1 in {
  def rr : I<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
             [], itins.rr, d>;
  let mayLoad = 1 in
  def rm : I<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
             [], itins.rm, d>;
} // neverHasSideEffects = 1
}
1443 multiclass sse12_vcvt_avx<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
1444 X86MemOperand x86memop, string asm> {
1445 let neverHasSideEffects = 1 in {
1446 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src),
1447 !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>;
1449 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
1450 (ins DstRC:$src1, x86memop:$src),
1451 !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>;
1452 } // neverHasSideEffects = 1
1455 defm VCVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
1456 "cvttss2si\t{$src, $dst|$dst, $src}",
1459 defm VCVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
1460 "cvttss2si{q}\t{$src, $dst|$dst, $src}",
1462 XS, VEX, VEX_W, VEX_LIG;
1463 defm VCVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
1464 "cvttsd2si\t{$src, $dst|$dst, $src}",
1467 defm VCVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
1468 "cvttsd2si{q}\t{$src, $dst|$dst, $src}",
1470 XD, VEX, VEX_W, VEX_LIG;
// The assembler can recognize rr 64-bit instructions by seeing a rxx
// register, but the same isn't true when only memory operands are used;
// provide other assembly "l" and "q" forms to address this explicitly
// where appropriate to do so.
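// For example, "vcvtsi2sd %rax, %xmm0, %xmm0" is unambiguously 64-bit thanks
// to the %rax operand, but "vcvtsi2sd (%rax), %xmm0, %xmm0" gives no width
// hint; the "vcvtsi2sdl"/"vcvtsi2sdq" spellings (and the InstAliases below)
// resolve it.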
1476 defm VCVTSI2SS : sse12_vcvt_avx<0x2A, GR32, FR32, i32mem, "cvtsi2ss">,
1477 XS, VEX_4V, VEX_LIG;
1478 defm VCVTSI2SS64 : sse12_vcvt_avx<0x2A, GR64, FR32, i64mem, "cvtsi2ss{q}">,
1479 XS, VEX_4V, VEX_W, VEX_LIG;
1480 defm VCVTSI2SD : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sd">,
1481 XD, VEX_4V, VEX_LIG;
1482 defm VCVTSI2SD64 : sse12_vcvt_avx<0x2A, GR64, FR64, i64mem, "cvtsi2sd{q}">,
1483 XD, VEX_4V, VEX_W, VEX_LIG;
1485 def : InstAlias<"vcvtsi2sd{l}\t{$src, $src1, $dst|$dst, $src1, $src}",
1486 (VCVTSI2SDrr FR64:$dst, FR64:$src1, GR32:$src)>;
1487 def : InstAlias<"vcvtsi2sd{l}\t{$src, $src1, $dst|$dst, $src1, $src}",
1488 (VCVTSI2SDrm FR64:$dst, FR64:$src1, i32mem:$src)>;
1490 let Predicates = [HasAVX] in {
1491 def : Pat<(f32 (sint_to_fp (loadi32 addr:$src))),
1492 (VCVTSI2SSrm (f32 (IMPLICIT_DEF)), addr:$src)>;
1493 def : Pat<(f32 (sint_to_fp (loadi64 addr:$src))),
1494 (VCVTSI2SS64rm (f32 (IMPLICIT_DEF)), addr:$src)>;
1495 def : Pat<(f64 (sint_to_fp (loadi32 addr:$src))),
1496 (VCVTSI2SDrm (f64 (IMPLICIT_DEF)), addr:$src)>;
1497 def : Pat<(f64 (sint_to_fp (loadi64 addr:$src))),
1498 (VCVTSI2SD64rm (f64 (IMPLICIT_DEF)), addr:$src)>;
1500 def : Pat<(f32 (sint_to_fp GR32:$src)),
1501 (VCVTSI2SSrr (f32 (IMPLICIT_DEF)), GR32:$src)>;
1502 def : Pat<(f32 (sint_to_fp GR64:$src)),
1503 (VCVTSI2SS64rr (f32 (IMPLICIT_DEF)), GR64:$src)>;
1504 def : Pat<(f64 (sint_to_fp GR32:$src)),
1505 (VCVTSI2SDrr (f64 (IMPLICIT_DEF)), GR32:$src)>;
1506 def : Pat<(f64 (sint_to_fp GR64:$src)),
          (VCVTSI2SD64rr (f64 (IMPLICIT_DEF)), GR64:$src)>;
}
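// Note: the (IMPLICIT_DEF) above stands in for the VEX $src1 register whose
// upper elements are merged into the result; only the low scalar element of
// the destination is observed, so an undefined register is acceptable there.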
1510 defm CVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
1511 "cvttss2si\t{$src, $dst|$dst, $src}",
1512 SSE_CVT_SS2SI_32>, XS;
1513 defm CVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
1514 "cvttss2si{q}\t{$src, $dst|$dst, $src}",
1515 SSE_CVT_SS2SI_64>, XS, REX_W;
1516 defm CVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
1517 "cvttsd2si\t{$src, $dst|$dst, $src}",
1519 defm CVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
1520 "cvttsd2si{q}\t{$src, $dst|$dst, $src}",
1521 SSE_CVT_SD2SI>, XD, REX_W;
1522 defm CVTSI2SS : sse12_cvt_s<0x2A, GR32, FR32, sint_to_fp, i32mem, loadi32,
1523 "cvtsi2ss\t{$src, $dst|$dst, $src}",
1524 SSE_CVT_Scalar>, XS;
1525 defm CVTSI2SS64 : sse12_cvt_s<0x2A, GR64, FR32, sint_to_fp, i64mem, loadi64,
1526 "cvtsi2ss{q}\t{$src, $dst|$dst, $src}",
1527 SSE_CVT_Scalar>, XS, REX_W;
1528 defm CVTSI2SD : sse12_cvt_s<0x2A, GR32, FR64, sint_to_fp, i32mem, loadi32,
1529 "cvtsi2sd\t{$src, $dst|$dst, $src}",
1530 SSE_CVT_Scalar>, XD;
1531 defm CVTSI2SD64 : sse12_cvt_s<0x2A, GR64, FR64, sint_to_fp, i64mem, loadi64,
1532 "cvtsi2sd{q}\t{$src, $dst|$dst, $src}",
1533 SSE_CVT_Scalar>, XD, REX_W;
1535 // Conversion Instructions Intrinsics - Match intrinsics which expect MM
1536 // and/or XMM operand(s).
1538 multiclass sse12_cvt_sint<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
1539 Intrinsic Int, Operand memop, ComplexPattern mem_cpat,
1540 string asm, OpndItins itins> {
1541 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
1542 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
1543 [(set DstRC:$dst, (Int SrcRC:$src))], itins.rr>;
1544 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins memop:$src),
1545 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
              [(set DstRC:$dst, (Int mem_cpat:$src))], itins.rm>;
}
multiclass sse12_cvt_sint_3addr<bits<8> opc, RegisterClass SrcRC,
          RegisterClass DstRC, Intrinsic Int, X86MemOperand x86memop,
          PatFrag ld_frag, string asm, OpndItins itins,
          bit Is2Addr = 1> {
  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src2),
              !if(Is2Addr,
                  !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
                  !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
              [(set DstRC:$dst, (Int DstRC:$src1, SrcRC:$src2))],
              itins.rr>;
  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
              (ins DstRC:$src1, x86memop:$src2),
              !if(Is2Addr,
                  !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
                  !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
              [(set DstRC:$dst, (Int DstRC:$src1, (ld_frag addr:$src2)))],
              itins.rm>;
}
1568 defm VCVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32,
1569 int_x86_sse2_cvtsd2si, sdmem, sse_load_f64, "cvtsd2si{l}",
1570 SSE_CVT_SD2SI>, XD, VEX, VEX_LIG;
1571 defm VCVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64,
1572 int_x86_sse2_cvtsd2si64, sdmem, sse_load_f64, "cvtsd2si{q}",
1573 SSE_CVT_SD2SI>, XD, VEX, VEX_W, VEX_LIG;
1575 defm CVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
1576 sdmem, sse_load_f64, "cvtsd2si{l}", SSE_CVT_SD2SI>, XD;
1577 defm CVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse2_cvtsd2si64,
1578 sdmem, sse_load_f64, "cvtsd2si{q}", SSE_CVT_SD2SI>, XD, REX_W;
1581 defm Int_VCVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
1582 int_x86_sse_cvtsi2ss, i32mem, loadi32, "cvtsi2ss",
1583 SSE_CVT_Scalar, 0>, XS, VEX_4V;
1584 defm Int_VCVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
1585 int_x86_sse_cvtsi642ss, i64mem, loadi64, "cvtsi2ss{q}",
                          SSE_CVT_Scalar, 0>, XS, VEX_4V,
                          VEX_W;
1588 defm Int_VCVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
1589 int_x86_sse2_cvtsi2sd, i32mem, loadi32, "cvtsi2sd",
1590 SSE_CVT_Scalar, 0>, XD, VEX_4V;
1591 defm Int_VCVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
1592 int_x86_sse2_cvtsi642sd, i64mem, loadi64, "cvtsi2sd{q}",
                          SSE_CVT_Scalar, 0>, XD,
                          VEX_4V, VEX_W;
1596 let Constraints = "$src1 = $dst" in {
1597 defm Int_CVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
1598 int_x86_sse_cvtsi2ss, i32mem, loadi32,
1599 "cvtsi2ss", SSE_CVT_Scalar>, XS;
1600 defm Int_CVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
1601 int_x86_sse_cvtsi642ss, i64mem, loadi64,
1602 "cvtsi2ss{q}", SSE_CVT_Scalar>, XS, REX_W;
1603 defm Int_CVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
1604 int_x86_sse2_cvtsi2sd, i32mem, loadi32,
1605 "cvtsi2sd", SSE_CVT_Scalar>, XD;
1606 defm Int_CVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
1607 int_x86_sse2_cvtsi642sd, i64mem, loadi64,
1608 "cvtsi2sd{q}", SSE_CVT_Scalar>, XD, REX_W;
1613 // Aliases for intrinsics
1614 defm Int_VCVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
1615 ssmem, sse_load_f32, "cvttss2si",
1616 SSE_CVT_SS2SI_32>, XS, VEX;
1617 defm Int_VCVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
1618 int_x86_sse_cvttss2si64, ssmem, sse_load_f32,
1619 "cvttss2si{q}", SSE_CVT_SS2SI_64>,
1621 defm Int_VCVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
1622 sdmem, sse_load_f64, "cvttsd2si",
1623 SSE_CVT_SD2SI>, XD, VEX;
1624 defm Int_VCVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
1625 int_x86_sse2_cvttsd2si64, sdmem, sse_load_f64,
1626 "cvttsd2si{q}", SSE_CVT_SD2SI>,
1628 defm Int_CVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
1629 ssmem, sse_load_f32, "cvttss2si",
1630 SSE_CVT_SS2SI_32>, XS;
1631 defm Int_CVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
1632 int_x86_sse_cvttss2si64, ssmem, sse_load_f32,
1633 "cvttss2si{q}", SSE_CVT_SS2SI_64>, XS, REX_W;
1634 defm Int_CVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
                          sdmem, sse_load_f64, "cvttsd2si",
                          SSE_CVT_SD2SI>, XD;
1637 defm Int_CVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
1638 int_x86_sse2_cvttsd2si64, sdmem, sse_load_f64,
1639 "cvttsd2si{q}", SSE_CVT_SD2SI>, XD, REX_W;
1641 defm VCVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si,
1642 ssmem, sse_load_f32, "cvtss2si{l}",
1643 SSE_CVT_SS2SI_32>, XS, VEX, VEX_LIG;
1644 defm VCVTSS2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse_cvtss2si64,
1645 ssmem, sse_load_f32, "cvtss2si{q}",
1646 SSE_CVT_SS2SI_64>, XS, VEX, VEX_W, VEX_LIG;
1648 defm CVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si,
1649 ssmem, sse_load_f32, "cvtss2si{l}",
1650 SSE_CVT_SS2SI_32>, XS;
1651 defm CVTSS2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse_cvtss2si64,
1652 ssmem, sse_load_f32, "cvtss2si{q}",
1653 SSE_CVT_SS2SI_64>, XS, REX_W;
1655 defm VCVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, i128mem,
1656 "vcvtdq2ps\t{$src, $dst|$dst, $src}",
1657 SSEPackedSingle, SSE_CVT_PS>,
1658 TB, VEX, Requires<[HasAVX]>;
1659 defm VCVTDQ2PSY : sse12_cvt_p<0x5B, VR256, VR256, i256mem,
1660 "vcvtdq2ps\t{$src, $dst|$dst, $src}",
1661 SSEPackedSingle, SSE_CVT_PS>,
1662 TB, VEX, VEX_L, Requires<[HasAVX]>;
1664 defm CVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, i128mem,
1665 "cvtdq2ps\t{$src, $dst|$dst, $src}",
1666 SSEPackedSingle, SSE_CVT_PS>,
1667 TB, Requires<[UseSSE2]>;
1671 // Convert scalar double to scalar single
1672 let neverHasSideEffects = 1 in {
1673 def VCVTSD2SSrr : VSDI<0x5A, MRMSrcReg, (outs FR32:$dst),
1674 (ins FR64:$src1, FR64:$src2),
1675 "cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}", [],
1676 IIC_SSE_CVT_Scalar_RR>, VEX_4V, VEX_LIG;
1678 def VCVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst),
1679 (ins FR64:$src1, f64mem:$src2),
1680 "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1681 [], IIC_SSE_CVT_Scalar_RM>,
                      XD, Requires<[HasAVX, OptForSize]>, VEX_4V, VEX_LIG;
}
def : Pat<(f32 (fround FR64:$src)), (VCVTSD2SSrr FR64:$src, FR64:$src)>,
          Requires<[HasAVX]>;
1688 def CVTSD2SSrr : SDI<0x5A, MRMSrcReg, (outs FR32:$dst), (ins FR64:$src),
1689 "cvtsd2ss\t{$src, $dst|$dst, $src}",
1690 [(set FR32:$dst, (fround FR64:$src))],
1691 IIC_SSE_CVT_Scalar_RR>;
1692 def CVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst), (ins f64mem:$src),
1693 "cvtsd2ss\t{$src, $dst|$dst, $src}",
1694 [(set FR32:$dst, (fround (loadf64 addr:$src)))],
                      IIC_SSE_CVT_Scalar_RM>, XD,
                      Requires<[UseSSE2, OptForSize]>;
1699 def Int_VCVTSD2SSrr: I<0x5A, MRMSrcReg,
1700 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1701 "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                       [(set VR128:$dst,
                         (int_x86_sse2_cvtsd2ss VR128:$src1, VR128:$src2))],
1704 IIC_SSE_CVT_Scalar_RR>, XD, VEX_4V, Requires<[HasAVX]>;
def Int_VCVTSD2SSrm: I<0x5A, MRMSrcMem,
1706 (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2),
1707 "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1708 [(set VR128:$dst, (int_x86_sse2_cvtsd2ss
1709 VR128:$src1, sse_load_f64:$src2))],
1710 IIC_SSE_CVT_Scalar_RM>, XD, VEX_4V, Requires<[HasAVX]>;
1712 let Constraints = "$src1 = $dst" in {
1713 def Int_CVTSD2SSrr: I<0x5A, MRMSrcReg,
1714 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1715 "cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                       [(set VR128:$dst,
                         (int_x86_sse2_cvtsd2ss VR128:$src1, VR128:$src2))],
1718 IIC_SSE_CVT_Scalar_RR>, XD, Requires<[UseSSE2]>;
def Int_CVTSD2SSrm: I<0x5A, MRMSrcMem,
1720 (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2),
1721 "cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1722 [(set VR128:$dst, (int_x86_sse2_cvtsd2ss
1723 VR128:$src1, sse_load_f64:$src2))],
                       IIC_SSE_CVT_Scalar_RM>, XD, Requires<[UseSSE2]>;
}
1727 // Convert scalar single to scalar double
1728 // SSE2 instructions with XS prefix
1729 let neverHasSideEffects = 1 in {
1730 def VCVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst),
1731 (ins FR32:$src1, FR32:$src2),
1732 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1733 [], IIC_SSE_CVT_Scalar_RR>,
1734 XS, Requires<[HasAVX]>, VEX_4V, VEX_LIG;
1736 def VCVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst),
1737 (ins FR32:$src1, f32mem:$src2),
1738 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1739 [], IIC_SSE_CVT_Scalar_RM>,
                    XS, VEX_4V, VEX_LIG, Requires<[HasAVX, OptForSize]>;
}
1743 def : Pat<(f64 (fextend FR32:$src)),
1744 (VCVTSS2SDrr FR32:$src, FR32:$src)>, Requires<[HasAVX]>;
1745 def : Pat<(fextend (loadf32 addr:$src)),
1746 (VCVTSS2SDrm (f32 (IMPLICIT_DEF)), addr:$src)>, Requires<[HasAVX]>;
1748 def : Pat<(extloadf32 addr:$src),
1749 (VCVTSS2SDrm (f32 (IMPLICIT_DEF)), addr:$src)>,
1750 Requires<[HasAVX, OptForSize]>;
1751 def : Pat<(extloadf32 addr:$src),
1752 (VCVTSS2SDrr (f32 (IMPLICIT_DEF)), (VMOVSSrm addr:$src))>,
1753 Requires<[HasAVX, OptForSpeed]>;
1755 def CVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst), (ins FR32:$src),
1756 "cvtss2sd\t{$src, $dst|$dst, $src}",
1757 [(set FR64:$dst, (fextend FR32:$src))],
1758 IIC_SSE_CVT_Scalar_RR>, XS,
1759 Requires<[UseSSE2]>;
1760 def CVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst), (ins f32mem:$src),
1761 "cvtss2sd\t{$src, $dst|$dst, $src}",
1762 [(set FR64:$dst, (extloadf32 addr:$src))],
1763 IIC_SSE_CVT_Scalar_RM>, XS,
1764 Requires<[UseSSE2, OptForSize]>;
// extload f32 -> f64. This matches load+fextend because we have a hack in
// the isel (PreprocessForFPConvert) that can introduce loads after dag
// combine. Since these loads aren't folded into the fextend, we have to match
// it explicitly here.
1771 def : Pat<(fextend (loadf32 addr:$src)),
1772 (CVTSS2SDrm addr:$src)>, Requires<[UseSSE2]>;
1773 def : Pat<(extloadf32 addr:$src),
1774 (CVTSS2SDrr (MOVSSrm addr:$src))>, Requires<[UseSSE2, OptForSpeed]>;
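// In other words, when optimizing for speed the extload is split into a
// MOVSSrm load plus a register-register CVTSS2SDrr rather than folding the
// load into the conversion, which is reserved for the OptForSize variants.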
1776 def Int_VCVTSS2SDrr: I<0x5A, MRMSrcReg,
1777 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1778 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    [(set VR128:$dst,
                      (int_x86_sse2_cvtss2sd VR128:$src1, VR128:$src2))],
1781 IIC_SSE_CVT_Scalar_RR>, XS, VEX_4V, Requires<[HasAVX]>;
1782 def Int_VCVTSS2SDrm: I<0x5A, MRMSrcMem,
1783 (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2),
1784 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    [(set VR128:$dst,
                      (int_x86_sse2_cvtss2sd VR128:$src1, sse_load_f32:$src2))],
1787 IIC_SSE_CVT_Scalar_RM>, XS, VEX_4V, Requires<[HasAVX]>;
1788 let Constraints = "$src1 = $dst" in { // SSE2 instructions with XS prefix
1789 def Int_CVTSS2SDrr: I<0x5A, MRMSrcReg,
1790 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1791 "cvtss2sd\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                        (int_x86_sse2_cvtss2sd VR128:$src1, VR128:$src2))],
1794 IIC_SSE_CVT_Scalar_RR>, XS, Requires<[UseSSE2]>;
1795 def Int_CVTSS2SDrm: I<0x5A, MRMSrcMem,
1796 (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2),
1797 "cvtss2sd\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                        (int_x86_sse2_cvtss2sd VR128:$src1, sse_load_f32:$src2))],
                      IIC_SSE_CVT_Scalar_RM>, XS, Requires<[UseSSE2]>;
}
1803 // Convert packed single/double fp to doubleword
1804 def VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1805 "cvtps2dq\t{$src, $dst|$dst, $src}",
1806 [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))],
1807 IIC_SSE_CVT_PS_RR>, VEX;
1808 def VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1809 "cvtps2dq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                          (int_x86_sse2_cvtps2dq (memopv4f32 addr:$src)))],
1812 IIC_SSE_CVT_PS_RM>, VEX;
1813 def VCVTPS2DQYrr : VPDI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
1814 "cvtps2dq\t{$src, $dst|$dst, $src}",
                         [(set VR256:$dst,
                           (int_x86_avx_cvt_ps2dq_256 VR256:$src))],
1817 IIC_SSE_CVT_PS_RR>, VEX, VEX_L;
1818 def VCVTPS2DQYrm : VPDI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
1819 "cvtps2dq\t{$src, $dst|$dst, $src}",
                         [(set VR256:$dst,
                           (int_x86_avx_cvt_ps2dq_256 (memopv8f32 addr:$src)))],
1822 IIC_SSE_CVT_PS_RM>, VEX, VEX_L;
1823 def CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1824 "cvtps2dq\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))],
                      IIC_SSE_CVT_PS_RR>;
1827 def CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1828 "cvtps2dq\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                        (int_x86_sse2_cvtps2dq (memopv4f32 addr:$src)))],
                      IIC_SSE_CVT_PS_RM>;
1834 // Convert Packed Double FP to Packed DW Integers
1835 let Predicates = [HasAVX] in {
1836 // The assembler can recognize rr 256-bit instructions by seeing a ymm
1837 // register, but the same isn't true when using memory operands instead.
1838 // Provide other assembly rr and rm forms to address this explicitly.
1839 def VCVTPD2DQrr : SDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1840 "vcvtpd2dq\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
                       VEX;

// XMM only
1845 def : InstAlias<"vcvtpd2dqx\t{$src, $dst|$dst, $src}",
1846 (VCVTPD2DQrr VR128:$dst, VR128:$src)>;
1847 def VCVTPD2DQXrm : SDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1848 "vcvtpd2dqx\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst,
                           (int_x86_sse2_cvtpd2dq (memopv2f64 addr:$src)))]>, VEX;

// YMM only
1853 def VCVTPD2DQYrr : SDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
1854 "vcvtpd2dq{y}\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst,
                         (int_x86_avx_cvt_pd2dq_256 VR256:$src))]>, VEX, VEX_L;
1857 def VCVTPD2DQYrm : SDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
1858 "vcvtpd2dq{y}\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst,
                         (int_x86_avx_cvt_pd2dq_256 (memopv4f64 addr:$src)))]>,
                       VEX, VEX_L;
1862 def : InstAlias<"vcvtpd2dq\t{$src, $dst|$dst, $src}",
                (VCVTPD2DQYrr VR128:$dst, VR256:$src)>;
}
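// For example, "vcvtpd2dq %ymm0, %xmm0" is self-describing, but a memory
// source gives no size hint, so the "x" (128-bit) and "y" (256-bit) suffixed
// spellings above name the source width explicitly.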
1866 def CVTPD2DQrm : SDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1867 "cvtpd2dq\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                        (int_x86_sse2_cvtpd2dq (memopv2f64 addr:$src)))],
                      IIC_SSE_CVT_PD_RM>;
1871 def CVTPD2DQrr : SDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1872 "cvtpd2dq\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))],
                      IIC_SSE_CVT_PD_RR>;
1876 // Convert with truncation packed single/double fp to doubleword
1877 // SSE2 packed instructions with XS prefix
1878 def VCVTTPS2DQrr : VS2SI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1879 "cvttps2dq\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst,
                           (int_x86_sse2_cvttps2dq VR128:$src))],
1882 IIC_SSE_CVT_PS_RR>, VEX;
1883 def VCVTTPS2DQrm : VS2SI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1884 "cvttps2dq\t{$src, $dst|$dst, $src}",
1885 [(set VR128:$dst, (int_x86_sse2_cvttps2dq
1886 (memopv4f32 addr:$src)))],
1887 IIC_SSE_CVT_PS_RM>, VEX;
1888 def VCVTTPS2DQYrr : VS2SI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
1889 "cvttps2dq\t{$src, $dst|$dst, $src}",
                          [(set VR256:$dst,
                            (int_x86_avx_cvtt_ps2dq_256 VR256:$src))],
1892 IIC_SSE_CVT_PS_RR>, VEX, VEX_L;
1893 def VCVTTPS2DQYrm : VS2SI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
1894 "cvttps2dq\t{$src, $dst|$dst, $src}",
1895 [(set VR256:$dst, (int_x86_avx_cvtt_ps2dq_256
1896 (memopv8f32 addr:$src)))],
1897 IIC_SSE_CVT_PS_RM>, VEX, VEX_L;
1899 def CVTTPS2DQrr : S2SI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1900 "cvttps2dq\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvttps2dq VR128:$src))],
                       IIC_SSE_CVT_PS_RR>;
1903 def CVTTPS2DQrm : S2SI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1904 "cvttps2dq\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst,
                         (int_x86_sse2_cvttps2dq (memopv4f32 addr:$src)))],
                       IIC_SSE_CVT_PS_RM>;
1909 let Predicates = [HasAVX] in {
1910 def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
1911 (VCVTDQ2PSrr VR128:$src)>;
1912 def : Pat<(v4f32 (sint_to_fp (bc_v4i32 (memopv2i64 addr:$src)))),
1913 (VCVTDQ2PSrm addr:$src)>;
1915 def : Pat<(int_x86_sse2_cvtdq2ps VR128:$src),
1916 (VCVTDQ2PSrr VR128:$src)>;
1917 def : Pat<(int_x86_sse2_cvtdq2ps (bc_v4i32 (memopv2i64 addr:$src))),
1918 (VCVTDQ2PSrm addr:$src)>;
1920 def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
1921 (VCVTTPS2DQrr VR128:$src)>;
1922 def : Pat<(v4i32 (fp_to_sint (memopv4f32 addr:$src))),
1923 (VCVTTPS2DQrm addr:$src)>;
1925 def : Pat<(v8f32 (sint_to_fp (v8i32 VR256:$src))),
1926 (VCVTDQ2PSYrr VR256:$src)>;
1927 def : Pat<(v8f32 (sint_to_fp (bc_v8i32 (memopv4i64 addr:$src)))),
1928 (VCVTDQ2PSYrm addr:$src)>;
1930 def : Pat<(v8i32 (fp_to_sint (v8f32 VR256:$src))),
1931 (VCVTTPS2DQYrr VR256:$src)>;
1932 def : Pat<(v8i32 (fp_to_sint (memopv8f32 addr:$src))),
            (VCVTTPS2DQYrm addr:$src)>;
}
1936 let Predicates = [UseSSE2] in {
1937 def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
1938 (CVTDQ2PSrr VR128:$src)>;
1939 def : Pat<(v4f32 (sint_to_fp (bc_v4i32 (memopv2i64 addr:$src)))),
1940 (CVTDQ2PSrm addr:$src)>;
1942 def : Pat<(int_x86_sse2_cvtdq2ps VR128:$src),
1943 (CVTDQ2PSrr VR128:$src)>;
1944 def : Pat<(int_x86_sse2_cvtdq2ps (bc_v4i32 (memopv2i64 addr:$src))),
1945 (CVTDQ2PSrm addr:$src)>;
1947 def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
1948 (CVTTPS2DQrr VR128:$src)>;
1949 def : Pat<(v4i32 (fp_to_sint (memopv4f32 addr:$src))),
            (CVTTPS2DQrm addr:$src)>;
}
1953 def VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1954 "cvttpd2dq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                          (int_x86_sse2_cvttpd2dq VR128:$src))],
1957 IIC_SSE_CVT_PD_RR>, VEX;
1959 // The assembler can recognize rr 256-bit instructions by seeing a ymm
1960 // register, but the same isn't true when using memory operands instead.
1961 // Provide other assembly rr and rm forms to address this explicitly.
1964 def : InstAlias<"vcvttpd2dqx\t{$src, $dst|$dst, $src}",
1965 (VCVTTPD2DQrr VR128:$dst, VR128:$src)>;
1966 def VCVTTPD2DQXrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1967 "cvttpd2dqx\t{$src, $dst|$dst, $src}",
1968 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
1969 (memopv2f64 addr:$src)))],
1970 IIC_SSE_CVT_PD_RM>, VEX;
1973 def VCVTTPD2DQYrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
1974 "cvttpd2dq{y}\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst,
                           (int_x86_avx_cvtt_pd2dq_256 VR256:$src))],
1977 IIC_SSE_CVT_PD_RR>, VEX, VEX_L;
1978 def VCVTTPD2DQYrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
1979 "cvttpd2dq{y}\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst,
                           (int_x86_avx_cvtt_pd2dq_256 (memopv4f64 addr:$src)))],
1982 IIC_SSE_CVT_PD_RM>, VEX, VEX_L;
1983 def : InstAlias<"vcvttpd2dq\t{$src, $dst|$dst, $src}",
1984 (VCVTTPD2DQYrr VR128:$dst, VR256:$src)>;
1986 let Predicates = [HasAVX] in {
1987 def : Pat<(v4i32 (fp_to_sint (v4f64 VR256:$src))),
1988 (VCVTTPD2DQYrr VR256:$src)>;
1989 def : Pat<(v4i32 (fp_to_sint (memopv4f64 addr:$src))),
1990 (VCVTTPD2DQYrm addr:$src)>;
1991 } // Predicates = [HasAVX]
1993 def CVTTPD2DQrr : PDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1994 "cvttpd2dq\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))],
                      IIC_SSE_CVT_PD_RR>;
1997 def CVTTPD2DQrm : PDI<0xE6, MRMSrcMem, (outs VR128:$dst),(ins f128mem:$src),
1998 "cvttpd2dq\t{$src, $dst|$dst, $src}",
1999 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
                                        (memopv2f64 addr:$src)))],
                      IIC_SSE_CVT_PD_RM>;
2003 // Convert packed single to packed double
2004 let Predicates = [HasAVX] in {
2005 // SSE2 instructions without OpSize prefix
2006 def VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2007 "vcvtps2pd\t{$src, $dst|$dst, $src}",
2008 [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))],
2009 IIC_SSE_CVT_PD_RR>, TB, VEX;
2010 def VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
2011 "vcvtps2pd\t{$src, $dst|$dst, $src}",
2012 [(set VR128:$dst, (v2f64 (extloadv2f32 addr:$src)))],
2013 IIC_SSE_CVT_PD_RM>, TB, VEX;
2014 def VCVTPS2PDYrr : I<0x5A, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
2015 "vcvtps2pd\t{$src, $dst|$dst, $src}",
                     [(set VR256:$dst,
                       (int_x86_avx_cvt_ps2_pd_256 VR128:$src))],
2018 IIC_SSE_CVT_PD_RR>, TB, VEX, VEX_L;
2019 def VCVTPS2PDYrm : I<0x5A, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
2020 "vcvtps2pd\t{$src, $dst|$dst, $src}",
                     [(set VR256:$dst,
                       (int_x86_avx_cvt_ps2_pd_256 (memopv4f32 addr:$src)))],
                     IIC_SSE_CVT_PD_RM>, TB, VEX, VEX_L;
}
2026 let Predicates = [UseSSE2] in {
2027 def CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2028 "cvtps2pd\t{$src, $dst|$dst, $src}",
2029 [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))],
2030 IIC_SSE_CVT_PD_RR>, TB;
2031 def CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
2032 "cvtps2pd\t{$src, $dst|$dst, $src}",
2033 [(set VR128:$dst, (v2f64 (extloadv2f32 addr:$src)))],
                   IIC_SSE_CVT_PD_RM>, TB;
}
2037 // Convert Packed DW Integers to Packed Double FP
2038 let Predicates = [HasAVX] in {
2039 let neverHasSideEffects = 1, mayLoad = 1 in
2040 def VCVTDQ2PDrm : S2SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
2041 "vcvtdq2pd\t{$src, $dst|$dst, $src}",
2043 def VCVTDQ2PDrr : S2SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2044 "vcvtdq2pd\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst,
                       (int_x86_sse2_cvtdq2pd VR128:$src))]>, VEX;
2047 def VCVTDQ2PDYrm : S2SI<0xE6, MRMSrcMem, (outs VR256:$dst), (ins i128mem:$src),
2048 "vcvtdq2pd\t{$src, $dst|$dst, $src}",
                     [(set VR256:$dst,
                       (int_x86_avx_cvtdq2_pd_256
2051 (bitconvert (memopv2i64 addr:$src))))]>, VEX, VEX_L;
2052 def VCVTDQ2PDYrr : S2SI<0xE6, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
2053 "vcvtdq2pd\t{$src, $dst|$dst, $src}",
                     [(set VR256:$dst,
                       (int_x86_avx_cvtdq2_pd_256 VR128:$src))]>, VEX, VEX_L;
}
2058 let neverHasSideEffects = 1, mayLoad = 1 in
2059 def CVTDQ2PDrm : S2SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
2060 "cvtdq2pd\t{$src, $dst|$dst, $src}", [],
2062 def CVTDQ2PDrr : S2SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2063 "cvtdq2pd\t{$src, $dst|$dst, $src}",
                   [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))],
                   IIC_SSE_CVT_PD_RR>;
2067 // AVX 256-bit register conversion intrinsics
2068 let Predicates = [HasAVX] in {
2069 def : Pat<(v4f64 (sint_to_fp (v4i32 VR128:$src))),
2070 (VCVTDQ2PDYrr VR128:$src)>;
2071 def : Pat<(v4f64 (sint_to_fp (bc_v4i32 (memopv2i64 addr:$src)))),
2072 (VCVTDQ2PDYrm addr:$src)>;
2073 } // Predicates = [HasAVX]
2075 // Convert packed double to packed single
2076 // The assembler can recognize rr 256-bit instructions by seeing a ymm
2077 // register, but the same isn't true when using memory operands instead.
2078 // Provide other assembly rr and rm forms to address this explicitly.
2079 def VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2080 "cvtpd2ps\t{$src, $dst|$dst, $src}",
2081 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))],
2082 IIC_SSE_CVT_PD_RR>, VEX;
2085 def : InstAlias<"vcvtpd2psx\t{$src, $dst|$dst, $src}",
2086 (VCVTPD2PSrr VR128:$dst, VR128:$src)>;
2087 def VCVTPD2PSXrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2088 "cvtpd2psx\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                          (int_x86_sse2_cvtpd2ps (memopv2f64 addr:$src)))],
2091 IIC_SSE_CVT_PD_RM>, VEX;
2094 def VCVTPD2PSYrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
2095 "cvtpd2ps{y}\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                          (int_x86_avx_cvt_pd2_ps_256 VR256:$src))],
2098 IIC_SSE_CVT_PD_RR>, VEX, VEX_L;
2099 def VCVTPD2PSYrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
2100 "cvtpd2ps{y}\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                          (int_x86_avx_cvt_pd2_ps_256 (memopv4f64 addr:$src)))],
2103 IIC_SSE_CVT_PD_RM>, VEX, VEX_L;
2104 def : InstAlias<"vcvtpd2ps\t{$src, $dst|$dst, $src}",
2105 (VCVTPD2PSYrr VR128:$dst, VR256:$src)>;
2107 def CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2108 "cvtpd2ps\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))],
                     IIC_SSE_CVT_PD_RR>;
2111 def CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2112 "cvtpd2ps\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst,
                       (int_x86_sse2_cvtpd2ps (memopv2f64 addr:$src)))],
                     IIC_SSE_CVT_PD_RM>;
2118 // AVX 256-bit register conversion intrinsics
2119 // FIXME: Migrate SSE conversion intrinsics matching to use patterns as below
2120 // whenever possible to avoid declaring two versions of each one.
2121 let Predicates = [HasAVX] in {
2122 def : Pat<(int_x86_avx_cvtdq2_ps_256 VR256:$src),
2123 (VCVTDQ2PSYrr VR256:$src)>;
2124 def : Pat<(int_x86_avx_cvtdq2_ps_256 (bitconvert (memopv4i64 addr:$src))),
2125 (VCVTDQ2PSYrm addr:$src)>;
2127 // Match fround and fextend for 128/256-bit conversions
2128 def : Pat<(v4f32 (X86vfpround (v2f64 VR128:$src))),
2129 (VCVTPD2PSrr VR128:$src)>;
2130 def : Pat<(v4f32 (X86vfpround (memopv2f64 addr:$src))),
2131 (VCVTPD2PSXrm addr:$src)>;
2132 def : Pat<(v4f32 (fround (v4f64 VR256:$src))),
2133 (VCVTPD2PSYrr VR256:$src)>;
2134 def : Pat<(v4f32 (fround (loadv4f64 addr:$src))),
2135 (VCVTPD2PSYrm addr:$src)>;
2137 def : Pat<(v2f64 (X86vfpext (v4f32 VR128:$src))),
2138 (VCVTPS2PDrr VR128:$src)>;
2139 def : Pat<(v4f64 (fextend (v4f32 VR128:$src))),
2140 (VCVTPS2PDYrr VR128:$src)>;
2141 def : Pat<(v4f64 (extloadv4f32 addr:$src)),
            (VCVTPS2PDYrm addr:$src)>;
}
2145 let Predicates = [UseSSE2] in {
2146 // Match fround and fextend for 128 conversions
2147 def : Pat<(v4f32 (X86vfpround (v2f64 VR128:$src))),
2148 (CVTPD2PSrr VR128:$src)>;
2149 def : Pat<(v4f32 (X86vfpround (memopv2f64 addr:$src))),
2150 (CVTPD2PSrm addr:$src)>;
2152 def : Pat<(v2f64 (X86vfpext (v4f32 VR128:$src))),
            (CVTPS2PDrr VR128:$src)>;
}
2156 //===----------------------------------------------------------------------===//
2157 // SSE 1 & 2 - Compare Instructions
2158 //===----------------------------------------------------------------------===//
2160 // sse12_cmp_scalar - sse 1 & 2 compare scalar instructions
2161 multiclass sse12_cmp_scalar<RegisterClass RC, X86MemOperand x86memop,
2162 Operand CC, SDNode OpNode, ValueType VT,
                            PatFrag ld_frag, string asm, string asm_alt,
                            OpndItins itins> {
2165 def rr : SIi8<0xC2, MRMSrcReg,
2166 (outs RC:$dst), (ins RC:$src1, RC:$src2, CC:$cc), asm,
                [(set RC:$dst, (OpNode (VT RC:$src1), RC:$src2, imm:$cc))],
                itins.rr>;
2169 def rm : SIi8<0xC2, MRMSrcMem,
2170 (outs RC:$dst), (ins RC:$src1, x86memop:$src2, CC:$cc), asm,
2171 [(set RC:$dst, (OpNode (VT RC:$src1),
                                  (ld_frag addr:$src2), imm:$cc))],
                itins.rm>;
2175 // Accept explicit immediate argument form instead of comparison code.
2176 let neverHasSideEffects = 1 in {
2177 def rr_alt : SIi8<0xC2, MRMSrcReg, (outs RC:$dst),
2178 (ins RC:$src1, RC:$src2, i8imm:$cc), asm_alt, [],
2179 IIC_SSE_ALU_F32S_RR>;
2181 def rm_alt : SIi8<0xC2, MRMSrcMem, (outs RC:$dst),
2182 (ins RC:$src1, x86memop:$src2, i8imm:$cc), asm_alt, [],
                      IIC_SSE_ALU_F32S_RM>;
  }
}
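// For example, "cmpless %xmm1, %xmm0" is the mnemonic ($cc = 2) form, while
// the *_alt definitions accept the raw immediate spelling
// "cmpss $2, %xmm1, %xmm0".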
2187 defm VCMPSS : sse12_cmp_scalar<FR32, f32mem, AVXCC, X86cmpss, f32, loadf32,
2188 "cmp${cc}ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2189 "cmpss\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
2191 XS, VEX_4V, VEX_LIG;
2192 defm VCMPSD : sse12_cmp_scalar<FR64, f64mem, AVXCC, X86cmpsd, f64, loadf64,
2193 "cmp${cc}sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2194 "cmpsd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
2195 SSE_ALU_F32S>, // same latency as 32 bit compare
2196 XD, VEX_4V, VEX_LIG;
2198 let Constraints = "$src1 = $dst" in {
2199 defm CMPSS : sse12_cmp_scalar<FR32, f32mem, SSECC, X86cmpss, f32, loadf32,
2200 "cmp${cc}ss\t{$src2, $dst|$dst, $src2}",
2201 "cmpss\t{$cc, $src2, $dst|$dst, $src2, $cc}", SSE_ALU_F32S>,
2203 defm CMPSD : sse12_cmp_scalar<FR64, f64mem, SSECC, X86cmpsd, f64, loadf64,
2204 "cmp${cc}sd\t{$src2, $dst|$dst, $src2}",
2205 "cmpsd\t{$cc, $src2, $dst|$dst, $src2, $cc}",
                  SSE_ALU_F32S>, // same latency as 32 bit compare
                  XD;
}
2210 multiclass sse12_cmp_scalar_int<X86MemOperand x86memop, Operand CC,
2211 Intrinsic Int, string asm, OpndItins itins> {
2212 def rr : SIi8<0xC2, MRMSrcReg, (outs VR128:$dst),
2213 (ins VR128:$src1, VR128:$src, CC:$cc), asm,
2214 [(set VR128:$dst, (Int VR128:$src1,
                                               VR128:$src, imm:$cc))],
               itins.rr>;
2217 def rm : SIi8<0xC2, MRMSrcMem, (outs VR128:$dst),
2218 (ins VR128:$src1, x86memop:$src, CC:$cc), asm,
2219 [(set VR128:$dst, (Int VR128:$src1,
                                               (load addr:$src), imm:$cc))],
               itins.rm>;
}
2224 // Aliases to match intrinsics which expect XMM operand(s).
2225 defm Int_VCMPSS : sse12_cmp_scalar_int<f32mem, AVXCC, int_x86_sse_cmp_ss,
2226 "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}",
2229 defm Int_VCMPSD : sse12_cmp_scalar_int<f64mem, AVXCC, int_x86_sse2_cmp_sd,
2230 "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}",
                       SSE_ALU_F32S>, // same latency as f32
                       XD, VEX_4V;
2233 let Constraints = "$src1 = $dst" in {
2234 defm Int_CMPSS : sse12_cmp_scalar_int<f32mem, SSECC, int_x86_sse_cmp_ss,
2235 "cmp${cc}ss\t{$src, $dst|$dst, $src}",
2237 defm Int_CMPSD : sse12_cmp_scalar_int<f64mem, SSECC, int_x86_sse2_cmp_sd,
2238 "cmp${cc}sd\t{$src, $dst|$dst, $src}",
                       SSE_ALU_F32S>, // same latency as f32
                       XD;
}
2244 // sse12_ord_cmp - Unordered/Ordered scalar fp compare and set EFLAGS
2245 multiclass sse12_ord_cmp<bits<8> opc, RegisterClass RC, SDNode OpNode,
2246 ValueType vt, X86MemOperand x86memop,
2247 PatFrag ld_frag, string OpcodeStr, Domain d> {
2248 def rr: PI<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
2249 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
2250 [(set EFLAGS, (OpNode (vt RC:$src1), RC:$src2))],
2251 IIC_SSE_COMIS_RR, d>;
2252 def rm: PI<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
2253 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
2254 [(set EFLAGS, (OpNode (vt RC:$src1),
2255 (ld_frag addr:$src2)))],
                                        IIC_SSE_COMIS_RM, d>;
}
2259 let Defs = [EFLAGS] in {
2260 defm VUCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
2261 "ucomiss", SSEPackedSingle>, TB, VEX, VEX_LIG;
2262 defm VUCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
2263 "ucomisd", SSEPackedDouble>, TB, OpSize, VEX,
2265 let Pattern = []<dag> in {
2266 defm VCOMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
2267 "comiss", SSEPackedSingle>, TB, VEX,
2269 defm VCOMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
2270 "comisd", SSEPackedDouble>, TB, OpSize, VEX,
2274 defm Int_VUCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
2275 load, "ucomiss", SSEPackedSingle>, TB, VEX;
2276 defm Int_VUCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
2277 load, "ucomisd", SSEPackedDouble>, TB, OpSize, VEX;
2279 defm Int_VCOMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem,
2280 load, "comiss", SSEPackedSingle>, TB, VEX;
2281 defm Int_VCOMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem,
2282 load, "comisd", SSEPackedDouble>, TB, OpSize, VEX;
2283 defm UCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
2284 "ucomiss", SSEPackedSingle>, TB;
2285 defm UCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
2286 "ucomisd", SSEPackedDouble>, TB, OpSize;
2288 let Pattern = []<dag> in {
2289 defm COMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
2290 "comiss", SSEPackedSingle>, TB;
2291 defm COMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
2292 "comisd", SSEPackedDouble>, TB, OpSize;
2295 defm Int_UCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
2296 load, "ucomiss", SSEPackedSingle>, TB;
2297 defm Int_UCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
2298 load, "ucomisd", SSEPackedDouble>, TB, OpSize;
2300 defm Int_COMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem, load,
2301 "comiss", SSEPackedSingle>, TB;
2302 defm Int_COMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem, load,
2303 "comisd", SSEPackedDouble>, TB, OpSize;
2304 } // Defs = [EFLAGS]
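// Note on the two flavors above: ucomiss/ucomisd raise #IA only for a
// signaling NaN, while comiss/comisd raise it for any NaN; both set ZF/PF/CF
// directly from the comparison, hence the Defs = [EFLAGS] block.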
2306 // sse12_cmp_packed - sse 1 & 2 compare packed instructions
2307 multiclass sse12_cmp_packed<RegisterClass RC, X86MemOperand x86memop,
2308 Operand CC, Intrinsic Int, string asm,
2309 string asm_alt, Domain d> {
2310 def rri : PIi8<0xC2, MRMSrcReg,
2311 (outs RC:$dst), (ins RC:$src1, RC:$src2, CC:$cc), asm,
2312 [(set RC:$dst, (Int RC:$src1, RC:$src2, imm:$cc))],
2313 IIC_SSE_CMPP_RR, d>;
2314 def rmi : PIi8<0xC2, MRMSrcMem,
2315 (outs RC:$dst), (ins RC:$src1, x86memop:$src2, CC:$cc), asm,
2316 [(set RC:$dst, (Int RC:$src1, (memop addr:$src2), imm:$cc))],
2317 IIC_SSE_CMPP_RM, d>;
2319 // Accept explicit immediate argument form instead of comparison code.
2320 let neverHasSideEffects = 1 in {
2321 def rri_alt : PIi8<0xC2, MRMSrcReg,
2322 (outs RC:$dst), (ins RC:$src1, RC:$src2, i8imm:$cc),
2323 asm_alt, [], IIC_SSE_CMPP_RR, d>;
2324 def rmi_alt : PIi8<0xC2, MRMSrcMem,
2325 (outs RC:$dst), (ins RC:$src1, x86memop:$src2, i8imm:$cc),
                       asm_alt, [], IIC_SSE_CMPP_RM, d>;
  }
}
2330 defm VCMPPS : sse12_cmp_packed<VR128, f128mem, AVXCC, int_x86_sse_cmp_ps,
2331 "cmp${cc}ps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2332 "cmpps\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
2333 SSEPackedSingle>, TB, VEX_4V;
2334 defm VCMPPD : sse12_cmp_packed<VR128, f128mem, AVXCC, int_x86_sse2_cmp_pd,
2335 "cmp${cc}pd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2336 "cmppd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
2337 SSEPackedDouble>, TB, OpSize, VEX_4V;
2338 defm VCMPPSY : sse12_cmp_packed<VR256, f256mem, AVXCC, int_x86_avx_cmp_ps_256,
2339 "cmp${cc}ps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2340 "cmpps\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
2341 SSEPackedSingle>, TB, VEX_4V, VEX_L;
2342 defm VCMPPDY : sse12_cmp_packed<VR256, f256mem, AVXCC, int_x86_avx_cmp_pd_256,
2343 "cmp${cc}pd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2344 "cmppd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
2345 SSEPackedDouble>, TB, OpSize, VEX_4V, VEX_L;
2346 let Constraints = "$src1 = $dst" in {
2347 defm CMPPS : sse12_cmp_packed<VR128, f128mem, SSECC, int_x86_sse_cmp_ps,
2348 "cmp${cc}ps\t{$src2, $dst|$dst, $src2}",
2349 "cmpps\t{$cc, $src2, $dst|$dst, $src2, $cc}",
2350 SSEPackedSingle>, TB;
2351 defm CMPPD : sse12_cmp_packed<VR128, f128mem, SSECC, int_x86_sse2_cmp_pd,
2352 "cmp${cc}pd\t{$src2, $dst|$dst, $src2}",
2353 "cmppd\t{$cc, $src2, $dst|$dst, $src2, $cc}",
                 SSEPackedDouble>, TB, OpSize;
}
2357 let Predicates = [HasAVX] in {
2358 def : Pat<(v4i32 (X86cmpp (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
2359 (VCMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>;
2360 def : Pat<(v4i32 (X86cmpp (v4f32 VR128:$src1), (memop addr:$src2), imm:$cc)),
2361 (VCMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>;
2362 def : Pat<(v2i64 (X86cmpp (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
2363 (VCMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
2364 def : Pat<(v2i64 (X86cmpp (v2f64 VR128:$src1), (memop addr:$src2), imm:$cc)),
2365 (VCMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;
2367 def : Pat<(v8i32 (X86cmpp (v8f32 VR256:$src1), VR256:$src2, imm:$cc)),
2368 (VCMPPSYrri (v8f32 VR256:$src1), (v8f32 VR256:$src2), imm:$cc)>;
2369 def : Pat<(v8i32 (X86cmpp (v8f32 VR256:$src1), (memop addr:$src2), imm:$cc)),
2370 (VCMPPSYrmi (v8f32 VR256:$src1), addr:$src2, imm:$cc)>;
2371 def : Pat<(v4i64 (X86cmpp (v4f64 VR256:$src1), VR256:$src2, imm:$cc)),
2372 (VCMPPDYrri VR256:$src1, VR256:$src2, imm:$cc)>;
2373 def : Pat<(v4i64 (X86cmpp (v4f64 VR256:$src1), (memop addr:$src2), imm:$cc)),
            (VCMPPDYrmi VR256:$src1, addr:$src2, imm:$cc)>;
}
2377 let Predicates = [UseSSE1] in {
2378 def : Pat<(v4i32 (X86cmpp (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
2379 (CMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>;
2380 def : Pat<(v4i32 (X86cmpp (v4f32 VR128:$src1), (memop addr:$src2), imm:$cc)),
            (CMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>;
}
2384 let Predicates = [UseSSE2] in {
2385 def : Pat<(v2i64 (X86cmpp (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
2386 (CMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
2387 def : Pat<(v2i64 (X86cmpp (v2f64 VR128:$src1), (memop addr:$src2), imm:$cc)),
            (CMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;
}
2391 //===----------------------------------------------------------------------===//
2392 // SSE 1 & 2 - Shuffle Instructions
2393 //===----------------------------------------------------------------------===//
2395 /// sse12_shuffle - sse 1 & 2 shuffle instructions
2396 multiclass sse12_shuffle<RegisterClass RC, X86MemOperand x86memop,
2397 ValueType vt, string asm, PatFrag mem_frag,
2398 Domain d, bit IsConvertibleToThreeAddress = 0> {
2399 def rmi : PIi8<0xC6, MRMSrcMem, (outs RC:$dst),
2400 (ins RC:$src1, x86memop:$src2, i8imm:$src3), asm,
2401 [(set RC:$dst, (vt (X86Shufp RC:$src1, (mem_frag addr:$src2),
2402 (i8 imm:$src3))))], IIC_SSE_SHUFP, d>;
2403 let isConvertibleToThreeAddress = IsConvertibleToThreeAddress in
2404 def rri : PIi8<0xC6, MRMSrcReg, (outs RC:$dst),
2405 (ins RC:$src1, RC:$src2, i8imm:$src3), asm,
2406 [(set RC:$dst, (vt (X86Shufp RC:$src1, RC:$src2,
                            (i8 imm:$src3))))], IIC_SSE_SHUFP, d>;
}
2410 defm VSHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
2411 "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
2412 memopv4f32, SSEPackedSingle>, TB, VEX_4V;
2413 defm VSHUFPSY : sse12_shuffle<VR256, f256mem, v8f32,
2414 "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
2415 memopv8f32, SSEPackedSingle>, TB, VEX_4V, VEX_L;
2416 defm VSHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
2417 "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src2, $src2, $src3}",
2418 memopv2f64, SSEPackedDouble>, TB, OpSize, VEX_4V;
2419 defm VSHUFPDY : sse12_shuffle<VR256, f256mem, v4f64,
2420 "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src2, $src2, $src3}",
2421 memopv4f64, SSEPackedDouble>, TB, OpSize, VEX_4V, VEX_L;
2423 let Constraints = "$src1 = $dst" in {
2424 defm SHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
2425 "shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                    memopv4f32, SSEPackedSingle, 1 /* cvt to pshufd */>,
                    TB;
2428 defm SHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
2429 "shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                    memopv2f64, SSEPackedDouble, 1 /* cvt to pshufd */>,
                    TB, OpSize;
}
2434 let Predicates = [HasAVX] in {
2435 def : Pat<(v4i32 (X86Shufp VR128:$src1,
2436 (bc_v4i32 (memopv2i64 addr:$src2)), (i8 imm:$imm))),
2437 (VSHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
2438 def : Pat<(v4i32 (X86Shufp VR128:$src1, VR128:$src2, (i8 imm:$imm))),
2439 (VSHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
2441 def : Pat<(v2i64 (X86Shufp VR128:$src1,
2442 (memopv2i64 addr:$src2), (i8 imm:$imm))),
2443 (VSHUFPDrmi VR128:$src1, addr:$src2, imm:$imm)>;
2444 def : Pat<(v2i64 (X86Shufp VR128:$src1, VR128:$src2, (i8 imm:$imm))),
2445 (VSHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;
2448 def : Pat<(v8i32 (X86Shufp VR256:$src1, VR256:$src2, (i8 imm:$imm))),
2449 (VSHUFPSYrri VR256:$src1, VR256:$src2, imm:$imm)>;
2450 def : Pat<(v8i32 (X86Shufp VR256:$src1,
2451 (bc_v8i32 (memopv4i64 addr:$src2)), (i8 imm:$imm))),
2452 (VSHUFPSYrmi VR256:$src1, addr:$src2, imm:$imm)>;
2454 def : Pat<(v4i64 (X86Shufp VR256:$src1, VR256:$src2, (i8 imm:$imm))),
2455 (VSHUFPDYrri VR256:$src1, VR256:$src2, imm:$imm)>;
2456 def : Pat<(v4i64 (X86Shufp VR256:$src1,
2457 (memopv4i64 addr:$src2), (i8 imm:$imm))),
            (VSHUFPDYrmi VR256:$src1, addr:$src2, imm:$imm)>;
}
2461 let Predicates = [UseSSE1] in {
2462 def : Pat<(v4i32 (X86Shufp VR128:$src1,
2463 (bc_v4i32 (memopv2i64 addr:$src2)), (i8 imm:$imm))),
2464 (SHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
2465 def : Pat<(v4i32 (X86Shufp VR128:$src1, VR128:$src2, (i8 imm:$imm))),
            (SHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
}
2469 let Predicates = [UseSSE2] in {
2470 // Generic SHUFPD patterns
2471 def : Pat<(v2i64 (X86Shufp VR128:$src1,
2472 (memopv2i64 addr:$src2), (i8 imm:$imm))),
2473 (SHUFPDrmi VR128:$src1, addr:$src2, imm:$imm)>;
2474 def : Pat<(v2i64 (X86Shufp VR128:$src1, VR128:$src2, (i8 imm:$imm))),
            (SHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;
}
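// shufpd's immediate selects one f64 lane per result element: bit 0 picks
// the lane of $src1 for the low element and bit 1 the lane of $src2 for the
// high element, e.g. imm = 1 yields [ src1[1], src2[0] ].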
2478 //===----------------------------------------------------------------------===//
2479 // SSE 1 & 2 - Unpack Instructions
2480 //===----------------------------------------------------------------------===//
2482 /// sse12_unpack_interleave - sse 1 & 2 unpack and interleave
multiclass sse12_unpack_interleave<bits<8> opc, SDNode OpNode, ValueType vt,
                                   PatFrag mem_frag, RegisterClass RC,
                                   X86MemOperand x86memop, string asm,
                                   Domain d> {
    def rr : PI<opc, MRMSrcReg,
                (outs RC:$dst), (ins RC:$src1, RC:$src2),
                asm, [(set RC:$dst,
                           (vt (OpNode RC:$src1, RC:$src2)))],
                           IIC_SSE_UNPCK, d>;
    def rm : PI<opc, MRMSrcMem,
                (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
                asm, [(set RC:$dst,
                           (vt (OpNode RC:$src1,
                                       (mem_frag addr:$src2))))],
                                       IIC_SSE_UNPCK, d>;
}
2500 defm VUNPCKHPS: sse12_unpack_interleave<0x15, X86Unpckh, v4f32, memopv4f32,
2501 VR128, f128mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2502 SSEPackedSingle>, TB, VEX_4V;
2503 defm VUNPCKHPD: sse12_unpack_interleave<0x15, X86Unpckh, v2f64, memopv2f64,
2504 VR128, f128mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2505 SSEPackedDouble>, TB, OpSize, VEX_4V;
2506 defm VUNPCKLPS: sse12_unpack_interleave<0x14, X86Unpckl, v4f32, memopv4f32,
2507 VR128, f128mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2508 SSEPackedSingle>, TB, VEX_4V;
2509 defm VUNPCKLPD: sse12_unpack_interleave<0x14, X86Unpckl, v2f64, memopv2f64,
2510 VR128, f128mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2511 SSEPackedDouble>, TB, OpSize, VEX_4V;
2513 defm VUNPCKHPSY: sse12_unpack_interleave<0x15, X86Unpckh, v8f32, memopv8f32,
2514 VR256, f256mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2515 SSEPackedSingle>, TB, VEX_4V, VEX_L;
2516 defm VUNPCKHPDY: sse12_unpack_interleave<0x15, X86Unpckh, v4f64, memopv4f64,
2517 VR256, f256mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2518 SSEPackedDouble>, TB, OpSize, VEX_4V, VEX_L;
2519 defm VUNPCKLPSY: sse12_unpack_interleave<0x14, X86Unpckl, v8f32, memopv8f32,
2520 VR256, f256mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2521 SSEPackedSingle>, TB, VEX_4V, VEX_L;
2522 defm VUNPCKLPDY: sse12_unpack_interleave<0x14, X86Unpckl, v4f64, memopv4f64,
2523 VR256, f256mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2524 SSEPackedDouble>, TB, OpSize, VEX_4V, VEX_L;
2526 let Constraints = "$src1 = $dst" in {
2527 defm UNPCKHPS: sse12_unpack_interleave<0x15, X86Unpckh, v4f32, memopv4f32,
2528 VR128, f128mem, "unpckhps\t{$src2, $dst|$dst, $src2}",
2529 SSEPackedSingle>, TB;
2530 defm UNPCKHPD: sse12_unpack_interleave<0x15, X86Unpckh, v2f64, memopv2f64,
2531 VR128, f128mem, "unpckhpd\t{$src2, $dst|$dst, $src2}",
2532 SSEPackedDouble>, TB, OpSize;
2533 defm UNPCKLPS: sse12_unpack_interleave<0x14, X86Unpckl, v4f32, memopv4f32,
2534 VR128, f128mem, "unpcklps\t{$src2, $dst|$dst, $src2}",
2535 SSEPackedSingle>, TB;
2536 defm UNPCKLPD: sse12_unpack_interleave<0x14, X86Unpckl, v2f64, memopv2f64,
2537 VR128, f128mem, "unpcklpd\t{$src2, $dst|$dst, $src2}",
2538 SSEPackedDouble>, TB, OpSize;
2539 } // Constraints = "$src1 = $dst"
2541 let Predicates = [HasAVX1Only] in {
2542 def : Pat<(v8i32 (X86Unpckl VR256:$src1, (bc_v8i32 (memopv4i64 addr:$src2)))),
2543 (VUNPCKLPSYrm VR256:$src1, addr:$src2)>;
2544 def : Pat<(v8i32 (X86Unpckl VR256:$src1, VR256:$src2)),
2545 (VUNPCKLPSYrr VR256:$src1, VR256:$src2)>;
2546 def : Pat<(v8i32 (X86Unpckh VR256:$src1, (bc_v8i32 (memopv4i64 addr:$src2)))),
2547 (VUNPCKHPSYrm VR256:$src1, addr:$src2)>;
2548 def : Pat<(v8i32 (X86Unpckh VR256:$src1, VR256:$src2)),
2549 (VUNPCKHPSYrr VR256:$src1, VR256:$src2)>;
2551 def : Pat<(v4i64 (X86Unpckl VR256:$src1, (memopv4i64 addr:$src2))),
2552 (VUNPCKLPDYrm VR256:$src1, addr:$src2)>;
2553 def : Pat<(v4i64 (X86Unpckl VR256:$src1, VR256:$src2)),
2554 (VUNPCKLPDYrr VR256:$src1, VR256:$src2)>;
2555 def : Pat<(v4i64 (X86Unpckh VR256:$src1, (memopv4i64 addr:$src2))),
2556 (VUNPCKHPDYrm VR256:$src1, addr:$src2)>;
2557 def : Pat<(v4i64 (X86Unpckh VR256:$src1, VR256:$src2)),
            (VUNPCKHPDYrr VR256:$src1, VR256:$src2)>;
}
2561 let Predicates = [HasAVX] in {
  // FIXME: Instead of X86Movddup, there should be an X86Unpckl here; the
  // problem is during lowering, where it's not possible to recognize the
  // load fold because it has two uses through a bitcast. One use disappears
  // at isel time and the fold opportunity reappears.
2566 def : Pat<(v2f64 (X86Movddup VR128:$src)),
            (VUNPCKLPDrr VR128:$src, VR128:$src)>;
}
2570 let Predicates = [UseSSE2] in {
  // FIXME: Instead of X86Movddup, there should be an X86Unpckl here; the
  // problem is during lowering, where it's not possible to recognize the
  // load fold because it has two uses through a bitcast. One use disappears
  // at isel time and the fold opportunity reappears.
2575 def : Pat<(v2f64 (X86Movddup VR128:$src)),
            (UNPCKLPDrr VR128:$src, VR128:$src)>;
}
2579 //===----------------------------------------------------------------------===//
2580 // SSE 1 & 2 - Extract Floating-Point Sign mask
2581 //===----------------------------------------------------------------------===//
/// sse12_extr_sign_mask - sse 1 & 2 extract floating-point sign mask
multiclass sse12_extr_sign_mask<RegisterClass RC, Intrinsic Int, string asm,
                                Domain d> {
2586 def rr32 : PI<0x50, MRMSrcReg, (outs GR32:$dst), (ins RC:$src),
2587 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
2588 [(set GR32:$dst, (Int RC:$src))], IIC_SSE_MOVMSK, d>;
2589 def rr64 : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins RC:$src),
2590 !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [],
             IIC_SSE_MOVMSK, d>, REX_W;
}
2594 let Predicates = [HasAVX] in {
2595 defm VMOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps,
2596 "movmskps", SSEPackedSingle>, TB, VEX;
2597 defm VMOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd,
2598 "movmskpd", SSEPackedDouble>, TB,
2600 defm VMOVMSKPSY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_ps_256,
2601 "movmskps", SSEPackedSingle>, TB,
2603 defm VMOVMSKPDY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_pd_256,
2604 "movmskpd", SSEPackedDouble>, TB,
2607 def : Pat<(i32 (X86fgetsign FR32:$src)),
2608 (VMOVMSKPSrr32 (COPY_TO_REGCLASS FR32:$src, VR128))>;
2609 def : Pat<(i64 (X86fgetsign FR32:$src)),
2610 (VMOVMSKPSrr64 (COPY_TO_REGCLASS FR32:$src, VR128))>;
2611 def : Pat<(i32 (X86fgetsign FR64:$src)),
2612 (VMOVMSKPDrr32 (COPY_TO_REGCLASS FR64:$src, VR128))>;
2613 def : Pat<(i64 (X86fgetsign FR64:$src)),
2614 (VMOVMSKPDrr64 (COPY_TO_REGCLASS FR64:$src, VR128))>;
2617 def VMOVMSKPSr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
2618 "movmskps\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVMSK,
2619 SSEPackedSingle>, TB, VEX;
2620 def VMOVMSKPDr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
2621 "movmskpd\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVMSK,
                   SSEPackedDouble>, TB,
                   OpSize, VEX;
2624 def VMOVMSKPSYr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
2625 "movmskps\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVMSK,
2626 SSEPackedSingle>, TB, VEX, VEX_L;
2627 def VMOVMSKPDYr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
2628 "movmskpd\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVMSK,
                   SSEPackedDouble>, TB,
                   OpSize, VEX, VEX_L;
}
2633 defm MOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps, "movmskps",
2634 SSEPackedSingle>, TB;
2635 defm MOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd, "movmskpd",
2636 SSEPackedDouble>, TB, OpSize;
2638 def : Pat<(i32 (X86fgetsign FR32:$src)),
2639 (MOVMSKPSrr32 (COPY_TO_REGCLASS FR32:$src, VR128))>,
2640 Requires<[UseSSE1]>;
2641 def : Pat<(i64 (X86fgetsign FR32:$src)),
2642 (MOVMSKPSrr64 (COPY_TO_REGCLASS FR32:$src, VR128))>,
2643 Requires<[UseSSE1]>;
2644 def : Pat<(i32 (X86fgetsign FR64:$src)),
2645 (MOVMSKPDrr32 (COPY_TO_REGCLASS FR64:$src, VR128))>,
2646 Requires<[UseSSE2]>;
2647 def : Pat<(i64 (X86fgetsign FR64:$src)),
2648 (MOVMSKPDrr64 (COPY_TO_REGCLASS FR64:$src, VR128))>,
2649 Requires<[UseSSE2]>;
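// movmskps/movmskpd pack the sign bit of each lane into the low bits of a
// GPR, e.g. v4f32 lanes [-1.0, 2.0, -3.0, 4.0] give 0b0101 = 5; this is how
// the X86fgetsign patterns above extract a scalar sign bit.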
2651 //===---------------------------------------------------------------------===//
2652 // SSE2 - Packed Integer Logical Instructions
2653 //===---------------------------------------------------------------------===//
2655 let ExeDomain = SSEPackedInt in { // SSE integer instructions
2657 /// PDI_binop_rm - Simple SSE2 binary operator.
2658 multiclass PDI_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
2659 ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
                         X86MemOperand x86memop, OpndItins itins,
                         bit IsCommutable = 0,
                         bit Is2Addr = 1> {
2664 let isCommutable = IsCommutable in
2665 def rr : PDI<opc, MRMSrcReg, (outs RC:$dst),
2666 (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2669 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2670 [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2)))], itins.rr>;
2671 def rm : PDI<opc, MRMSrcMem, (outs RC:$dst),
2672 (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2675 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2676 [(set RC:$dst, (OpVT (OpNode RC:$src1,
                                     (bitconvert (memop_frag addr:$src2)))))],
               itins.rm>;
}
2680 } // ExeDomain = SSEPackedInt
2682 multiclass PDI_binop_all<bits<8> opc, string OpcodeStr, SDNode Opcode,
2683 ValueType OpVT128, ValueType OpVT256,
2684 OpndItins itins, bit IsCommutable = 0> {
2685 let Predicates = [HasAVX] in
2686 defm VP#NAME# : PDI_binop_rm<opc, !strconcat("v", OpcodeStr), Opcode, OpVT128,
2687 VR128, memopv2i64, i128mem, itins, IsCommutable, 0>, VEX_4V;
2689 let Constraints = "$src1 = $dst" in
2690 defm P#NAME# : PDI_binop_rm<opc, OpcodeStr, Opcode, OpVT128, VR128, memopv2i64,
2691 i128mem, itins, IsCommutable>;
2693 let Predicates = [HasAVX2] in
2694 defm VP#NAME#Y : PDI_binop_rm<opc, !strconcat("v", OpcodeStr), Opcode,
2695 OpVT256, VR256, memopv4i64, i256mem, itins,
                               IsCommutable, 0>, VEX_4V, VEX_L;
}
2699 // These are ordered here for pattern ordering requirements with the fp versions
2701 defm AND : PDI_binop_all<0xDB, "pand", and, v2i64, v4i64, SSE_BIT_ITINS_P, 1>;
2702 defm OR : PDI_binop_all<0xEB, "por", or, v2i64, v4i64, SSE_BIT_ITINS_P, 1>;
2703 defm XOR : PDI_binop_all<0xEF, "pxor", xor, v2i64, v4i64, SSE_BIT_ITINS_P, 1>;
2704 defm ANDN : PDI_binop_all<0xDF, "pandn", X86andnp, v2i64, v4i64,
2705 SSE_BIT_ITINS_P, 0>;
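// Each defm above expands through PDI_binop_all into an SSE pair plus the
// VEX forms, e.g. "AND" yields PANDrr/PANDrm, VPANDrr/VPANDrm (HasAVX) and
// VPANDYrr/VPANDYrm (HasAVX2).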
2707 //===----------------------------------------------------------------------===//
2708 // SSE 1 & 2 - Logical Instructions
2709 //===----------------------------------------------------------------------===//
2711 /// sse12_fp_alias_pack_logical - SSE 1 & 2 aliased packed FP logical ops
multiclass sse12_fp_alias_pack_logical<bits<8> opc, string OpcodeStr,
                                       SDNode OpNode, OpndItins itins> {
  defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
              FR32, f32, f128mem, memopfsf32, SSEPackedSingle, itins, 0>,
              TB, VEX_4V;

  defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
              FR64, f64, f128mem, memopfsf64, SSEPackedDouble, itins, 0>,
              TB, OpSize, VEX_4V;

  let Constraints = "$src1 = $dst" in {
    defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, FR32,
                f32, f128mem, memopfsf32, SSEPackedSingle, itins>,
                TB;

    defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, FR64,
                f64, f128mem, memopfsf64, SSEPackedDouble, itins>,
                TB, OpSize;
  }
}
2734 // Alias bitwise logical operations using SSE logical ops on packed FP values.
2735 defm FsAND : sse12_fp_alias_pack_logical<0x54, "and", X86fand,
2737 defm FsOR : sse12_fp_alias_pack_logical<0x56, "or", X86for,
2739 defm FsXOR : sse12_fp_alias_pack_logical<0x57, "xor", X86fxor,
2742 let neverHasSideEffects = 1, Pattern = []<dag>, isCommutable = 0 in
2743 defm FsANDN : sse12_fp_alias_pack_logical<0x55, "andn", undef,
2746 /// sse12_fp_packed_logical - SSE 1 & 2 packed FP logical ops
2748 multiclass sse12_fp_packed_logical<bits<8> opc, string OpcodeStr,
2750 defm V#NAME#PSY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedSingle,
2751 !strconcat(OpcodeStr, "ps"), f256mem,
2752 [(set VR256:$dst, (v4i64 (OpNode VR256:$src1, VR256:$src2)))],
2753 [(set VR256:$dst, (OpNode (bc_v4i64 (v8f32 VR256:$src1)),
2754 (memopv4i64 addr:$src2)))], 0>, TB, VEX_4V, VEX_L;
2756 defm V#NAME#PDY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedDouble,
2757 !strconcat(OpcodeStr, "pd"), f256mem,
2758 [(set VR256:$dst, (OpNode (bc_v4i64 (v4f64 VR256:$src1)),
2759 (bc_v4i64 (v4f64 VR256:$src2))))],
2760 [(set VR256:$dst, (OpNode (bc_v4i64 (v4f64 VR256:$src1)),
2761 (memopv4i64 addr:$src2)))], 0>,
2762 TB, OpSize, VEX_4V, VEX_L;
2764 // In AVX no need to add a pattern for 128-bit logical rr ps, because they
2765 // are all promoted to v2i64, and the patterns are covered by the int
2766 // version. This is needed in SSE only, because v2i64 isn't supported on
2767 // SSE1, but only on SSE2.
2768 defm V#NAME#PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
2769 !strconcat(OpcodeStr, "ps"), f128mem, [],
2770 [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
2771 (memopv2i64 addr:$src2)))], 0, 1>, TB, VEX_4V;
2773 defm V#NAME#PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
2774 !strconcat(OpcodeStr, "pd"), f128mem,
2775 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
2776 (bc_v2i64 (v2f64 VR128:$src2))))],
2777 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
2778 (memopv2i64 addr:$src2)))], 0>,
2781 let Constraints = "$src1 = $dst" in {
2782 defm PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
2783 !strconcat(OpcodeStr, "ps"), f128mem,
2784 [(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))],
2785 [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
2786 (memopv2i64 addr:$src2)))]>, TB;
2788 defm PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
2789 !strconcat(OpcodeStr, "pd"), f128mem,
2790 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
2791 (bc_v2i64 (v2f64 VR128:$src2))))],
2792 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
2793 (memopv2i64 addr:$src2)))]>, TB, OpSize;
2797 defm AND : sse12_fp_packed_logical<0x54, "and", and>;
2798 defm OR : sse12_fp_packed_logical<0x56, "or", or>;
2799 defm XOR : sse12_fp_packed_logical<0x57, "xor", xor>;
2800 let isCommutable = 0 in
2801 defm ANDN : sse12_fp_packed_logical<0x55, "andn", X86andnp>;
2803 //===----------------------------------------------------------------------===//
2804 // SSE 1 & 2 - Arithmetic Instructions
2805 //===----------------------------------------------------------------------===//
2807 /// basic_sse12_fp_binop_xxx - SSE 1 & 2 binops come in both scalar and
2810 /// In addition, we also have a special variant of the scalar form here to
2811 /// represent the associated intrinsic operation. This form is unlike the
2812 /// plain scalar form, in that it takes an entire vector (instead of a scalar)
2813 /// and leaves the top elements unmodified (therefore these cannot be commuted).
2815 /// These three forms can each be reg+reg or reg+mem.
2818 /// FIXME: once all 256-bit intrinsics are matched, cleanup and refactor those
2820 multiclass basic_sse12_fp_binop_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
2823 defm SS : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "ss"),
2824 OpNode, FR32, f32mem,
2825 itins.s, Is2Addr>, XS;
2826 defm SD : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "sd"),
2827 OpNode, FR64, f64mem,
2828 itins.d, Is2Addr>, XD;
2831 multiclass basic_sse12_fp_binop_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
2834 defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR128,
2835 v4f32, f128mem, memopv4f32, SSEPackedSingle, itins.s, Is2Addr>,
2837 defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR128,
2838 v2f64, f128mem, memopv2f64, SSEPackedDouble, itins.d, Is2Addr>,
2842 multiclass basic_sse12_fp_binop_p_y<bits<8> opc, string OpcodeStr,
2845 defm PSY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR256,
2846 v8f32, f256mem, memopv8f32, SSEPackedSingle, itins.s, 0>,
2848 defm PDY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR256,
2849 v4f64, f256mem, memopv4f64, SSEPackedDouble, itins.d, 0>,
2853 multiclass basic_sse12_fp_binop_s_int<bits<8> opc, string OpcodeStr,
2856 defm SS : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
2857 !strconcat(OpcodeStr, "ss"), "", "_ss", ssmem, sse_load_f32,
2858 itins.s, Is2Addr>, XS;
2859 defm SD : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
2860 !strconcat(OpcodeStr, "sd"), "2", "_sd", sdmem, sse_load_f64,
2861 itins.d, Is2Addr>, XD;
2864 multiclass basic_sse12_fp_binop_p_int<bits<8> opc, string OpcodeStr,
2867 defm PS : sse12_fp_packed_int<opc, OpcodeStr, VR128,
2868 !strconcat(OpcodeStr, "ps"), "sse", "_ps", f128mem, memopv4f32,
2869 SSEPackedSingle, itins.s, Is2Addr>,
2872 defm PD : sse12_fp_packed_int<opc, OpcodeStr, VR128,
2873 !strconcat(OpcodeStr, "pd"), "sse2", "_pd", f128mem, memopv2f64,
2874 SSEPackedDouble, itins.d, Is2Addr>,
2878 multiclass basic_sse12_fp_binop_p_y_int<bits<8> opc, string OpcodeStr,
2880 defm PSY : sse12_fp_packed_int<opc, OpcodeStr, VR256,
2881 !strconcat(OpcodeStr, "ps"), "avx", "_ps_256", f256mem, memopv8f32,
2882 SSEPackedSingle, itins.s, 0>, TB, VEX_L;
2884 defm PDY : sse12_fp_packed_int<opc, OpcodeStr, VR256,
2885 !strconcat(OpcodeStr, "pd"), "avx", "_pd_256", f256mem, memopv4f64,
2886 SSEPackedDouble, itins.d, 0>, TB, OpSize, VEX_L;
2889 // Binary Arithmetic instructions
2890 defm VADD : basic_sse12_fp_binop_s<0x58, "add", fadd, SSE_ALU_ITINS_S, 0>,
2891 basic_sse12_fp_binop_s_int<0x58, "add", SSE_ALU_ITINS_S, 0>,
2893 defm VADD : basic_sse12_fp_binop_p<0x58, "add", fadd, SSE_ALU_ITINS_P, 0>,
2894 basic_sse12_fp_binop_p_y<0x58, "add", fadd, SSE_ALU_ITINS_P>,
2896 defm VMUL : basic_sse12_fp_binop_s<0x59, "mul", fmul, SSE_MUL_ITINS_S, 0>,
2897 basic_sse12_fp_binop_s_int<0x59, "mul", SSE_MUL_ITINS_S, 0>,
2899 defm VMUL : basic_sse12_fp_binop_p<0x59, "mul", fmul, SSE_MUL_ITINS_P, 0>,
2900 basic_sse12_fp_binop_p_y<0x59, "mul", fmul, SSE_MUL_ITINS_P>,
2903 let isCommutable = 0 in {
2904 defm VSUB : basic_sse12_fp_binop_s<0x5C, "sub", fsub, SSE_ALU_ITINS_S, 0>,
2905 basic_sse12_fp_binop_s_int<0x5C, "sub", SSE_ALU_ITINS_S, 0>,
2907 defm VSUB : basic_sse12_fp_binop_p<0x5C, "sub", fsub, SSE_ALU_ITINS_P, 0>,
2908 basic_sse12_fp_binop_p_y<0x5C, "sub", fsub, SSE_ALU_ITINS_P>,
2910 defm VDIV : basic_sse12_fp_binop_s<0x5E, "div", fdiv, SSE_DIV_ITINS_S, 0>,
2911 basic_sse12_fp_binop_s_int<0x5E, "div", SSE_DIV_ITINS_S, 0>,
2913 defm VDIV : basic_sse12_fp_binop_p<0x5E, "div", fdiv, SSE_ALU_ITINS_P, 0>,
2914 basic_sse12_fp_binop_p_y<0x5E, "div", fdiv, SSE_DIV_ITINS_P>,
2916 defm VMAX : basic_sse12_fp_binop_s<0x5F, "max", X86fmax, SSE_ALU_ITINS_S, 0>,
2917 basic_sse12_fp_binop_s_int<0x5F, "max", SSE_ALU_ITINS_S, 0>,
2919 defm VMAX : basic_sse12_fp_binop_p<0x5F, "max", X86fmax, SSE_ALU_ITINS_P, 0>,
2920 basic_sse12_fp_binop_p_int<0x5F, "max", SSE_ALU_ITINS_P, 0>,
2921 basic_sse12_fp_binop_p_y<0x5F, "max", X86fmax, SSE_ALU_ITINS_P>,
2922 basic_sse12_fp_binop_p_y_int<0x5F, "max", SSE_ALU_ITINS_P>,
2924 defm VMIN : basic_sse12_fp_binop_s<0x5D, "min", X86fmin, SSE_ALU_ITINS_S, 0>,
2925 basic_sse12_fp_binop_s_int<0x5D, "min", SSE_ALU_ITINS_S, 0>,
2927 defm VMIN : basic_sse12_fp_binop_p<0x5D, "min", X86fmin, SSE_ALU_ITINS_P, 0>,
2928 basic_sse12_fp_binop_p_int<0x5D, "min", SSE_ALU_ITINS_P, 0>,
2929 basic_sse12_fp_binop_p_y_int<0x5D, "min", SSE_ALU_ITINS_P>,
2930 basic_sse12_fp_binop_p_y<0x5D, "min", X86fmin, SSE_ALU_ITINS_P>,
2934 let Constraints = "$src1 = $dst" in {
2935 defm ADD : basic_sse12_fp_binop_s<0x58, "add", fadd, SSE_ALU_ITINS_S>,
2936 basic_sse12_fp_binop_p<0x58, "add", fadd, SSE_ALU_ITINS_P>,
2937 basic_sse12_fp_binop_s_int<0x58, "add", SSE_ALU_ITINS_S>;
2938 defm MUL : basic_sse12_fp_binop_s<0x59, "mul", fmul, SSE_MUL_ITINS_S>,
2939 basic_sse12_fp_binop_p<0x59, "mul", fmul, SSE_MUL_ITINS_P>,
2940 basic_sse12_fp_binop_s_int<0x59, "mul", SSE_MUL_ITINS_S>;
2942 let isCommutable = 0 in {
2943 defm SUB : basic_sse12_fp_binop_s<0x5C, "sub", fsub, SSE_ALU_ITINS_S>,
2944 basic_sse12_fp_binop_p<0x5C, "sub", fsub, SSE_ALU_ITINS_P>,
2945 basic_sse12_fp_binop_s_int<0x5C, "sub", SSE_ALU_ITINS_S>;
2946 defm DIV : basic_sse12_fp_binop_s<0x5E, "div", fdiv, SSE_DIV_ITINS_S>,
2947 basic_sse12_fp_binop_p<0x5E, "div", fdiv, SSE_DIV_ITINS_P>,
2948 basic_sse12_fp_binop_s_int<0x5E, "div", SSE_DIV_ITINS_S>;
2949 defm MAX : basic_sse12_fp_binop_s<0x5F, "max", X86fmax, SSE_ALU_ITINS_S>,
2950 basic_sse12_fp_binop_p<0x5F, "max", X86fmax, SSE_ALU_ITINS_P>,
2951 basic_sse12_fp_binop_s_int<0x5F, "max", SSE_ALU_ITINS_S>,
2952 basic_sse12_fp_binop_p_int<0x5F, "max", SSE_ALU_ITINS_P>;
2953 defm MIN : basic_sse12_fp_binop_s<0x5D, "min", X86fmin, SSE_ALU_ITINS_S>,
2954 basic_sse12_fp_binop_p<0x5D, "min", X86fmin, SSE_ALU_ITINS_P>,
2955 basic_sse12_fp_binop_s_int<0x5D, "min", SSE_ALU_ITINS_S>,
2956 basic_sse12_fp_binop_p_int<0x5D, "min", SSE_ALU_ITINS_P>;
2960 let isCodeGenOnly = 1 in {
2961 defm VMAXC: basic_sse12_fp_binop_s<0x5F, "max", X86fmaxc, SSE_ALU_ITINS_S, 0>,
2963 defm VMAXC: basic_sse12_fp_binop_p<0x5F, "max", X86fmaxc, SSE_ALU_ITINS_P, 0>,
2964 basic_sse12_fp_binop_p_y<0x5F, "max", X86fmaxc, SSE_ALU_ITINS_P>, VEX_4V;
2965 defm VMINC: basic_sse12_fp_binop_s<0x5D, "min", X86fminc, SSE_ALU_ITINS_S, 0>,
2967 defm VMINC: basic_sse12_fp_binop_p<0x5D, "min", X86fminc, SSE_ALU_ITINS_P, 0>,
2968 basic_sse12_fp_binop_p_y<0x5D, "min", X86fminc, SSE_ALU_ITINS_P>, VEX_4V;
2969 let Constraints = "$src1 = $dst" in {
2970 defm MAXC: basic_sse12_fp_binop_s<0x5F, "max", X86fmaxc, SSE_ALU_ITINS_S>,
2971 basic_sse12_fp_binop_p<0x5F, "max", X86fmaxc, SSE_ALU_ITINS_P>;
2972 defm MINC: basic_sse12_fp_binop_s<0x5D, "min", X86fminc, SSE_ALU_ITINS_S>,
2973 basic_sse12_fp_binop_p<0x5D, "min", X86fminc, SSE_ALU_ITINS_P>;
2978 /// In addition, we also have a special variant of the scalar form here to
2979 /// represent the associated intrinsic operation. This form is unlike the
2980 /// plain scalar form, in that it takes an entire vector (instead of a
2981 /// scalar) and leaves the top elements undefined.
2983 /// And, we have a special variant form for a full-vector intrinsic form.
2985 def SSE_SQRTP : OpndItins<
2986 IIC_SSE_SQRTP_RR, IIC_SSE_SQRTP_RM
2989 def SSE_SQRTS : OpndItins<
2990 IIC_SSE_SQRTS_RR, IIC_SSE_SQRTS_RM
2993 def SSE_RCPP : OpndItins<
2994 IIC_SSE_RCPP_RR, IIC_SSE_RCPP_RM
2997 def SSE_RCPS : OpndItins<
2998 IIC_SSE_RCPS_RR, IIC_SSE_RCPS_RM
3001 /// sse1_fp_unop_s - SSE1 unops in scalar form.
3002 multiclass sse1_fp_unop_s<bits<8> opc, string OpcodeStr,
3003 SDNode OpNode, Intrinsic F32Int, OpndItins itins> {
3004 def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
3005 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
3006 [(set FR32:$dst, (OpNode FR32:$src))]>;
3007 // For scalar unary operations, fold a load into the operation
3008 // only in OptForSize mode. It eliminates an instruction, but it also
3009 // eliminates a whole-register clobber (the load), so it introduces a
3010 // partial register update condition.
3011 def SSm : I<opc, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
3012 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
3013 [(set FR32:$dst, (OpNode (load addr:$src)))], itins.rm>, XS,
3014 Requires<[UseSSE1, OptForSize]>;
3015 def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3016 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
3017 [(set VR128:$dst, (F32Int VR128:$src))], itins.rr>;
3018 def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins ssmem:$src),
3019 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
3020 [(set VR128:$dst, (F32Int sse_load_f32:$src))], itins.rm>;
3023 /// sse1_fp_unop_s_avx - AVX SSE1 unops in scalar form.
3024 multiclass sse1_fp_unop_s_avx<bits<8> opc, string OpcodeStr> {
3025 def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
3026 !strconcat(OpcodeStr,
3027 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
3028 let mayLoad = 1 in {
3029 def SSm : SSI<opc, MRMSrcMem, (outs FR32:$dst), (ins FR32:$src1,f32mem:$src2),
3030 !strconcat(OpcodeStr,
3031 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
3032 def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst),
3033 (ins VR128:$src1, ssmem:$src2),
3034 !strconcat(OpcodeStr,
3035 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
3039 /// sse1_fp_unop_p - SSE1 unops in packed form.
3040 multiclass sse1_fp_unop_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
3042 def PSr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3043 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
3044 [(set VR128:$dst, (v4f32 (OpNode VR128:$src)))], itins.rr>;
3045 def PSm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3046 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
3047 [(set VR128:$dst, (OpNode (memopv4f32 addr:$src)))], itins.rm>;
3050 /// sse1_fp_unop_p_y - AVX 256-bit SSE1 unops in packed form.
3051 multiclass sse1_fp_unop_p_y<bits<8> opc, string OpcodeStr, SDNode OpNode,
3053 def PSYr : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
3054 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
3055 [(set VR256:$dst, (v8f32 (OpNode VR256:$src)))],
3057 def PSYm : PSI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
3058 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
3059 [(set VR256:$dst, (OpNode (memopv8f32 addr:$src)))],
3063 /// sse1_fp_unop_p_int - SSE1 intrinsics unops in packed forms.
3064 multiclass sse1_fp_unop_p_int<bits<8> opc, string OpcodeStr,
3065 Intrinsic V4F32Int, OpndItins itins> {
3066 def PSr_Int : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3067 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
3068 [(set VR128:$dst, (V4F32Int VR128:$src))],
3070 def PSm_Int : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3071 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
3072 [(set VR128:$dst, (V4F32Int (memopv4f32 addr:$src)))],
3076 /// sse1_fp_unop_p_y_int - AVX 256-bit intrinsics unops in packed forms.
3077 multiclass sse1_fp_unop_p_y_int<bits<8> opc, string OpcodeStr,
3078 Intrinsic V4F32Int, OpndItins itins> {
3079 def PSYr_Int : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
3080 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
3081 [(set VR256:$dst, (V4F32Int VR256:$src))],
3083 def PSYm_Int : PSI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
3084 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
3085 [(set VR256:$dst, (V4F32Int (memopv8f32 addr:$src)))],
3089 /// sse2_fp_unop_s - SSE2 unops in scalar form.
3090 multiclass sse2_fp_unop_s<bits<8> opc, string OpcodeStr,
3091 SDNode OpNode, Intrinsic F64Int, OpndItins itins> {
3092 def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
3093 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
3094 [(set FR64:$dst, (OpNode FR64:$src))], itins.rr>;
3095 // See the comments in sse1_fp_unop_s for why this is OptForSize.
3096 def SDm : I<opc, MRMSrcMem, (outs FR64:$dst), (ins f64mem:$src),
3097 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
3098 [(set FR64:$dst, (OpNode (load addr:$src)))], itins.rm>, XD,
3099 Requires<[UseSSE2, OptForSize]>;
3100 def SDr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3101 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
3102 [(set VR128:$dst, (F64Int VR128:$src))], itins.rr>;
3103 def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins sdmem:$src),
3104 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
3105 [(set VR128:$dst, (F64Int sse_load_f64:$src))], itins.rm>;
3108 /// sse2_fp_unop_s_avx - AVX SSE2 unops in scalar form.
3109 let hasSideEffects = 0 in
3110 multiclass sse2_fp_unop_s_avx<bits<8> opc, string OpcodeStr> {
3111 def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
3112 !strconcat(OpcodeStr,
3113 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
3114 let mayLoad = 1 in {
3115 def SDm : SDI<opc, MRMSrcMem, (outs FR64:$dst), (ins FR64:$src1,f64mem:$src2),
3116 !strconcat(OpcodeStr,
3117 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
3118 def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst),
3119 (ins VR128:$src1, sdmem:$src2),
3120 !strconcat(OpcodeStr,
3121 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
3125 /// sse2_fp_unop_p - SSE2 unops in vector forms.
3126 multiclass sse2_fp_unop_p<bits<8> opc, string OpcodeStr,
3127 SDNode OpNode, OpndItins itins> {
3128 def PDr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3129 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
3130 [(set VR128:$dst, (v2f64 (OpNode VR128:$src)))], itins.rr>;
3131 def PDm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3132 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
3133 [(set VR128:$dst, (OpNode (memopv2f64 addr:$src)))], itins.rm>;
3136 /// sse2_fp_unop_p_y - AVX SSE2 256-bit unops in vector forms.
3137 multiclass sse2_fp_unop_p_y<bits<8> opc, string OpcodeStr, SDNode OpNode,
3139 def PDYr : PDI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
3140 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
3141 [(set VR256:$dst, (v4f64 (OpNode VR256:$src)))],
3143 def PDYm : PDI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
3144 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
3145 [(set VR256:$dst, (OpNode (memopv4f64 addr:$src)))],
3149 /// sse2_fp_unop_p_int - SSE2 intrinsic unops in vector forms.
3150 multiclass sse2_fp_unop_p_int<bits<8> opc, string OpcodeStr,
3151 Intrinsic V2F64Int, OpndItins itins> {
3152 def PDr_Int : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3153 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
3154 [(set VR128:$dst, (V2F64Int VR128:$src))],
3156 def PDm_Int : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3157 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
3158 [(set VR128:$dst, (V2F64Int (memopv2f64 addr:$src)))],
3162 /// sse2_fp_unop_p_y_int - AVX 256-bit intrinsic unops in vector forms.
3163 multiclass sse2_fp_unop_p_y_int<bits<8> opc, string OpcodeStr,
3164 Intrinsic V2F64Int, OpndItins itins> {
3165 def PDYr_Int : PDI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
3166 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
3167 [(set VR256:$dst, (V2F64Int VR256:$src))],
3169 def PDYm_Int : PDI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
3170 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
3171 [(set VR256:$dst, (V2F64Int (memopv4f64 addr:$src)))],
3175 let Predicates = [HasAVX] in {
3177 defm VSQRT : sse1_fp_unop_s_avx<0x51, "vsqrt">,
3178 sse2_fp_unop_s_avx<0x51, "vsqrt">, VEX_4V, VEX_LIG;
3180 defm VSQRT : sse1_fp_unop_p<0x51, "vsqrt", fsqrt, SSE_SQRTP>,
3181 sse2_fp_unop_p<0x51, "vsqrt", fsqrt, SSE_SQRTP>,
3182 sse1_fp_unop_p_y<0x51, "vsqrt", fsqrt, SSE_SQRTP>,
3183 sse2_fp_unop_p_y<0x51, "vsqrt", fsqrt, SSE_SQRTP>,
3184 sse1_fp_unop_p_int<0x51, "vsqrt", int_x86_sse_sqrt_ps,
3186 sse2_fp_unop_p_int<0x51, "vsqrt", int_x86_sse2_sqrt_pd,
3188 sse1_fp_unop_p_y_int<0x51, "vsqrt", int_x86_avx_sqrt_ps_256,
3190 sse2_fp_unop_p_y_int<0x51, "vsqrt", int_x86_avx_sqrt_pd_256,
3194 // Reciprocal approximations. Note that these typically require refinement
3195 // in order to obtain suitable precision.
3196 defm VRSQRT : sse1_fp_unop_s_avx<0x52, "vrsqrt">, VEX_4V, VEX_LIG;
3197 defm VRSQRT : sse1_fp_unop_p<0x52, "vrsqrt", X86frsqrt, SSE_SQRTP>,
3198 sse1_fp_unop_p_y<0x52, "vrsqrt", X86frsqrt, SSE_SQRTP>,
3199 sse1_fp_unop_p_y_int<0x52, "vrsqrt", int_x86_avx_rsqrt_ps_256,
3201 sse1_fp_unop_p_int<0x52, "vrsqrt", int_x86_sse_rsqrt_ps,
3204 defm VRCP : sse1_fp_unop_s_avx<0x53, "vrcp">, VEX_4V, VEX_LIG;
3205 defm VRCP : sse1_fp_unop_p<0x53, "vrcp", X86frcp, SSE_RCPP>,
3206 sse1_fp_unop_p_y<0x53, "vrcp", X86frcp, SSE_RCPP>,
3207 sse1_fp_unop_p_y_int<0x53, "vrcp", int_x86_avx_rcp_ps_256,
3209 sse1_fp_unop_p_int<0x53, "vrcp", int_x86_sse_rcp_ps,
3213 def : Pat<(f32 (fsqrt FR32:$src)),
3214 (VSQRTSSr (f32 (IMPLICIT_DEF)), FR32:$src)>, Requires<[HasAVX]>;
3215 def : Pat<(f32 (fsqrt (load addr:$src))),
3216 (VSQRTSSm (f32 (IMPLICIT_DEF)), addr:$src)>,
3217 Requires<[HasAVX, OptForSize]>;
3218 def : Pat<(f64 (fsqrt FR64:$src)),
3219 (VSQRTSDr (f64 (IMPLICIT_DEF)), FR64:$src)>, Requires<[HasAVX]>;
3220 def : Pat<(f64 (fsqrt (load addr:$src))),
3221 (VSQRTSDm (f64 (IMPLICIT_DEF)), addr:$src)>,
3222 Requires<[HasAVX, OptForSize]>;
3224 def : Pat<(f32 (X86frsqrt FR32:$src)),
3225 (VRSQRTSSr (f32 (IMPLICIT_DEF)), FR32:$src)>, Requires<[HasAVX]>;
3226 def : Pat<(f32 (X86frsqrt (load addr:$src))),
3227 (VRSQRTSSm (f32 (IMPLICIT_DEF)), addr:$src)>,
3228 Requires<[HasAVX, OptForSize]>;
3230 def : Pat<(f32 (X86frcp FR32:$src)),
3231 (VRCPSSr (f32 (IMPLICIT_DEF)), FR32:$src)>, Requires<[HasAVX]>;
3232 def : Pat<(f32 (X86frcp (load addr:$src))),
3233 (VRCPSSm (f32 (IMPLICIT_DEF)), addr:$src)>,
3234 Requires<[HasAVX, OptForSize]>;
3236 let Predicates = [HasAVX] in {
3237 def : Pat<(int_x86_sse_sqrt_ss VR128:$src),
3238 (COPY_TO_REGCLASS (VSQRTSSr (f32 (IMPLICIT_DEF)),
3239 (COPY_TO_REGCLASS VR128:$src, FR32)),
3241 def : Pat<(int_x86_sse_sqrt_ss sse_load_f32:$src),
3242 (VSQRTSSm_Int (v4f32 (IMPLICIT_DEF)), sse_load_f32:$src)>;
3244 def : Pat<(int_x86_sse2_sqrt_sd VR128:$src),
3245 (COPY_TO_REGCLASS (VSQRTSDr (f64 (IMPLICIT_DEF)),
3246 (COPY_TO_REGCLASS VR128:$src, FR64)),
3248 def : Pat<(int_x86_sse2_sqrt_sd sse_load_f64:$src),
3249 (VSQRTSDm_Int (v2f64 (IMPLICIT_DEF)), sse_load_f64:$src)>;
3251 def : Pat<(int_x86_sse_rsqrt_ss VR128:$src),
3252 (COPY_TO_REGCLASS (VRSQRTSSr (f32 (IMPLICIT_DEF)),
3253 (COPY_TO_REGCLASS VR128:$src, FR32)),
3255 def : Pat<(int_x86_sse_rsqrt_ss sse_load_f32:$src),
3256 (VRSQRTSSm_Int (v4f32 (IMPLICIT_DEF)), sse_load_f32:$src)>;
3258 def : Pat<(int_x86_sse_rcp_ss VR128:$src),
3259 (COPY_TO_REGCLASS (VRCPSSr (f32 (IMPLICIT_DEF)),
3260 (COPY_TO_REGCLASS VR128:$src, FR32)),
3262 def : Pat<(int_x86_sse_rcp_ss sse_load_f32:$src),
3263 (VRCPSSm_Int (v4f32 (IMPLICIT_DEF)), sse_load_f32:$src)>;
3267 defm SQRT : sse1_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse_sqrt_ss,
3269 sse1_fp_unop_p<0x51, "sqrt", fsqrt, SSE_SQRTS>,
3270 sse1_fp_unop_p_int<0x51, "sqrt", int_x86_sse_sqrt_ps, SSE_SQRTS>,
3271 sse2_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse2_sqrt_sd,
3273 sse2_fp_unop_p<0x51, "sqrt", fsqrt, SSE_SQRTS>,
3274 sse2_fp_unop_p_int<0x51, "sqrt", int_x86_sse2_sqrt_pd, SSE_SQRTS>;
3276 /// sse1_fp_unop_s_rw - SSE1 unops where vector form has a read-write operand.
3277 multiclass sse1_fp_unop_rw<bits<8> opc, string OpcodeStr, SDNode OpNode,
3278 Intrinsic F32Int, OpndItins itins> {
3279 def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
3280 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
3281 [(set FR32:$dst, (OpNode FR32:$src))]>;
3282 // For scalar unary operations, fold a load into the operation
3283 // only in OptForSize mode. It eliminates an instruction, but it also
3284 // eliminates a whole-register clobber (the load), so it introduces a
3285 // partial register update condition.
3286 def SSm : I<opc, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
3287 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
3288 [(set FR32:$dst, (OpNode (load addr:$src)))], itins.rm>, XS,
3289 Requires<[UseSSE1, OptForSize]>;
3290 let Constraints = "$src1 = $dst" in {
3291 def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst),
3292 (ins VR128:$src1, VR128:$src2),
3293 !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
3295 def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst),
3296 (ins VR128:$src1, ssmem:$src2),
3297 !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
3302 // Reciprocal approximations. Note that these typically require refinement
3303 // in order to obtain suitable precision.
3304 defm RSQRT : sse1_fp_unop_rw<0x52, "rsqrt", X86frsqrt, int_x86_sse_rsqrt_ss,
3306 sse1_fp_unop_p<0x52, "rsqrt", X86frsqrt, SSE_SQRTS>,
3307 sse1_fp_unop_p_int<0x52, "rsqrt", int_x86_sse_rsqrt_ps,
3309 let Predicates = [UseSSE1] in {
3310 def : Pat<(int_x86_sse_rsqrt_ss VR128:$src),
3311 (RSQRTSSr_Int VR128:$src, VR128:$src)>;
3314 defm RCP : sse1_fp_unop_rw<0x53, "rcp", X86frcp, int_x86_sse_rcp_ss,
3316 sse1_fp_unop_p<0x53, "rcp", X86frcp, SSE_RCPS>,
3317 sse1_fp_unop_p_int<0x53, "rcp", int_x86_sse_rcp_ps, SSE_RCPS>;
3318 let Predicates = [UseSSE1] in {
3319 def : Pat<(int_x86_sse_rcp_ss VR128:$src),
3320 (RCPSSr_Int VR128:$src, VR128:$src)>;
3323 // There is no f64 version of the reciprocal approximation instructions.
3325 //===----------------------------------------------------------------------===//
3326 // SSE 1 & 2 - Non-temporal stores
3327 //===----------------------------------------------------------------------===//
3329 let AddedComplexity = 400 in { // Prefer non-temporal versions
3330 def VMOVNTPSmr : VPSI<0x2B, MRMDestMem, (outs),
3331 (ins f128mem:$dst, VR128:$src),
3332 "movntps\t{$src, $dst|$dst, $src}",
3333 [(alignednontemporalstore (v4f32 VR128:$src),
3335 IIC_SSE_MOVNT>, VEX;
3336 def VMOVNTPDmr : VPDI<0x2B, MRMDestMem, (outs),
3337 (ins f128mem:$dst, VR128:$src),
3338 "movntpd\t{$src, $dst|$dst, $src}",
3339 [(alignednontemporalstore (v2f64 VR128:$src),
3341 IIC_SSE_MOVNT>, VEX;
3343 let ExeDomain = SSEPackedInt in
3344 def VMOVNTDQmr : VPDI<0xE7, MRMDestMem, (outs),
3345 (ins f128mem:$dst, VR128:$src),
3346 "movntdq\t{$src, $dst|$dst, $src}",
3347 [(alignednontemporalstore (v2i64 VR128:$src),
3349 IIC_SSE_MOVNT>, VEX;
3351 def : Pat<(alignednontemporalstore (v2i64 VR128:$src), addr:$dst),
3352 (VMOVNTDQmr addr:$dst, VR128:$src)>, Requires<[HasAVX]>;
3354 def VMOVNTPSYmr : VPSI<0x2B, MRMDestMem, (outs),
3355 (ins f256mem:$dst, VR256:$src),
3356 "movntps\t{$src, $dst|$dst, $src}",
3357 [(alignednontemporalstore (v8f32 VR256:$src),
3359 IIC_SSE_MOVNT>, VEX, VEX_L;
3360 def VMOVNTPDYmr : VPDI<0x2B, MRMDestMem, (outs),
3361 (ins f256mem:$dst, VR256:$src),
3362 "movntpd\t{$src, $dst|$dst, $src}",
3363 [(alignednontemporalstore (v4f64 VR256:$src),
3365 IIC_SSE_MOVNT>, VEX, VEX_L;
3366 let ExeDomain = SSEPackedInt in
3367 def VMOVNTDQYmr : VPDI<0xE7, MRMDestMem, (outs),
3368 (ins f256mem:$dst, VR256:$src),
3369 "movntdq\t{$src, $dst|$dst, $src}",
3370 [(alignednontemporalstore (v4i64 VR256:$src),
3372 IIC_SSE_MOVNT>, VEX, VEX_L;
3375 let AddedComplexity = 400 in { // Prefer non-temporal versions
3376 def MOVNTPSmr : PSI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
3377 "movntps\t{$src, $dst|$dst, $src}",
3378 [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)],
3380 def MOVNTPDmr : PDI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
3381 "movntpd\t{$src, $dst|$dst, $src}",
3382 [(alignednontemporalstore(v2f64 VR128:$src), addr:$dst)],
3385 let ExeDomain = SSEPackedInt in
3386 def MOVNTDQmr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
3387 "movntdq\t{$src, $dst|$dst, $src}",
3388 [(alignednontemporalstore (v2i64 VR128:$src), addr:$dst)],
3391 def : Pat<(alignednontemporalstore (v2i64 VR128:$src), addr:$dst),
3392 (MOVNTDQmr addr:$dst, VR128:$src)>, Requires<[UseSSE2]>;
3394 // There is no AVX form for instructions below this point
3395 def MOVNTImr : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
3396 "movnti{l}\t{$src, $dst|$dst, $src}",
3397 [(nontemporalstore (i32 GR32:$src), addr:$dst)],
3399 TB, Requires<[HasSSE2]>;
3400 def MOVNTI_64mr : RI<0xC3, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
3401 "movnti{q}\t{$src, $dst|$dst, $src}",
3402 [(nontemporalstore (i64 GR64:$src), addr:$dst)],
3404 TB, Requires<[HasSSE2]>;
3407 //===----------------------------------------------------------------------===//
3408 // SSE 1 & 2 - Prefetch and memory fence
3409 //===----------------------------------------------------------------------===//
3411 // Prefetch intrinsic.
3412 let Predicates = [HasSSE1] in {
3413 def PREFETCHT0 : I<0x18, MRM1m, (outs), (ins i8mem:$src),
3414 "prefetcht0\t$src", [(prefetch addr:$src, imm, (i32 3), (i32 1))],
3415 IIC_SSE_PREFETCH>, TB;
3416 def PREFETCHT1 : I<0x18, MRM2m, (outs), (ins i8mem:$src),
3417 "prefetcht1\t$src", [(prefetch addr:$src, imm, (i32 2), (i32 1))],
3418 IIC_SSE_PREFETCH>, TB;
3419 def PREFETCHT2 : I<0x18, MRM3m, (outs), (ins i8mem:$src),
3420 "prefetcht2\t$src", [(prefetch addr:$src, imm, (i32 1), (i32 1))],
3421 IIC_SSE_PREFETCH>, TB;
3422 def PREFETCHNTA : I<0x18, MRM0m, (outs), (ins i8mem:$src),
3423 "prefetchnta\t$src", [(prefetch addr:$src, imm, (i32 0), (i32 1))],
3424 IIC_SSE_PREFETCH>, TB;
3428 def CLFLUSH : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
3429 "clflush\t$src", [(int_x86_sse2_clflush addr:$src)],
3430 IIC_SSE_PREFETCH>, TB, Requires<[HasSSE2]>;
3432 // Pause. This "instruction" is encoded as "rep; nop", so even though it
3433 // was introduced with SSE2, it's backward compatible.
3434 def PAUSE : I<0x90, RawFrm, (outs), (ins), "pause", [], IIC_SSE_PAUSE>, REP;
3436 // Load, store, and memory fence
3437 def SFENCE : I<0xAE, MRM_F8, (outs), (ins),
3438 "sfence", [(int_x86_sse_sfence)], IIC_SSE_SFENCE>,
3439 TB, Requires<[HasSSE1]>;
3440 def LFENCE : I<0xAE, MRM_E8, (outs), (ins),
3441 "lfence", [(int_x86_sse2_lfence)], IIC_SSE_LFENCE>,
3442 TB, Requires<[HasSSE2]>;
3443 def MFENCE : I<0xAE, MRM_F0, (outs), (ins),
3444 "mfence", [(int_x86_sse2_mfence)], IIC_SSE_MFENCE>,
3445 TB, Requires<[HasSSE2]>;
3447 def : Pat<(X86SFence), (SFENCE)>;
3448 def : Pat<(X86LFence), (LFENCE)>;
3449 def : Pat<(X86MFence), (MFENCE)>;
3451 //===----------------------------------------------------------------------===//
3452 // SSE 1 & 2 - Load/Store XCSR register
3453 //===----------------------------------------------------------------------===//
3455 def VLDMXCSR : VPSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
3456 "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)],
3457 IIC_SSE_LDMXCSR>, VEX;
3458 def VSTMXCSR : VPSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
3459 "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)],
3460 IIC_SSE_STMXCSR>, VEX;
3462 def LDMXCSR : PSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
3463 "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)],
3465 def STMXCSR : PSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
3466 "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)],
3469 //===---------------------------------------------------------------------===//
3470 // SSE2 - Move Aligned/Unaligned Packed Integer Instructions
3471 //===---------------------------------------------------------------------===//
3473 let ExeDomain = SSEPackedInt in { // SSE integer instructions
3475 let neverHasSideEffects = 1 in {
3476 def VMOVDQArr : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3477 "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_RR>,
3479 def VMOVDQAYrr : VPDI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
3480 "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_RR>,
3482 def VMOVDQUrr : VSSI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3483 "movdqu\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVU_P_RR>,
3485 def VMOVDQUYrr : VSSI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
3486 "movdqu\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVU_P_RR>,
3491 let isCodeGenOnly = 1 in {
3492 def VMOVDQArr_REV : VPDI<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
3493 "movdqa\t{$src, $dst|$dst, $src}", [],
3496 def VMOVDQAYrr_REV : VPDI<0x7F, MRMDestReg, (outs VR256:$dst), (ins VR256:$src),
3497 "movdqa\t{$src, $dst|$dst, $src}", [],
3498 IIC_SSE_MOVA_P_RR>, VEX, VEX_L;
3499 def VMOVDQUrr_REV : VSSI<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
3500 "movdqu\t{$src, $dst|$dst, $src}", [],
3503 def VMOVDQUYrr_REV : VSSI<0x7F, MRMDestReg, (outs VR256:$dst), (ins VR256:$src),
3504 "movdqu\t{$src, $dst|$dst, $src}", [],
3505 IIC_SSE_MOVU_P_RR>, VEX, VEX_L;
3508 let canFoldAsLoad = 1, mayLoad = 1, isReMaterializable = 1,
3509 neverHasSideEffects = 1 in {
3510 def VMOVDQArm : VPDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3511 "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_RM>,
3513 def VMOVDQAYrm : VPDI<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
3514 "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_RM>,
3516 let Predicates = [HasAVX] in {
3517 def VMOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3518 "vmovdqu\t{$src, $dst|$dst, $src}",[], IIC_SSE_MOVU_P_RM>,
3520 def VMOVDQUYrm : I<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
3521 "vmovdqu\t{$src, $dst|$dst, $src}",[], IIC_SSE_MOVU_P_RM>,
3526 let mayStore = 1, neverHasSideEffects = 1 in {
3527 def VMOVDQAmr : VPDI<0x7F, MRMDestMem, (outs),
3528 (ins i128mem:$dst, VR128:$src),
3529 "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_MR>,
3531 def VMOVDQAYmr : VPDI<0x7F, MRMDestMem, (outs),
3532 (ins i256mem:$dst, VR256:$src),
3533 "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_MR>,
3535 let Predicates = [HasAVX] in {
3536 def VMOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
3537 "vmovdqu\t{$src, $dst|$dst, $src}",[], IIC_SSE_MOVU_P_MR>,
3539 def VMOVDQUYmr : I<0x7F, MRMDestMem, (outs), (ins i256mem:$dst, VR256:$src),
3540 "vmovdqu\t{$src, $dst|$dst, $src}",[], IIC_SSE_MOVU_P_MR>,
3545 let neverHasSideEffects = 1 in
3546 def MOVDQArr : PDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3547 "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_RR>;
3549 def MOVDQUrr : I<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3550 "movdqu\t{$src, $dst|$dst, $src}",
3551 [], IIC_SSE_MOVU_P_RR>, XS, Requires<[UseSSE2]>;
3554 let isCodeGenOnly = 1 in {
3555 def MOVDQArr_REV : PDI<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
3556 "movdqa\t{$src, $dst|$dst, $src}", [],
3559 def MOVDQUrr_REV : I<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
3560 "movdqu\t{$src, $dst|$dst, $src}",
3561 [], IIC_SSE_MOVU_P_RR>, XS, Requires<[UseSSE2]>;
3564 let canFoldAsLoad = 1, mayLoad = 1, isReMaterializable = 1,
3565 neverHasSideEffects = 1 in {
3566 def MOVDQArm : PDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3567 "movdqa\t{$src, $dst|$dst, $src}",
3568 [/*(set VR128:$dst, (alignedloadv2i64 addr:$src))*/],
3570 def MOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3571 "movdqu\t{$src, $dst|$dst, $src}",
3572 [/*(set VR128:$dst, (loadv2i64 addr:$src))*/],
3574 XS, Requires<[UseSSE2]>;
3577 let mayStore = 1 in {
3578 def MOVDQAmr : PDI<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
3579 "movdqa\t{$src, $dst|$dst, $src}",
3580 [/*(alignedstore (v2i64 VR128:$src), addr:$dst)*/],
3582 def MOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
3583 "movdqu\t{$src, $dst|$dst, $src}",
3584 [/*(store (v2i64 VR128:$src), addr:$dst)*/],
3586 XS, Requires<[UseSSE2]>;
3589 } // ExeDomain = SSEPackedInt
3591 let Predicates = [HasAVX] in {
3592 def : Pat<(int_x86_sse2_storeu_dq addr:$dst, VR128:$src),
3593 (VMOVDQUmr addr:$dst, VR128:$src)>;
3594 def : Pat<(int_x86_avx_storeu_dq_256 addr:$dst, VR256:$src),
3595 (VMOVDQUYmr addr:$dst, VR256:$src)>;
3597 let Predicates = [UseSSE2] in
3598 def : Pat<(int_x86_sse2_storeu_dq addr:$dst, VR128:$src),
3599 (MOVDQUmr addr:$dst, VR128:$src)>;
3601 //===---------------------------------------------------------------------===//
3602 // SSE2 - Packed Integer Arithmetic Instructions
3603 //===---------------------------------------------------------------------===//
3605 def SSE_PMADD : OpndItins<
3606 IIC_SSE_PMADD, IIC_SSE_PMADD
3609 let ExeDomain = SSEPackedInt in { // SSE integer instructions
3611 multiclass PDI_binop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId,
3612 RegisterClass RC, PatFrag memop_frag,
3613 X86MemOperand x86memop,
3615 bit IsCommutable = 0,
3617 let isCommutable = IsCommutable in
3618 def rr : PDI<opc, MRMSrcReg, (outs RC:$dst),
3619 (ins RC:$src1, RC:$src2),
3621 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3622 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3623 [(set RC:$dst, (IntId RC:$src1, RC:$src2))], itins.rr>;
3624 def rm : PDI<opc, MRMSrcMem, (outs RC:$dst),
3625 (ins RC:$src1, x86memop:$src2),
3627 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3628 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3629 [(set RC:$dst, (IntId RC:$src1, (bitconvert (memop_frag addr:$src2))))],
3633 multiclass PDI_binop_rmi<bits<8> opc, bits<8> opc2, Format ImmForm,
3634 string OpcodeStr, SDNode OpNode,
3635 SDNode OpNode2, RegisterClass RC,
3636 ValueType DstVT, ValueType SrcVT, PatFrag bc_frag,
3637 ShiftOpndItins itins,
3639 // src2 is always 128-bit
3640 def rr : PDI<opc, MRMSrcReg, (outs RC:$dst),
3641 (ins RC:$src1, VR128:$src2),
3643 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3644 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3645 [(set RC:$dst, (DstVT (OpNode RC:$src1, (SrcVT VR128:$src2))))],
3647 def rm : PDI<opc, MRMSrcMem, (outs RC:$dst),
3648 (ins RC:$src1, i128mem:$src2),
3650 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3651 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3652 [(set RC:$dst, (DstVT (OpNode RC:$src1,
3653 (bc_frag (memopv2i64 addr:$src2)))))], itins.rm>;
3654 def ri : PDIi8<opc2, ImmForm, (outs RC:$dst),
3655 (ins RC:$src1, i32i8imm:$src2),
3657 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3658 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3659 [(set RC:$dst, (DstVT (OpNode2 RC:$src1, (i32 imm:$src2))))], itins.ri>;
3662 /// PDI_binop_rm - Simple SSE2 binary operator with different src and dst types
3663 multiclass PDI_binop_rm2<bits<8> opc, string OpcodeStr, SDNode OpNode,
3664 ValueType DstVT, ValueType SrcVT, RegisterClass RC,
3665 PatFrag memop_frag, X86MemOperand x86memop,
3667 bit IsCommutable = 0, bit Is2Addr = 1> {
3668 let isCommutable = IsCommutable in
3669 def rr : PDI<opc, MRMSrcReg, (outs RC:$dst),
3670 (ins RC:$src1, RC:$src2),
3672 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3673 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3674 [(set RC:$dst, (DstVT (OpNode (SrcVT RC:$src1), RC:$src2)))]>;
3675 def rm : PDI<opc, MRMSrcMem, (outs RC:$dst),
3676 (ins RC:$src1, x86memop:$src2),
3678 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3679 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3680 [(set RC:$dst, (DstVT (OpNode (SrcVT RC:$src1),
3681 (bitconvert (memop_frag addr:$src2)))))]>;
3683 } // ExeDomain = SSEPackedInt
3685 defm ADDB : PDI_binop_all<0xFC, "paddb", add, v16i8, v32i8,
3686 SSE_INTALU_ITINS_P, 1>;
3687 defm ADDW : PDI_binop_all<0xFD, "paddw", add, v8i16, v16i16,
3688 SSE_INTALU_ITINS_P, 1>;
3689 defm ADDD : PDI_binop_all<0xFE, "paddd", add, v4i32, v8i32,
3690 SSE_INTALU_ITINS_P, 1>;
3691 defm ADDQ : PDI_binop_all<0xD4, "paddq", add, v2i64, v4i64,
3692 SSE_INTALUQ_ITINS_P, 1>;
3693 defm MULLW : PDI_binop_all<0xD5, "pmullw", mul, v8i16, v16i16,
3694 SSE_INTMUL_ITINS_P, 1>;
3695 defm SUBB : PDI_binop_all<0xF8, "psubb", sub, v16i8, v32i8,
3696 SSE_INTALU_ITINS_P, 0>;
3697 defm SUBW : PDI_binop_all<0xF9, "psubw", sub, v8i16, v16i16,
3698 SSE_INTALU_ITINS_P, 0>;
3699 defm SUBD : PDI_binop_all<0xFA, "psubd", sub, v4i32, v8i32,
3700 SSE_INTALU_ITINS_P, 0>;
3701 defm SUBQ : PDI_binop_all<0xFB, "psubq", sub, v2i64, v4i64,
3702 SSE_INTALUQ_ITINS_P, 0>;
3703 defm SUBUSB : PDI_binop_all<0xD8, "psubusb", X86subus, v16i8, v32i8,
3704 SSE_INTALU_ITINS_P, 0>;
3705 defm SUBUSW : PDI_binop_all<0xD9, "psubusw", X86subus, v8i16, v16i16,
3706 SSE_INTALU_ITINS_P, 0>;
3707 defm MINUB : PDI_binop_all<0xDA, "pminub", X86umin, v16i8, v32i8,
3708 SSE_INTALU_ITINS_P, 1>;
3709 defm MINSW : PDI_binop_all<0xEA, "pminsw", X86smin, v8i16, v16i16,
3710 SSE_INTALU_ITINS_P, 1>;
3711 defm MAXUB : PDI_binop_all<0xDE, "pmaxub", X86umax, v16i8, v32i8,
3712 SSE_INTALU_ITINS_P, 1>;
3713 defm MAXSW : PDI_binop_all<0xEE, "vpmaxsw", X86smax, v8i16, v16i16,
3714 SSE_INTALU_ITINS_P, 1>;
3716 // 128-bit Integer Arithmetic
3718 let Predicates = [HasAVX] in {
3719 defm VPMULUDQ : PDI_binop_rm2<0xF4, "vpmuludq", X86pmuludq, v2i64, v4i32, VR128,
3720 memopv2i64, i128mem, SSE_INTMUL_ITINS_P, 1, 0>,
3724 defm VPSUBSB : PDI_binop_rm_int<0xE8, "vpsubsb" , int_x86_sse2_psubs_b,
3725 VR128, memopv2i64, i128mem,
3726 SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
3727 defm VPSUBSW : PDI_binop_rm_int<0xE9, "vpsubsw" , int_x86_sse2_psubs_w,
3728 VR128, memopv2i64, i128mem,
3729 SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
3730 defm VPADDSB : PDI_binop_rm_int<0xEC, "vpaddsb" , int_x86_sse2_padds_b,
3731 VR128, memopv2i64, i128mem,
3732 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
3733 defm VPADDSW : PDI_binop_rm_int<0xED, "vpaddsw" , int_x86_sse2_padds_w,
3734 VR128, memopv2i64, i128mem,
3735 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
3736 defm VPADDUSB : PDI_binop_rm_int<0xDC, "vpaddusb", int_x86_sse2_paddus_b,
3737 VR128, memopv2i64, i128mem,
3738 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
3739 defm VPADDUSW : PDI_binop_rm_int<0xDD, "vpaddusw", int_x86_sse2_paddus_w,
3740 VR128, memopv2i64, i128mem,
3741 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
3742 defm VPMULHUW : PDI_binop_rm_int<0xE4, "vpmulhuw", int_x86_sse2_pmulhu_w,
3743 VR128, memopv2i64, i128mem,
3744 SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V;
3745 defm VPMULHW : PDI_binop_rm_int<0xE5, "vpmulhw" , int_x86_sse2_pmulh_w,
3746 VR128, memopv2i64, i128mem,
3747 SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V;
3748 defm VPMADDWD : PDI_binop_rm_int<0xF5, "vpmaddwd", int_x86_sse2_pmadd_wd,
3749 VR128, memopv2i64, i128mem,
3750 SSE_PMADD, 1, 0>, VEX_4V;
3751 defm VPAVGB : PDI_binop_rm_int<0xE0, "vpavgb", int_x86_sse2_pavg_b,
3752 VR128, memopv2i64, i128mem,
3753 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
3754 defm VPAVGW : PDI_binop_rm_int<0xE3, "vpavgw", int_x86_sse2_pavg_w,
3755 VR128, memopv2i64, i128mem,
3756 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
3757 defm VPSADBW : PDI_binop_rm_int<0xF6, "vpsadbw", int_x86_sse2_psad_bw,
3758 VR128, memopv2i64, i128mem,
3759 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
3762 let Predicates = [HasAVX2] in {
3763 defm VPMULUDQY : PDI_binop_rm2<0xF4, "vpmuludq", X86pmuludq, v4i64, v8i32,
3764 VR256, memopv4i64, i256mem,
3765 SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V, VEX_L;
3768 defm VPSUBSBY : PDI_binop_rm_int<0xE8, "vpsubsb" , int_x86_avx2_psubs_b,
3769 VR256, memopv4i64, i256mem,
3770 SSE_INTALU_ITINS_P, 0, 0>, VEX_4V, VEX_L;
3771 defm VPSUBSWY : PDI_binop_rm_int<0xE9, "vpsubsw" , int_x86_avx2_psubs_w,
3772 VR256, memopv4i64, i256mem,
3773 SSE_INTALU_ITINS_P, 0, 0>, VEX_4V, VEX_L;
3774 defm VPADDSBY : PDI_binop_rm_int<0xEC, "vpaddsb" , int_x86_avx2_padds_b,
3775 VR256, memopv4i64, i256mem,
3776 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V, VEX_L;
3777 defm VPADDSWY : PDI_binop_rm_int<0xED, "vpaddsw" , int_x86_avx2_padds_w,
3778 VR256, memopv4i64, i256mem,
3779 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V, VEX_L;
3780 defm VPADDUSBY : PDI_binop_rm_int<0xDC, "vpaddusb", int_x86_avx2_paddus_b,
3781 VR256, memopv4i64, i256mem,
3782 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V, VEX_L;
3783 defm VPADDUSWY : PDI_binop_rm_int<0xDD, "vpaddusw", int_x86_avx2_paddus_w,
3784 VR256, memopv4i64, i256mem,
3785 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V, VEX_L;
3786 defm VPMULHUWY : PDI_binop_rm_int<0xE4, "vpmulhuw", int_x86_avx2_pmulhu_w,
3787 VR256, memopv4i64, i256mem,
3788 SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V, VEX_L;
3789 defm VPMULHWY : PDI_binop_rm_int<0xE5, "vpmulhw" , int_x86_avx2_pmulh_w,
3790 VR256, memopv4i64, i256mem,
3791 SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V, VEX_L;
3792 defm VPMADDWDY : PDI_binop_rm_int<0xF5, "vpmaddwd", int_x86_avx2_pmadd_wd,
3793 VR256, memopv4i64, i256mem,
3794 SSE_PMADD, 1, 0>, VEX_4V, VEX_L;
3795 defm VPAVGBY : PDI_binop_rm_int<0xE0, "vpavgb", int_x86_avx2_pavg_b,
3796 VR256, memopv4i64, i256mem,
3797 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V, VEX_L;
3798 defm VPAVGWY : PDI_binop_rm_int<0xE3, "vpavgw", int_x86_avx2_pavg_w,
3799 VR256, memopv4i64, i256mem,
3800 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V, VEX_L;
3801 defm VPSADBWY : PDI_binop_rm_int<0xF6, "vpsadbw", int_x86_avx2_psad_bw,
3802 VR256, memopv4i64, i256mem,
3803 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V, VEX_L;
3806 let Constraints = "$src1 = $dst" in {
3807 defm PMULUDQ : PDI_binop_rm2<0xF4, "pmuludq", X86pmuludq, v2i64, v4i32, VR128,
3808 memopv2i64, i128mem, SSE_INTMUL_ITINS_P, 1>;
3811 defm PSUBSB : PDI_binop_rm_int<0xE8, "psubsb" , int_x86_sse2_psubs_b,
3812 VR128, memopv2i64, i128mem,
3813 SSE_INTALU_ITINS_P>;
3814 defm PSUBSW : PDI_binop_rm_int<0xE9, "psubsw" , int_x86_sse2_psubs_w,
3815 VR128, memopv2i64, i128mem,
3816 SSE_INTALU_ITINS_P>;
3817 defm PADDSB : PDI_binop_rm_int<0xEC, "paddsb" , int_x86_sse2_padds_b,
3818 VR128, memopv2i64, i128mem,
3819 SSE_INTALU_ITINS_P, 1>;
3820 defm PADDSW : PDI_binop_rm_int<0xED, "paddsw" , int_x86_sse2_padds_w,
3821 VR128, memopv2i64, i128mem,
3822 SSE_INTALU_ITINS_P, 1>;
3823 defm PADDUSB : PDI_binop_rm_int<0xDC, "paddusb", int_x86_sse2_paddus_b,
3824 VR128, memopv2i64, i128mem,
3825 SSE_INTALU_ITINS_P, 1>;
3826 defm PADDUSW : PDI_binop_rm_int<0xDD, "paddusw", int_x86_sse2_paddus_w,
3827 VR128, memopv2i64, i128mem,
3828 SSE_INTALU_ITINS_P, 1>;
3829 defm PMULHUW : PDI_binop_rm_int<0xE4, "pmulhuw", int_x86_sse2_pmulhu_w,
3830 VR128, memopv2i64, i128mem,
3831 SSE_INTMUL_ITINS_P, 1>;
3832 defm PMULHW : PDI_binop_rm_int<0xE5, "pmulhw" , int_x86_sse2_pmulh_w,
3833 VR128, memopv2i64, i128mem,
3834 SSE_INTMUL_ITINS_P, 1>;
3835 defm PMADDWD : PDI_binop_rm_int<0xF5, "pmaddwd", int_x86_sse2_pmadd_wd,
3836 VR128, memopv2i64, i128mem,
3838 defm PAVGB : PDI_binop_rm_int<0xE0, "pavgb", int_x86_sse2_pavg_b,
3839 VR128, memopv2i64, i128mem,
3840 SSE_INTALU_ITINS_P, 1>;
3841 defm PAVGW : PDI_binop_rm_int<0xE3, "pavgw", int_x86_sse2_pavg_w,
3842 VR128, memopv2i64, i128mem,
3843 SSE_INTALU_ITINS_P, 1>;
3844 defm PSADBW : PDI_binop_rm_int<0xF6, "psadbw", int_x86_sse2_psad_bw,
3845 VR128, memopv2i64, i128mem,
3846 SSE_INTALU_ITINS_P, 1>;
3848 } // Constraints = "$src1 = $dst"
3850 //===---------------------------------------------------------------------===//
3851 // SSE2 - Packed Integer Logical Instructions
3852 //===---------------------------------------------------------------------===//
3854 let Predicates = [HasAVX] in {
3855 defm VPSLLW : PDI_binop_rmi<0xF1, 0x71, MRM6r, "vpsllw", X86vshl, X86vshli,
3856 VR128, v8i16, v8i16, bc_v8i16,
3857 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
3858 defm VPSLLD : PDI_binop_rmi<0xF2, 0x72, MRM6r, "vpslld", X86vshl, X86vshli,
3859 VR128, v4i32, v4i32, bc_v4i32,
3860 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
3861 defm VPSLLQ : PDI_binop_rmi<0xF3, 0x73, MRM6r, "vpsllq", X86vshl, X86vshli,
3862 VR128, v2i64, v2i64, bc_v2i64,
3863 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
3865 defm VPSRLW : PDI_binop_rmi<0xD1, 0x71, MRM2r, "vpsrlw", X86vsrl, X86vsrli,
3866 VR128, v8i16, v8i16, bc_v8i16,
3867 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
3868 defm VPSRLD : PDI_binop_rmi<0xD2, 0x72, MRM2r, "vpsrld", X86vsrl, X86vsrli,
3869 VR128, v4i32, v4i32, bc_v4i32,
3870 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
3871 defm VPSRLQ : PDI_binop_rmi<0xD3, 0x73, MRM2r, "vpsrlq", X86vsrl, X86vsrli,
3872 VR128, v2i64, v2i64, bc_v2i64,
3873 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
3875 defm VPSRAW : PDI_binop_rmi<0xE1, 0x71, MRM4r, "vpsraw", X86vsra, X86vsrai,
3876 VR128, v8i16, v8i16, bc_v8i16,
3877 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
3878 defm VPSRAD : PDI_binop_rmi<0xE2, 0x72, MRM4r, "vpsrad", X86vsra, X86vsrai,
3879 VR128, v4i32, v4i32, bc_v4i32,
3880 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
3882 let ExeDomain = SSEPackedInt in {
3883 // 128-bit logical shifts.
3884 def VPSLLDQri : PDIi8<0x73, MRM7r,
3885 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
3886 "vpslldq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
3888 (int_x86_sse2_psll_dq_bs VR128:$src1, imm:$src2))]>,
3890 def VPSRLDQri : PDIi8<0x73, MRM3r,
3891 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
3892 "vpsrldq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
3894 (int_x86_sse2_psrl_dq_bs VR128:$src1, imm:$src2))]>,
3896 // PSRADQri doesn't exist in SSE[1-3].
3898 } // Predicates = [HasAVX]
3900 let Predicates = [HasAVX2] in {
3901 defm VPSLLWY : PDI_binop_rmi<0xF1, 0x71, MRM6r, "vpsllw", X86vshl, X86vshli,
3902 VR256, v16i16, v8i16, bc_v8i16,
3903 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
3904 defm VPSLLDY : PDI_binop_rmi<0xF2, 0x72, MRM6r, "vpslld", X86vshl, X86vshli,
3905 VR256, v8i32, v4i32, bc_v4i32,
3906 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
3907 defm VPSLLQY : PDI_binop_rmi<0xF3, 0x73, MRM6r, "vpsllq", X86vshl, X86vshli,
3908 VR256, v4i64, v2i64, bc_v2i64,
3909 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
3911 defm VPSRLWY : PDI_binop_rmi<0xD1, 0x71, MRM2r, "vpsrlw", X86vsrl, X86vsrli,
3912 VR256, v16i16, v8i16, bc_v8i16,
3913 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
3914 defm VPSRLDY : PDI_binop_rmi<0xD2, 0x72, MRM2r, "vpsrld", X86vsrl, X86vsrli,
3915 VR256, v8i32, v4i32, bc_v4i32,
3916 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
3917 defm VPSRLQY : PDI_binop_rmi<0xD3, 0x73, MRM2r, "vpsrlq", X86vsrl, X86vsrli,
3918 VR256, v4i64, v2i64, bc_v2i64,
3919 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
3921 defm VPSRAWY : PDI_binop_rmi<0xE1, 0x71, MRM4r, "vpsraw", X86vsra, X86vsrai,
3922 VR256, v16i16, v8i16, bc_v8i16,
3923 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
3924 defm VPSRADY : PDI_binop_rmi<0xE2, 0x72, MRM4r, "vpsrad", X86vsra, X86vsrai,
3925 VR256, v8i32, v4i32, bc_v4i32,
3926 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
3928 let ExeDomain = SSEPackedInt in {
3929 // 256-bit logical shifts.
3930 def VPSLLDQYri : PDIi8<0x73, MRM7r,
3931 (outs VR256:$dst), (ins VR256:$src1, i32i8imm:$src2),
3932 "vpslldq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
3934 (int_x86_avx2_psll_dq_bs VR256:$src1, imm:$src2))]>,
3936 def VPSRLDQYri : PDIi8<0x73, MRM3r,
3937 (outs VR256:$dst), (ins VR256:$src1, i32i8imm:$src2),
3938 "vpsrldq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
3940 (int_x86_avx2_psrl_dq_bs VR256:$src1, imm:$src2))]>,
3942 // PSRADQYri doesn't exist in SSE[1-3].
3944 } // Predicates = [HasAVX2]
3946 let Constraints = "$src1 = $dst" in {
3947 defm PSLLW : PDI_binop_rmi<0xF1, 0x71, MRM6r, "psllw", X86vshl, X86vshli,
3948 VR128, v8i16, v8i16, bc_v8i16,
3949 SSE_INTSHIFT_ITINS_P>;
3950 defm PSLLD : PDI_binop_rmi<0xF2, 0x72, MRM6r, "pslld", X86vshl, X86vshli,
3951 VR128, v4i32, v4i32, bc_v4i32,
3952 SSE_INTSHIFT_ITINS_P>;
3953 defm PSLLQ : PDI_binop_rmi<0xF3, 0x73, MRM6r, "psllq", X86vshl, X86vshli,
3954 VR128, v2i64, v2i64, bc_v2i64,
3955 SSE_INTSHIFT_ITINS_P>;
3957 defm PSRLW : PDI_binop_rmi<0xD1, 0x71, MRM2r, "psrlw", X86vsrl, X86vsrli,
3958 VR128, v8i16, v8i16, bc_v8i16,
3959 SSE_INTSHIFT_ITINS_P>;
3960 defm PSRLD : PDI_binop_rmi<0xD2, 0x72, MRM2r, "psrld", X86vsrl, X86vsrli,
3961 VR128, v4i32, v4i32, bc_v4i32,
3962 SSE_INTSHIFT_ITINS_P>;
3963 defm PSRLQ : PDI_binop_rmi<0xD3, 0x73, MRM2r, "psrlq", X86vsrl, X86vsrli,
3964 VR128, v2i64, v2i64, bc_v2i64,
3965 SSE_INTSHIFT_ITINS_P>;
3967 defm PSRAW : PDI_binop_rmi<0xE1, 0x71, MRM4r, "psraw", X86vsra, X86vsrai,
3968 VR128, v8i16, v8i16, bc_v8i16,
3969 SSE_INTSHIFT_ITINS_P>;
3970 defm PSRAD : PDI_binop_rmi<0xE2, 0x72, MRM4r, "psrad", X86vsra, X86vsrai,
3971 VR128, v4i32, v4i32, bc_v4i32,
3972 SSE_INTSHIFT_ITINS_P>;
3974 let ExeDomain = SSEPackedInt in {
3975 // 128-bit logical shifts.
3976 def PSLLDQri : PDIi8<0x73, MRM7r,
3977 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
3978 "pslldq\t{$src2, $dst|$dst, $src2}",
3980 (int_x86_sse2_psll_dq_bs VR128:$src1, imm:$src2))]>;
3981 def PSRLDQri : PDIi8<0x73, MRM3r,
3982 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
3983 "psrldq\t{$src2, $dst|$dst, $src2}",
3985 (int_x86_sse2_psrl_dq_bs VR128:$src1, imm:$src2))]>;
3986 // PSRADQri doesn't exist in SSE[1-3].
3988 } // Constraints = "$src1 = $dst"
3990 let Predicates = [HasAVX] in {
3991 def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
3992 (VPSLLDQri VR128:$src1, (BYTE_imm imm:$src2))>;
3993 def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
3994 (VPSRLDQri VR128:$src1, (BYTE_imm imm:$src2))>;
3995 def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
3996 (VPSRLDQri VR128:$src1, (BYTE_imm imm:$src2))>;
3998 // Shift up / down and insert zero's.
3999 def : Pat<(v2i64 (X86vshldq VR128:$src, (i8 imm:$amt))),
4000 (VPSLLDQri VR128:$src, (BYTE_imm imm:$amt))>;
4001 def : Pat<(v2i64 (X86vshrdq VR128:$src, (i8 imm:$amt))),
4002 (VPSRLDQri VR128:$src, (BYTE_imm imm:$amt))>;
4005 let Predicates = [HasAVX2] in {
4006 def : Pat<(int_x86_avx2_psll_dq VR256:$src1, imm:$src2),
4007 (VPSLLDQYri VR256:$src1, (BYTE_imm imm:$src2))>;
4008 def : Pat<(int_x86_avx2_psrl_dq VR256:$src1, imm:$src2),
4009 (VPSRLDQYri VR256:$src1, (BYTE_imm imm:$src2))>;
4012 let Predicates = [UseSSE2] in {
4013 def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
4014 (PSLLDQri VR128:$src1, (BYTE_imm imm:$src2))>;
4015 def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
4016 (PSRLDQri VR128:$src1, (BYTE_imm imm:$src2))>;
4017 def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
4018 (PSRLDQri VR128:$src1, (BYTE_imm imm:$src2))>;
4020 // Shift up / down and insert zero's.
4021 def : Pat<(v2i64 (X86vshldq VR128:$src, (i8 imm:$amt))),
4022 (PSLLDQri VR128:$src, (BYTE_imm imm:$amt))>;
4023 def : Pat<(v2i64 (X86vshrdq VR128:$src, (i8 imm:$amt))),
4024 (PSRLDQri VR128:$src, (BYTE_imm imm:$amt))>;
4027 //===---------------------------------------------------------------------===//
4028 // SSE2 - Packed Integer Comparison Instructions
4029 //===---------------------------------------------------------------------===//
4031 let Predicates = [HasAVX] in {
4032 defm VPCMPEQB : PDI_binop_rm<0x74, "vpcmpeqb", X86pcmpeq, v16i8,
4033 VR128, memopv2i64, i128mem,
4034 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
4035 defm VPCMPEQW : PDI_binop_rm<0x75, "vpcmpeqw", X86pcmpeq, v8i16,
4036 VR128, memopv2i64, i128mem,
4037 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
4038 defm VPCMPEQD : PDI_binop_rm<0x76, "vpcmpeqd", X86pcmpeq, v4i32,
4039 VR128, memopv2i64, i128mem,
4040 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
4041 defm VPCMPGTB : PDI_binop_rm<0x64, "vpcmpgtb", X86pcmpgt, v16i8,
4042 VR128, memopv2i64, i128mem,
4043 SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
4044 defm VPCMPGTW : PDI_binop_rm<0x65, "vpcmpgtw", X86pcmpgt, v8i16,
4045 VR128, memopv2i64, i128mem,
4046 SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
4047 defm VPCMPGTD : PDI_binop_rm<0x66, "vpcmpgtd", X86pcmpgt, v4i32,
4048 VR128, memopv2i64, i128mem,
4049 SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
4052 let Predicates = [HasAVX2] in {
4053 defm VPCMPEQBY : PDI_binop_rm<0x74, "vpcmpeqb", X86pcmpeq, v32i8,
4054 VR256, memopv4i64, i256mem,
4055 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V, VEX_L;
4056 defm VPCMPEQWY : PDI_binop_rm<0x75, "vpcmpeqw", X86pcmpeq, v16i16,
4057 VR256, memopv4i64, i256mem,
4058 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V, VEX_L;
4059 defm VPCMPEQDY : PDI_binop_rm<0x76, "vpcmpeqd", X86pcmpeq, v8i32,
4060 VR256, memopv4i64, i256mem,
4061 SSE_INTALU_ITINS_P, 1, 0>, VEX_4V, VEX_L;
4062 defm VPCMPGTBY : PDI_binop_rm<0x64, "vpcmpgtb", X86pcmpgt, v32i8,
4063 VR256, memopv4i64, i256mem,
4064 SSE_INTALU_ITINS_P, 0, 0>, VEX_4V, VEX_L;
4065 defm VPCMPGTWY : PDI_binop_rm<0x65, "vpcmpgtw", X86pcmpgt, v16i16,
4066 VR256, memopv4i64, i256mem,
4067 SSE_INTALU_ITINS_P, 0, 0>, VEX_4V, VEX_L;
4068 defm VPCMPGTDY : PDI_binop_rm<0x66, "vpcmpgtd", X86pcmpgt, v8i32,
4069 VR256, memopv4i64, i256mem,
4070 SSE_INTALU_ITINS_P, 0, 0>, VEX_4V, VEX_L;
4073 let Constraints = "$src1 = $dst" in {
4074 defm PCMPEQB : PDI_binop_rm<0x74, "pcmpeqb", X86pcmpeq, v16i8,
4075 VR128, memopv2i64, i128mem,
4076 SSE_INTALU_ITINS_P, 1>;
4077 defm PCMPEQW : PDI_binop_rm<0x75, "pcmpeqw", X86pcmpeq, v8i16,
4078 VR128, memopv2i64, i128mem,
4079 SSE_INTALU_ITINS_P, 1>;
4080 defm PCMPEQD : PDI_binop_rm<0x76, "pcmpeqd", X86pcmpeq, v4i32,
4081 VR128, memopv2i64, i128mem,
4082 SSE_INTALU_ITINS_P, 1>;
4083 defm PCMPGTB : PDI_binop_rm<0x64, "pcmpgtb", X86pcmpgt, v16i8,
4084 VR128, memopv2i64, i128mem,
4085 SSE_INTALU_ITINS_P>;
4086 defm PCMPGTW : PDI_binop_rm<0x65, "pcmpgtw", X86pcmpgt, v8i16,
4087 VR128, memopv2i64, i128mem,
4088 SSE_INTALU_ITINS_P>;
4089 defm PCMPGTD : PDI_binop_rm<0x66, "pcmpgtd", X86pcmpgt, v4i32,
4090 VR128, memopv2i64, i128mem,
4091 SSE_INTALU_ITINS_P>;
4092 } // Constraints = "$src1 = $dst"
4094 //===---------------------------------------------------------------------===//
4095 // SSE2 - Packed Integer Pack Instructions
4096 //===---------------------------------------------------------------------===//
4098 let Predicates = [HasAVX] in {
4099 defm VPACKSSWB : PDI_binop_rm_int<0x63, "vpacksswb", int_x86_sse2_packsswb_128,
4100 VR128, memopv2i64, i128mem,
4101 SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
4102 defm VPACKSSDW : PDI_binop_rm_int<0x6B, "vpackssdw", int_x86_sse2_packssdw_128,
4103 VR128, memopv2i64, i128mem,
4104 SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
4105 defm VPACKUSWB : PDI_binop_rm_int<0x67, "vpackuswb", int_x86_sse2_packuswb_128,
4106 VR128, memopv2i64, i128mem,
4107 SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
4110 let Predicates = [HasAVX2] in {
4111 defm VPACKSSWBY : PDI_binop_rm_int<0x63, "vpacksswb", int_x86_avx2_packsswb,
4112 VR256, memopv4i64, i256mem,
4113 SSE_INTALU_ITINS_P, 0, 0>, VEX_4V, VEX_L;
4114 defm VPACKSSDWY : PDI_binop_rm_int<0x6B, "vpackssdw", int_x86_avx2_packssdw,
4115 VR256, memopv4i64, i256mem,
4116 SSE_INTALU_ITINS_P, 0, 0>, VEX_4V, VEX_L;
4117 defm VPACKUSWBY : PDI_binop_rm_int<0x67, "vpackuswb", int_x86_avx2_packuswb,
4118 VR256, memopv4i64, i256mem,
4119 SSE_INTALU_ITINS_P, 0, 0>, VEX_4V, VEX_L;
4122 let Constraints = "$src1 = $dst" in {
4123 defm PACKSSWB : PDI_binop_rm_int<0x63, "packsswb", int_x86_sse2_packsswb_128,
4124 VR128, memopv2i64, i128mem,
4125 SSE_INTALU_ITINS_P>;
4126 defm PACKSSDW : PDI_binop_rm_int<0x6B, "packssdw", int_x86_sse2_packssdw_128,
4127 VR128, memopv2i64, i128mem,
4128 SSE_INTALU_ITINS_P>;
4129 defm PACKUSWB : PDI_binop_rm_int<0x67, "packuswb", int_x86_sse2_packuswb_128,
4130 VR128, memopv2i64, i128mem,
4131 SSE_INTALU_ITINS_P>;
4132 } // Constraints = "$src1 = $dst"
4134 //===---------------------------------------------------------------------===//
4135 // SSE2 - Packed Integer Shuffle Instructions
4136 //===---------------------------------------------------------------------===//
4138 let ExeDomain = SSEPackedInt in {
4139 multiclass sse2_pshuffle<string OpcodeStr, ValueType vt, SDNode OpNode> {
4140 def ri : Ii8<0x70, MRMSrcReg,
4141 (outs VR128:$dst), (ins VR128:$src1, i8imm:$src2),
4142 !strconcat(OpcodeStr,
4143 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4144 [(set VR128:$dst, (vt (OpNode VR128:$src1, (i8 imm:$src2))))],
4146 def mi : Ii8<0x70, MRMSrcMem,
4147 (outs VR128:$dst), (ins i128mem:$src1, i8imm:$src2),
4148 !strconcat(OpcodeStr,
4149 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4151 (vt (OpNode (bitconvert (memopv2i64 addr:$src1)),
4156 multiclass sse2_pshuffle_y<string OpcodeStr, ValueType vt, SDNode OpNode> {
4157 def Yri : Ii8<0x70, MRMSrcReg,
4158 (outs VR256:$dst), (ins VR256:$src1, i8imm:$src2),
4159 !strconcat(OpcodeStr,
4160 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4161 [(set VR256:$dst, (vt (OpNode VR256:$src1, (i8 imm:$src2))))]>;
4162 def Ymi : Ii8<0x70, MRMSrcMem,
4163 (outs VR256:$dst), (ins i256mem:$src1, i8imm:$src2),
4164 !strconcat(OpcodeStr,
4165 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4167 (vt (OpNode (bitconvert (memopv4i64 addr:$src1)),
4168 (i8 imm:$src2))))]>;
4170 } // ExeDomain = SSEPackedInt
4172 let Predicates = [HasAVX] in {
4173 let AddedComplexity = 5 in
4174 defm VPSHUFD : sse2_pshuffle<"vpshufd", v4i32, X86PShufd>, TB, OpSize, VEX;
4176 // SSE2 with ImmT == Imm8 and XS prefix.
4177 defm VPSHUFHW : sse2_pshuffle<"vpshufhw", v8i16, X86PShufhw>, XS, VEX;
4179 // SSE2 with ImmT == Imm8 and XD prefix.
4180 defm VPSHUFLW : sse2_pshuffle<"vpshuflw", v8i16, X86PShuflw>, XD, VEX;
4182 def : Pat<(v4f32 (X86PShufd (memopv4f32 addr:$src1), (i8 imm:$imm))),
4183 (VPSHUFDmi addr:$src1, imm:$imm)>;
4184 def : Pat<(v4f32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
4185 (VPSHUFDri VR128:$src1, imm:$imm)>;
4188 let Predicates = [HasAVX2] in {
4189 defm VPSHUFD : sse2_pshuffle_y<"vpshufd", v8i32, X86PShufd>,
4190 TB, OpSize, VEX,VEX_L;
4191 defm VPSHUFHW : sse2_pshuffle_y<"vpshufhw", v16i16, X86PShufhw>,
4193 defm VPSHUFLW : sse2_pshuffle_y<"vpshuflw", v16i16, X86PShuflw>,
4197 let Predicates = [UseSSE2] in {
4198 let AddedComplexity = 5 in
4199 defm PSHUFD : sse2_pshuffle<"pshufd", v4i32, X86PShufd>, TB, OpSize;
4201 // SSE2 with ImmT == Imm8 and XS prefix.
4202 defm PSHUFHW : sse2_pshuffle<"pshufhw", v8i16, X86PShufhw>, XS;
4204 // SSE2 with ImmT == Imm8 and XD prefix.
4205 defm PSHUFLW : sse2_pshuffle<"pshuflw", v8i16, X86PShuflw>, XD;
4207 def : Pat<(v4f32 (X86PShufd (memopv4f32 addr:$src1), (i8 imm:$imm))),
4208 (PSHUFDmi addr:$src1, imm:$imm)>;
4209 def : Pat<(v4f32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
4210 (PSHUFDri VR128:$src1, imm:$imm)>;
4213 //===---------------------------------------------------------------------===//
4214 // SSE2 - Packed Integer Unpack Instructions
4215 //===---------------------------------------------------------------------===//
4217 let ExeDomain = SSEPackedInt in {
4218 multiclass sse2_unpack<bits<8> opc, string OpcodeStr, ValueType vt,
4219 SDNode OpNode, PatFrag bc_frag, bit Is2Addr = 1> {
4220 def rr : PDI<opc, MRMSrcReg,
4221 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
4223 !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
4224 !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4225 [(set VR128:$dst, (vt (OpNode VR128:$src1, VR128:$src2)))],
4227 def rm : PDI<opc, MRMSrcMem,
4228 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
4230 !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
4231 !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4232 [(set VR128:$dst, (OpNode VR128:$src1,
4233 (bc_frag (memopv2i64
4238 multiclass sse2_unpack_y<bits<8> opc, string OpcodeStr, ValueType vt,
4239 SDNode OpNode, PatFrag bc_frag> {
4240 def Yrr : PDI<opc, MRMSrcReg,
4241 (outs VR256:$dst), (ins VR256:$src1, VR256:$src2),
4242 !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4243 [(set VR256:$dst, (vt (OpNode VR256:$src1, VR256:$src2)))]>;
4244 def Yrm : PDI<opc, MRMSrcMem,
4245 (outs VR256:$dst), (ins VR256:$src1, i256mem:$src2),
4246 !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4247 [(set VR256:$dst, (OpNode VR256:$src1,
4248 (bc_frag (memopv4i64 addr:$src2))))]>;
4251 let Predicates = [HasAVX] in {
4252 defm VPUNPCKLBW : sse2_unpack<0x60, "vpunpcklbw", v16i8, X86Unpckl,
4253 bc_v16i8, 0>, VEX_4V;
4254 defm VPUNPCKLWD : sse2_unpack<0x61, "vpunpcklwd", v8i16, X86Unpckl,
4255 bc_v8i16, 0>, VEX_4V;
4256 defm VPUNPCKLDQ : sse2_unpack<0x62, "vpunpckldq", v4i32, X86Unpckl,
4257 bc_v4i32, 0>, VEX_4V;
4258 defm VPUNPCKLQDQ : sse2_unpack<0x6C, "vpunpcklqdq", v2i64, X86Unpckl,
4259 bc_v2i64, 0>, VEX_4V;
4261 defm VPUNPCKHBW : sse2_unpack<0x68, "vpunpckhbw", v16i8, X86Unpckh,
4262 bc_v16i8, 0>, VEX_4V;
4263 defm VPUNPCKHWD : sse2_unpack<0x69, "vpunpckhwd", v8i16, X86Unpckh,
4264 bc_v8i16, 0>, VEX_4V;
4265 defm VPUNPCKHDQ : sse2_unpack<0x6A, "vpunpckhdq", v4i32, X86Unpckh,
4266 bc_v4i32, 0>, VEX_4V;
4267 defm VPUNPCKHQDQ : sse2_unpack<0x6D, "vpunpckhqdq", v2i64, X86Unpckh,
4268 bc_v2i64, 0>, VEX_4V;
4271 let Predicates = [HasAVX2] in {
4272 defm VPUNPCKLBW : sse2_unpack_y<0x60, "vpunpcklbw", v32i8, X86Unpckl,
4273 bc_v32i8>, VEX_4V, VEX_L;
4274 defm VPUNPCKLWD : sse2_unpack_y<0x61, "vpunpcklwd", v16i16, X86Unpckl,
4275 bc_v16i16>, VEX_4V, VEX_L;
4276 defm VPUNPCKLDQ : sse2_unpack_y<0x62, "vpunpckldq", v8i32, X86Unpckl,
4277 bc_v8i32>, VEX_4V, VEX_L;
4278 defm VPUNPCKLQDQ : sse2_unpack_y<0x6C, "vpunpcklqdq", v4i64, X86Unpckl,
4279 bc_v4i64>, VEX_4V, VEX_L;
4281 defm VPUNPCKHBW : sse2_unpack_y<0x68, "vpunpckhbw", v32i8, X86Unpckh,
4282 bc_v32i8>, VEX_4V, VEX_L;
4283 defm VPUNPCKHWD : sse2_unpack_y<0x69, "vpunpckhwd", v16i16, X86Unpckh,
4284 bc_v16i16>, VEX_4V, VEX_L;
4285 defm VPUNPCKHDQ : sse2_unpack_y<0x6A, "vpunpckhdq", v8i32, X86Unpckh,
4286 bc_v8i32>, VEX_4V, VEX_L;
4287 defm VPUNPCKHQDQ : sse2_unpack_y<0x6D, "vpunpckhqdq", v4i64, X86Unpckh,
4288 bc_v4i64>, VEX_4V, VEX_L;
4291 let Constraints = "$src1 = $dst" in {
4292 defm PUNPCKLBW : sse2_unpack<0x60, "punpcklbw", v16i8, X86Unpckl,
4294 defm PUNPCKLWD : sse2_unpack<0x61, "punpcklwd", v8i16, X86Unpckl,
4296 defm PUNPCKLDQ : sse2_unpack<0x62, "punpckldq", v4i32, X86Unpckl,
4298 defm PUNPCKLQDQ : sse2_unpack<0x6C, "punpcklqdq", v2i64, X86Unpckl,
4301 defm PUNPCKHBW : sse2_unpack<0x68, "punpckhbw", v16i8, X86Unpckh,
4303 defm PUNPCKHWD : sse2_unpack<0x69, "punpckhwd", v8i16, X86Unpckh,
4305 defm PUNPCKHDQ : sse2_unpack<0x6A, "punpckhdq", v4i32, X86Unpckh,
4307 defm PUNPCKHQDQ : sse2_unpack<0x6D, "punpckhqdq", v2i64, X86Unpckh,
4310 } // ExeDomain = SSEPackedInt
4312 //===---------------------------------------------------------------------===//
4313 // SSE2 - Packed Integer Extract and Insert
4314 //===---------------------------------------------------------------------===//
4316 let ExeDomain = SSEPackedInt in {
4317 multiclass sse2_pinsrw<bit Is2Addr = 1> {
4318 def rri : Ii8<0xC4, MRMSrcReg,
4319 (outs VR128:$dst), (ins VR128:$src1,
4320 GR32:$src2, i32i8imm:$src3),
4322 "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
4323 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4325 (X86pinsrw VR128:$src1, GR32:$src2, imm:$src3))], IIC_SSE_PINSRW>;
4326 def rmi : Ii8<0xC4, MRMSrcMem,
4327 (outs VR128:$dst), (ins VR128:$src1,
4328 i16mem:$src2, i32i8imm:$src3),
4330 "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
4331 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4333 (X86pinsrw VR128:$src1, (extloadi16 addr:$src2),
4334 imm:$src3))], IIC_SSE_PINSRW>;
4338 let Predicates = [HasAVX] in
4339 def VPEXTRWri : Ii8<0xC5, MRMSrcReg,
4340 (outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
4341 "vpextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
4342 [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
4343 imm:$src2))]>, TB, OpSize, VEX;
4344 def PEXTRWri : PDIi8<0xC5, MRMSrcReg,
4345 (outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
4346 "pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
4347 [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
4348 imm:$src2))], IIC_SSE_PEXTRW>;
4351 let Predicates = [HasAVX] in {
4352 defm VPINSRW : sse2_pinsrw<0>, TB, OpSize, VEX_4V;
4353 def VPINSRWrr64i : Ii8<0xC4, MRMSrcReg, (outs VR128:$dst),
4354 (ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
4355 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
4356 []>, TB, OpSize, VEX_4V;
4359 let Constraints = "$src1 = $dst" in
4360 defm PINSRW : sse2_pinsrw, TB, OpSize, Requires<[UseSSE2]>;
4362 } // ExeDomain = SSEPackedInt
4364 //===---------------------------------------------------------------------===//
4365 // SSE2 - Packed Mask Creation
4366 //===---------------------------------------------------------------------===//
4368 let ExeDomain = SSEPackedInt in {
4370 def VPMOVMSKBrr : VPDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
4371 "pmovmskb\t{$src, $dst|$dst, $src}",
4372 [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))],
4373 IIC_SSE_MOVMSK>, VEX;
4374 def VPMOVMSKBr64r : VPDI<0xD7, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
4375 "pmovmskb\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVMSK>, VEX;
4377 let Predicates = [HasAVX2] in {
4378 def VPMOVMSKBYrr : VPDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR256:$src),
4379 "pmovmskb\t{$src, $dst|$dst, $src}",
4380 [(set GR32:$dst, (int_x86_avx2_pmovmskb VR256:$src))]>, VEX, VEX_L;
4381 def VPMOVMSKBYr64r : VPDI<0xD7, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
4382 "pmovmskb\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
4385 def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
4386 "pmovmskb\t{$src, $dst|$dst, $src}",
4387 [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))],
4390 } // ExeDomain = SSEPackedInt
4392 //===---------------------------------------------------------------------===//
4393 // SSE2 - Conditional Store
4394 //===---------------------------------------------------------------------===//
4396 let ExeDomain = SSEPackedInt in {
4399 def VMASKMOVDQU : VPDI<0xF7, MRMSrcReg, (outs),
4400 (ins VR128:$src, VR128:$mask),
4401 "maskmovdqu\t{$mask, $src|$src, $mask}",
4402 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)],
4403 IIC_SSE_MASKMOV>, VEX;
4405 def VMASKMOVDQU64 : VPDI<0xF7, MRMSrcReg, (outs),
4406 (ins VR128:$src, VR128:$mask),
4407 "maskmovdqu\t{$mask, $src|$src, $mask}",
4408 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)],
4409 IIC_SSE_MASKMOV>, VEX;
4412 def MASKMOVDQU : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
4413 "maskmovdqu\t{$mask, $src|$src, $mask}",
4414 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)],
4417 def MASKMOVDQU64 : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
4418 "maskmovdqu\t{$mask, $src|$src, $mask}",
4419 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)],
4422 } // ExeDomain = SSEPackedInt
4424 //===---------------------------------------------------------------------===//
4425 // SSE2 - Move Doubleword
4426 //===---------------------------------------------------------------------===//
4428 //===---------------------------------------------------------------------===//
4429 // Move Int Doubleword to Packed Double Int
4431 def VMOVDI2PDIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
4432 "movd\t{$src, $dst|$dst, $src}",
4434 (v4i32 (scalar_to_vector GR32:$src)))], IIC_SSE_MOVDQ>,
4436 def VMOVDI2PDIrm : VPDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
4437 "movd\t{$src, $dst|$dst, $src}",
4439 (v4i32 (scalar_to_vector (loadi32 addr:$src))))],
4442 def VMOV64toPQIrr : VRPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
4443 "mov{d|q}\t{$src, $dst|$dst, $src}",
4445 (v2i64 (scalar_to_vector GR64:$src)))],
4446 IIC_SSE_MOVDQ>, VEX;
4447 def VMOV64toSDrr : VRPDI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
4448 "mov{d|q}\t{$src, $dst|$dst, $src}",
4449 [(set FR64:$dst, (bitconvert GR64:$src))],
4450 IIC_SSE_MOVDQ>, VEX;
4452 def MOVDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
4453 "movd\t{$src, $dst|$dst, $src}",
4455 (v4i32 (scalar_to_vector GR32:$src)))], IIC_SSE_MOVDQ>;
4456 def MOVDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
4457 "movd\t{$src, $dst|$dst, $src}",
4459 (v4i32 (scalar_to_vector (loadi32 addr:$src))))],
4461 def MOV64toPQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
4462 "mov{d|q}\t{$src, $dst|$dst, $src}",
4464 (v2i64 (scalar_to_vector GR64:$src)))],
4466 def MOV64toSDrr : RPDI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
4467 "mov{d|q}\t{$src, $dst|$dst, $src}",
4468 [(set FR64:$dst, (bitconvert GR64:$src))],
4471 //===---------------------------------------------------------------------===//
4472 // Move Int Doubleword to Single Scalar
4474 def VMOVDI2SSrr : VPDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
4475 "movd\t{$src, $dst|$dst, $src}",
4476 [(set FR32:$dst, (bitconvert GR32:$src))],
4477 IIC_SSE_MOVDQ>, VEX;
4479 def VMOVDI2SSrm : VPDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
4480 "movd\t{$src, $dst|$dst, $src}",
4481 [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))],
4484 def MOVDI2SSrr : PDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
4485 "movd\t{$src, $dst|$dst, $src}",
4486 [(set FR32:$dst, (bitconvert GR32:$src))],
4489 def MOVDI2SSrm : PDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
4490 "movd\t{$src, $dst|$dst, $src}",
4491 [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))],
4494 //===---------------------------------------------------------------------===//
4495 // Move Packed Doubleword Int to Packed Double Int
4497 def VMOVPDI2DIrr : VPDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
4498 "movd\t{$src, $dst|$dst, $src}",
4499 [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
4500 (iPTR 0)))], IIC_SSE_MOVD_ToGP>, VEX;
4501 def VMOVPDI2DImr : VPDI<0x7E, MRMDestMem, (outs),
4502 (ins i32mem:$dst, VR128:$src),
4503 "movd\t{$src, $dst|$dst, $src}",
4504 [(store (i32 (vector_extract (v4i32 VR128:$src),
4505 (iPTR 0))), addr:$dst)], IIC_SSE_MOVDQ>,
4507 def MOVPDI2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
4508 "movd\t{$src, $dst|$dst, $src}",
4509 [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
4510 (iPTR 0)))], IIC_SSE_MOVD_ToGP>;
4511 def MOVPDI2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR128:$src),
4512 "movd\t{$src, $dst|$dst, $src}",
4513 [(store (i32 (vector_extract (v4i32 VR128:$src),
4514 (iPTR 0))), addr:$dst)],
4517 //===---------------------------------------------------------------------===//
4518 // Move Packed Doubleword Int first element to Doubleword Int
4520 def VMOVPQIto64rr : I<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
4521 "vmov{d|q}\t{$src, $dst|$dst, $src}",
4522 [(set GR64:$dst, (vector_extract (v2i64 VR128:$src),
4525 TB, OpSize, VEX, VEX_W, Requires<[HasAVX, In64BitMode]>;
4527 def MOVPQIto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
4528 "mov{d|q}\t{$src, $dst|$dst, $src}",
4529 [(set GR64:$dst, (vector_extract (v2i64 VR128:$src),
4533 //===---------------------------------------------------------------------===//
4534 // Bitcast FR64 <-> GR64
4536 let Predicates = [HasAVX] in
4537 def VMOV64toSDrm : S2SI<0x7E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
4538 "vmovq\t{$src, $dst|$dst, $src}",
4539 [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))]>,
4541 def VMOVSDto64rr : VRPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
4542 "mov{d|q}\t{$src, $dst|$dst, $src}",
4543 [(set GR64:$dst, (bitconvert FR64:$src))],
4544 IIC_SSE_MOVDQ>, VEX;
4545 def VMOVSDto64mr : VRPDI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
4546 "movq\t{$src, $dst|$dst, $src}",
4547 [(store (i64 (bitconvert FR64:$src)), addr:$dst)],
4548 IIC_SSE_MOVDQ>, VEX;
4550 def MOV64toSDrm : S2SI<0x7E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
4551 "movq\t{$src, $dst|$dst, $src}",
4552 [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))],
4554 def MOVSDto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
4555 "mov{d|q}\t{$src, $dst|$dst, $src}",
4556 [(set GR64:$dst, (bitconvert FR64:$src))],
4558 def MOVSDto64mr : RPDI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
4559 "movq\t{$src, $dst|$dst, $src}",
4560 [(store (i64 (bitconvert FR64:$src)), addr:$dst)],
4563 //===---------------------------------------------------------------------===//
4564 // Move Scalar Single to Double Int
4566 def VMOVSS2DIrr : VPDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
4567 "movd\t{$src, $dst|$dst, $src}",
4568 [(set GR32:$dst, (bitconvert FR32:$src))],
4569 IIC_SSE_MOVD_ToGP>, VEX;
4570 def VMOVSS2DImr : VPDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
4571 "movd\t{$src, $dst|$dst, $src}",
4572 [(store (i32 (bitconvert FR32:$src)), addr:$dst)],
4573 IIC_SSE_MOVDQ>, VEX;
4574 def MOVSS2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
4575 "movd\t{$src, $dst|$dst, $src}",
4576 [(set GR32:$dst, (bitconvert FR32:$src))],
4578 def MOVSS2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
4579 "movd\t{$src, $dst|$dst, $src}",
4580 [(store (i32 (bitconvert FR32:$src)), addr:$dst)],
4583 //===---------------------------------------------------------------------===//
4584 // Patterns and instructions to describe movd/movq to XMM register zero-extends
4586 let AddedComplexity = 15 in {
4587 def VMOVZDI2PDIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
4588 "movd\t{$src, $dst|$dst, $src}",
4589 [(set VR128:$dst, (v4i32 (X86vzmovl
4590 (v4i32 (scalar_to_vector GR32:$src)))))],
4591 IIC_SSE_MOVDQ>, VEX;
4592 def VMOVZQI2PQIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
4593 "mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only
4594 [(set VR128:$dst, (v2i64 (X86vzmovl
4595 (v2i64 (scalar_to_vector GR64:$src)))))],
4599 let AddedComplexity = 15 in {
4600 def MOVZDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
4601 "movd\t{$src, $dst|$dst, $src}",
4602 [(set VR128:$dst, (v4i32 (X86vzmovl
4603 (v4i32 (scalar_to_vector GR32:$src)))))],
4605 def MOVZQI2PQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
4606 "mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only
4607 [(set VR128:$dst, (v2i64 (X86vzmovl
4608 (v2i64 (scalar_to_vector GR64:$src)))))],
4612 let AddedComplexity = 20 in {
4613 def VMOVZDI2PDIrm : VPDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
4614 "movd\t{$src, $dst|$dst, $src}",
4616 (v4i32 (X86vzmovl (v4i32 (scalar_to_vector
4617 (loadi32 addr:$src))))))],
4618 IIC_SSE_MOVDQ>, VEX;
4619 def MOVZDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
4620 "movd\t{$src, $dst|$dst, $src}",
4622 (v4i32 (X86vzmovl (v4i32 (scalar_to_vector
4623 (loadi32 addr:$src))))))],
4627 let Predicates = [HasAVX] in {
4628 // AVX 128-bit movd/movq instruction write zeros in the high 128-bit part.
4629 let AddedComplexity = 20 in {
4630 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
4631 (VMOVZDI2PDIrm addr:$src)>;
4632 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
4633 (VMOVZDI2PDIrm addr:$src)>;
4635 // Use regular 128-bit instructions to match 256-bit scalar_to_vec+zext.
4636 def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
4637 (v4i32 (scalar_to_vector GR32:$src)),(iPTR 0)))),
4638 (SUBREG_TO_REG (i32 0), (VMOVZDI2PDIrr GR32:$src), sub_xmm)>;
4639 def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
4640 (v2i64 (scalar_to_vector GR64:$src)),(iPTR 0)))),
4641 (SUBREG_TO_REG (i64 0), (VMOVZQI2PQIrr GR64:$src), sub_xmm)>;
4644 let Predicates = [UseSSE2], AddedComplexity = 20 in {
4645 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
4646 (MOVZDI2PDIrm addr:$src)>;
4647 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
4648 (MOVZDI2PDIrm addr:$src)>;
4651 // These are the correct encodings of the instructions so that we know how to
4652 // read correct assembly, even though we continue to emit the wrong ones for
4653 // compatibility with Darwin's buggy assembler.
4654 def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
4655 (MOV64toPQIrr VR128:$dst, GR64:$src), 0>;
4656 def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
4657 (MOV64toSDrr FR64:$dst, GR64:$src), 0>;
4658 def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
4659 (MOVPQIto64rr GR64:$dst, VR128:$src), 0>;
4660 def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
4661 (MOVSDto64rr GR64:$dst, FR64:$src), 0>;
4662 def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
4663 (VMOVZQI2PQIrr VR128:$dst, GR64:$src), 0>;
4664 def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
4665 (MOVZQI2PQIrr VR128:$dst, GR64:$src), 0>;
4667 //===---------------------------------------------------------------------===//
4668 // SSE2 - Move Quadword
4669 //===---------------------------------------------------------------------===//
4671 //===---------------------------------------------------------------------===//
4672 // Move Quadword Int to Packed Quadword Int
4674 def VMOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
4675 "vmovq\t{$src, $dst|$dst, $src}",
4677 (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
4678 VEX, Requires<[HasAVX]>;
4679 def MOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
4680 "movq\t{$src, $dst|$dst, $src}",
4682 (v2i64 (scalar_to_vector (loadi64 addr:$src))))],
4684 Requires<[UseSSE2]>; // SSE2 instruction with XS Prefix
4686 //===---------------------------------------------------------------------===//
4687 // Move Packed Quadword Int to Quadword Int
4689 def VMOVPQI2QImr : VPDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
4690 "movq\t{$src, $dst|$dst, $src}",
4691 [(store (i64 (vector_extract (v2i64 VR128:$src),
4692 (iPTR 0))), addr:$dst)],
4693 IIC_SSE_MOVDQ>, VEX;
4694 def MOVPQI2QImr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
4695 "movq\t{$src, $dst|$dst, $src}",
4696 [(store (i64 (vector_extract (v2i64 VR128:$src),
4697 (iPTR 0))), addr:$dst)],
4700 //===---------------------------------------------------------------------===//
4701 // Store / copy lower 64-bits of a XMM register.
4703 def VMOVLQ128mr : VPDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
4704 "movq\t{$src, $dst|$dst, $src}",
4705 [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>, VEX;
4706 def MOVLQ128mr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
4707 "movq\t{$src, $dst|$dst, $src}",
4708 [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)],
4711 let AddedComplexity = 20 in
4712 def VMOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
4713 "vmovq\t{$src, $dst|$dst, $src}",
4715 (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
4716 (loadi64 addr:$src))))))],
4718 XS, VEX, Requires<[HasAVX]>;
4720 let AddedComplexity = 20 in
4721 def MOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
4722 "movq\t{$src, $dst|$dst, $src}",
4724 (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
4725 (loadi64 addr:$src))))))],
4727 XS, Requires<[UseSSE2]>;
4729 let Predicates = [HasAVX], AddedComplexity = 20 in {
4730 def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
4731 (VMOVZQI2PQIrm addr:$src)>;
4732 def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4f32 addr:$src)))),
4733 (VMOVZQI2PQIrm addr:$src)>;
4734 def : Pat<(v2i64 (X86vzload addr:$src)),
4735 (VMOVZQI2PQIrm addr:$src)>;
4738 let Predicates = [UseSSE2], AddedComplexity = 20 in {
4739 def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
4740 (MOVZQI2PQIrm addr:$src)>;
4741 def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4f32 addr:$src)))),
4742 (MOVZQI2PQIrm addr:$src)>;
4743 def : Pat<(v2i64 (X86vzload addr:$src)), (MOVZQI2PQIrm addr:$src)>;
4746 let Predicates = [HasAVX] in {
4747 def : Pat<(v4i64 (alignedX86vzload addr:$src)),
4748 (SUBREG_TO_REG (i32 0), (VMOVAPSrm addr:$src), sub_xmm)>;
4749 def : Pat<(v4i64 (X86vzload addr:$src)),
4750 (SUBREG_TO_REG (i32 0), (VMOVUPSrm addr:$src), sub_xmm)>;
4753 //===---------------------------------------------------------------------===//
4754 // Moving from XMM to XMM and clear upper 64 bits. Note, there is a bug in
4755 // IA32 document. movq xmm1, xmm2 does clear the high bits.
4757 let AddedComplexity = 15 in
4758 def VMOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
4759 "vmovq\t{$src, $dst|$dst, $src}",
4760 [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))],
4762 XS, VEX, Requires<[HasAVX]>;
4763 let AddedComplexity = 15 in
4764 def MOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
4765 "movq\t{$src, $dst|$dst, $src}",
4766 [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))],
4768 XS, Requires<[UseSSE2]>;
4770 let AddedComplexity = 20 in
4771 def VMOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
4772 "vmovq\t{$src, $dst|$dst, $src}",
4773 [(set VR128:$dst, (v2i64 (X86vzmovl
4774 (loadv2i64 addr:$src))))],
4776 XS, VEX, Requires<[HasAVX]>;
4777 let AddedComplexity = 20 in {
4778 def MOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
4779 "movq\t{$src, $dst|$dst, $src}",
4780 [(set VR128:$dst, (v2i64 (X86vzmovl
4781 (loadv2i64 addr:$src))))],
4783 XS, Requires<[UseSSE2]>;
4786 let AddedComplexity = 20 in {
4787 let Predicates = [HasAVX] in {
4788 def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
4789 (VMOVZPQILo2PQIrm addr:$src)>;
4790 def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
4791 (VMOVZPQILo2PQIrr VR128:$src)>;
4793 let Predicates = [UseSSE2] in {
4794 def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
4795 (MOVZPQILo2PQIrm addr:$src)>;
4796 def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
4797 (MOVZPQILo2PQIrr VR128:$src)>;
4801 // Instructions to match in the assembler
4802 def VMOVQs64rr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
4803 "movq\t{$src, $dst|$dst, $src}", [],
4804 IIC_SSE_MOVDQ>, VEX, VEX_W;
4805 def VMOVQd64rr : VPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
4806 "movq\t{$src, $dst|$dst, $src}", [],
4807 IIC_SSE_MOVDQ>, VEX, VEX_W;
4808 // Recognize "movd" with GR64 destination, but encode as a "movq"
4809 def VMOVQd64rr_alt : VPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
4810 "movd\t{$src, $dst|$dst, $src}", [],
4811 IIC_SSE_MOVDQ>, VEX, VEX_W;
4813 // Instructions for the disassembler
4814 // xr = XMM register
4817 let Predicates = [HasAVX] in
4818 def VMOVQxrxr: I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
4819 "vmovq\t{$src, $dst|$dst, $src}", []>, VEX, XS;
4820 def MOVQxrxr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
4821 "movq\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVQ_RR>, XS;
4823 //===---------------------------------------------------------------------===//
4824 // SSE3 - Replicate Single FP - MOVSHDUP and MOVSLDUP
4825 //===---------------------------------------------------------------------===//
4826 multiclass sse3_replicate_sfp<bits<8> op, SDNode OpNode, string OpcodeStr,
4827 ValueType vt, RegisterClass RC, PatFrag mem_frag,
4828 X86MemOperand x86memop> {
4829 def rr : S3SI<op, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
4830 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4831 [(set RC:$dst, (vt (OpNode RC:$src)))],
4833 def rm : S3SI<op, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
4834 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4835 [(set RC:$dst, (OpNode (mem_frag addr:$src)))],
4839 let Predicates = [HasAVX] in {
4840 defm VMOVSHDUP : sse3_replicate_sfp<0x16, X86Movshdup, "vmovshdup",
4841 v4f32, VR128, memopv4f32, f128mem>, VEX;
4842 defm VMOVSLDUP : sse3_replicate_sfp<0x12, X86Movsldup, "vmovsldup",
4843 v4f32, VR128, memopv4f32, f128mem>, VEX;
4844 defm VMOVSHDUPY : sse3_replicate_sfp<0x16, X86Movshdup, "vmovshdup",
4845 v8f32, VR256, memopv8f32, f256mem>, VEX, VEX_L;
4846 defm VMOVSLDUPY : sse3_replicate_sfp<0x12, X86Movsldup, "vmovsldup",
4847 v8f32, VR256, memopv8f32, f256mem>, VEX, VEX_L;
4849 defm MOVSHDUP : sse3_replicate_sfp<0x16, X86Movshdup, "movshdup", v4f32, VR128,
4850 memopv4f32, f128mem>;
4851 defm MOVSLDUP : sse3_replicate_sfp<0x12, X86Movsldup, "movsldup", v4f32, VR128,
4852 memopv4f32, f128mem>;
4854 let Predicates = [HasAVX] in {
4855 def : Pat<(v4i32 (X86Movshdup VR128:$src)),
4856 (VMOVSHDUPrr VR128:$src)>;
4857 def : Pat<(v4i32 (X86Movshdup (bc_v4i32 (memopv2i64 addr:$src)))),
4858 (VMOVSHDUPrm addr:$src)>;
4859 def : Pat<(v4i32 (X86Movsldup VR128:$src)),
4860 (VMOVSLDUPrr VR128:$src)>;
4861 def : Pat<(v4i32 (X86Movsldup (bc_v4i32 (memopv2i64 addr:$src)))),
4862 (VMOVSLDUPrm addr:$src)>;
4863 def : Pat<(v8i32 (X86Movshdup VR256:$src)),
4864 (VMOVSHDUPYrr VR256:$src)>;
4865 def : Pat<(v8i32 (X86Movshdup (bc_v8i32 (memopv4i64 addr:$src)))),
4866 (VMOVSHDUPYrm addr:$src)>;
4867 def : Pat<(v8i32 (X86Movsldup VR256:$src)),
4868 (VMOVSLDUPYrr VR256:$src)>;
4869 def : Pat<(v8i32 (X86Movsldup (bc_v8i32 (memopv4i64 addr:$src)))),
4870 (VMOVSLDUPYrm addr:$src)>;
4873 let Predicates = [UseSSE3] in {
4874 def : Pat<(v4i32 (X86Movshdup VR128:$src)),
4875 (MOVSHDUPrr VR128:$src)>;
4876 def : Pat<(v4i32 (X86Movshdup (bc_v4i32 (memopv2i64 addr:$src)))),
4877 (MOVSHDUPrm addr:$src)>;
4878 def : Pat<(v4i32 (X86Movsldup VR128:$src)),
4879 (MOVSLDUPrr VR128:$src)>;
4880 def : Pat<(v4i32 (X86Movsldup (bc_v4i32 (memopv2i64 addr:$src)))),
4881 (MOVSLDUPrm addr:$src)>;
4884 //===---------------------------------------------------------------------===//
4885 // SSE3 - Replicate Double FP - MOVDDUP
4886 //===---------------------------------------------------------------------===//
4888 multiclass sse3_replicate_dfp<string OpcodeStr> {
4889 let neverHasSideEffects = 1 in
4890 def rr : S3DI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
4891 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4892 [], IIC_SSE_MOV_LH>;
4893 def rm : S3DI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
4894 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4897 (scalar_to_vector (loadf64 addr:$src)))))],
4901 // FIXME: Merge with above classe when there're patterns for the ymm version
4902 multiclass sse3_replicate_dfp_y<string OpcodeStr> {
4903 def rr : S3DI<0x12, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
4904 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4905 [(set VR256:$dst, (v4f64 (X86Movddup VR256:$src)))]>;
4906 def rm : S3DI<0x12, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
4907 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4910 (scalar_to_vector (loadf64 addr:$src)))))]>;
4913 let Predicates = [HasAVX] in {
4914 defm VMOVDDUP : sse3_replicate_dfp<"vmovddup">, VEX;
4915 defm VMOVDDUPY : sse3_replicate_dfp_y<"vmovddup">, VEX, VEX_L;
4918 defm MOVDDUP : sse3_replicate_dfp<"movddup">;
4920 let Predicates = [HasAVX] in {
4921 def : Pat<(X86Movddup (memopv2f64 addr:$src)),
4922 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
4923 def : Pat<(X86Movddup (bc_v2f64 (memopv4f32 addr:$src))),
4924 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
4925 def : Pat<(X86Movddup (bc_v2f64 (memopv2i64 addr:$src))),
4926 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
4927 def : Pat<(X86Movddup (bc_v2f64
4928 (v2i64 (scalar_to_vector (loadi64 addr:$src))))),
4929 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
4932 def : Pat<(X86Movddup (memopv4f64 addr:$src)),
4933 (VMOVDDUPYrm addr:$src)>;
4934 def : Pat<(X86Movddup (memopv4i64 addr:$src)),
4935 (VMOVDDUPYrm addr:$src)>;
4936 def : Pat<(X86Movddup (v4i64 (scalar_to_vector (loadi64 addr:$src)))),
4937 (VMOVDDUPYrm addr:$src)>;
4938 def : Pat<(X86Movddup (v4i64 VR256:$src)),
4939 (VMOVDDUPYrr VR256:$src)>;
4942 let Predicates = [UseSSE3] in {
4943 def : Pat<(X86Movddup (memopv2f64 addr:$src)),
4944 (MOVDDUPrm addr:$src)>;
4945 def : Pat<(X86Movddup (bc_v2f64 (memopv4f32 addr:$src))),
4946 (MOVDDUPrm addr:$src)>;
4947 def : Pat<(X86Movddup (bc_v2f64 (memopv2i64 addr:$src))),
4948 (MOVDDUPrm addr:$src)>;
4949 def : Pat<(X86Movddup (bc_v2f64
4950 (v2i64 (scalar_to_vector (loadi64 addr:$src))))),
4951 (MOVDDUPrm addr:$src)>;
4954 //===---------------------------------------------------------------------===//
4955 // SSE3 - Move Unaligned Integer
4956 //===---------------------------------------------------------------------===//
4958 let Predicates = [HasAVX] in {
4959 def VLDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
4960 "vlddqu\t{$src, $dst|$dst, $src}",
4961 [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>, VEX;
4962 def VLDDQUYrm : S3DI<0xF0, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
4963 "vlddqu\t{$src, $dst|$dst, $src}",
4964 [(set VR256:$dst, (int_x86_avx_ldu_dq_256 addr:$src))]>,
4967 def LDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
4968 "lddqu\t{$src, $dst|$dst, $src}",
4969 [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))],
4972 //===---------------------------------------------------------------------===//
4973 // SSE3 - Arithmetic
4974 //===---------------------------------------------------------------------===//
4976 multiclass sse3_addsub<Intrinsic Int, string OpcodeStr, RegisterClass RC,
4977 X86MemOperand x86memop, OpndItins itins,
4979 def rr : I<0xD0, MRMSrcReg,
4980 (outs RC:$dst), (ins RC:$src1, RC:$src2),
4982 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4983 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4984 [(set RC:$dst, (Int RC:$src1, RC:$src2))], itins.rr>;
4985 def rm : I<0xD0, MRMSrcMem,
4986 (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
4988 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4989 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4990 [(set RC:$dst, (Int RC:$src1, (memop addr:$src2)))], itins.rr>;
4993 let Predicates = [HasAVX] in {
4994 let ExeDomain = SSEPackedSingle in {
4995 defm VADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "vaddsubps", VR128,
4996 f128mem, SSE_ALU_F32P, 0>, TB, XD, VEX_4V;
4997 defm VADDSUBPSY : sse3_addsub<int_x86_avx_addsub_ps_256, "vaddsubps", VR256,
4998 f256mem, SSE_ALU_F32P, 0>, TB, XD, VEX_4V, VEX_L;
5000 let ExeDomain = SSEPackedDouble in {
5001 defm VADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "vaddsubpd", VR128,
5002 f128mem, SSE_ALU_F64P, 0>, TB, OpSize, VEX_4V;
5003 defm VADDSUBPDY : sse3_addsub<int_x86_avx_addsub_pd_256, "vaddsubpd", VR256,
5004 f256mem, SSE_ALU_F64P, 0>, TB, OpSize, VEX_4V, VEX_L;
5007 let Constraints = "$src1 = $dst", Predicates = [UseSSE3] in {
5008 let ExeDomain = SSEPackedSingle in
5009 defm ADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "addsubps", VR128,
5010 f128mem, SSE_ALU_F32P>, TB, XD;
5011 let ExeDomain = SSEPackedDouble in
5012 defm ADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "addsubpd", VR128,
5013 f128mem, SSE_ALU_F64P>, TB, OpSize;
5016 //===---------------------------------------------------------------------===//
5017 // SSE3 Instructions
5018 //===---------------------------------------------------------------------===//
5021 multiclass S3D_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
5022 X86MemOperand x86memop, SDNode OpNode, bit Is2Addr = 1> {
5023 def rr : S3DI<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
5025 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5026 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5027 [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], IIC_SSE_HADDSUB_RR>;
5029 def rm : S3DI<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
5031 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5032 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5033 [(set RC:$dst, (vt (OpNode RC:$src1, (memop addr:$src2))))],
5034 IIC_SSE_HADDSUB_RM>;
5036 multiclass S3_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
5037 X86MemOperand x86memop, SDNode OpNode, bit Is2Addr = 1> {
5038 def rr : S3I<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
5040 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5041 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5042 [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], IIC_SSE_HADDSUB_RR>;
5044 def rm : S3I<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
5046 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5047 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5048 [(set RC:$dst, (vt (OpNode RC:$src1, (memop addr:$src2))))],
5049 IIC_SSE_HADDSUB_RM>;
5052 let Predicates = [HasAVX] in {
5053 let ExeDomain = SSEPackedSingle in {
5054 defm VHADDPS : S3D_Int<0x7C, "vhaddps", v4f32, VR128, f128mem,
5055 X86fhadd, 0>, VEX_4V;
5056 defm VHSUBPS : S3D_Int<0x7D, "vhsubps", v4f32, VR128, f128mem,
5057 X86fhsub, 0>, VEX_4V;
5058 defm VHADDPSY : S3D_Int<0x7C, "vhaddps", v8f32, VR256, f256mem,
5059 X86fhadd, 0>, VEX_4V, VEX_L;
5060 defm VHSUBPSY : S3D_Int<0x7D, "vhsubps", v8f32, VR256, f256mem,
5061 X86fhsub, 0>, VEX_4V, VEX_L;
5063 let ExeDomain = SSEPackedDouble in {
5064 defm VHADDPD : S3_Int <0x7C, "vhaddpd", v2f64, VR128, f128mem,
5065 X86fhadd, 0>, VEX_4V;
5066 defm VHSUBPD : S3_Int <0x7D, "vhsubpd", v2f64, VR128, f128mem,
5067 X86fhsub, 0>, VEX_4V;
5068 defm VHADDPDY : S3_Int <0x7C, "vhaddpd", v4f64, VR256, f256mem,
5069 X86fhadd, 0>, VEX_4V, VEX_L;
5070 defm VHSUBPDY : S3_Int <0x7D, "vhsubpd", v4f64, VR256, f256mem,
5071 X86fhsub, 0>, VEX_4V, VEX_L;
5075 let Constraints = "$src1 = $dst" in {
5076 let ExeDomain = SSEPackedSingle in {
5077 defm HADDPS : S3D_Int<0x7C, "haddps", v4f32, VR128, f128mem, X86fhadd>;
5078 defm HSUBPS : S3D_Int<0x7D, "hsubps", v4f32, VR128, f128mem, X86fhsub>;
5080 let ExeDomain = SSEPackedDouble in {
5081 defm HADDPD : S3_Int<0x7C, "haddpd", v2f64, VR128, f128mem, X86fhadd>;
5082 defm HSUBPD : S3_Int<0x7D, "hsubpd", v2f64, VR128, f128mem, X86fhsub>;
5086 //===---------------------------------------------------------------------===//
5087 // SSSE3 - Packed Absolute Instructions
5088 //===---------------------------------------------------------------------===//
5091 /// SS3I_unop_rm_int - Simple SSSE3 unary op whose type can be v*{i8,i16,i32}.
5092 multiclass SS3I_unop_rm_int<bits<8> opc, string OpcodeStr,
5093 Intrinsic IntId128> {
5094 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
5096 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5097 [(set VR128:$dst, (IntId128 VR128:$src))], IIC_SSE_PABS_RR>,
5100 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
5102 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5105 (bitconvert (memopv2i64 addr:$src))))], IIC_SSE_PABS_RM>,
5109 /// SS3I_unop_rm_int_y - Simple SSSE3 unary op whose type can be v*{i8,i16,i32}.
5110 multiclass SS3I_unop_rm_int_y<bits<8> opc, string OpcodeStr,
5111 Intrinsic IntId256> {
5112 def rr256 : SS38I<opc, MRMSrcReg, (outs VR256:$dst),
5114 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5115 [(set VR256:$dst, (IntId256 VR256:$src))]>,
5118 def rm256 : SS38I<opc, MRMSrcMem, (outs VR256:$dst),
5120 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5123 (bitconvert (memopv4i64 addr:$src))))]>, OpSize;
5126 let Predicates = [HasAVX] in {
5127 defm VPABSB : SS3I_unop_rm_int<0x1C, "vpabsb",
5128 int_x86_ssse3_pabs_b_128>, VEX;
5129 defm VPABSW : SS3I_unop_rm_int<0x1D, "vpabsw",
5130 int_x86_ssse3_pabs_w_128>, VEX;
5131 defm VPABSD : SS3I_unop_rm_int<0x1E, "vpabsd",
5132 int_x86_ssse3_pabs_d_128>, VEX;
5135 let Predicates = [HasAVX2] in {
5136 defm VPABSB : SS3I_unop_rm_int_y<0x1C, "vpabsb",
5137 int_x86_avx2_pabs_b>, VEX, VEX_L;
5138 defm VPABSW : SS3I_unop_rm_int_y<0x1D, "vpabsw",
5139 int_x86_avx2_pabs_w>, VEX, VEX_L;
5140 defm VPABSD : SS3I_unop_rm_int_y<0x1E, "vpabsd",
5141 int_x86_avx2_pabs_d>, VEX, VEX_L;
5144 defm PABSB : SS3I_unop_rm_int<0x1C, "pabsb",
5145 int_x86_ssse3_pabs_b_128>;
5146 defm PABSW : SS3I_unop_rm_int<0x1D, "pabsw",
5147 int_x86_ssse3_pabs_w_128>;
5148 defm PABSD : SS3I_unop_rm_int<0x1E, "pabsd",
5149 int_x86_ssse3_pabs_d_128>;
5151 //===---------------------------------------------------------------------===//
5152 // SSSE3 - Packed Binary Operator Instructions
5153 //===---------------------------------------------------------------------===//
5155 def SSE_PHADDSUBD : OpndItins<
5156 IIC_SSE_PHADDSUBD_RR, IIC_SSE_PHADDSUBD_RM
5158 def SSE_PHADDSUBSW : OpndItins<
5159 IIC_SSE_PHADDSUBSW_RR, IIC_SSE_PHADDSUBSW_RM
5161 def SSE_PHADDSUBW : OpndItins<
5162 IIC_SSE_PHADDSUBW_RR, IIC_SSE_PHADDSUBW_RM
5164 def SSE_PSHUFB : OpndItins<
5165 IIC_SSE_PSHUFB_RR, IIC_SSE_PSHUFB_RM
5167 def SSE_PSIGN : OpndItins<
5168 IIC_SSE_PSIGN_RR, IIC_SSE_PSIGN_RM
5170 def SSE_PMULHRSW : OpndItins<
5171 IIC_SSE_PMULHRSW, IIC_SSE_PMULHRSW
5174 /// SS3I_binop_rm - Simple SSSE3 bin op
5175 multiclass SS3I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
5176 ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
5177 X86MemOperand x86memop, OpndItins itins,
5179 let isCommutable = 1 in
5180 def rr : SS38I<opc, MRMSrcReg, (outs RC:$dst),
5181 (ins RC:$src1, RC:$src2),
5183 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5184 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5185 [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2)))], itins.rr>,
5187 def rm : SS38I<opc, MRMSrcMem, (outs RC:$dst),
5188 (ins RC:$src1, x86memop:$src2),
5190 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5191 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5193 (OpVT (OpNode RC:$src1,
5194 (bitconvert (memop_frag addr:$src2)))))], itins.rm>, OpSize;
5197 /// SS3I_binop_rm_int - Simple SSSE3 bin op whose type can be v*{i8,i16,i32}.
5198 multiclass SS3I_binop_rm_int<bits<8> opc, string OpcodeStr,
5199 Intrinsic IntId128, OpndItins itins,
5201 let isCommutable = 1 in
5202 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
5203 (ins VR128:$src1, VR128:$src2),
5205 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5206 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5207 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
5209 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
5210 (ins VR128:$src1, i128mem:$src2),
5212 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5213 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5215 (IntId128 VR128:$src1,
5216 (bitconvert (memopv2i64 addr:$src2))))]>, OpSize;
5219 multiclass SS3I_binop_rm_int_y<bits<8> opc, string OpcodeStr,
5220 Intrinsic IntId256> {
5221 let isCommutable = 1 in
5222 def rr256 : SS38I<opc, MRMSrcReg, (outs VR256:$dst),
5223 (ins VR256:$src1, VR256:$src2),
5224 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5225 [(set VR256:$dst, (IntId256 VR256:$src1, VR256:$src2))]>,
5227 def rm256 : SS38I<opc, MRMSrcMem, (outs VR256:$dst),
5228 (ins VR256:$src1, i256mem:$src2),
5229 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5231 (IntId256 VR256:$src1,
5232 (bitconvert (memopv4i64 addr:$src2))))]>, OpSize;
5235 let ImmT = NoImm, Predicates = [HasAVX] in {
5236 let isCommutable = 0 in {
5237 defm VPHADDW : SS3I_binop_rm<0x01, "vphaddw", X86hadd, v8i16, VR128,
5238 memopv2i64, i128mem,
5239 SSE_PHADDSUBW, 0>, VEX_4V;
5240 defm VPHADDD : SS3I_binop_rm<0x02, "vphaddd", X86hadd, v4i32, VR128,
5241 memopv2i64, i128mem,
5242 SSE_PHADDSUBD, 0>, VEX_4V;
5243 defm VPHSUBW : SS3I_binop_rm<0x05, "vphsubw", X86hsub, v8i16, VR128,
5244 memopv2i64, i128mem,
5245 SSE_PHADDSUBW, 0>, VEX_4V;
5246 defm VPHSUBD : SS3I_binop_rm<0x06, "vphsubd", X86hsub, v4i32, VR128,
5247 memopv2i64, i128mem,
5248 SSE_PHADDSUBD, 0>, VEX_4V;
5249 defm VPSIGNB : SS3I_binop_rm<0x08, "vpsignb", X86psign, v16i8, VR128,
5250 memopv2i64, i128mem,
5251 SSE_PSIGN, 0>, VEX_4V;
5252 defm VPSIGNW : SS3I_binop_rm<0x09, "vpsignw", X86psign, v8i16, VR128,
5253 memopv2i64, i128mem,
5254 SSE_PSIGN, 0>, VEX_4V;
5255 defm VPSIGND : SS3I_binop_rm<0x0A, "vpsignd", X86psign, v4i32, VR128,
5256 memopv2i64, i128mem,
5257 SSE_PSIGN, 0>, VEX_4V;
5258 defm VPSHUFB : SS3I_binop_rm<0x00, "vpshufb", X86pshufb, v16i8, VR128,
5259 memopv2i64, i128mem,
5260 SSE_PSHUFB, 0>, VEX_4V;
5261 defm VPHADDSW : SS3I_binop_rm_int<0x03, "vphaddsw",
5262 int_x86_ssse3_phadd_sw_128,
5263 SSE_PHADDSUBSW, 0>, VEX_4V;
5264 defm VPHSUBSW : SS3I_binop_rm_int<0x07, "vphsubsw",
5265 int_x86_ssse3_phsub_sw_128,
5266 SSE_PHADDSUBSW, 0>, VEX_4V;
5267 defm VPMADDUBSW : SS3I_binop_rm_int<0x04, "vpmaddubsw",
5268 int_x86_ssse3_pmadd_ub_sw_128,
5269 SSE_PMADD, 0>, VEX_4V;
5271 defm VPMULHRSW : SS3I_binop_rm_int<0x0B, "vpmulhrsw",
5272 int_x86_ssse3_pmul_hr_sw_128,
5273 SSE_PMULHRSW, 0>, VEX_4V;
5276 let ImmT = NoImm, Predicates = [HasAVX2] in {
5277 let isCommutable = 0 in {
5278 defm VPHADDWY : SS3I_binop_rm<0x01, "vphaddw", X86hadd, v16i16, VR256,
5279 memopv4i64, i256mem,
5280 SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
5281 defm VPHADDDY : SS3I_binop_rm<0x02, "vphaddd", X86hadd, v8i32, VR256,
5282 memopv4i64, i256mem,
5283 SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
5284 defm VPHSUBWY : SS3I_binop_rm<0x05, "vphsubw", X86hsub, v16i16, VR256,
5285 memopv4i64, i256mem,
5286 SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
5287 defm VPHSUBDY : SS3I_binop_rm<0x06, "vphsubd", X86hsub, v8i32, VR256,
5288 memopv4i64, i256mem,
5289 SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
5290 defm VPSIGNBY : SS3I_binop_rm<0x08, "vpsignb", X86psign, v32i8, VR256,
5291 memopv4i64, i256mem,
5292 SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
5293 defm VPSIGNWY : SS3I_binop_rm<0x09, "vpsignw", X86psign, v16i16, VR256,
5294 memopv4i64, i256mem,
5295 SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
5296 defm VPSIGNDY : SS3I_binop_rm<0x0A, "vpsignd", X86psign, v8i32, VR256,
5297 memopv4i64, i256mem,
5298 SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
5299 defm VPSHUFBY : SS3I_binop_rm<0x00, "vpshufb", X86pshufb, v32i8, VR256,
5300 memopv4i64, i256mem,
5301 SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
5302 defm VPHADDSW : SS3I_binop_rm_int_y<0x03, "vphaddsw",
5303 int_x86_avx2_phadd_sw>, VEX_4V, VEX_L;
5304 defm VPHSUBSW : SS3I_binop_rm_int_y<0x07, "vphsubsw",
5305 int_x86_avx2_phsub_sw>, VEX_4V, VEX_L;
5306 defm VPMADDUBSW : SS3I_binop_rm_int_y<0x04, "vpmaddubsw",
5307 int_x86_avx2_pmadd_ub_sw>, VEX_4V, VEX_L;
5309 defm VPMULHRSW : SS3I_binop_rm_int_y<0x0B, "vpmulhrsw",
5310 int_x86_avx2_pmul_hr_sw>, VEX_4V, VEX_L;
5313 // None of these have i8 immediate fields.
5314 let ImmT = NoImm, Constraints = "$src1 = $dst" in {
5315 let isCommutable = 0 in {
5316 defm PHADDW : SS3I_binop_rm<0x01, "phaddw", X86hadd, v8i16, VR128,
5317 memopv2i64, i128mem, SSE_PHADDSUBW>;
5318 defm PHADDD : SS3I_binop_rm<0x02, "phaddd", X86hadd, v4i32, VR128,
5319 memopv2i64, i128mem, SSE_PHADDSUBD>;
5320 defm PHSUBW : SS3I_binop_rm<0x05, "phsubw", X86hsub, v8i16, VR128,
5321 memopv2i64, i128mem, SSE_PHADDSUBW>;
5322 defm PHSUBD : SS3I_binop_rm<0x06, "phsubd", X86hsub, v4i32, VR128,
5323 memopv2i64, i128mem, SSE_PHADDSUBD>;
5324 defm PSIGNB : SS3I_binop_rm<0x08, "psignb", X86psign, v16i8, VR128,
5325 memopv2i64, i128mem, SSE_PSIGN>;
5326 defm PSIGNW : SS3I_binop_rm<0x09, "psignw", X86psign, v8i16, VR128,
5327 memopv2i64, i128mem, SSE_PSIGN>;
5328 defm PSIGND : SS3I_binop_rm<0x0A, "psignd", X86psign, v4i32, VR128,
5329 memopv2i64, i128mem, SSE_PSIGN>;
5330 defm PSHUFB : SS3I_binop_rm<0x00, "pshufb", X86pshufb, v16i8, VR128,
5331 memopv2i64, i128mem, SSE_PSHUFB>;
5332 defm PHADDSW : SS3I_binop_rm_int<0x03, "phaddsw",
5333 int_x86_ssse3_phadd_sw_128,
5335 defm PHSUBSW : SS3I_binop_rm_int<0x07, "phsubsw",
5336 int_x86_ssse3_phsub_sw_128,
5338 defm PMADDUBSW : SS3I_binop_rm_int<0x04, "pmaddubsw",
5339 int_x86_ssse3_pmadd_ub_sw_128, SSE_PMADD>;
5341 defm PMULHRSW : SS3I_binop_rm_int<0x0B, "pmulhrsw",
5342 int_x86_ssse3_pmul_hr_sw_128,
5346 //===---------------------------------------------------------------------===//
5347 // SSSE3 - Packed Align Instruction Patterns
5348 //===---------------------------------------------------------------------===//
5350 multiclass ssse3_palign<string asm, bit Is2Addr = 1> {
5351 let neverHasSideEffects = 1 in {
5352 def R128rr : SS3AI<0x0F, MRMSrcReg, (outs VR128:$dst),
5353 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
5355 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
5357 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
5358 [], IIC_SSE_PALIGNR>, OpSize;
5360 def R128rm : SS3AI<0x0F, MRMSrcMem, (outs VR128:$dst),
5361 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
5363 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
5365 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
5366 [], IIC_SSE_PALIGNR>, OpSize;
5370 multiclass ssse3_palign_y<string asm, bit Is2Addr = 1> {
5371 let neverHasSideEffects = 1 in {
5372 def R256rr : SS3AI<0x0F, MRMSrcReg, (outs VR256:$dst),
5373 (ins VR256:$src1, VR256:$src2, i8imm:$src3),
5375 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
5378 def R256rm : SS3AI<0x0F, MRMSrcMem, (outs VR256:$dst),
5379 (ins VR256:$src1, i256mem:$src2, i8imm:$src3),
5381 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
5386 let Predicates = [HasAVX] in
5387 defm VPALIGN : ssse3_palign<"vpalignr", 0>, VEX_4V;
5388 let Predicates = [HasAVX2] in
5389 defm VPALIGN : ssse3_palign_y<"vpalignr", 0>, VEX_4V, VEX_L;
5390 let Constraints = "$src1 = $dst", Predicates = [UseSSSE3] in
5391 defm PALIGN : ssse3_palign<"palignr">;
5393 let Predicates = [HasAVX2] in {
5394 def : Pat<(v8i32 (X86PAlign VR256:$src1, VR256:$src2, (i8 imm:$imm))),
5395 (VPALIGNR256rr VR256:$src2, VR256:$src1, imm:$imm)>;
5396 def : Pat<(v8f32 (X86PAlign VR256:$src1, VR256:$src2, (i8 imm:$imm))),
5397 (VPALIGNR256rr VR256:$src2, VR256:$src1, imm:$imm)>;
5398 def : Pat<(v16i16 (X86PAlign VR256:$src1, VR256:$src2, (i8 imm:$imm))),
5399 (VPALIGNR256rr VR256:$src2, VR256:$src1, imm:$imm)>;
5400 def : Pat<(v32i8 (X86PAlign VR256:$src1, VR256:$src2, (i8 imm:$imm))),
5401 (VPALIGNR256rr VR256:$src2, VR256:$src1, imm:$imm)>;
5404 let Predicates = [HasAVX] in {
5405 def : Pat<(v4i32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5406 (VPALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
5407 def : Pat<(v4f32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5408 (VPALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
5409 def : Pat<(v8i16 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5410 (VPALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
5411 def : Pat<(v16i8 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5412 (VPALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
5415 let Predicates = [UseSSSE3] in {
5416 def : Pat<(v4i32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5417 (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
5418 def : Pat<(v4f32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5419 (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
5420 def : Pat<(v8i16 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5421 (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
5422 def : Pat<(v16i8 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5423 (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
5426 //===---------------------------------------------------------------------===//
5427 // SSSE3 - Thread synchronization
5428 //===---------------------------------------------------------------------===//
5430 let usesCustomInserter = 1 in {
5431 def MONITOR : PseudoI<(outs), (ins i32mem:$src1, GR32:$src2, GR32:$src3),
5432 [(int_x86_sse3_monitor addr:$src1, GR32:$src2, GR32:$src3)]>,
5433 Requires<[HasSSE3]>;
5436 let Uses = [EAX, ECX, EDX] in
5437 def MONITORrrr : I<0x01, MRM_C8, (outs), (ins), "monitor", [], IIC_SSE_MONITOR>,
5438 TB, Requires<[HasSSE3]>;
5439 let Uses = [ECX, EAX] in
5440 def MWAITrr : I<0x01, MRM_C9, (outs), (ins), "mwait",
5441 [(int_x86_sse3_mwait ECX, EAX)], IIC_SSE_MWAIT>,
5442 TB, Requires<[HasSSE3]>;
5444 def : InstAlias<"mwait %eax, %ecx", (MWAITrr)>, Requires<[In32BitMode]>;
5445 def : InstAlias<"mwait %rax, %rcx", (MWAITrr)>, Requires<[In64BitMode]>;
5447 def : InstAlias<"monitor %eax, %ecx, %edx", (MONITORrrr)>,
5448 Requires<[In32BitMode]>;
5449 def : InstAlias<"monitor %rax, %rcx, %rdx", (MONITORrrr)>,
5450 Requires<[In64BitMode]>;
5452 //===----------------------------------------------------------------------===//
5453 // SSE4.1 - Packed Move with Sign/Zero Extend
5454 //===----------------------------------------------------------------------===//
5456 multiclass SS41I_binop_rm_int8<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
5457 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
5458 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5459 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
5461 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
5462 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5464 (IntId (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))]>,
5468 multiclass SS41I_binop_rm_int16_y<bits<8> opc, string OpcodeStr,
5470 def Yrr : SS48I<opc, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
5471 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5472 [(set VR256:$dst, (IntId VR128:$src))]>, OpSize;
5474 def Yrm : SS48I<opc, MRMSrcMem, (outs VR256:$dst), (ins i128mem:$src),
5475 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5476 [(set VR256:$dst, (IntId (load addr:$src)))]>, OpSize;
5479 let Predicates = [HasAVX] in {
5480 defm VPMOVSXBW : SS41I_binop_rm_int8<0x20, "vpmovsxbw", int_x86_sse41_pmovsxbw>,
5482 defm VPMOVSXWD : SS41I_binop_rm_int8<0x23, "vpmovsxwd", int_x86_sse41_pmovsxwd>,
5484 defm VPMOVSXDQ : SS41I_binop_rm_int8<0x25, "vpmovsxdq", int_x86_sse41_pmovsxdq>,
5486 defm VPMOVZXBW : SS41I_binop_rm_int8<0x30, "vpmovzxbw", int_x86_sse41_pmovzxbw>,
5488 defm VPMOVZXWD : SS41I_binop_rm_int8<0x33, "vpmovzxwd", int_x86_sse41_pmovzxwd>,
5490 defm VPMOVZXDQ : SS41I_binop_rm_int8<0x35, "vpmovzxdq", int_x86_sse41_pmovzxdq>,
5494 let Predicates = [HasAVX2] in {
5495 defm VPMOVSXBW : SS41I_binop_rm_int16_y<0x20, "vpmovsxbw",
5496 int_x86_avx2_pmovsxbw>, VEX, VEX_L;
5497 defm VPMOVSXWD : SS41I_binop_rm_int16_y<0x23, "vpmovsxwd",
5498 int_x86_avx2_pmovsxwd>, VEX, VEX_L;
5499 defm VPMOVSXDQ : SS41I_binop_rm_int16_y<0x25, "vpmovsxdq",
5500 int_x86_avx2_pmovsxdq>, VEX, VEX_L;
5501 defm VPMOVZXBW : SS41I_binop_rm_int16_y<0x30, "vpmovzxbw",
5502 int_x86_avx2_pmovzxbw>, VEX, VEX_L;
5503 defm VPMOVZXWD : SS41I_binop_rm_int16_y<0x33, "vpmovzxwd",
5504 int_x86_avx2_pmovzxwd>, VEX, VEX_L;
5505 defm VPMOVZXDQ : SS41I_binop_rm_int16_y<0x35, "vpmovzxdq",
5506 int_x86_avx2_pmovzxdq>, VEX, VEX_L;
5509 defm PMOVSXBW : SS41I_binop_rm_int8<0x20, "pmovsxbw", int_x86_sse41_pmovsxbw>;
5510 defm PMOVSXWD : SS41I_binop_rm_int8<0x23, "pmovsxwd", int_x86_sse41_pmovsxwd>;
5511 defm PMOVSXDQ : SS41I_binop_rm_int8<0x25, "pmovsxdq", int_x86_sse41_pmovsxdq>;
5512 defm PMOVZXBW : SS41I_binop_rm_int8<0x30, "pmovzxbw", int_x86_sse41_pmovzxbw>;
5513 defm PMOVZXWD : SS41I_binop_rm_int8<0x33, "pmovzxwd", int_x86_sse41_pmovzxwd>;
5514 defm PMOVZXDQ : SS41I_binop_rm_int8<0x35, "pmovzxdq", int_x86_sse41_pmovzxdq>;
5516 let Predicates = [HasAVX] in {
5517 // Common patterns involving scalar load.
5518 def : Pat<(int_x86_sse41_pmovsxbw (vzmovl_v2i64 addr:$src)),
5519 (VPMOVSXBWrm addr:$src)>;
5520 def : Pat<(int_x86_sse41_pmovsxbw (vzload_v2i64 addr:$src)),
5521 (VPMOVSXBWrm addr:$src)>;
5522 def : Pat<(int_x86_sse41_pmovsxbw (bc_v16i8 (loadv2i64 addr:$src))),
5523 (VPMOVSXBWrm addr:$src)>;
5525 def : Pat<(int_x86_sse41_pmovsxwd (vzmovl_v2i64 addr:$src)),
5526 (VPMOVSXWDrm addr:$src)>;
5527 def : Pat<(int_x86_sse41_pmovsxwd (vzload_v2i64 addr:$src)),
5528 (VPMOVSXWDrm addr:$src)>;
5529 def : Pat<(int_x86_sse41_pmovsxwd (bc_v8i16 (loadv2i64 addr:$src))),
5530 (VPMOVSXWDrm addr:$src)>;
5532 def : Pat<(int_x86_sse41_pmovsxdq (vzmovl_v2i64 addr:$src)),
5533 (VPMOVSXDQrm addr:$src)>;
5534 def : Pat<(int_x86_sse41_pmovsxdq (vzload_v2i64 addr:$src)),
5535 (VPMOVSXDQrm addr:$src)>;
5536 def : Pat<(int_x86_sse41_pmovsxdq (bc_v4i32 (loadv2i64 addr:$src))),
5537 (VPMOVSXDQrm addr:$src)>;
5539 def : Pat<(int_x86_sse41_pmovzxbw (vzmovl_v2i64 addr:$src)),
5540 (VPMOVZXBWrm addr:$src)>;
5541 def : Pat<(int_x86_sse41_pmovzxbw (vzload_v2i64 addr:$src)),
5542 (VPMOVZXBWrm addr:$src)>;
5543 def : Pat<(int_x86_sse41_pmovzxbw (bc_v16i8 (loadv2i64 addr:$src))),
5544 (VPMOVZXBWrm addr:$src)>;
5546 def : Pat<(int_x86_sse41_pmovzxwd (vzmovl_v2i64 addr:$src)),
5547 (VPMOVZXWDrm addr:$src)>;
5548 def : Pat<(int_x86_sse41_pmovzxwd (vzload_v2i64 addr:$src)),
5549 (VPMOVZXWDrm addr:$src)>;
5550 def : Pat<(int_x86_sse41_pmovzxwd (bc_v8i16 (loadv2i64 addr:$src))),
5551 (VPMOVZXWDrm addr:$src)>;
5553 def : Pat<(int_x86_sse41_pmovzxdq (vzmovl_v2i64 addr:$src)),
5554 (VPMOVZXDQrm addr:$src)>;
5555 def : Pat<(int_x86_sse41_pmovzxdq (vzload_v2i64 addr:$src)),
5556 (VPMOVZXDQrm addr:$src)>;
5557 def : Pat<(int_x86_sse41_pmovzxdq (bc_v4i32 (loadv2i64 addr:$src))),
5558 (VPMOVZXDQrm addr:$src)>;
5561 let Predicates = [UseSSE41] in {
5562 // Common patterns involving scalar load.
5563 def : Pat<(int_x86_sse41_pmovsxbw (vzmovl_v2i64 addr:$src)),
5564 (PMOVSXBWrm addr:$src)>;
5565 def : Pat<(int_x86_sse41_pmovsxbw (vzload_v2i64 addr:$src)),
5566 (PMOVSXBWrm addr:$src)>;
5567 def : Pat<(int_x86_sse41_pmovsxbw (bc_v16i8 (loadv2i64 addr:$src))),
5568 (PMOVSXBWrm addr:$src)>;
5570 def : Pat<(int_x86_sse41_pmovsxwd (vzmovl_v2i64 addr:$src)),
5571 (PMOVSXWDrm addr:$src)>;
5572 def : Pat<(int_x86_sse41_pmovsxwd (vzload_v2i64 addr:$src)),
5573 (PMOVSXWDrm addr:$src)>;
5574 def : Pat<(int_x86_sse41_pmovsxwd (bc_v8i16 (loadv2i64 addr:$src))),
5575 (PMOVSXWDrm addr:$src)>;
5577 def : Pat<(int_x86_sse41_pmovsxdq (vzmovl_v2i64 addr:$src)),
5578 (PMOVSXDQrm addr:$src)>;
5579 def : Pat<(int_x86_sse41_pmovsxdq (vzload_v2i64 addr:$src)),
5580 (PMOVSXDQrm addr:$src)>;
5581 def : Pat<(int_x86_sse41_pmovsxdq (bc_v4i32 (loadv2i64 addr:$src))),
5582 (PMOVSXDQrm addr:$src)>;
5584 def : Pat<(int_x86_sse41_pmovzxbw (vzmovl_v2i64 addr:$src)),
5585 (PMOVZXBWrm addr:$src)>;
5586 def : Pat<(int_x86_sse41_pmovzxbw (vzload_v2i64 addr:$src)),
5587 (PMOVZXBWrm addr:$src)>;
5588 def : Pat<(int_x86_sse41_pmovzxbw (bc_v16i8 (loadv2i64 addr:$src))),
5589 (PMOVZXBWrm addr:$src)>;
5591 def : Pat<(int_x86_sse41_pmovzxwd (vzmovl_v2i64 addr:$src)),
5592 (PMOVZXWDrm addr:$src)>;
5593 def : Pat<(int_x86_sse41_pmovzxwd (vzload_v2i64 addr:$src)),
5594 (PMOVZXWDrm addr:$src)>;
5595 def : Pat<(int_x86_sse41_pmovzxwd (bc_v8i16 (loadv2i64 addr:$src))),
5596 (PMOVZXWDrm addr:$src)>;
5598 def : Pat<(int_x86_sse41_pmovzxdq (vzmovl_v2i64 addr:$src)),
5599 (PMOVZXDQrm addr:$src)>;
5600 def : Pat<(int_x86_sse41_pmovzxdq (vzload_v2i64 addr:$src)),
5601 (PMOVZXDQrm addr:$src)>;
5602 def : Pat<(int_x86_sse41_pmovzxdq (bc_v4i32 (loadv2i64 addr:$src))),
5603 (PMOVZXDQrm addr:$src)>;
5606 let Predicates = [HasAVX2] in {
5607 let AddedComplexity = 15 in {
5608 def : Pat<(v4i64 (X86vzmovly (v4i32 VR128:$src))),
5609 (VPMOVZXDQYrr VR128:$src)>;
5610 def : Pat<(v8i32 (X86vzmovly (v8i16 VR128:$src))),
5611 (VPMOVZXWDYrr VR128:$src)>;
5614 def : Pat<(v4i64 (X86vsmovl (v4i32 VR128:$src))), (VPMOVSXDQYrr VR128:$src)>;
5615 def : Pat<(v8i32 (X86vsmovl (v8i16 VR128:$src))), (VPMOVSXWDYrr VR128:$src)>;
5618 let Predicates = [HasAVX] in {
5619 def : Pat<(v2i64 (X86vsmovl (v4i32 VR128:$src))), (VPMOVSXDQrr VR128:$src)>;
5620 def : Pat<(v4i32 (X86vsmovl (v8i16 VR128:$src))), (VPMOVSXWDrr VR128:$src)>;
5623 let Predicates = [UseSSE41] in {
5624 def : Pat<(v2i64 (X86vsmovl (v4i32 VR128:$src))), (PMOVSXDQrr VR128:$src)>;
5625 def : Pat<(v4i32 (X86vsmovl (v8i16 VR128:$src))), (PMOVSXWDrr VR128:$src)>;
5629 multiclass SS41I_binop_rm_int4<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
5630 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
5631 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5632 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
5634 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
5635 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5637 (IntId (bitconvert (v4i32 (scalar_to_vector (loadi32 addr:$src))))))]>,
5641 multiclass SS41I_binop_rm_int8_y<bits<8> opc, string OpcodeStr,
5643 def Yrr : SS48I<opc, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
5644 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5645 [(set VR256:$dst, (IntId VR128:$src))]>, OpSize;
5647   def Yrm : SS48I<opc, MRMSrcMem, (outs VR256:$dst), (ins i64mem:$src),
5648 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5650 (IntId (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))]>,
5654 let Predicates = [HasAVX] in {
5655 defm VPMOVSXBD : SS41I_binop_rm_int4<0x21, "vpmovsxbd", int_x86_sse41_pmovsxbd>,
5657 defm VPMOVSXWQ : SS41I_binop_rm_int4<0x24, "vpmovsxwq", int_x86_sse41_pmovsxwq>,
5659 defm VPMOVZXBD : SS41I_binop_rm_int4<0x31, "vpmovzxbd", int_x86_sse41_pmovzxbd>,
5661 defm VPMOVZXWQ : SS41I_binop_rm_int4<0x34, "vpmovzxwq", int_x86_sse41_pmovzxwq>,
5665 let Predicates = [HasAVX2] in {
5666 defm VPMOVSXBD : SS41I_binop_rm_int8_y<0x21, "vpmovsxbd",
5667 int_x86_avx2_pmovsxbd>, VEX, VEX_L;
5668 defm VPMOVSXWQ : SS41I_binop_rm_int8_y<0x24, "vpmovsxwq",
5669 int_x86_avx2_pmovsxwq>, VEX, VEX_L;
5670 defm VPMOVZXBD : SS41I_binop_rm_int8_y<0x31, "vpmovzxbd",
5671 int_x86_avx2_pmovzxbd>, VEX, VEX_L;
5672 defm VPMOVZXWQ : SS41I_binop_rm_int8_y<0x34, "vpmovzxwq",
5673 int_x86_avx2_pmovzxwq>, VEX, VEX_L;
5676 defm PMOVSXBD : SS41I_binop_rm_int4<0x21, "pmovsxbd", int_x86_sse41_pmovsxbd>;
5677 defm PMOVSXWQ : SS41I_binop_rm_int4<0x24, "pmovsxwq", int_x86_sse41_pmovsxwq>;
5678 defm PMOVZXBD : SS41I_binop_rm_int4<0x31, "pmovzxbd", int_x86_sse41_pmovzxbd>;
5679 defm PMOVZXWQ : SS41I_binop_rm_int4<0x34, "pmovzxwq", int_x86_sse41_pmovzxwq>;
5681 let Predicates = [HasAVX] in {
5682 // Common patterns involving scalar load
5683 def : Pat<(int_x86_sse41_pmovsxbd (vzmovl_v4i32 addr:$src)),
5684 (VPMOVSXBDrm addr:$src)>;
5685 def : Pat<(int_x86_sse41_pmovsxwq (vzmovl_v4i32 addr:$src)),
5686 (VPMOVSXWQrm addr:$src)>;
5688 def : Pat<(int_x86_sse41_pmovzxbd (vzmovl_v4i32 addr:$src)),
5689 (VPMOVZXBDrm addr:$src)>;
5690 def : Pat<(int_x86_sse41_pmovzxwq (vzmovl_v4i32 addr:$src)),
5691 (VPMOVZXWQrm addr:$src)>;
5694 let Predicates = [UseSSE41] in {
5695 // Common patterns involving scalar load
5696 def : Pat<(int_x86_sse41_pmovsxbd (vzmovl_v4i32 addr:$src)),
5697 (PMOVSXBDrm addr:$src)>;
5698 def : Pat<(int_x86_sse41_pmovsxwq (vzmovl_v4i32 addr:$src)),
5699 (PMOVSXWQrm addr:$src)>;
5701 def : Pat<(int_x86_sse41_pmovzxbd (vzmovl_v4i32 addr:$src)),
5702 (PMOVZXBDrm addr:$src)>;
5703 def : Pat<(int_x86_sse41_pmovzxwq (vzmovl_v4i32 addr:$src)),
5704 (PMOVZXWQrm addr:$src)>;
5707 multiclass SS41I_binop_rm_int2<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
5708 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
5709 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5710 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
5712   // Expecting an i16 load any-extended to an i32 value.
5713 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i16mem:$src),
5714 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5715 [(set VR128:$dst, (IntId (bitconvert
5716 (v4i32 (scalar_to_vector (loadi16_anyext addr:$src))))))]>,
5720 multiclass SS41I_binop_rm_int4_y<bits<8> opc, string OpcodeStr,
5722 def Yrr : SS48I<opc, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
5723 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5724 [(set VR256:$dst, (IntId VR128:$src))]>, OpSize;
5726   // Expecting an i32 load.
5727   def Yrm : SS48I<opc, MRMSrcMem, (outs VR256:$dst), (ins i32mem:$src),
5728 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5729 [(set VR256:$dst, (IntId (bitconvert
5730 (v4i32 (scalar_to_vector (loadi32 addr:$src))))))]>,
5734 let Predicates = [HasAVX] in {
5735 defm VPMOVSXBQ : SS41I_binop_rm_int2<0x22, "vpmovsxbq", int_x86_sse41_pmovsxbq>,
5737 defm VPMOVZXBQ : SS41I_binop_rm_int2<0x32, "vpmovzxbq", int_x86_sse41_pmovzxbq>,
5740 let Predicates = [HasAVX2] in {
5741 defm VPMOVSXBQ : SS41I_binop_rm_int4_y<0x22, "vpmovsxbq",
5742 int_x86_avx2_pmovsxbq>, VEX, VEX_L;
5743 defm VPMOVZXBQ : SS41I_binop_rm_int4_y<0x32, "vpmovzxbq",
5744 int_x86_avx2_pmovzxbq>, VEX, VEX_L;
5746 defm PMOVSXBQ : SS41I_binop_rm_int2<0x22, "pmovsxbq", int_x86_sse41_pmovsxbq>;
5747 defm PMOVZXBQ : SS41I_binop_rm_int2<0x32, "pmovzxbq", int_x86_sse41_pmovzxbq>;
5749 let Predicates = [HasAVX2] in {
5750 def : Pat<(v8i32 (X86vsmovl (v8i16 (bitconvert (v2i64 (load addr:$src)))))),
5751 (VPMOVSXWDYrm addr:$src)>;
5752 def : Pat<(v4i64 (X86vsmovl (v4i32 (bitconvert (v2i64 (load addr:$src)))))),
5753 (VPMOVSXDQYrm addr:$src)>;
5755 def : Pat<(v8i32 (X86vsext (v16i8 (bitconvert (v2i64
5756 (scalar_to_vector (loadi64 addr:$src))))))),
5757 (VPMOVSXBDYrm addr:$src)>;
5758 def : Pat<(v8i32 (X86vsext (v16i8 (bitconvert (v2f64
5759 (scalar_to_vector (loadf64 addr:$src))))))),
5760 (VPMOVSXBDYrm addr:$src)>;
5762 def : Pat<(v4i64 (X86vsext (v8i16 (bitconvert (v2i64
5763 (scalar_to_vector (loadi64 addr:$src))))))),
5764 (VPMOVSXWQYrm addr:$src)>;
5765 def : Pat<(v4i64 (X86vsext (v8i16 (bitconvert (v2f64
5766 (scalar_to_vector (loadf64 addr:$src))))))),
5767 (VPMOVSXWQYrm addr:$src)>;
5769 def : Pat<(v4i64 (X86vsext (v16i8 (bitconvert (v4i32
5770 (scalar_to_vector (loadi32 addr:$src))))))),
5771 (VPMOVSXBQYrm addr:$src)>;
5774 let Predicates = [HasAVX] in {
5775 // Common patterns involving scalar load
5776 def : Pat<(int_x86_sse41_pmovsxbq
5777 (bitconvert (v4i32 (X86vzmovl
5778 (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
5779 (VPMOVSXBQrm addr:$src)>;
5781 def : Pat<(int_x86_sse41_pmovzxbq
5782 (bitconvert (v4i32 (X86vzmovl
5783 (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
5784 (VPMOVZXBQrm addr:$src)>;
5787 let Predicates = [UseSSE41] in {
5788 // Common patterns involving scalar load
5789 def : Pat<(int_x86_sse41_pmovsxbq
5790 (bitconvert (v4i32 (X86vzmovl
5791 (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
5792 (PMOVSXBQrm addr:$src)>;
5794 def : Pat<(int_x86_sse41_pmovzxbq
5795 (bitconvert (v4i32 (X86vzmovl
5796 (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
5797 (PMOVZXBQrm addr:$src)>;
5799 def : Pat<(v4i32 (X86vsext (v8i16 (bitconvert (v2i64
5800 (scalar_to_vector (loadi64 addr:$src))))))),
5801 (PMOVSXWDrm addr:$src)>;
5802 def : Pat<(v4i32 (X86vsext (v8i16 (bitconvert (v2f64
5803 (scalar_to_vector (loadf64 addr:$src))))))),
5804 (PMOVSXWDrm addr:$src)>;
5805 def : Pat<(v4i32 (X86vsext (v16i8 (bitconvert (v4i32
5806 (scalar_to_vector (loadi32 addr:$src))))))),
5807 (PMOVSXBDrm addr:$src)>;
5808 def : Pat<(v2i64 (X86vsext (v8i16 (bitconvert (v4i32
5809 (scalar_to_vector (loadi32 addr:$src))))))),
5810 (PMOVSXWQrm addr:$src)>;
5811 def : Pat<(v2i64 (X86vsext (v16i8 (bitconvert (v4i32
5812 (scalar_to_vector (extloadi32i16 addr:$src))))))),
5813 (PMOVSXBQrm addr:$src)>;
5814 def : Pat<(v2i64 (X86vsext (v4i32 (bitconvert (v2i64
5815 (scalar_to_vector (loadi64 addr:$src))))))),
5816 (PMOVSXDQrm addr:$src)>;
5817 def : Pat<(v2i64 (X86vsext (v4i32 (bitconvert (v2f64
5818 (scalar_to_vector (loadf64 addr:$src))))))),
5819 (PMOVSXDQrm addr:$src)>;
5820 def : Pat<(v8i16 (X86vsext (v16i8 (bitconvert (v2i64
5821 (scalar_to_vector (loadi64 addr:$src))))))),
5822 (PMOVSXBWrm addr:$src)>;
5823 def : Pat<(v8i16 (X86vsext (v16i8 (bitconvert (v2f64
5824 (scalar_to_vector (loadf64 addr:$src))))))),
5825 (PMOVSXBWrm addr:$src)>;
5828 let Predicates = [HasAVX2] in {
5829 def : Pat<(v16i16 (X86vzext (v16i8 VR128:$src))), (VPMOVZXBWYrr VR128:$src)>;
5830 def : Pat<(v8i32 (X86vzext (v16i8 VR128:$src))), (VPMOVZXBDYrr VR128:$src)>;
5831 def : Pat<(v4i64 (X86vzext (v16i8 VR128:$src))), (VPMOVZXBQYrr VR128:$src)>;
5833 def : Pat<(v8i32 (X86vzext (v8i16 VR128:$src))), (VPMOVZXWDYrr VR128:$src)>;
5834 def : Pat<(v4i64 (X86vzext (v8i16 VR128:$src))), (VPMOVZXWQYrr VR128:$src)>;
5836 def : Pat<(v4i64 (X86vzext (v4i32 VR128:$src))), (VPMOVZXDQYrr VR128:$src)>;
5838 def : Pat<(v16i16 (X86vzext (v32i8 VR256:$src))),
5839 (VPMOVZXBWYrr (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
5840 def : Pat<(v8i32 (X86vzext (v32i8 VR256:$src))),
5841 (VPMOVZXBDYrr (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
5842 def : Pat<(v4i64 (X86vzext (v32i8 VR256:$src))),
5843 (VPMOVZXBQYrr (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
5845 def : Pat<(v8i32 (X86vzext (v16i16 VR256:$src))),
5846 (VPMOVZXWDYrr (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
5847 def : Pat<(v4i64 (X86vzext (v16i16 VR256:$src))),
5848 (VPMOVZXWQYrr (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
5850 def : Pat<(v4i64 (X86vzext (v8i32 VR256:$src))),
5851 (VPMOVZXDQYrr (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
5854 let Predicates = [HasAVX] in {
5855 def : Pat<(v8i16 (X86vzext (v16i8 VR128:$src))), (VPMOVZXBWrr VR128:$src)>;
5856 def : Pat<(v4i32 (X86vzext (v16i8 VR128:$src))), (VPMOVZXBDrr VR128:$src)>;
5857 def : Pat<(v2i64 (X86vzext (v16i8 VR128:$src))), (VPMOVZXBQrr VR128:$src)>;
5859 def : Pat<(v4i32 (X86vzext (v8i16 VR128:$src))), (VPMOVZXWDrr VR128:$src)>;
5860 def : Pat<(v2i64 (X86vzext (v8i16 VR128:$src))), (VPMOVZXWQrr VR128:$src)>;
5862 def : Pat<(v2i64 (X86vzext (v4i32 VR128:$src))), (VPMOVZXDQrr VR128:$src)>;
5864 def : Pat<(v8i16 (X86vzext (v16i8 (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))),
5865 (VPMOVZXBWrm addr:$src)>;
5866 def : Pat<(v8i16 (X86vzext (v16i8 (bitconvert (v2f64 (scalar_to_vector (loadf64 addr:$src))))))),
5867 (VPMOVZXBWrm addr:$src)>;
5868 def : Pat<(v4i32 (X86vzext (v16i8 (bitconvert (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
5869 (VPMOVZXBDrm addr:$src)>;
5870 def : Pat<(v2i64 (X86vzext (v16i8 (bitconvert (v4i32 (scalar_to_vector (loadi16_anyext addr:$src))))))),
5871 (VPMOVZXBQrm addr:$src)>;
5873 def : Pat<(v4i32 (X86vzext (v8i16 (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))),
5874 (VPMOVZXWDrm addr:$src)>;
5875 def : Pat<(v4i32 (X86vzext (v8i16 (bitconvert (v2f64 (scalar_to_vector (loadf64 addr:$src))))))),
5876 (VPMOVZXWDrm addr:$src)>;
5877 def : Pat<(v2i64 (X86vzext (v8i16 (bitconvert (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
5878 (VPMOVZXWQrm addr:$src)>;
5880 def : Pat<(v2i64 (X86vzext (v4i32 (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))),
5881 (VPMOVZXDQrm addr:$src)>;
5882 def : Pat<(v2i64 (X86vzext (v4i32 (bitconvert (v2f64 (scalar_to_vector (loadf64 addr:$src))))))),
5883 (VPMOVZXDQrm addr:$src)>;
5884 def : Pat<(v2i64 (X86vzext (v4i32 (bitconvert (v2i64 (X86vzload addr:$src)))))),
5885 (VPMOVZXDQrm addr:$src)>;
5887 def : Pat<(v4i32 (X86vsext (v8i16 (bitconvert (v2i64
5888 (scalar_to_vector (loadi64 addr:$src))))))),
5889 (VPMOVSXWDrm addr:$src)>;
5890 def : Pat<(v2i64 (X86vsext (v4i32 (bitconvert (v2i64
5891 (scalar_to_vector (loadi64 addr:$src))))))),
5892 (VPMOVSXDQrm addr:$src)>;
5893 def : Pat<(v4i32 (X86vsext (v8i16 (bitconvert (v2f64
5894 (scalar_to_vector (loadf64 addr:$src))))))),
5895 (VPMOVSXWDrm addr:$src)>;
5896 def : Pat<(v2i64 (X86vsext (v4i32 (bitconvert (v2f64
5897 (scalar_to_vector (loadf64 addr:$src))))))),
5898 (VPMOVSXDQrm addr:$src)>;
5899 def : Pat<(v8i16 (X86vsext (v16i8 (bitconvert (v2i64
5900 (scalar_to_vector (loadi64 addr:$src))))))),
5901 (VPMOVSXBWrm addr:$src)>;
5902 def : Pat<(v8i16 (X86vsext (v16i8 (bitconvert (v2f64
5903 (scalar_to_vector (loadf64 addr:$src))))))),
5904 (VPMOVSXBWrm addr:$src)>;
5906 def : Pat<(v4i32 (X86vsext (v16i8 (bitconvert (v4i32
5907 (scalar_to_vector (loadi32 addr:$src))))))),
5908 (VPMOVSXBDrm addr:$src)>;
5909 def : Pat<(v2i64 (X86vsext (v8i16 (bitconvert (v4i32
5910 (scalar_to_vector (loadi32 addr:$src))))))),
5911 (VPMOVSXWQrm addr:$src)>;
5912 def : Pat<(v2i64 (X86vsext (v16i8 (bitconvert (v4i32
5913 (scalar_to_vector (extloadi32i16 addr:$src))))))),
5914 (VPMOVSXBQrm addr:$src)>;
5917 let Predicates = [UseSSE41] in {
5918 def : Pat<(v8i16 (X86vzext (v16i8 VR128:$src))), (PMOVZXBWrr VR128:$src)>;
5919 def : Pat<(v4i32 (X86vzext (v16i8 VR128:$src))), (PMOVZXBDrr VR128:$src)>;
5920 def : Pat<(v2i64 (X86vzext (v16i8 VR128:$src))), (PMOVZXBQrr VR128:$src)>;
5922 def : Pat<(v4i32 (X86vzext (v8i16 VR128:$src))), (PMOVZXWDrr VR128:$src)>;
5923 def : Pat<(v2i64 (X86vzext (v8i16 VR128:$src))), (PMOVZXWQrr VR128:$src)>;
5925 def : Pat<(v2i64 (X86vzext (v4i32 VR128:$src))), (PMOVZXDQrr VR128:$src)>;
5927 def : Pat<(v8i16 (X86vzext (v16i8 (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))),
5928 (PMOVZXBWrm addr:$src)>;
5929 def : Pat<(v8i16 (X86vzext (v16i8 (bitconvert (v2f64 (scalar_to_vector (loadf64 addr:$src))))))),
5930 (PMOVZXBWrm addr:$src)>;
5931 def : Pat<(v4i32 (X86vzext (v16i8 (bitconvert (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
5932 (PMOVZXBDrm addr:$src)>;
5933 def : Pat<(v2i64 (X86vzext (v16i8 (bitconvert (v4i32 (scalar_to_vector (loadi16_anyext addr:$src))))))),
5934 (PMOVZXBQrm addr:$src)>;
5936 def : Pat<(v4i32 (X86vzext (v8i16 (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))),
5937 (PMOVZXWDrm addr:$src)>;
5938 def : Pat<(v4i32 (X86vzext (v8i16 (bitconvert (v2f64 (scalar_to_vector (loadf64 addr:$src))))))),
5939 (PMOVZXWDrm addr:$src)>;
5940 def : Pat<(v2i64 (X86vzext (v8i16 (bitconvert (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
5941 (PMOVZXWQrm addr:$src)>;
5943 def : Pat<(v2i64 (X86vzext (v4i32 (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))),
5944 (PMOVZXDQrm addr:$src)>;
5945 def : Pat<(v2i64 (X86vzext (v4i32 (bitconvert (v2f64 (scalar_to_vector (loadf64 addr:$src))))))),
5946 (PMOVZXDQrm addr:$src)>;
5947 def : Pat<(v2i64 (X86vzext (v4i32 (bitconvert (v2i64 (X86vzload addr:$src)))))),
5948 (PMOVZXDQrm addr:$src)>;
5951 //===----------------------------------------------------------------------===//
5952 // SSE4.1 - Extract Instructions
5953 //===----------------------------------------------------------------------===//
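// For orientation (standard SSE4.1 behavior): these copy one element of a
// vector to a GPR or to memory. pextrb/pextrw zero-extend the selected
// element into a 32-bit register (e.g. "pextrb $3, %xmm0, %eax" places byte 3
// of %xmm0 in %eax), pextrd/pextrq move it unchanged, and extractps extracts
// a 32-bit floating-point element.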
5955 /// SS41I_extract8 - SSE 4.1 extract 8 bits to a 32-bit reg or 8-bit mem
5956 multiclass SS41I_extract8<bits<8> opc, string OpcodeStr> {
5957 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
5958 (ins VR128:$src1, i32i8imm:$src2),
5959 !strconcat(OpcodeStr,
5960 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5961 [(set GR32:$dst, (X86pextrb (v16i8 VR128:$src1), imm:$src2))]>,
5963 let neverHasSideEffects = 1, mayStore = 1 in
5964 def mr : SS4AIi8<opc, MRMDestMem, (outs),
5965 (ins i8mem:$dst, VR128:$src1, i32i8imm:$src2),
5966 !strconcat(OpcodeStr,
5967 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5970 // There's an AssertZext in the way of writing the store pattern
5971 // (store (i8 (trunc (X86pextrb (v16i8 VR128:$src1), imm:$src2))), addr:$dst)
5974 let Predicates = [HasAVX] in {
5975 defm VPEXTRB : SS41I_extract8<0x14, "vpextrb">, VEX;
5976 def VPEXTRBrr64 : SS4AIi8<0x14, MRMDestReg, (outs GR64:$dst),
5977 (ins VR128:$src1, i32i8imm:$src2),
5978 "vpextrb\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>, OpSize, VEX;
5981 defm PEXTRB : SS41I_extract8<0x14, "pextrb">;
5984 /// SS41I_extract16 - SSE 4.1 extract 16 bits to memory destination
5985 multiclass SS41I_extract16<bits<8> opc, string OpcodeStr> {
5986 let neverHasSideEffects = 1, mayStore = 1 in
5987 def mr : SS4AIi8<opc, MRMDestMem, (outs),
5988 (ins i16mem:$dst, VR128:$src1, i32i8imm:$src2),
5989 !strconcat(OpcodeStr,
5990 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5993 // There's an AssertZext in the way of writing the store pattern
5994 // (store (i16 (trunc (X86pextrw (v8i16 VR128:$src1), imm:$src2))), addr:$dst)
5997 let Predicates = [HasAVX] in
5998 defm VPEXTRW : SS41I_extract16<0x15, "vpextrw">, VEX;
6000 defm PEXTRW : SS41I_extract16<0x15, "pextrw">;
6003 /// SS41I_extract32 - SSE 4.1 extract 32 bits to int reg or memory destination
6004 multiclass SS41I_extract32<bits<8> opc, string OpcodeStr> {
6005 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
6006 (ins VR128:$src1, i32i8imm:$src2),
6007 !strconcat(OpcodeStr,
6008 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6010 (extractelt (v4i32 VR128:$src1), imm:$src2))]>, OpSize;
6011 def mr : SS4AIi8<opc, MRMDestMem, (outs),
6012 (ins i32mem:$dst, VR128:$src1, i32i8imm:$src2),
6013 !strconcat(OpcodeStr,
6014 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6015 [(store (extractelt (v4i32 VR128:$src1), imm:$src2),
6016 addr:$dst)]>, OpSize;
6019 let Predicates = [HasAVX] in
6020 defm VPEXTRD : SS41I_extract32<0x16, "vpextrd">, VEX;
6022 defm PEXTRD : SS41I_extract32<0x16, "pextrd">;
6024 /// SS41I_extract64 - SSE 4.1 extract 64 bits to int reg or memory destination
6025 multiclass SS41I_extract64<bits<8> opc, string OpcodeStr> {
6026 def rr : SS4AIi8<opc, MRMDestReg, (outs GR64:$dst),
6027 (ins VR128:$src1, i32i8imm:$src2),
6028 !strconcat(OpcodeStr,
6029 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6031 (extractelt (v2i64 VR128:$src1), imm:$src2))]>, OpSize, REX_W;
6032 def mr : SS4AIi8<opc, MRMDestMem, (outs),
6033 (ins i64mem:$dst, VR128:$src1, i32i8imm:$src2),
6034 !strconcat(OpcodeStr,
6035 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6036 [(store (extractelt (v2i64 VR128:$src1), imm:$src2),
6037 addr:$dst)]>, OpSize, REX_W;
6040 let Predicates = [HasAVX] in
6041 defm VPEXTRQ : SS41I_extract64<0x16, "vpextrq">, VEX, VEX_W;
6043 defm PEXTRQ : SS41I_extract64<0x16, "pextrq">;
6045 /// SS41I_extractf32 - SSE 4.1 extract a 32-bit fp value to int reg or memory
6045 /// destination
6047 multiclass SS41I_extractf32<bits<8> opc, string OpcodeStr> {
6048 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
6049 (ins VR128:$src1, i32i8imm:$src2),
6050 !strconcat(OpcodeStr,
6051 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6053 (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2))]>,
6055 def mr : SS4AIi8<opc, MRMDestMem, (outs),
6056 (ins f32mem:$dst, VR128:$src1, i32i8imm:$src2),
6057 !strconcat(OpcodeStr,
6058 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6059 [(store (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2),
6060 addr:$dst)]>, OpSize;
6063 let ExeDomain = SSEPackedSingle in {
6064 let Predicates = [HasAVX] in {
6065 defm VEXTRACTPS : SS41I_extractf32<0x17, "vextractps">, VEX;
6066 def VEXTRACTPSrr64 : SS4AIi8<0x17, MRMDestReg, (outs GR64:$dst),
6067 (ins VR128:$src1, i32i8imm:$src2),
6068 "vextractps \t{$src2, $src1, $dst|$dst, $src1, $src2}",
6071 defm EXTRACTPS : SS41I_extractf32<0x17, "extractps">;
6074 // Also match an EXTRACTPS store when the store is done as f32 instead of i32.
6075 def : Pat<(store (f32 (bitconvert (extractelt (bc_v4i32 (v4f32 VR128:$src1)),
6078 (VEXTRACTPSmr addr:$dst, VR128:$src1, imm:$src2)>,
6080 def : Pat<(store (f32 (bitconvert (extractelt (bc_v4i32 (v4f32 VR128:$src1)),
6083 (EXTRACTPSmr addr:$dst, VR128:$src1, imm:$src2)>,
6084 Requires<[UseSSE41]>;
6086 //===----------------------------------------------------------------------===//
6087 // SSE4.1 - Insert Instructions
6088 //===----------------------------------------------------------------------===//
6090 multiclass SS41I_insert8<bits<8> opc, string asm, bit Is2Addr = 1> {
6091 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
6092 (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
6094 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6096 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6098 (X86pinsrb VR128:$src1, GR32:$src2, imm:$src3))]>, OpSize;
6099 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
6100 (ins VR128:$src1, i8mem:$src2, i32i8imm:$src3),
6102 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6104 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6106 (X86pinsrb VR128:$src1, (extloadi8 addr:$src2),
6107 imm:$src3))]>, OpSize;
6110 let Predicates = [HasAVX] in
6111 defm VPINSRB : SS41I_insert8<0x20, "vpinsrb", 0>, VEX_4V;
6112 let Constraints = "$src1 = $dst" in
6113 defm PINSRB : SS41I_insert8<0x20, "pinsrb">;
6115 multiclass SS41I_insert32<bits<8> opc, string asm, bit Is2Addr = 1> {
6116 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
6117 (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
6119 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6121 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6123 (v4i32 (insertelt VR128:$src1, GR32:$src2, imm:$src3)))]>,
6125 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
6126 (ins VR128:$src1, i32mem:$src2, i32i8imm:$src3),
6128 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6130 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6132 (v4i32 (insertelt VR128:$src1, (loadi32 addr:$src2),
6133 imm:$src3)))]>, OpSize;
6136 let Predicates = [HasAVX] in
6137 defm VPINSRD : SS41I_insert32<0x22, "vpinsrd", 0>, VEX_4V;
6138 let Constraints = "$src1 = $dst" in
6139 defm PINSRD : SS41I_insert32<0x22, "pinsrd">;
6141 multiclass SS41I_insert64<bits<8> opc, string asm, bit Is2Addr = 1> {
6142 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
6143 (ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
6145 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6147 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6149 (v2i64 (insertelt VR128:$src1, GR64:$src2, imm:$src3)))]>,
6151 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
6152 (ins VR128:$src1, i64mem:$src2, i32i8imm:$src3),
6154 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6156 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6158 (v2i64 (insertelt VR128:$src1, (loadi64 addr:$src2),
6159 imm:$src3)))]>, OpSize;
6162 let Predicates = [HasAVX] in
6163 defm VPINSRQ : SS41I_insert64<0x22, "vpinsrq", 0>, VEX_4V, VEX_W;
6164 let Constraints = "$src1 = $dst" in
6165 defm PINSRQ : SS41I_insert64<0x22, "pinsrq">, REX_W;
6167 // insertps has a few different modes; the first two below are optimized
6168 // inserts that won't zero arbitrary elements in the destination vector. The
6169 // third matches the intrinsic and can zero arbitrary elements in the
6170 // destination vector.
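// For reference, the insertps immediate is laid out as follows (per the
// SSE4.1 spec): imm[7:6] selects the source element (ignored for a memory
// source), imm[5:4] selects the destination slot, and imm[3:0] is a zero mask
// applied to the destination. E.g. imm = 0x58 (01'01'1000b) writes source
// element 1 into destination element 1 and zeroes destination element 3.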
6171 multiclass SS41I_insertf32<bits<8> opc, string asm, bit Is2Addr = 1> {
6172 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
6173 (ins VR128:$src1, VR128:$src2, u32u8imm:$src3),
6175 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6177 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6179 (X86insrtps VR128:$src1, VR128:$src2, imm:$src3))]>,
6181 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
6182 (ins VR128:$src1, f32mem:$src2, u32u8imm:$src3),
6184 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6186 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6188 (X86insrtps VR128:$src1,
6189 (v4f32 (scalar_to_vector (loadf32 addr:$src2))),
6190 imm:$src3))]>, OpSize;
6193 let ExeDomain = SSEPackedSingle in {
6194 let Predicates = [HasAVX] in
6195 defm VINSERTPS : SS41I_insertf32<0x21, "vinsertps", 0>, VEX_4V;
6196 let Constraints = "$src1 = $dst" in
6197 defm INSERTPS : SS41I_insertf32<0x21, "insertps">;
6200 //===----------------------------------------------------------------------===//
6201 // SSE4.1 - Round Instructions
6202 //===----------------------------------------------------------------------===//
6204 multiclass sse41_fp_unop_rm<bits<8> opcps, bits<8> opcpd, string OpcodeStr,
6205 X86MemOperand x86memop, RegisterClass RC,
6206 PatFrag mem_frag32, PatFrag mem_frag64,
6207 Intrinsic V4F32Int, Intrinsic V2F64Int> {
6208 let ExeDomain = SSEPackedSingle in {
6210   // Vector intrinsic operation, reg
6211 def PSr : SS4AIi8<opcps, MRMSrcReg,
6212 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
6213 !strconcat(OpcodeStr,
6214 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6215 [(set RC:$dst, (V4F32Int RC:$src1, imm:$src2))]>,
6218 // Vector intrinsic operation, mem
6219 def PSm : SS4AIi8<opcps, MRMSrcMem,
6220 (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
6221 !strconcat(OpcodeStr,
6222 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6224 (V4F32Int (mem_frag32 addr:$src1),imm:$src2))]>,
6226 } // ExeDomain = SSEPackedSingle
6228 let ExeDomain = SSEPackedDouble in {
6229 // Vector intrinsic operation, reg
6230 def PDr : SS4AIi8<opcpd, MRMSrcReg,
6231 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
6232 !strconcat(OpcodeStr,
6233 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6234 [(set RC:$dst, (V2F64Int RC:$src1, imm:$src2))]>,
6237 // Vector intrinsic operation, mem
6238 def PDm : SS4AIi8<opcpd, MRMSrcMem,
6239 (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
6240 !strconcat(OpcodeStr,
6241 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6243 (V2F64Int (mem_frag64 addr:$src1),imm:$src2))]>,
6245 } // ExeDomain = SSEPackedDouble
6248 multiclass sse41_fp_binop_rm<bits<8> opcss, bits<8> opcsd,
6251 Intrinsic F64Int, bit Is2Addr = 1> {
6252 let ExeDomain = GenericDomain in {
6254 def SSr : SS4AIi8<opcss, MRMSrcReg,
6255 (outs FR32:$dst), (ins FR32:$src1, FR32:$src2, i32i8imm:$src3),
6257 !strconcat(OpcodeStr,
6258 "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6259 !strconcat(OpcodeStr,
6260 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6263 // Intrinsic operation, reg.
6264 def SSr_Int : SS4AIi8<opcss, MRMSrcReg,
6265 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
6267 !strconcat(OpcodeStr,
6268 "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6269 !strconcat(OpcodeStr,
6270 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6271 [(set VR128:$dst, (F32Int VR128:$src1, VR128:$src2, imm:$src3))]>,
6274 // Intrinsic operation, mem.
6275 def SSm : SS4AIi8<opcss, MRMSrcMem,
6276 (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2, i32i8imm:$src3),
6278 !strconcat(OpcodeStr,
6279 "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6280 !strconcat(OpcodeStr,
6281 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6283 (F32Int VR128:$src1, sse_load_f32:$src2, imm:$src3))]>,
6287 def SDr : SS4AIi8<opcsd, MRMSrcReg,
6288 (outs FR64:$dst), (ins FR64:$src1, FR64:$src2, i32i8imm:$src3),
6290 !strconcat(OpcodeStr,
6291 "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6292 !strconcat(OpcodeStr,
6293 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6296 // Intrinsic operation, reg.
6297 def SDr_Int : SS4AIi8<opcsd, MRMSrcReg,
6298 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
6300 !strconcat(OpcodeStr,
6301 "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6302 !strconcat(OpcodeStr,
6303 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6304 [(set VR128:$dst, (F64Int VR128:$src1, VR128:$src2, imm:$src3))]>,
6307 // Intrinsic operation, mem.
6308 def SDm : SS4AIi8<opcsd, MRMSrcMem,
6309 (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2, i32i8imm:$src3),
6311 !strconcat(OpcodeStr,
6312 "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6313 !strconcat(OpcodeStr,
6314 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6316 (F64Int VR128:$src1, sse_load_f64:$src2, imm:$src3))]>,
6318 } // ExeDomain = GenericDomain
6321 // FP round - roundss, roundps, roundsd, roundpd
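// A note on the rounding-control immediate used below: imm[1:0] selects the
// rounding mode (00 = nearest, 01 = down, 10 = up, 11 = truncate), imm[2] set
// means "use the current mode from MXCSR.RC instead", and imm[3] set
// suppresses the precision (inexact) exception. Hence 0x1 = floor,
// 0x2 = ceil, 0x3 = trunc, 0x4 = rint (current mode), and 0xC = nearbyint
// (current mode, inexact exception suppressed).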
6322 let Predicates = [HasAVX] in {
6324 defm VROUND : sse41_fp_unop_rm<0x08, 0x09, "vround", f128mem, VR128,
6325 memopv4f32, memopv2f64,
6326 int_x86_sse41_round_ps,
6327 int_x86_sse41_round_pd>, VEX;
6328 defm VROUNDY : sse41_fp_unop_rm<0x08, 0x09, "vround", f256mem, VR256,
6329 memopv8f32, memopv4f64,
6330 int_x86_avx_round_ps_256,
6331 int_x86_avx_round_pd_256>, VEX, VEX_L;
6332 defm VROUND : sse41_fp_binop_rm<0x0A, 0x0B, "vround",
6333 int_x86_sse41_round_ss,
6334 int_x86_sse41_round_sd, 0>, VEX_4V, VEX_LIG;
6336 def : Pat<(ffloor FR32:$src),
6337 (VROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x1))>;
6338 def : Pat<(f64 (ffloor FR64:$src)),
6339 (VROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x1))>;
6340 def : Pat<(f32 (fnearbyint FR32:$src)),
6341 (VROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0xC))>;
6342 def : Pat<(f64 (fnearbyint FR64:$src)),
6343 (VROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0xC))>;
6344 def : Pat<(f32 (fceil FR32:$src)),
6345 (VROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x2))>;
6346 def : Pat<(f64 (fceil FR64:$src)),
6347 (VROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x2))>;
6348 def : Pat<(f32 (frint FR32:$src)),
6349 (VROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x4))>;
6350 def : Pat<(f64 (frint FR64:$src)),
6351 (VROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x4))>;
6352 def : Pat<(f32 (ftrunc FR32:$src)),
6353 (VROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x3))>;
6354 def : Pat<(f64 (ftrunc FR64:$src)),
6355 (VROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x3))>;
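  // The scalar patterns above feed IMPLICIT_DEF as the tied first operand:
  // only the low element of the result is used, so whatever the instruction
  // passes through in the upper elements is irrelevant.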
6357 def : Pat<(v4f32 (ffloor VR128:$src)),
6358 (VROUNDPSr VR128:$src, (i32 0x1))>;
6359 def : Pat<(v4f32 (fnearbyint VR128:$src)),
6360 (VROUNDPSr VR128:$src, (i32 0xC))>;
6361 def : Pat<(v4f32 (fceil VR128:$src)),
6362 (VROUNDPSr VR128:$src, (i32 0x2))>;
6363 def : Pat<(v4f32 (frint VR128:$src)),
6364 (VROUNDPSr VR128:$src, (i32 0x4))>;
6365 def : Pat<(v4f32 (ftrunc VR128:$src)),
6366 (VROUNDPSr VR128:$src, (i32 0x3))>;
6368 def : Pat<(v2f64 (ffloor VR128:$src)),
6369 (VROUNDPDr VR128:$src, (i32 0x1))>;
6370 def : Pat<(v2f64 (fnearbyint VR128:$src)),
6371 (VROUNDPDr VR128:$src, (i32 0xC))>;
6372 def : Pat<(v2f64 (fceil VR128:$src)),
6373 (VROUNDPDr VR128:$src, (i32 0x2))>;
6374 def : Pat<(v2f64 (frint VR128:$src)),
6375 (VROUNDPDr VR128:$src, (i32 0x4))>;
6376 def : Pat<(v2f64 (ftrunc VR128:$src)),
6377 (VROUNDPDr VR128:$src, (i32 0x3))>;
6379 def : Pat<(v8f32 (ffloor VR256:$src)),
6380 (VROUNDYPSr VR256:$src, (i32 0x1))>;
6381 def : Pat<(v8f32 (fnearbyint VR256:$src)),
6382 (VROUNDYPSr VR256:$src, (i32 0xC))>;
6383 def : Pat<(v8f32 (fceil VR256:$src)),
6384 (VROUNDYPSr VR256:$src, (i32 0x2))>;
6385 def : Pat<(v8f32 (frint VR256:$src)),
6386 (VROUNDYPSr VR256:$src, (i32 0x4))>;
6387 def : Pat<(v8f32 (ftrunc VR256:$src)),
6388 (VROUNDYPSr VR256:$src, (i32 0x3))>;
6390 def : Pat<(v4f64 (ffloor VR256:$src)),
6391 (VROUNDYPDr VR256:$src, (i32 0x1))>;
6392 def : Pat<(v4f64 (fnearbyint VR256:$src)),
6393 (VROUNDYPDr VR256:$src, (i32 0xC))>;
6394 def : Pat<(v4f64 (fceil VR256:$src)),
6395 (VROUNDYPDr VR256:$src, (i32 0x2))>;
6396 def : Pat<(v4f64 (frint VR256:$src)),
6397 (VROUNDYPDr VR256:$src, (i32 0x4))>;
6398 def : Pat<(v4f64 (ftrunc VR256:$src)),
6399 (VROUNDYPDr VR256:$src, (i32 0x3))>;
6402 defm ROUND : sse41_fp_unop_rm<0x08, 0x09, "round", f128mem, VR128,
6403 memopv4f32, memopv2f64,
6404 int_x86_sse41_round_ps, int_x86_sse41_round_pd>;
6405 let Constraints = "$src1 = $dst" in
6406 defm ROUND : sse41_fp_binop_rm<0x0A, 0x0B, "round",
6407 int_x86_sse41_round_ss, int_x86_sse41_round_sd>;
6409 let Predicates = [UseSSE41] in {
6410 def : Pat<(ffloor FR32:$src),
6411 (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x1))>;
6412 def : Pat<(f64 (ffloor FR64:$src)),
6413 (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x1))>;
6414 def : Pat<(f32 (fnearbyint FR32:$src)),
6415 (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0xC))>;
6416 def : Pat<(f64 (fnearbyint FR64:$src)),
6417 (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0xC))>;
6418 def : Pat<(f32 (fceil FR32:$src)),
6419 (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x2))>;
6420 def : Pat<(f64 (fceil FR64:$src)),
6421 (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x2))>;
6422 def : Pat<(f32 (frint FR32:$src)),
6423 (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x4))>;
6424 def : Pat<(f64 (frint FR64:$src)),
6425 (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x4))>;
6426 def : Pat<(f32 (ftrunc FR32:$src)),
6427 (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x3))>;
6428 def : Pat<(f64 (ftrunc FR64:$src)),
6429 (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x3))>;
6431 def : Pat<(v4f32 (ffloor VR128:$src)),
6432 (ROUNDPSr VR128:$src, (i32 0x1))>;
6433 def : Pat<(v4f32 (fnearbyint VR128:$src)),
6434 (ROUNDPSr VR128:$src, (i32 0xC))>;
6435 def : Pat<(v4f32 (fceil VR128:$src)),
6436 (ROUNDPSr VR128:$src, (i32 0x2))>;
6437 def : Pat<(v4f32 (frint VR128:$src)),
6438 (ROUNDPSr VR128:$src, (i32 0x4))>;
6439 def : Pat<(v4f32 (ftrunc VR128:$src)),
6440 (ROUNDPSr VR128:$src, (i32 0x3))>;
6442 def : Pat<(v2f64 (ffloor VR128:$src)),
6443 (ROUNDPDr VR128:$src, (i32 0x1))>;
6444 def : Pat<(v2f64 (fnearbyint VR128:$src)),
6445 (ROUNDPDr VR128:$src, (i32 0xC))>;
6446 def : Pat<(v2f64 (fceil VR128:$src)),
6447 (ROUNDPDr VR128:$src, (i32 0x2))>;
6448 def : Pat<(v2f64 (frint VR128:$src)),
6449 (ROUNDPDr VR128:$src, (i32 0x4))>;
6450 def : Pat<(v2f64 (ftrunc VR128:$src)),
6451 (ROUNDPDr VR128:$src, (i32 0x3))>;
6454 //===----------------------------------------------------------------------===//
6455 // SSE4.1 - Packed Bit Test
6456 //===----------------------------------------------------------------------===//
6458 // ptest instruction: we lower to this in X86ISelLowering, primarily from
6459 // the Intel intrinsic that corresponds to it.
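// ptest performs a bitwise AND of its operands and sets ZF when
// (src1 & src2) == 0 and CF when (~src1 & src2) == 0; the vtestps/vtestpd
// variants further down do the same, but only over the per-element sign bits.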
6460 let Defs = [EFLAGS], Predicates = [HasAVX] in {
6461 def VPTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
6462 "vptest\t{$src2, $src1|$src1, $src2}",
6463 [(set EFLAGS, (X86ptest VR128:$src1, (v2i64 VR128:$src2)))]>,
6465 def VPTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
6466 "vptest\t{$src2, $src1|$src1, $src2}",
6467 [(set EFLAGS,(X86ptest VR128:$src1, (memopv2i64 addr:$src2)))]>,
6470 def VPTESTYrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR256:$src1, VR256:$src2),
6471 "vptest\t{$src2, $src1|$src1, $src2}",
6472 [(set EFLAGS, (X86ptest VR256:$src1, (v4i64 VR256:$src2)))]>,
6474 def VPTESTYrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR256:$src1, i256mem:$src2),
6475 "vptest\t{$src2, $src1|$src1, $src2}",
6476 [(set EFLAGS,(X86ptest VR256:$src1, (memopv4i64 addr:$src2)))]>,
6480 let Defs = [EFLAGS] in {
6481 def PTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
6482 "ptest\t{$src2, $src1|$src1, $src2}",
6483 [(set EFLAGS, (X86ptest VR128:$src1, (v2i64 VR128:$src2)))]>,
6485 def PTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
6486 "ptest\t{$src2, $src1|$src1, $src2}",
6487 [(set EFLAGS, (X86ptest VR128:$src1, (memopv2i64 addr:$src2)))]>,
6491 // The bit test instructions below are AVX-only.
6492 multiclass avx_bittest<bits<8> opc, string OpcodeStr, RegisterClass RC,
6493 X86MemOperand x86memop, PatFrag mem_frag, ValueType vt> {
6494 def rr : SS48I<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
6495 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
6496 [(set EFLAGS, (X86testp RC:$src1, (vt RC:$src2)))]>, OpSize, VEX;
6497 def rm : SS48I<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
6498 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
6499 [(set EFLAGS, (X86testp RC:$src1, (mem_frag addr:$src2)))]>,
6503 let Defs = [EFLAGS], Predicates = [HasAVX] in {
6504 let ExeDomain = SSEPackedSingle in {
6505 defm VTESTPS : avx_bittest<0x0E, "vtestps", VR128, f128mem, memopv4f32, v4f32>;
6506 defm VTESTPSY : avx_bittest<0x0E, "vtestps", VR256, f256mem, memopv8f32, v8f32>,
6509 let ExeDomain = SSEPackedDouble in {
6510 defm VTESTPD : avx_bittest<0x0F, "vtestpd", VR128, f128mem, memopv2f64, v2f64>;
6511 defm VTESTPDY : avx_bittest<0x0F, "vtestpd", VR256, f256mem, memopv4f64, v4f64>,
6516 //===----------------------------------------------------------------------===//
6517 // SSE4.1 - Misc Instructions
6518 //===----------------------------------------------------------------------===//
6520 let Defs = [EFLAGS], Predicates = [HasPOPCNT] in {
6521 def POPCNT16rr : I<0xB8, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
6522 "popcnt{w}\t{$src, $dst|$dst, $src}",
6523 [(set GR16:$dst, (ctpop GR16:$src)), (implicit EFLAGS)]>,
6525 def POPCNT16rm : I<0xB8, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
6526 "popcnt{w}\t{$src, $dst|$dst, $src}",
6527 [(set GR16:$dst, (ctpop (loadi16 addr:$src))),
6528 (implicit EFLAGS)]>, OpSize, XS;
6530 def POPCNT32rr : I<0xB8, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
6531 "popcnt{l}\t{$src, $dst|$dst, $src}",
6532 [(set GR32:$dst, (ctpop GR32:$src)), (implicit EFLAGS)]>,
6534 def POPCNT32rm : I<0xB8, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
6535 "popcnt{l}\t{$src, $dst|$dst, $src}",
6536 [(set GR32:$dst, (ctpop (loadi32 addr:$src))),
6537 (implicit EFLAGS)]>, XS;
6539 def POPCNT64rr : RI<0xB8, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
6540 "popcnt{q}\t{$src, $dst|$dst, $src}",
6541 [(set GR64:$dst, (ctpop GR64:$src)), (implicit EFLAGS)]>,
6543 def POPCNT64rm : RI<0xB8, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
6544 "popcnt{q}\t{$src, $dst|$dst, $src}",
6545 [(set GR64:$dst, (ctpop (loadi64 addr:$src))),
6546 (implicit EFLAGS)]>, XS;
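// popcnt sets ZF when the source is zero and clears OF, SF, AF, CF and PF,
// which is why each pattern above carries (implicit EFLAGS).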
6551 /// SS41I_unop_rm_int_v16 - SSE 4.1 unary operator whose operand type is v8i16.
6552 multiclass SS41I_unop_rm_int_v16<bits<8> opc, string OpcodeStr,
6553 Intrinsic IntId128> {
6554 def rr128 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
6556 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
6557 [(set VR128:$dst, (IntId128 VR128:$src))]>, OpSize;
6558 def rm128 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
6560 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
6563 (bitconvert (memopv2i64 addr:$src))))]>, OpSize;
6566 let Predicates = [HasAVX] in
6567 defm VPHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "vphminposuw",
6568 int_x86_sse41_phminposuw>, VEX;
6569 defm PHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "phminposuw",
6570 int_x86_sse41_phminposuw>;
6572 /// SS41I_binop_rm_int - Simple SSE 4.1 binary operator
6573 multiclass SS41I_binop_rm_int<bits<8> opc, string OpcodeStr,
6574 Intrinsic IntId128, bit Is2Addr = 1> {
6575 let isCommutable = 1 in
6576 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
6577 (ins VR128:$src1, VR128:$src2),
6579 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
6580 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
6581 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>, OpSize;
6582 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
6583 (ins VR128:$src1, i128mem:$src2),
6585 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
6586 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
6588 (IntId128 VR128:$src1,
6589 (bitconvert (memopv2i64 addr:$src2))))]>, OpSize;
6592 /// SS41I_binop_rm_int_y - Simple SSE 4.1 binary operator
6593 multiclass SS41I_binop_rm_int_y<bits<8> opc, string OpcodeStr,
6594 Intrinsic IntId256> {
6595 let isCommutable = 1 in
6596 def Yrr : SS48I<opc, MRMSrcReg, (outs VR256:$dst),
6597 (ins VR256:$src1, VR256:$src2),
6598 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6599 [(set VR256:$dst, (IntId256 VR256:$src1, VR256:$src2))]>, OpSize;
6600 def Yrm : SS48I<opc, MRMSrcMem, (outs VR256:$dst),
6601 (ins VR256:$src1, i256mem:$src2),
6602 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6604 (IntId256 VR256:$src1,
6605 (bitconvert (memopv4i64 addr:$src2))))]>, OpSize;
6609 /// SS48I_binop_rm - Simple SSE 4.1 binary operator.
6610 multiclass SS48I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
6611 ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
6612 X86MemOperand x86memop, bit Is2Addr = 1> {
6613 let isCommutable = 1 in
6614 def rr : SS48I<opc, MRMSrcReg, (outs RC:$dst),
6615 (ins RC:$src1, RC:$src2),
6617 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
6618 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
6619 [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2)))]>, OpSize;
6620 def rm : SS48I<opc, MRMSrcMem, (outs RC:$dst),
6621 (ins RC:$src1, x86memop:$src2),
6623 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
6624 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
6626 (OpVT (OpNode RC:$src1,
6627 (bitconvert (memop_frag addr:$src2)))))]>, OpSize;
6630 let Predicates = [HasAVX] in {
6631 let isCommutable = 0 in
6632 defm VPACKUSDW : SS41I_binop_rm_int<0x2B, "vpackusdw", int_x86_sse41_packusdw,
6634 defm VPMINSB : SS48I_binop_rm<0x38, "vpminsb", X86smin, v16i8, VR128,
6635 memopv2i64, i128mem, 0>, VEX_4V;
6636 defm VPMINSD : SS48I_binop_rm<0x39, "vpminsd", X86smin, v4i32, VR128,
6637 memopv2i64, i128mem, 0>, VEX_4V;
6638 defm VPMINUD : SS48I_binop_rm<0x3B, "vpminud", X86umin, v4i32, VR128,
6639 memopv2i64, i128mem, 0>, VEX_4V;
6640 defm VPMINUW : SS48I_binop_rm<0x3A, "vpminuw", X86umin, v8i16, VR128,
6641 memopv2i64, i128mem, 0>, VEX_4V;
6642 defm VPMAXSB : SS48I_binop_rm<0x3C, "vpmaxsb", X86smax, v16i8, VR128,
6643 memopv2i64, i128mem, 0>, VEX_4V;
6644 defm VPMAXSD : SS48I_binop_rm<0x3D, "vpmaxsd", X86smax, v4i32, VR128,
6645 memopv2i64, i128mem, 0>, VEX_4V;
6646 defm VPMAXUD : SS48I_binop_rm<0x3F, "vpmaxud", X86umax, v4i32, VR128,
6647 memopv2i64, i128mem, 0>, VEX_4V;
6648 defm VPMAXUW : SS48I_binop_rm<0x3E, "vpmaxuw", X86umax, v8i16, VR128,
6649 memopv2i64, i128mem, 0>, VEX_4V;
6650 defm VPMULDQ : SS41I_binop_rm_int<0x28, "vpmuldq", int_x86_sse41_pmuldq,
6654 let Predicates = [HasAVX2] in {
6655 let isCommutable = 0 in
6656 defm VPACKUSDW : SS41I_binop_rm_int_y<0x2B, "vpackusdw",
6657 int_x86_avx2_packusdw>, VEX_4V, VEX_L;
6658 defm VPMINSBY : SS48I_binop_rm<0x38, "vpminsb", X86smin, v32i8, VR256,
6659 memopv4i64, i256mem, 0>, VEX_4V, VEX_L;
6660 defm VPMINSDY : SS48I_binop_rm<0x39, "vpminsd", X86smin, v8i32, VR256,
6661 memopv4i64, i256mem, 0>, VEX_4V, VEX_L;
6662 defm VPMINUDY : SS48I_binop_rm<0x3B, "vpminud", X86umin, v8i32, VR256,
6663 memopv4i64, i256mem, 0>, VEX_4V, VEX_L;
6664 defm VPMINUWY : SS48I_binop_rm<0x3A, "vpminuw", X86umin, v16i16, VR256,
6665 memopv4i64, i256mem, 0>, VEX_4V, VEX_L;
6666 defm VPMAXSBY : SS48I_binop_rm<0x3C, "vpmaxsb", X86smax, v32i8, VR256,
6667 memopv4i64, i256mem, 0>, VEX_4V, VEX_L;
6668 defm VPMAXSDY : SS48I_binop_rm<0x3D, "vpmaxsd", X86smax, v8i32, VR256,
6669 memopv4i64, i256mem, 0>, VEX_4V, VEX_L;
6670 defm VPMAXUDY : SS48I_binop_rm<0x3F, "vpmaxud", X86umax, v8i32, VR256,
6671 memopv4i64, i256mem, 0>, VEX_4V, VEX_L;
6672 defm VPMAXUWY : SS48I_binop_rm<0x3E, "vpmaxuw", X86umax, v16i16, VR256,
6673 memopv4i64, i256mem, 0>, VEX_4V, VEX_L;
6674 defm VPMULDQ : SS41I_binop_rm_int_y<0x28, "vpmuldq",
6675 int_x86_avx2_pmul_dq>, VEX_4V, VEX_L;
6678 let Constraints = "$src1 = $dst" in {
6679 let isCommutable = 0 in
6680 defm PACKUSDW : SS41I_binop_rm_int<0x2B, "packusdw", int_x86_sse41_packusdw>;
6681 defm PMINSB : SS48I_binop_rm<0x38, "pminsb", X86smin, v16i8, VR128,
6682 memopv2i64, i128mem>;
6683 defm PMINSD : SS48I_binop_rm<0x39, "pminsd", X86smin, v4i32, VR128,
6684 memopv2i64, i128mem>;
6685 defm PMINUD : SS48I_binop_rm<0x3B, "pminud", X86umin, v4i32, VR128,
6686 memopv2i64, i128mem>;
6687 defm PMINUW : SS48I_binop_rm<0x3A, "pminuw", X86umin, v8i16, VR128,
6688 memopv2i64, i128mem>;
6689 defm PMAXSB : SS48I_binop_rm<0x3C, "pmaxsb", X86smax, v16i8, VR128,
6690 memopv2i64, i128mem>;
6691 defm PMAXSD : SS48I_binop_rm<0x3D, "pmaxsd", X86smax, v4i32, VR128,
6692 memopv2i64, i128mem>;
6693 defm PMAXUD : SS48I_binop_rm<0x3F, "pmaxud", X86umax, v4i32, VR128,
6694 memopv2i64, i128mem>;
6695 defm PMAXUW : SS48I_binop_rm<0x3E, "pmaxuw", X86umax, v8i16, VR128,
6696 memopv2i64, i128mem>;
6697 defm PMULDQ : SS41I_binop_rm_int<0x28, "pmuldq", int_x86_sse41_pmuldq>;
6700 let Predicates = [HasAVX] in {
6701 defm VPMULLD : SS48I_binop_rm<0x40, "vpmulld", mul, v4i32, VR128,
6702 memopv2i64, i128mem, 0>, VEX_4V;
6703 defm VPCMPEQQ : SS48I_binop_rm<0x29, "vpcmpeqq", X86pcmpeq, v2i64, VR128,
6704 memopv2i64, i128mem, 0>, VEX_4V;
6706 let Predicates = [HasAVX2] in {
6707 defm VPMULLDY : SS48I_binop_rm<0x40, "vpmulld", mul, v8i32, VR256,
6708 memopv4i64, i256mem, 0>, VEX_4V, VEX_L;
6709 defm VPCMPEQQY : SS48I_binop_rm<0x29, "vpcmpeqq", X86pcmpeq, v4i64, VR256,
6710 memopv4i64, i256mem, 0>, VEX_4V, VEX_L;
6713 let Constraints = "$src1 = $dst" in {
6714 defm PMULLD : SS48I_binop_rm<0x40, "pmulld", mul, v4i32, VR128,
6715 memopv2i64, i128mem>;
6716 defm PCMPEQQ : SS48I_binop_rm<0x29, "pcmpeqq", X86pcmpeq, v2i64, VR128,
6717 memopv2i64, i128mem>;
6720 /// SS41I_binop_rmi_int - SSE 4.1 binary operator with 8-bit immediate
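/// The immediate means different things to the instructions defined from this
/// class: for blendps/blendpd/pblendw, imm bit i set selects element i from
/// $src2 (clear keeps $src1); for dpps/dppd one imm field masks which
/// elements are multiplied and another masks where the sum is written; for
/// mpsadbw it selects the source block offsets for the SAD computation.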
6721 multiclass SS41I_binop_rmi_int<bits<8> opc, string OpcodeStr,
6722 Intrinsic IntId, RegisterClass RC, PatFrag memop_frag,
6723 X86MemOperand x86memop, bit Is2Addr = 1> {
6724 let isCommutable = 1 in
6725 def rri : SS4AIi8<opc, MRMSrcReg, (outs RC:$dst),
6726 (ins RC:$src1, RC:$src2, u32u8imm:$src3),
6728 !strconcat(OpcodeStr,
6729 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6730 !strconcat(OpcodeStr,
6731 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6732 [(set RC:$dst, (IntId RC:$src1, RC:$src2, imm:$src3))]>,
6734 def rmi : SS4AIi8<opc, MRMSrcMem, (outs RC:$dst),
6735 (ins RC:$src1, x86memop:$src2, u32u8imm:$src3),
6737 !strconcat(OpcodeStr,
6738 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6739 !strconcat(OpcodeStr,
6740 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6743 (bitconvert (memop_frag addr:$src2)), imm:$src3))]>,
6747 let Predicates = [HasAVX] in {
6748 let isCommutable = 0 in {
6749 let ExeDomain = SSEPackedSingle in {
6750 defm VBLENDPS : SS41I_binop_rmi_int<0x0C, "vblendps", int_x86_sse41_blendps,
6751 VR128, memopv4f32, f128mem, 0>, VEX_4V;
6752 defm VBLENDPSY : SS41I_binop_rmi_int<0x0C, "vblendps",
6753 int_x86_avx_blend_ps_256, VR256, memopv8f32,
6754 f256mem, 0>, VEX_4V, VEX_L;
6756 let ExeDomain = SSEPackedDouble in {
6757 defm VBLENDPD : SS41I_binop_rmi_int<0x0D, "vblendpd", int_x86_sse41_blendpd,
6758 VR128, memopv2f64, f128mem, 0>, VEX_4V;
6759 defm VBLENDPDY : SS41I_binop_rmi_int<0x0D, "vblendpd",
6760 int_x86_avx_blend_pd_256,VR256, memopv4f64,
6761 f256mem, 0>, VEX_4V, VEX_L;
6763 defm VPBLENDW : SS41I_binop_rmi_int<0x0E, "vpblendw", int_x86_sse41_pblendw,
6764 VR128, memopv2i64, i128mem, 0>, VEX_4V;
6765 defm VMPSADBW : SS41I_binop_rmi_int<0x42, "vmpsadbw", int_x86_sse41_mpsadbw,
6766 VR128, memopv2i64, i128mem, 0>, VEX_4V;
6768 let ExeDomain = SSEPackedSingle in
6769 defm VDPPS : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_sse41_dpps,
6770 VR128, memopv4f32, f128mem, 0>, VEX_4V;
6771 let ExeDomain = SSEPackedDouble in
6772 defm VDPPD : SS41I_binop_rmi_int<0x41, "vdppd", int_x86_sse41_dppd,
6773 VR128, memopv2f64, f128mem, 0>, VEX_4V;
6774 let ExeDomain = SSEPackedSingle in
6775 defm VDPPSY : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_avx_dp_ps_256,
6776 VR256, memopv8f32, i256mem, 0>, VEX_4V, VEX_L;
6779 let Predicates = [HasAVX2] in {
6780 let isCommutable = 0 in {
6781 defm VPBLENDWY : SS41I_binop_rmi_int<0x0E, "vpblendw", int_x86_avx2_pblendw,
6782 VR256, memopv4i64, i256mem, 0>, VEX_4V, VEX_L;
6783 defm VMPSADBWY : SS41I_binop_rmi_int<0x42, "vmpsadbw", int_x86_avx2_mpsadbw,
6784 VR256, memopv4i64, i256mem, 0>, VEX_4V, VEX_L;
6788 let Constraints = "$src1 = $dst" in {
6789 let isCommutable = 0 in {
6790 let ExeDomain = SSEPackedSingle in
6791 defm BLENDPS : SS41I_binop_rmi_int<0x0C, "blendps", int_x86_sse41_blendps,
6792 VR128, memopv4f32, f128mem>;
6793 let ExeDomain = SSEPackedDouble in
6794 defm BLENDPD : SS41I_binop_rmi_int<0x0D, "blendpd", int_x86_sse41_blendpd,
6795 VR128, memopv2f64, f128mem>;
6796 defm PBLENDW : SS41I_binop_rmi_int<0x0E, "pblendw", int_x86_sse41_pblendw,
6797 VR128, memopv2i64, i128mem>;
6798 defm MPSADBW : SS41I_binop_rmi_int<0x42, "mpsadbw", int_x86_sse41_mpsadbw,
6799 VR128, memopv2i64, i128mem>;
6801 let ExeDomain = SSEPackedSingle in
6802 defm DPPS : SS41I_binop_rmi_int<0x40, "dpps", int_x86_sse41_dpps,
6803 VR128, memopv4f32, f128mem>;
6804 let ExeDomain = SSEPackedDouble in
6805 defm DPPD : SS41I_binop_rmi_int<0x41, "dppd", int_x86_sse41_dppd,
6806 VR128, memopv2f64, f128mem>;
6809 /// SS41I_quaternary_int_avx - AVX SSE 4.1 operation with 4 operands
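/// In the VEX encoding the fourth operand (the selector register $src3) is
/// carried in bits [7:4] of an immediate byte (the "is4" field), which is
/// what VEX_I8IMM requests below.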
6810 multiclass SS41I_quaternary_int_avx<bits<8> opc, string OpcodeStr,
6811 RegisterClass RC, X86MemOperand x86memop,
6812 PatFrag mem_frag, Intrinsic IntId> {
6813 def rr : Ii8<opc, MRMSrcReg, (outs RC:$dst),
6814 (ins RC:$src1, RC:$src2, RC:$src3),
6815 !strconcat(OpcodeStr,
6816 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
6817 [(set RC:$dst, (IntId RC:$src1, RC:$src2, RC:$src3))],
6818 IIC_DEFAULT, SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM;
6820 def rm : Ii8<opc, MRMSrcMem, (outs RC:$dst),
6821 (ins RC:$src1, x86memop:$src2, RC:$src3),
6822 !strconcat(OpcodeStr,
6823 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
6825 (IntId RC:$src1, (bitconvert (mem_frag addr:$src2)),
6827 IIC_DEFAULT, SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM;
6830 let Predicates = [HasAVX] in {
6831 let ExeDomain = SSEPackedDouble in {
6832 defm VBLENDVPD : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR128, f128mem,
6833 memopv2f64, int_x86_sse41_blendvpd>;
6834 defm VBLENDVPDY : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR256, f256mem,
6835 memopv4f64, int_x86_avx_blendv_pd_256>, VEX_L;
6836 } // ExeDomain = SSEPackedDouble
6837 let ExeDomain = SSEPackedSingle in {
6838 defm VBLENDVPS : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR128, f128mem,
6839 memopv4f32, int_x86_sse41_blendvps>;
6840 defm VBLENDVPSY : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR256, f256mem,
6841 memopv8f32, int_x86_avx_blendv_ps_256>, VEX_L;
6842 } // ExeDomain = SSEPackedSingle
6843 defm VPBLENDVB : SS41I_quaternary_int_avx<0x4C, "vpblendvb", VR128, i128mem,
6844 memopv2i64, int_x86_sse41_pblendvb>;
6847 let Predicates = [HasAVX2] in {
6848 defm VPBLENDVBY : SS41I_quaternary_int_avx<0x4C, "vpblendvb", VR256, i256mem,
6849 memopv4i64, int_x86_avx2_pblendvb>, VEX_L;
6852 let Predicates = [HasAVX] in {
6853 def : Pat<(v16i8 (vselect (v16i8 VR128:$mask), (v16i8 VR128:$src1),
6854 (v16i8 VR128:$src2))),
6855 (VPBLENDVBrr VR128:$src2, VR128:$src1, VR128:$mask)>;
6856 def : Pat<(v4i32 (vselect (v4i32 VR128:$mask), (v4i32 VR128:$src1),
6857 (v4i32 VR128:$src2))),
6858 (VBLENDVPSrr VR128:$src2, VR128:$src1, VR128:$mask)>;
6859 def : Pat<(v4f32 (vselect (v4i32 VR128:$mask), (v4f32 VR128:$src1),
6860 (v4f32 VR128:$src2))),
6861 (VBLENDVPSrr VR128:$src2, VR128:$src1, VR128:$mask)>;
6862 def : Pat<(v2i64 (vselect (v2i64 VR128:$mask), (v2i64 VR128:$src1),
6863 (v2i64 VR128:$src2))),
6864 (VBLENDVPDrr VR128:$src2, VR128:$src1, VR128:$mask)>;
6865 def : Pat<(v2f64 (vselect (v2i64 VR128:$mask), (v2f64 VR128:$src1),
6866 (v2f64 VR128:$src2))),
6867 (VBLENDVPDrr VR128:$src2, VR128:$src1, VR128:$mask)>;
6868 def : Pat<(v8i32 (vselect (v8i32 VR256:$mask), (v8i32 VR256:$src1),
6869 (v8i32 VR256:$src2))),
6870 (VBLENDVPSYrr VR256:$src2, VR256:$src1, VR256:$mask)>;
6871 def : Pat<(v8f32 (vselect (v8i32 VR256:$mask), (v8f32 VR256:$src1),
6872 (v8f32 VR256:$src2))),
6873 (VBLENDVPSYrr VR256:$src2, VR256:$src1, VR256:$mask)>;
6874 def : Pat<(v4i64 (vselect (v4i64 VR256:$mask), (v4i64 VR256:$src1),
6875 (v4i64 VR256:$src2))),
6876 (VBLENDVPDYrr VR256:$src2, VR256:$src1, VR256:$mask)>;
6877 def : Pat<(v4f64 (vselect (v4i64 VR256:$mask), (v4f64 VR256:$src1),
6878 (v4f64 VR256:$src2))),
6879 (VBLENDVPDYrr VR256:$src2, VR256:$src1, VR256:$mask)>;
6881 def : Pat<(v8f32 (X86Blendi (v8f32 VR256:$src1), (v8f32 VR256:$src2),
6883 (VBLENDPSYrri VR256:$src1, VR256:$src2, imm:$mask)>;
6884 def : Pat<(v4f64 (X86Blendi (v4f64 VR256:$src1), (v4f64 VR256:$src2),
6886 (VBLENDPDYrri VR256:$src1, VR256:$src2, imm:$mask)>;
6888 def : Pat<(v8i16 (X86Blendi (v8i16 VR128:$src1), (v8i16 VR128:$src2),
6890 (VPBLENDWrri VR128:$src1, VR128:$src2, imm:$mask)>;
6891 def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$src1), (v4f32 VR128:$src2),
6893 (VBLENDPSrri VR128:$src1, VR128:$src2, imm:$mask)>;
6894 def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$src1), (v2f64 VR128:$src2),
6896 (VBLENDPDrri VR128:$src1, VR128:$src2, imm:$mask)>;
6899 let Predicates = [HasAVX2] in {
6900 def : Pat<(v32i8 (vselect (v32i8 VR256:$mask), (v32i8 VR256:$src1),
6901 (v32i8 VR256:$src2))),
6902             (VPBLENDVBYrr VR256:$src2, VR256:$src1, VR256:$mask)>;
6903 def : Pat<(v16i16 (X86Blendi (v16i16 VR256:$src1), (v16i16 VR256:$src2),
6905 (VPBLENDWYrri VR256:$src1, VR256:$src2, imm:$mask)>;
6908 /// SS41I_ternary_int - SSE 4.1 ternary operator with an implicit XMM0 operand
6909 let Uses = [XMM0], Constraints = "$src1 = $dst" in {
6910 multiclass SS41I_ternary_int<bits<8> opc, string OpcodeStr, PatFrag mem_frag,
6911 X86MemOperand x86memop, Intrinsic IntId> {
6912 def rr0 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
6913 (ins VR128:$src1, VR128:$src2),
6914 !strconcat(OpcodeStr,
6915 "\t{$src2, $dst|$dst, $src2}"),
6916 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2, XMM0))]>,
6919 def rm0 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
6920 (ins VR128:$src1, x86memop:$src2),
6921 !strconcat(OpcodeStr,
6922 "\t{$src2, $dst|$dst, $src2}"),
6925 (bitconvert (mem_frag addr:$src2)), XMM0))]>, OpSize;
let ExeDomain = SSEPackedDouble in
defm BLENDVPD : SS41I_ternary_int<0x15, "blendvpd", memopv2f64, f128mem,
                                  int_x86_sse41_blendvpd>;
let ExeDomain = SSEPackedSingle in
defm BLENDVPS : SS41I_ternary_int<0x14, "blendvps", memopv4f32, f128mem,
                                  int_x86_sse41_blendvps>;
defm PBLENDVB : SS41I_ternary_int<0x10, "pblendvb", memopv2i64, i128mem,
                                  int_x86_sse41_pblendvb>;
// Aliases with the implicit xmm0 argument
def : InstAlias<"blendvpd\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}",
                (BLENDVPDrr0 VR128:$dst, VR128:$src2)>;
def : InstAlias<"blendvpd\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}",
                (BLENDVPDrm0 VR128:$dst, f128mem:$src2)>;
def : InstAlias<"blendvps\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}",
                (BLENDVPSrr0 VR128:$dst, VR128:$src2)>;
def : InstAlias<"blendvps\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}",
                (BLENDVPSrm0 VR128:$dst, f128mem:$src2)>;
def : InstAlias<"pblendvb\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}",
                (PBLENDVBrr0 VR128:$dst, VR128:$src2)>;
def : InstAlias<"pblendvb\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}",
                (PBLENDVBrm0 VR128:$dst, i128mem:$src2)>;
let Predicates = [UseSSE41] in {
  def : Pat<(v16i8 (vselect (v16i8 XMM0), (v16i8 VR128:$src1),
                            (v16i8 VR128:$src2))),
            (PBLENDVBrr0 VR128:$src2, VR128:$src1)>;
  def : Pat<(v4i32 (vselect (v4i32 XMM0), (v4i32 VR128:$src1),
                            (v4i32 VR128:$src2))),
            (BLENDVPSrr0 VR128:$src2, VR128:$src1)>;
  def : Pat<(v4f32 (vselect (v4i32 XMM0), (v4f32 VR128:$src1),
                            (v4f32 VR128:$src2))),
            (BLENDVPSrr0 VR128:$src2, VR128:$src1)>;
  def : Pat<(v2i64 (vselect (v2i64 XMM0), (v2i64 VR128:$src1),
                            (v2i64 VR128:$src2))),
            (BLENDVPDrr0 VR128:$src2, VR128:$src1)>;
  def : Pat<(v2f64 (vselect (v2i64 XMM0), (v2f64 VR128:$src1),
                            (v2f64 VR128:$src2))),
            (BLENDVPDrr0 VR128:$src2, VR128:$src1)>;

  def : Pat<(v8i16 (X86Blendi (v8i16 VR128:$src1), (v8i16 VR128:$src2),
                              (imm:$mask))),
            (PBLENDWrri VR128:$src1, VR128:$src2, imm:$mask)>;
  def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$src1), (v4f32 VR128:$src2),
                              (imm:$mask))),
            (BLENDPSrri VR128:$src1, VR128:$src2, imm:$mask)>;
  def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$src1), (v2f64 VR128:$src2),
                              (imm:$mask))),
            (BLENDPDrri VR128:$src1, VR128:$src2, imm:$mask)>;
}
let Predicates = [HasAVX] in
def VMOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                        "vmovntdqa\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
                        OpSize, VEX;
let Predicates = [HasAVX2] in
def VMOVNTDQAYrm : SS48I<0x2A, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
                         "vmovntdqa\t{$src, $dst|$dst, $src}",
                         [(set VR256:$dst, (int_x86_avx2_movntdqa addr:$src))]>,
                         OpSize, VEX, VEX_L;
def MOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                       "movntdqa\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
                       OpSize;
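// Editor's note (illustrative, not part of the original source): MOVNTDQA is
// a non-temporal (streaming) 16-byte load, mainly useful on write-combining
// memory. A hedged C sketch using the matching intrinsic, assuming SSE4.1
// and <smmintrin.h>:
//   __m128i stream_load(__m128i *p) {      // p must be 16-byte aligned
//     return _mm_stream_load_si128(p);     // load with a non-temporal hint
//   }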
//===----------------------------------------------------------------------===//
// SSE4.2 - Compare Instructions
//===----------------------------------------------------------------------===//

/// SS42I_binop_rm - Simple SSE 4.2 binary operator
multiclass SS42I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
                          ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
                          X86MemOperand x86memop, bit Is2Addr = 1> {
  def rr : SS428I<opc, MRMSrcReg, (outs RC:$dst),
       (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2)))]>,
       OpSize;
  def rm : SS428I<opc, MRMSrcMem, (outs RC:$dst),
       (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst,
         (OpVT (OpNode RC:$src1, (memop_frag addr:$src2))))]>, OpSize;
}
let Predicates = [HasAVX] in
  defm VPCMPGTQ : SS42I_binop_rm<0x37, "vpcmpgtq", X86pcmpgt, v2i64, VR128,
                                 memopv2i64, i128mem, 0>, VEX_4V;

let Predicates = [HasAVX2] in
  defm VPCMPGTQY : SS42I_binop_rm<0x37, "vpcmpgtq", X86pcmpgt, v4i64, VR256,
                                  memopv4i64, i256mem, 0>, VEX_4V, VEX_L;

let Constraints = "$src1 = $dst" in
  defm PCMPGTQ : SS42I_binop_rm<0x37, "pcmpgtq", X86pcmpgt, v2i64, VR128,
                                memopv2i64, i128mem>;
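// Editor's note (illustrative, not part of the original source): PCMPGTQ is
// the only SSE4.2 vector compare; it fills each 64-bit lane with all-ones
// where the signed comparison holds. A hedged C sketch, assuming SSE4.2 and
// <nmmintrin.h>:
//   __m128i gt64(__m128i a, __m128i b) {
//     return _mm_cmpgt_epi64(a, b);   // per-lane signed i64 a > b
//   }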
//===----------------------------------------------------------------------===//
// SSE4.2 - String/text Processing Instructions
//===----------------------------------------------------------------------===//

// Packed Compare Implicit Length Strings, Return Mask
multiclass pseudo_pcmpistrm<string asm> {
  def REG : PseudoI<(outs VR128:$dst),
                    (ins VR128:$src1, VR128:$src2, i8imm:$src3),
    [(set VR128:$dst, (int_x86_sse42_pcmpistrm128 VR128:$src1, VR128:$src2,
                                                  imm:$src3))]>;
  def MEM : PseudoI<(outs VR128:$dst),
                    (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
    [(set VR128:$dst, (int_x86_sse42_pcmpistrm128 VR128:$src1,
                       (bc_v16i8 (memopv2i64 addr:$src2)), imm:$src3))]>;
}

let Defs = [EFLAGS], usesCustomInserter = 1 in {
  defm VPCMPISTRM128 : pseudo_pcmpistrm<"#VPCMPISTRM128">, Requires<[HasAVX]>;
  defm PCMPISTRM128 : pseudo_pcmpistrm<"#PCMPISTRM128">, Requires<[UseSSE42]>;
}
multiclass pcmpistrm_SS42AI<string asm> {
  def rr : SS42AI<0x62, MRMSrcReg, (outs),
    (ins VR128:$src1, VR128:$src2, i8imm:$src3),
    !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
    []>, OpSize;
  def rm : SS42AI<0x62, MRMSrcMem, (outs),
    (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
    !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
    []>, OpSize;
}

let Defs = [XMM0, EFLAGS], neverHasSideEffects = 1 in {
  let Predicates = [HasAVX] in
  defm VPCMPISTRM128 : pcmpistrm_SS42AI<"vpcmpistrm">, VEX;
  defm PCMPISTRM128 : pcmpistrm_SS42AI<"pcmpistrm">;
}
// Packed Compare Explicit Length Strings, Return Mask
multiclass pseudo_pcmpestrm<string asm> {
  def REG : PseudoI<(outs VR128:$dst),
                    (ins VR128:$src1, VR128:$src3, i8imm:$src5),
    [(set VR128:$dst, (int_x86_sse42_pcmpestrm128
                       VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5))]>;
  def MEM : PseudoI<(outs VR128:$dst),
                    (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
    [(set VR128:$dst, (int_x86_sse42_pcmpestrm128 VR128:$src1, EAX,
                       (bc_v16i8 (memopv2i64 addr:$src3)), EDX, imm:$src5))]>;
}

let Defs = [EFLAGS], Uses = [EAX, EDX], usesCustomInserter = 1 in {
  defm VPCMPESTRM128 : pseudo_pcmpestrm<"#VPCMPESTRM128">, Requires<[HasAVX]>;
  defm PCMPESTRM128 : pseudo_pcmpestrm<"#PCMPESTRM128">, Requires<[UseSSE42]>;
}
multiclass SS42AI_pcmpestrm<string asm> {
  def rr : SS42AI<0x60, MRMSrcReg, (outs),
    (ins VR128:$src1, VR128:$src3, i8imm:$src5),
    !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
    []>, OpSize;
  def rm : SS42AI<0x60, MRMSrcMem, (outs),
    (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
    !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
    []>, OpSize;
}

let Defs = [XMM0, EFLAGS], Uses = [EAX, EDX], neverHasSideEffects = 1 in {
  let Predicates = [HasAVX] in
  defm VPCMPESTRM128 : SS42AI_pcmpestrm<"vpcmpestrm">, VEX;
  defm PCMPESTRM128 : SS42AI_pcmpestrm<"pcmpestrm">;
}
// Packed Compare Implicit Length Strings, Return Index
multiclass pseudo_pcmpistri<string asm> {
  def REG : PseudoI<(outs GR32:$dst),
                    (ins VR128:$src1, VR128:$src2, i8imm:$src3),
    [(set GR32:$dst, EFLAGS,
      (X86pcmpistri VR128:$src1, VR128:$src2, imm:$src3))]>;
  def MEM : PseudoI<(outs GR32:$dst),
                    (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
    [(set GR32:$dst, EFLAGS, (X86pcmpistri VR128:$src1,
      (bc_v16i8 (memopv2i64 addr:$src2)), imm:$src3))]>;
}

let Defs = [EFLAGS], usesCustomInserter = 1 in {
  defm VPCMPISTRI : pseudo_pcmpistri<"#VPCMPISTRI">, Requires<[HasAVX]>;
  defm PCMPISTRI : pseudo_pcmpistri<"#PCMPISTRI">, Requires<[UseSSE42]>;
}
multiclass SS42AI_pcmpistri<string asm> {
  def rr : SS42AI<0x63, MRMSrcReg, (outs),
    (ins VR128:$src1, VR128:$src2, i8imm:$src3),
    !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
    []>, OpSize;
  def rm : SS42AI<0x63, MRMSrcMem, (outs),
    (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
    !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
    []>, OpSize;
}

let Defs = [ECX, EFLAGS], neverHasSideEffects = 1 in {
  let Predicates = [HasAVX] in
  defm VPCMPISTRI : SS42AI_pcmpistri<"vpcmpistri">, VEX;
  defm PCMPISTRI : SS42AI_pcmpistri<"pcmpistri">;
}
// Packed Compare Explicit Length Strings, Return Index
multiclass pseudo_pcmpestri<string asm> {
  def REG : PseudoI<(outs GR32:$dst),
                    (ins VR128:$src1, VR128:$src3, i8imm:$src5),
    [(set GR32:$dst, EFLAGS,
      (X86pcmpestri VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5))]>;
  def MEM : PseudoI<(outs GR32:$dst),
                    (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
    [(set GR32:$dst, EFLAGS,
      (X86pcmpestri VR128:$src1, EAX, (bc_v16i8 (memopv2i64 addr:$src3)), EDX,
       imm:$src5))]>;
}

let Defs = [EFLAGS], Uses = [EAX, EDX], usesCustomInserter = 1 in {
  defm VPCMPESTRI : pseudo_pcmpestri<"#VPCMPESTRI">, Requires<[HasAVX]>;
  defm PCMPESTRI : pseudo_pcmpestri<"#PCMPESTRI">, Requires<[UseSSE42]>;
}
multiclass SS42AI_pcmpestri<string asm> {
  def rr : SS42AI<0x61, MRMSrcReg, (outs),
    (ins VR128:$src1, VR128:$src3, i8imm:$src5),
    !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
    []>, OpSize;
  def rm : SS42AI<0x61, MRMSrcMem, (outs),
    (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
    !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
    []>, OpSize;
}

let Defs = [ECX, EFLAGS], Uses = [EAX, EDX], neverHasSideEffects = 1 in {
  let Predicates = [HasAVX] in
  defm VPCMPESTRI : SS42AI_pcmpestri<"vpcmpestri">, VEX;
  defm PCMPESTRI : SS42AI_pcmpestri<"pcmpestri">;
}
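// Editor's note (illustrative, not part of the original source): the implicit
// length forms scan for a terminating zero element; the explicit forms take
// lengths in EAX/EDX, which is why those registers appear in Uses above. A
// hedged C sketch of PCMPISTRI, assuming SSE4.2 and <nmmintrin.h>:
//   // Index (0..15) of the first byte of `chunk` that matches any byte of
//   // `set`, or 16 if there is no match in these 16 bytes.
//   int first_match(__m128i set, __m128i chunk) {
//     return _mm_cmpistri(set, chunk, _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY);
//   }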
//===----------------------------------------------------------------------===//
// SSE4.2 - CRC Instructions
//===----------------------------------------------------------------------===//

// No CRC instructions have AVX equivalents

// crc intrinsic instruction
// These instructions only have rm forms; the only difference is the size
// of r and m.
let Constraints = "$src1 = $dst" in {
  def CRC32r32m8  : SS42FI<0xF0, MRMSrcMem, (outs GR32:$dst),
                      (ins GR32:$src1, i8mem:$src2),
                      "crc32{b} \t{$src2, $src1|$src1, $src2}",
                       [(set GR32:$dst,
                         (int_x86_sse42_crc32_32_8 GR32:$src1,
                         (load addr:$src2)))]>;
  def CRC32r32r8  : SS42FI<0xF0, MRMSrcReg, (outs GR32:$dst),
                      (ins GR32:$src1, GR8:$src2),
                      "crc32{b} \t{$src2, $src1|$src1, $src2}",
                       [(set GR32:$dst,
                         (int_x86_sse42_crc32_32_8 GR32:$src1, GR8:$src2))]>;
  def CRC32r32m16 : SS42FI<0xF1, MRMSrcMem, (outs GR32:$dst),
                      (ins GR32:$src1, i16mem:$src2),
                      "crc32{w} \t{$src2, $src1|$src1, $src2}",
                       [(set GR32:$dst,
                         (int_x86_sse42_crc32_32_16 GR32:$src1,
                         (load addr:$src2)))]>,
                         OpSize;
  def CRC32r32r16 : SS42FI<0xF1, MRMSrcReg, (outs GR32:$dst),
                      (ins GR32:$src1, GR16:$src2),
                      "crc32{w} \t{$src2, $src1|$src1, $src2}",
                       [(set GR32:$dst,
                         (int_x86_sse42_crc32_32_16 GR32:$src1, GR16:$src2))]>,
                         OpSize;
  def CRC32r32m32 : SS42FI<0xF1, MRMSrcMem, (outs GR32:$dst),
                      (ins GR32:$src1, i32mem:$src2),
                      "crc32{l} \t{$src2, $src1|$src1, $src2}",
                       [(set GR32:$dst,
                         (int_x86_sse42_crc32_32_32 GR32:$src1,
                         (load addr:$src2)))]>;
  def CRC32r32r32 : SS42FI<0xF1, MRMSrcReg, (outs GR32:$dst),
                      (ins GR32:$src1, GR32:$src2),
                      "crc32{l} \t{$src2, $src1|$src1, $src2}",
                       [(set GR32:$dst,
                         (int_x86_sse42_crc32_32_32 GR32:$src1, GR32:$src2))]>;
  def CRC32r64m8  : SS42FI<0xF0, MRMSrcMem, (outs GR64:$dst),
                      (ins GR64:$src1, i8mem:$src2),
                      "crc32{b} \t{$src2, $src1|$src1, $src2}",
                       [(set GR64:$dst,
                         (int_x86_sse42_crc32_64_8 GR64:$src1,
                         (load addr:$src2)))]>,
                         REX_W;
  def CRC32r64r8  : SS42FI<0xF0, MRMSrcReg, (outs GR64:$dst),
                      (ins GR64:$src1, GR8:$src2),
                      "crc32{b} \t{$src2, $src1|$src1, $src2}",
                       [(set GR64:$dst,
                         (int_x86_sse42_crc32_64_8 GR64:$src1, GR8:$src2))]>,
                         REX_W;
  def CRC32r64m64 : SS42FI<0xF1, MRMSrcMem, (outs GR64:$dst),
                      (ins GR64:$src1, i64mem:$src2),
                      "crc32{q} \t{$src2, $src1|$src1, $src2}",
                       [(set GR64:$dst,
                         (int_x86_sse42_crc32_64_64 GR64:$src1,
                         (load addr:$src2)))]>,
                         REX_W;
  def CRC32r64r64 : SS42FI<0xF1, MRMSrcReg, (outs GR64:$dst),
                      (ins GR64:$src1, GR64:$src2),
                      "crc32{q} \t{$src2, $src1|$src1, $src2}",
                       [(set GR64:$dst,
                         (int_x86_sse42_crc32_64_64 GR64:$src1, GR64:$src2))]>,
                         REX_W;
}
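// Editor's note (illustrative, not part of the original source): these
// instructions accumulate a CRC-32C (Castagnoli polynomial) checksum, not the
// zlib CRC-32. A hedged C sketch of the byte-wise form, assuming SSE4.2 and
// <nmmintrin.h>:
//   unsigned crc32c(unsigned crc, const unsigned char *p, unsigned long n) {
//     for (unsigned long i = 0; i < n; ++i)
//       crc = _mm_crc32_u8(crc, p[i]);   // one crc32{b} per input byte
//     return crc;
//   }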
//===----------------------------------------------------------------------===//
// AES-NI Instructions
//===----------------------------------------------------------------------===//

multiclass AESI_binop_rm_int<bits<8> opc, string OpcodeStr,
                             Intrinsic IntId128, bit Is2Addr = 1> {
  def rr : AES8I<opc, MRMSrcReg, (outs VR128:$dst),
             (ins VR128:$src1, VR128:$src2),
             !if(Is2Addr,
                 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
             [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
             OpSize;
  def rm : AES8I<opc, MRMSrcMem, (outs VR128:$dst),
             (ins VR128:$src1, i128mem:$src2),
             !if(Is2Addr,
                 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
             [(set VR128:$dst,
               (IntId128 VR128:$src1, (memopv2i64 addr:$src2)))]>, OpSize;
}
// Perform One Round of an AES Encryption/Decryption Flow
let Predicates = [HasAVX, HasAES] in {
  defm VAESENC     : AESI_binop_rm_int<0xDC, "vaesenc",
                       int_x86_aesni_aesenc, 0>, VEX_4V;
  defm VAESENCLAST : AESI_binop_rm_int<0xDD, "vaesenclast",
                       int_x86_aesni_aesenclast, 0>, VEX_4V;
  defm VAESDEC     : AESI_binop_rm_int<0xDE, "vaesdec",
                       int_x86_aesni_aesdec, 0>, VEX_4V;
  defm VAESDECLAST : AESI_binop_rm_int<0xDF, "vaesdeclast",
                       int_x86_aesni_aesdeclast, 0>, VEX_4V;
}

let Constraints = "$src1 = $dst" in {
  defm AESENC     : AESI_binop_rm_int<0xDC, "aesenc",
                      int_x86_aesni_aesenc>;
  defm AESENCLAST : AESI_binop_rm_int<0xDD, "aesenclast",
                      int_x86_aesni_aesenclast>;
  defm AESDEC     : AESI_binop_rm_int<0xDE, "aesdec",
                      int_x86_aesni_aesdec>;
  defm AESDECLAST : AESI_binop_rm_int<0xDF, "aesdeclast",
                      int_x86_aesni_aesdeclast>;
}
// Perform the AES InvMixColumn Transformation
let Predicates = [HasAVX, HasAES] in {
  def VAESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
      (ins VR128:$src1),
      "vaesimc\t{$src1, $dst|$dst, $src1}",
      [(set VR128:$dst,
        (int_x86_aesni_aesimc VR128:$src1))]>,
      OpSize, VEX;
  def VAESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
      (ins i128mem:$src1),
      "vaesimc\t{$src1, $dst|$dst, $src1}",
      [(set VR128:$dst, (int_x86_aesni_aesimc (memopv2i64 addr:$src1)))]>,
      OpSize, VEX;
}
def AESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
    (ins VR128:$src1),
    "aesimc\t{$src1, $dst|$dst, $src1}",
    [(set VR128:$dst,
      (int_x86_aesni_aesimc VR128:$src1))]>,
    OpSize;
def AESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
    (ins i128mem:$src1),
    "aesimc\t{$src1, $dst|$dst, $src1}",
    [(set VR128:$dst, (int_x86_aesni_aesimc (memopv2i64 addr:$src1)))]>,
    OpSize;
// AES Round Key Generation Assist
let Predicates = [HasAVX, HasAES] in {
  def VAESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
      (ins VR128:$src1, i8imm:$src2),
      "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
      [(set VR128:$dst,
        (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
      OpSize, VEX;
  def VAESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
      (ins i128mem:$src1, i8imm:$src2),
      "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
      [(set VR128:$dst,
        (int_x86_aesni_aeskeygenassist (memopv2i64 addr:$src1), imm:$src2))]>,
      OpSize, VEX;
}
def AESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
    (ins VR128:$src1, i8imm:$src2),
    "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
    [(set VR128:$dst,
      (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
    OpSize;
def AESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
    (ins i128mem:$src1, i8imm:$src2),
    "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
    [(set VR128:$dst,
      (int_x86_aesni_aeskeygenassist (memopv2i64 addr:$src1), imm:$src2))]>,
    OpSize;
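// Editor's note (illustrative, not part of the original source): AESENC runs
// one full AES round, AESENCLAST the final round without MixColumns. A hedged
// C sketch of AES-128 block encryption, assuming AES-NI and <wmmintrin.h>,
// with `rk` a caller-expanded schedule of 11 round keys:
//   __m128i aes128_encrypt_block(__m128i block, const __m128i rk[11]) {
//     block = _mm_xor_si128(block, rk[0]);          // initial whitening
//     for (int i = 1; i < 10; ++i)
//       block = _mm_aesenc_si128(block, rk[i]);     // rounds 1..9
//     return _mm_aesenclast_si128(block, rk[10]);   // final round
//   }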
//===----------------------------------------------------------------------===//
// PCLMUL Instructions
//===----------------------------------------------------------------------===//

// AVX carry-less Multiplication instructions
def VPCLMULQDQrr : AVXPCLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
           (ins VR128:$src1, VR128:$src2, i8imm:$src3),
           "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
           [(set VR128:$dst,
             (int_x86_pclmulqdq VR128:$src1, VR128:$src2, imm:$src3))]>;

def VPCLMULQDQrm : AVXPCLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
           (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
           "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
           [(set VR128:$dst, (int_x86_pclmulqdq VR128:$src1,
                              (memopv2i64 addr:$src2), imm:$src3))]>;

// Carry-less Multiplication instructions
let Constraints = "$src1 = $dst" in {
def PCLMULQDQrr : PCLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
           (ins VR128:$src1, VR128:$src2, i8imm:$src3),
           "pclmulqdq\t{$src3, $src2, $dst|$dst, $src2, $src3}",
           [(set VR128:$dst,
             (int_x86_pclmulqdq VR128:$src1, VR128:$src2, imm:$src3))]>;

def PCLMULQDQrm : PCLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
           (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
           "pclmulqdq\t{$src3, $src2, $dst|$dst, $src2, $src3}",
           [(set VR128:$dst, (int_x86_pclmulqdq VR128:$src1,
                              (memopv2i64 addr:$src2), imm:$src3))]>;
} // Constraints = "$src1 = $dst"
multiclass pclmul_alias<string asm, int immop> {
  def : InstAlias<!strconcat("pclmul", asm, "dq {$src, $dst|$dst, $src}"),
                  (PCLMULQDQrr VR128:$dst, VR128:$src, immop)>;

  def : InstAlias<!strconcat("pclmul", asm, "dq {$src, $dst|$dst, $src}"),
                  (PCLMULQDQrm VR128:$dst, i128mem:$src, immop)>;

  def : InstAlias<!strconcat("vpclmul", asm,
                             "dq {$src2, $src1, $dst|$dst, $src1, $src2}"),
                  (VPCLMULQDQrr VR128:$dst, VR128:$src1, VR128:$src2, immop)>;

  def : InstAlias<!strconcat("vpclmul", asm,
                             "dq {$src2, $src1, $dst|$dst, $src1, $src2}"),
                  (VPCLMULQDQrm VR128:$dst, VR128:$src1, i128mem:$src2, immop)>;
}
defm : pclmul_alias<"hqhq", 0x11>;
defm : pclmul_alias<"hqlq", 0x01>;
defm : pclmul_alias<"lqhq", 0x10>;
defm : pclmul_alias<"lqlq", 0x00>;
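// Editor's note (illustrative, not part of the original source): the
// immediate's bit 0 selects which qword of the first source is multiplied and
// bit 4 which qword of the second, matching the lqlq/hqlq/lqhq/hqhq alias
// suffixes above. A hedged C sketch, assuming PCLMUL and <wmmintrin.h>:
//   // 128-bit carry-less (GF(2)) product of the two low 64-bit halves.
//   __m128i clmul_lo(__m128i a, __m128i b) {
//     return _mm_clmulepi64_si128(a, b, 0x00);   // pclmullqlqdq
//   }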
//===----------------------------------------------------------------------===//
// SSE4A Instructions
//===----------------------------------------------------------------------===//

let Predicates = [HasSSE4A] in {

let Constraints = "$src = $dst" in {
def EXTRQI : Ii8<0x78, MRM0r, (outs VR128:$dst),
                 (ins VR128:$src, i8imm:$len, i8imm:$idx),
                 "extrq\t{$idx, $len, $src|$src, $len, $idx}",
                 [(set VR128:$dst, (int_x86_sse4a_extrqi VR128:$src, imm:$len,
                                    imm:$idx))]>, TB, OpSize;
def EXTRQ  : I<0x79, MRMSrcReg, (outs VR128:$dst),
               (ins VR128:$src, VR128:$mask),
               "extrq\t{$mask, $src|$src, $mask}",
               [(set VR128:$dst, (int_x86_sse4a_extrq VR128:$src,
                                  VR128:$mask))]>, TB, OpSize;

def INSERTQI : Ii8<0x78, MRMSrcReg, (outs VR128:$dst),
                   (ins VR128:$src, VR128:$src2, i8imm:$len, i8imm:$idx),
                   "insertq\t{$idx, $len, $src2, $src|$src, $src2, $len, $idx}",
                   [(set VR128:$dst, (int_x86_sse4a_insertqi VR128:$src,
                                      VR128:$src2, imm:$len, imm:$idx))]>, XD;
def INSERTQ  : I<0x79, MRMSrcReg, (outs VR128:$dst),
                 (ins VR128:$src, VR128:$mask),
                 "insertq\t{$mask, $src|$src, $mask}",
                 [(set VR128:$dst, (int_x86_sse4a_insertq VR128:$src,
                                    VR128:$mask))]>, XD;
}

def MOVNTSS : I<0x2B, MRMDestMem, (outs), (ins f32mem:$dst, VR128:$src),
                "movntss\t{$src, $dst|$dst, $src}",
                [(int_x86_sse4a_movnt_ss addr:$dst, VR128:$src)]>, XS;

def MOVNTSD : I<0x2B, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                "movntsd\t{$src, $dst|$dst, $src}",
                [(int_x86_sse4a_movnt_sd addr:$dst, VR128:$src)]>, XD;
}
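// Editor's note (illustrative, not part of the original source): MOVNTSS and
// MOVNTSD are AMD-only scalar non-temporal stores. A hedged C sketch,
// assuming SSE4A support and the <ammintrin.h> header provided with -msse4a:
//   _mm_stream_ss(&dst[i], v);   // store low f32 of v, bypassing the cache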
//===----------------------------------------------------------------------===//
// AVX Instructions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// VBROADCAST - Load from memory and broadcast to all elements of the
//              destination operand
//
class avx_broadcast<bits<8> opc, string OpcodeStr, RegisterClass RC,
                    X86MemOperand x86memop, Intrinsic Int> :
  AVX8I<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
        !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
        [(set RC:$dst, (Int addr:$src))]>, VEX;
// AVX2 adds register forms
class avx2_broadcast_reg<bits<8> opc, string OpcodeStr, RegisterClass RC,
                         Intrinsic Int> :
  AVX28I<opc, MRMSrcReg, (outs RC:$dst), (ins VR128:$src),
         !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
         [(set RC:$dst, (Int VR128:$src))]>, VEX;

let ExeDomain = SSEPackedSingle in {
  def VBROADCASTSSrm  : avx_broadcast<0x18, "vbroadcastss", VR128, f32mem,
                                      int_x86_avx_vbroadcast_ss>;
  def VBROADCASTSSYrm : avx_broadcast<0x18, "vbroadcastss", VR256, f32mem,
                                      int_x86_avx_vbroadcast_ss_256>, VEX_L;
}
let ExeDomain = SSEPackedDouble in
def VBROADCASTSDYrm : avx_broadcast<0x19, "vbroadcastsd", VR256, f64mem,
                                    int_x86_avx_vbroadcast_sd_256>, VEX_L;
def VBROADCASTF128 : avx_broadcast<0x1A, "vbroadcastf128", VR256, f128mem,
                                   int_x86_avx_vbroadcastf128_pd_256>, VEX_L;

let ExeDomain = SSEPackedSingle in {
  def VBROADCASTSSrr  : avx2_broadcast_reg<0x18, "vbroadcastss", VR128,
                                           int_x86_avx2_vbroadcast_ss_ps>;
  def VBROADCASTSSYrr : avx2_broadcast_reg<0x18, "vbroadcastss", VR256,
                                           int_x86_avx2_vbroadcast_ss_ps_256>,
                                           VEX_L;
}
let ExeDomain = SSEPackedDouble in
def VBROADCASTSDYrr : avx2_broadcast_reg<0x19, "vbroadcastsd", VR256,
                                         int_x86_avx2_vbroadcast_sd_pd_256>,
                                         VEX_L;

let Predicates = [HasAVX2] in
def VBROADCASTI128 : avx_broadcast<0x5A, "vbroadcasti128", VR256, i128mem,
                                   int_x86_avx2_vbroadcasti128>, VEX_L;

let Predicates = [HasAVX] in
def : Pat<(int_x86_avx_vbroadcastf128_ps_256 addr:$src),
          (VBROADCASTF128 addr:$src)>;
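// Editor's note (illustrative, not part of the original source): VBROADCASTSS
// replicates one scalar load into every element of the destination. A hedged
// C sketch, assuming AVX and <immintrin.h>:
//   __m256 splat8(const float *p) {
//     return _mm256_broadcast_ss(p);   // *p copied to all eight f32 lanes
//   }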
//===----------------------------------------------------------------------===//
// VINSERTF128 - Insert packed floating-point values
//
let neverHasSideEffects = 1, ExeDomain = SSEPackedSingle in {
def VINSERTF128rr : AVXAIi8<0x18, MRMSrcReg, (outs VR256:$dst),
          (ins VR256:$src1, VR128:$src2, i8imm:$src3),
          "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, VEX_4V, VEX_L;
let mayLoad = 1 in
def VINSERTF128rm : AVXAIi8<0x18, MRMSrcMem, (outs VR256:$dst),
          (ins VR256:$src1, f128mem:$src2, i8imm:$src3),
          "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, VEX_4V, VEX_L;
}

let Predicates = [HasAVX] in {
def : Pat<(vinsertf128_insert:$ins (v8f32 VR256:$src1), (v4f32 VR128:$src2),
                                   (iPTR imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v4f64 VR256:$src1), (v2f64 VR128:$src2),
                                   (iPTR imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;

def : Pat<(vinsertf128_insert:$ins (v8f32 VR256:$src1), (memopv4f32 addr:$src2),
                                   (iPTR imm)),
          (VINSERTF128rm VR256:$src1, addr:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v4f64 VR256:$src1), (memopv2f64 addr:$src2),
                                   (iPTR imm)),
          (VINSERTF128rm VR256:$src1, addr:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
}
let Predicates = [HasAVX1Only] in {
def : Pat<(vinsertf128_insert:$ins (v4i64 VR256:$src1), (v2i64 VR128:$src2),
                                   (iPTR imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v8i32 VR256:$src1), (v4i32 VR128:$src2),
                                   (iPTR imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v32i8 VR256:$src1), (v16i8 VR128:$src2),
                                   (iPTR imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v16i16 VR256:$src1), (v8i16 VR128:$src2),
                                   (iPTR imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;

def : Pat<(vinsertf128_insert:$ins (v4i64 VR256:$src1), (memopv2i64 addr:$src2),
                                   (iPTR imm)),
          (VINSERTF128rm VR256:$src1, addr:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v8i32 VR256:$src1),
                                   (bc_v4i32 (memopv2i64 addr:$src2)),
                                   (iPTR imm)),
          (VINSERTF128rm VR256:$src1, addr:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v32i8 VR256:$src1),
                                   (bc_v16i8 (memopv2i64 addr:$src2)),
                                   (iPTR imm)),
          (VINSERTF128rm VR256:$src1, addr:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v16i16 VR256:$src1),
                                   (bc_v8i16 (memopv2i64 addr:$src2)),
                                   (iPTR imm)),
          (VINSERTF128rm VR256:$src1, addr:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
}
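// Editor's note (illustrative, not part of the original source): the
// immediate selects the destination half, so a 256-bit value can be built
// from two 128-bit pieces. A hedged C sketch, assuming AVX and <immintrin.h>:
//   __m256 join(__m128 lo, __m128 hi) {
//     __m256 r = _mm256_castps128_ps256(lo);   // lo into the lower lane
//     return _mm256_insertf128_ps(r, hi, 1);   // hi into the upper lane
//   }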
//===----------------------------------------------------------------------===//
// VEXTRACTF128 - Extract packed floating-point values
//
let neverHasSideEffects = 1, ExeDomain = SSEPackedSingle in {
def VEXTRACTF128rr : AVXAIi8<0x19, MRMDestReg, (outs VR128:$dst),
          (ins VR256:$src1, i8imm:$src2),
          "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
          []>, VEX, VEX_L;
let mayStore = 1 in
def VEXTRACTF128mr : AVXAIi8<0x19, MRMDestMem, (outs),
          (ins f128mem:$dst, VR256:$src1, i8imm:$src2),
          "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
          []>, VEX, VEX_L;
}

// AVX1 patterns
let Predicates = [HasAVX] in {
def : Pat<(vextractf128_extract:$ext VR256:$src1, (iPTR imm)),
          (v4f32 (VEXTRACTF128rr
                    (v8f32 VR256:$src1),
                    (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
def : Pat<(vextractf128_extract:$ext VR256:$src1, (iPTR imm)),
          (v2f64 (VEXTRACTF128rr
                    (v4f64 VR256:$src1),
                    (EXTRACT_get_vextractf128_imm VR128:$ext)))>;

def : Pat<(alignedstore (v4f32 (vextractf128_extract:$ext (v8f32 VR256:$src1),
                                (iPTR imm))), addr:$dst),
          (VEXTRACTF128mr addr:$dst, VR256:$src1,
                          (EXTRACT_get_vextractf128_imm VR128:$ext))>;
def : Pat<(alignedstore (v2f64 (vextractf128_extract:$ext (v4f64 VR256:$src1),
                                (iPTR imm))), addr:$dst),
          (VEXTRACTF128mr addr:$dst, VR256:$src1,
                          (EXTRACT_get_vextractf128_imm VR128:$ext))>;
}
let Predicates = [HasAVX1Only] in {
def : Pat<(vextractf128_extract:$ext VR256:$src1, (iPTR imm)),
          (v2i64 (VEXTRACTF128rr
                    (v4i64 VR256:$src1),
                    (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
def : Pat<(vextractf128_extract:$ext VR256:$src1, (iPTR imm)),
          (v4i32 (VEXTRACTF128rr
                    (v8i32 VR256:$src1),
                    (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
def : Pat<(vextractf128_extract:$ext VR256:$src1, (iPTR imm)),
          (v8i16 (VEXTRACTF128rr
                    (v16i16 VR256:$src1),
                    (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
def : Pat<(vextractf128_extract:$ext VR256:$src1, (iPTR imm)),
          (v16i8 (VEXTRACTF128rr
                    (v32i8 VR256:$src1),
                    (EXTRACT_get_vextractf128_imm VR128:$ext)))>;

def : Pat<(alignedstore (v2i64 (vextractf128_extract:$ext (v4i64 VR256:$src1),
                                (iPTR imm))), addr:$dst),
          (VEXTRACTF128mr addr:$dst, VR256:$src1,
                          (EXTRACT_get_vextractf128_imm VR128:$ext))>;
def : Pat<(alignedstore (v4i32 (vextractf128_extract:$ext (v8i32 VR256:$src1),
                                (iPTR imm))), addr:$dst),
          (VEXTRACTF128mr addr:$dst, VR256:$src1,
                          (EXTRACT_get_vextractf128_imm VR128:$ext))>;
def : Pat<(alignedstore (v8i16 (vextractf128_extract:$ext (v16i16 VR256:$src1),
                                (iPTR imm))), addr:$dst),
          (VEXTRACTF128mr addr:$dst, VR256:$src1,
                          (EXTRACT_get_vextractf128_imm VR128:$ext))>;
def : Pat<(alignedstore (v16i8 (vextractf128_extract:$ext (v32i8 VR256:$src1),
                                (iPTR imm))), addr:$dst),
          (VEXTRACTF128mr addr:$dst, VR256:$src1,
                          (EXTRACT_get_vextractf128_imm VR128:$ext))>;
}
//===----------------------------------------------------------------------===//
// VMASKMOV - Conditional SIMD Packed Loads and Stores
//
multiclass avx_movmask_rm<bits<8> opc_rm, bits<8> opc_mr, string OpcodeStr,
                          Intrinsic IntLd, Intrinsic IntLd256,
                          Intrinsic IntSt, Intrinsic IntSt256> {
  def rm  : AVX8I<opc_rm, MRMSrcMem, (outs VR128:$dst),
             (ins VR128:$src1, f128mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR128:$dst, (IntLd addr:$src2, VR128:$src1))]>,
             VEX_4V;
  def Yrm : AVX8I<opc_rm, MRMSrcMem, (outs VR256:$dst),
             (ins VR256:$src1, f256mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR256:$dst, (IntLd256 addr:$src2, VR256:$src1))]>,
             VEX_4V, VEX_L;
  def mr  : AVX8I<opc_mr, MRMDestMem, (outs),
             (ins f128mem:$dst, VR128:$src1, VR128:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(IntSt addr:$dst, VR128:$src1, VR128:$src2)]>, VEX_4V;
  def Ymr : AVX8I<opc_mr, MRMDestMem, (outs),
             (ins f256mem:$dst, VR256:$src1, VR256:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(IntSt256 addr:$dst, VR256:$src1, VR256:$src2)]>, VEX_4V, VEX_L;
}

let ExeDomain = SSEPackedSingle in
defm VMASKMOVPS : avx_movmask_rm<0x2C, 0x2E, "vmaskmovps",
                                 int_x86_avx_maskload_ps,
                                 int_x86_avx_maskload_ps_256,
                                 int_x86_avx_maskstore_ps,
                                 int_x86_avx_maskstore_ps_256>;
let ExeDomain = SSEPackedDouble in
defm VMASKMOVPD : avx_movmask_rm<0x2D, 0x2F, "vmaskmovpd",
                                 int_x86_avx_maskload_pd,
                                 int_x86_avx_maskload_pd_256,
                                 int_x86_avx_maskstore_pd,
                                 int_x86_avx_maskstore_pd_256>;
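// Editor's note (illustrative, not part of the original source): masked moves
// touch only the lanes whose mask element has its sign bit set; masked-off
// lanes of a load read as zero and masked-off stores fault-suppress. A hedged
// C sketch, assuming AVX and <immintrin.h>:
//   __m256i m = _mm256_set_epi32(0, 0, 0, 0, -1, -1, -1, -1); // low 4 active
//   __m256  v = _mm256_maskload_ps(src, m);   // loads 4 lanes, zeros the rest
//   _mm256_maskstore_ps(dst, m, v);           // writes only the 4 active lanes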
//===----------------------------------------------------------------------===//
// VPERMIL - Permute Single and Double Floating-Point Values
//
multiclass avx_permil<bits<8> opc_rm, bits<8> opc_rmi, string OpcodeStr,
                      RegisterClass RC, X86MemOperand x86memop_f,
                      X86MemOperand x86memop_i, PatFrag i_frag,
                      Intrinsic IntVar, ValueType vt> {
  def rr : AVX8I<opc_rm, MRMSrcReg, (outs RC:$dst),
             (ins RC:$src1, RC:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst, (IntVar RC:$src1, RC:$src2))]>, VEX_4V;
  def rm : AVX8I<opc_rm, MRMSrcMem, (outs RC:$dst),
             (ins RC:$src1, x86memop_i:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst, (IntVar RC:$src1,
                             (bitconvert (i_frag addr:$src2))))]>, VEX_4V;

  def ri : AVXAIi8<opc_rmi, MRMSrcReg, (outs RC:$dst),
             (ins RC:$src1, i8imm:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst, (vt (X86VPermilp RC:$src1, (i8 imm:$src2))))]>, VEX;
  def mi : AVXAIi8<opc_rmi, MRMSrcMem, (outs RC:$dst),
             (ins x86memop_f:$src1, i8imm:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst,
               (vt (X86VPermilp (memop addr:$src1), (i8 imm:$src2))))]>, VEX;
}
let ExeDomain = SSEPackedSingle in {
  defm VPERMILPS  : avx_permil<0x0C, 0x04, "vpermilps", VR128, f128mem, i128mem,
                               memopv2i64, int_x86_avx_vpermilvar_ps, v4f32>;
  defm VPERMILPSY : avx_permil<0x0C, 0x04, "vpermilps", VR256, f256mem, i256mem,
                       memopv4i64, int_x86_avx_vpermilvar_ps_256, v8f32>, VEX_L;
}
let ExeDomain = SSEPackedDouble in {
  defm VPERMILPD  : avx_permil<0x0D, 0x05, "vpermilpd", VR128, f128mem, i128mem,
                               memopv2i64, int_x86_avx_vpermilvar_pd, v2f64>;
  defm VPERMILPDY : avx_permil<0x0D, 0x05, "vpermilpd", VR256, f256mem, i256mem,
                       memopv4i64, int_x86_avx_vpermilvar_pd_256, v4f64>, VEX_L;
}

let Predicates = [HasAVX] in {
  def : Pat<(v8i32 (X86VPermilp VR256:$src1, (i8 imm:$imm))),
            (VPERMILPSYri VR256:$src1, imm:$imm)>;
  def : Pat<(v4i64 (X86VPermilp VR256:$src1, (i8 imm:$imm))),
            (VPERMILPDYri VR256:$src1, imm:$imm)>;
  def : Pat<(v8i32 (X86VPermilp (bc_v8i32 (memopv4i64 addr:$src1)),
                                (i8 imm:$imm))),
            (VPERMILPSYmi addr:$src1, imm:$imm)>;
  def : Pat<(v4i64 (X86VPermilp (memopv4i64 addr:$src1), (i8 imm:$imm))),
            (VPERMILPDYmi addr:$src1, imm:$imm)>;

  def : Pat<(v2i64 (X86VPermilp VR128:$src1, (i8 imm:$imm))),
            (VPERMILPDri VR128:$src1, imm:$imm)>;
  def : Pat<(v2i64 (X86VPermilp (memopv2i64 addr:$src1), (i8 imm:$imm))),
            (VPERMILPDmi addr:$src1, imm:$imm)>;
}
//===----------------------------------------------------------------------===//
// VPERM2F128 - Permute Floating-Point Values in 128-bit chunks
//
let ExeDomain = SSEPackedSingle in {
def VPERM2F128rr : AVXAIi8<0x06, MRMSrcReg, (outs VR256:$dst),
          (ins VR256:$src1, VR256:$src2, i8imm:$src3),
          "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          [(set VR256:$dst, (v8f32 (X86VPerm2x128 VR256:$src1, VR256:$src2,
                                    (i8 imm:$src3))))]>, VEX_4V, VEX_L;
def VPERM2F128rm : AVXAIi8<0x06, MRMSrcMem, (outs VR256:$dst),
          (ins VR256:$src1, f256mem:$src2, i8imm:$src3),
          "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          [(set VR256:$dst, (X86VPerm2x128 VR256:$src1, (memopv8f32 addr:$src2),
                             (i8 imm:$src3)))]>, VEX_4V, VEX_L;
}

let Predicates = [HasAVX] in {
def : Pat<(v4f64 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v4f64 (X86VPerm2x128 VR256:$src1,
                  (memopv4f64 addr:$src2), (i8 imm:$imm))),
          (VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>;
}
let Predicates = [HasAVX1Only] in {
def : Pat<(v8i32 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v4i64 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v32i8 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v16i16 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;

def : Pat<(v8i32 (X86VPerm2x128 VR256:$src1,
                  (bc_v8i32 (memopv4i64 addr:$src2)), (i8 imm:$imm))),
          (VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>;
def : Pat<(v4i64 (X86VPerm2x128 VR256:$src1,
                  (memopv4i64 addr:$src2), (i8 imm:$imm))),
          (VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>;
def : Pat<(v32i8 (X86VPerm2x128 VR256:$src1,
                  (bc_v32i8 (memopv4i64 addr:$src2)), (i8 imm:$imm))),
          (VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>;
def : Pat<(v16i16 (X86VPerm2x128 VR256:$src1,
                   (bc_v16i16 (memopv4i64 addr:$src2)), (i8 imm:$imm))),
          (VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>;
}
//===----------------------------------------------------------------------===//
// VZERO - Zero YMM registers
//
let Defs = [YMM0, YMM1, YMM2, YMM3, YMM4, YMM5, YMM6, YMM7,
            YMM8, YMM9, YMM10, YMM11, YMM12, YMM13, YMM14, YMM15] in {
  // Zero All YMM registers
  def VZEROALL : I<0x77, RawFrm, (outs), (ins), "vzeroall",
                   [(int_x86_avx_vzeroall)]>, TB, VEX, VEX_L, Requires<[HasAVX]>;

  // Zero Upper bits of YMM registers
  def VZEROUPPER : I<0x77, RawFrm, (outs), (ins), "vzeroupper",
                     [(int_x86_avx_vzeroupper)]>, TB, VEX, Requires<[HasAVX]>;
}
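// Editor's note (illustrative, not part of the original source): VZEROUPPER
// clears the upper 128 bits of every YMM register and avoids the transition
// penalty when mixing VEX and legacy-SSE code. A hedged C sketch, assuming
// AVX and <immintrin.h>:
//   _mm256_zeroupper();   // call before entering legacy-SSE code paths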
//===----------------------------------------------------------------------===//
// Half precision conversion instructions
//===----------------------------------------------------------------------===//
multiclass f16c_ph2ps<RegisterClass RC, X86MemOperand x86memop, Intrinsic Int> {
  def rr : I<0x13, MRMSrcReg, (outs RC:$dst), (ins VR128:$src),
             "vcvtph2ps\t{$src, $dst|$dst, $src}",
             [(set RC:$dst, (Int VR128:$src))]>,
             T8, OpSize, VEX;
  let neverHasSideEffects = 1, mayLoad = 1 in
  def rm : I<0x13, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
             "vcvtph2ps\t{$src, $dst|$dst, $src}", []>, T8, OpSize, VEX;
}

multiclass f16c_ps2ph<RegisterClass RC, X86MemOperand x86memop, Intrinsic Int> {
  def rr : Ii8<0x1D, MRMDestReg, (outs VR128:$dst),
               (ins RC:$src1, i32i8imm:$src2),
               "vcvtps2ph\t{$src2, $src1, $dst|$dst, $src1, $src2}",
               [(set VR128:$dst, (Int RC:$src1, imm:$src2))]>,
               TA, OpSize, VEX;
  let neverHasSideEffects = 1, mayStore = 1 in
  def mr : Ii8<0x1D, MRMDestMem, (outs),
               (ins x86memop:$dst, RC:$src1, i32i8imm:$src2),
               "vcvtps2ph\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
               TA, OpSize, VEX;
}

let Predicates = [HasAVX, HasF16C] in {
  defm VCVTPH2PS  : f16c_ph2ps<VR128, f64mem, int_x86_vcvtph2ps_128>;
  defm VCVTPH2PSY : f16c_ph2ps<VR256, f128mem, int_x86_vcvtph2ps_256>, VEX_L;
  defm VCVTPS2PH  : f16c_ps2ph<VR128, f64mem, int_x86_vcvtps2ph_128>;
  defm VCVTPS2PHY : f16c_ps2ph<VR256, f128mem, int_x86_vcvtps2ph_256>, VEX_L;
}
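// Editor's note (illustrative, not part of the original source): the ps2ph
// immediate is a rounding-control field. A hedged C sketch of a round trip,
// assuming F16C and <immintrin.h>:
//   __m128i h = _mm_cvtps_ph(f, _MM_FROUND_TO_NEAREST_INT); // 4 x f32 -> f16
//   __m128  g = _mm_cvtph_ps(h);                            // 4 x f16 -> f32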
//===----------------------------------------------------------------------===//
// AVX2 Instructions
//===----------------------------------------------------------------------===//

/// AVX2_binop_rmi_int - AVX2 binary operator with 8-bit immediate
multiclass AVX2_binop_rmi_int<bits<8> opc, string OpcodeStr,
                              Intrinsic IntId, RegisterClass RC,
                              PatFrag memop_frag, X86MemOperand x86memop> {
  let isCommutable = 1 in
  def rri : AVX2AIi8<opc, MRMSrcReg, (outs RC:$dst),
        (ins RC:$src1, RC:$src2, u32u8imm:$src3),
        !strconcat(OpcodeStr,
            "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
        [(set RC:$dst, (IntId RC:$src1, RC:$src2, imm:$src3))]>,
        VEX_4V;
  def rmi : AVX2AIi8<opc, MRMSrcMem, (outs RC:$dst),
        (ins RC:$src1, x86memop:$src2, u32u8imm:$src3),
        !strconcat(OpcodeStr,
            "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
        [(set RC:$dst,
          (IntId RC:$src1,
           (bitconvert (memop_frag addr:$src2)), imm:$src3))]>,
        VEX_4V;
}

let isCommutable = 0 in {
defm VPBLENDD  : AVX2_binop_rmi_int<0x02, "vpblendd", int_x86_avx2_pblendd_128,
                                    VR128, memopv2i64, i128mem>;
defm VPBLENDDY : AVX2_binop_rmi_int<0x02, "vpblendd", int_x86_avx2_pblendd_256,
                                    VR256, memopv4i64, i256mem>, VEX_L;
}

def : Pat<(v4i32 (X86Blendi (v4i32 VR128:$src1), (v4i32 VR128:$src2),
                            (imm:$mask))),
          (VPBLENDDrri VR128:$src1, VR128:$src2, imm:$mask)>;
def : Pat<(v8i32 (X86Blendi (v8i32 VR256:$src1), (v8i32 VR256:$src2),
                            (imm:$mask))),
          (VPBLENDDYrri VR256:$src1, VR256:$src2, imm:$mask)>;
//===----------------------------------------------------------------------===//
// VPBROADCAST - Load from memory and broadcast to all elements of the
//               destination operand
//
multiclass avx2_broadcast<bits<8> opc, string OpcodeStr,
                          X86MemOperand x86memop, PatFrag ld_frag,
                          Intrinsic Int128, Intrinsic Int256> {
  def rr : AVX28I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                  !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                  [(set VR128:$dst, (Int128 VR128:$src))]>, VEX;
  def rm : AVX28I<opc, MRMSrcMem, (outs VR128:$dst), (ins x86memop:$src),
                  !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                  [(set VR128:$dst,
                    (Int128 (scalar_to_vector (ld_frag addr:$src))))]>, VEX;
  def Yrr : AVX28I<opc, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
                   !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                   [(set VR256:$dst, (Int256 VR128:$src))]>, VEX, VEX_L;
  def Yrm : AVX28I<opc, MRMSrcMem, (outs VR256:$dst), (ins x86memop:$src),
                   !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                   [(set VR256:$dst,
                     (Int256 (scalar_to_vector (ld_frag addr:$src))))]>,
                   VEX, VEX_L;
}

defm VPBROADCASTB : avx2_broadcast<0x78, "vpbroadcastb", i8mem, loadi8,
                                   int_x86_avx2_pbroadcastb_128,
                                   int_x86_avx2_pbroadcastb_256>;
defm VPBROADCASTW : avx2_broadcast<0x79, "vpbroadcastw", i16mem, loadi16,
                                   int_x86_avx2_pbroadcastw_128,
                                   int_x86_avx2_pbroadcastw_256>;
defm VPBROADCASTD : avx2_broadcast<0x58, "vpbroadcastd", i32mem, loadi32,
                                   int_x86_avx2_pbroadcastd_128,
                                   int_x86_avx2_pbroadcastd_256>;
defm VPBROADCASTQ : avx2_broadcast<0x59, "vpbroadcastq", i64mem, loadi64,
                                   int_x86_avx2_pbroadcastq_128,
                                   int_x86_avx2_pbroadcastq_256>;
let Predicates = [HasAVX2] in {
  def : Pat<(v16i8 (X86VBroadcast (loadi8 addr:$src))),
            (VPBROADCASTBrm addr:$src)>;
  def : Pat<(v32i8 (X86VBroadcast (loadi8 addr:$src))),
            (VPBROADCASTBYrm addr:$src)>;
  def : Pat<(v8i16 (X86VBroadcast (loadi16 addr:$src))),
            (VPBROADCASTWrm addr:$src)>;
  def : Pat<(v16i16 (X86VBroadcast (loadi16 addr:$src))),
            (VPBROADCASTWYrm addr:$src)>;
  def : Pat<(v4i32 (X86VBroadcast (loadi32 addr:$src))),
            (VPBROADCASTDrm addr:$src)>;
  def : Pat<(v8i32 (X86VBroadcast (loadi32 addr:$src))),
            (VPBROADCASTDYrm addr:$src)>;
  def : Pat<(v2i64 (X86VBroadcast (loadi64 addr:$src))),
            (VPBROADCASTQrm addr:$src)>;
  def : Pat<(v4i64 (X86VBroadcast (loadi64 addr:$src))),
            (VPBROADCASTQYrm addr:$src)>;
  def : Pat<(v16i8 (X86VBroadcast (v16i8 VR128:$src))),
            (VPBROADCASTBrr VR128:$src)>;
  def : Pat<(v32i8 (X86VBroadcast (v16i8 VR128:$src))),
            (VPBROADCASTBYrr VR128:$src)>;
  def : Pat<(v8i16 (X86VBroadcast (v8i16 VR128:$src))),
            (VPBROADCASTWrr VR128:$src)>;
  def : Pat<(v16i16 (X86VBroadcast (v8i16 VR128:$src))),
            (VPBROADCASTWYrr VR128:$src)>;
  def : Pat<(v4i32 (X86VBroadcast (v4i32 VR128:$src))),
            (VPBROADCASTDrr VR128:$src)>;
  def : Pat<(v8i32 (X86VBroadcast (v4i32 VR128:$src))),
            (VPBROADCASTDYrr VR128:$src)>;
  def : Pat<(v2i64 (X86VBroadcast (v2i64 VR128:$src))),
            (VPBROADCASTQrr VR128:$src)>;
  def : Pat<(v4i64 (X86VBroadcast (v2i64 VR128:$src))),
            (VPBROADCASTQYrr VR128:$src)>;
  def : Pat<(v4f32 (X86VBroadcast (v4f32 VR128:$src))),
            (VBROADCASTSSrr VR128:$src)>;
  def : Pat<(v8f32 (X86VBroadcast (v4f32 VR128:$src))),
            (VBROADCASTSSYrr VR128:$src)>;
  def : Pat<(v2f64 (X86VBroadcast (v2f64 VR128:$src))),
            (VPBROADCASTQrr VR128:$src)>;
  def : Pat<(v4f64 (X86VBroadcast (v2f64 VR128:$src))),
            (VBROADCASTSDYrr VR128:$src)>;
  // Provide fallback in case the load node that is used in the patterns above
  // is used by additional users, which prevents the pattern selection.
  let AddedComplexity = 20 in {
    def : Pat<(v4f32 (X86VBroadcast FR32:$src)),
              (VBROADCASTSSrr (COPY_TO_REGCLASS FR32:$src, VR128))>;
    def : Pat<(v8f32 (X86VBroadcast FR32:$src)),
              (VBROADCASTSSYrr (COPY_TO_REGCLASS FR32:$src, VR128))>;
    def : Pat<(v4f64 (X86VBroadcast FR64:$src)),
              (VBROADCASTSDYrr (COPY_TO_REGCLASS FR64:$src, VR128))>;

    def : Pat<(v4i32 (X86VBroadcast GR32:$src)),
              (VBROADCASTSSrr (COPY_TO_REGCLASS GR32:$src, VR128))>;
    def : Pat<(v8i32 (X86VBroadcast GR32:$src)),
              (VBROADCASTSSYrr (COPY_TO_REGCLASS GR32:$src, VR128))>;
    def : Pat<(v4i64 (X86VBroadcast GR64:$src)),
              (VBROADCASTSDYrr (COPY_TO_REGCLASS GR64:$src, VR128))>;
  }
}
// AVX1 broadcast patterns
let Predicates = [HasAVX1Only] in {
  def : Pat<(v8i32 (X86VBroadcast (loadi32 addr:$src))),
            (VBROADCASTSSYrm addr:$src)>;
  def : Pat<(v4i64 (X86VBroadcast (loadi64 addr:$src))),
            (VBROADCASTSDYrm addr:$src)>;
  def : Pat<(v4i32 (X86VBroadcast (loadi32 addr:$src))),
            (VBROADCASTSSrm addr:$src)>;
}
let Predicates = [HasAVX] in {
  def : Pat<(v8f32 (X86VBroadcast (loadf32 addr:$src))),
            (VBROADCASTSSYrm addr:$src)>;
  def : Pat<(v4f64 (X86VBroadcast (loadf64 addr:$src))),
            (VBROADCASTSDYrm addr:$src)>;
  def : Pat<(v4f32 (X86VBroadcast (loadf32 addr:$src))),
            (VBROADCASTSSrm addr:$src)>;
  // Provide fallback in case the load node that is used in the patterns above
  // is used by additional users, which prevents the pattern selection.
  let AddedComplexity = 20 in {
    // 128bit broadcasts:
    def : Pat<(v4f32 (X86VBroadcast FR32:$src)),
              (VPSHUFDri (COPY_TO_REGCLASS FR32:$src, VR128), 0)>;
    def : Pat<(v8f32 (X86VBroadcast FR32:$src)),
              (VINSERTF128rr (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)),
                (VPSHUFDri (COPY_TO_REGCLASS FR32:$src, VR128), 0), sub_xmm),
                (VPSHUFDri (COPY_TO_REGCLASS FR32:$src, VR128), 0), 1)>;
    def : Pat<(v4f64 (X86VBroadcast FR64:$src)),
              (VINSERTF128rr (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)),
                (VPSHUFDri (COPY_TO_REGCLASS FR64:$src, VR128), 0x44), sub_xmm),
                (VPSHUFDri (COPY_TO_REGCLASS FR64:$src, VR128), 0x44), 1)>;

    def : Pat<(v4i32 (X86VBroadcast GR32:$src)),
              (VPSHUFDri (COPY_TO_REGCLASS GR32:$src, VR128), 0)>;
    def : Pat<(v8i32 (X86VBroadcast GR32:$src)),
              (VINSERTF128rr (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)),
                (VPSHUFDri (COPY_TO_REGCLASS GR32:$src, VR128), 0), sub_xmm),
                (VPSHUFDri (COPY_TO_REGCLASS GR32:$src, VR128), 0), 1)>;
    def : Pat<(v4i64 (X86VBroadcast GR64:$src)),
              (VINSERTF128rr (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)),
                (VPSHUFDri (COPY_TO_REGCLASS GR64:$src, VR128), 0x44), sub_xmm),
                (VPSHUFDri (COPY_TO_REGCLASS GR64:$src, VR128), 0x44), 1)>;
  }
}
//===----------------------------------------------------------------------===//
// VPERM - Permute instructions
//
multiclass avx2_perm<bits<8> opc, string OpcodeStr, PatFrag mem_frag,
                     ValueType OpVT> {
  def Yrr : AVX28I<opc, MRMSrcReg, (outs VR256:$dst),
                   (ins VR256:$src1, VR256:$src2),
                   !strconcat(OpcodeStr,
                       "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   [(set VR256:$dst,
                     (OpVT (X86VPermv VR256:$src1, VR256:$src2)))]>,
                   VEX_4V, VEX_L;
  def Yrm : AVX28I<opc, MRMSrcMem, (outs VR256:$dst),
                   (ins VR256:$src1, i256mem:$src2),
                   !strconcat(OpcodeStr,
                       "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   [(set VR256:$dst,
                     (OpVT (X86VPermv VR256:$src1,
                            (bitconvert (mem_frag addr:$src2)))))]>,
                   VEX_4V, VEX_L;
}

defm VPERMD : avx2_perm<0x36, "vpermd", memopv4i64, v8i32>;
let ExeDomain = SSEPackedSingle in
defm VPERMPS : avx2_perm<0x16, "vpermps", memopv8f32, v8f32>;
multiclass avx2_perm_imm<bits<8> opc, string OpcodeStr, PatFrag mem_frag,
                         ValueType OpVT> {
  def Yri : AVX2AIi8<opc, MRMSrcReg, (outs VR256:$dst),
                     (ins VR256:$src1, i8imm:$src2),
                     !strconcat(OpcodeStr,
                         "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                     [(set VR256:$dst,
                       (OpVT (X86VPermi VR256:$src1, (i8 imm:$src2))))]>,
                     VEX, VEX_L;
  def Ymi : AVX2AIi8<opc, MRMSrcMem, (outs VR256:$dst),
                     (ins i256mem:$src1, i8imm:$src2),
                     !strconcat(OpcodeStr,
                         "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                     [(set VR256:$dst,
                       (OpVT (X86VPermi (mem_frag addr:$src1),
                              (i8 imm:$src2))))]>, VEX, VEX_L;
}

defm VPERMQ : avx2_perm_imm<0x00, "vpermq", memopv4i64, v4i64>, VEX_W;
let ExeDomain = SSEPackedDouble in
defm VPERMPD : avx2_perm_imm<0x01, "vpermpd", memopv4f64, v4f64>, VEX_W;
//===----------------------------------------------------------------------===//
// VPERM2I128 - Permute Integer Values in 128-bit chunks
//
def VPERM2I128rr : AVX2AIi8<0x46, MRMSrcReg, (outs VR256:$dst),
          (ins VR256:$src1, VR256:$src2, i8imm:$src3),
          "vperm2i128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          [(set VR256:$dst, (v4i64 (X86VPerm2x128 VR256:$src1, VR256:$src2,
                             (i8 imm:$src3))))]>, VEX_4V, VEX_L;
def VPERM2I128rm : AVX2AIi8<0x46, MRMSrcMem, (outs VR256:$dst),
          (ins VR256:$src1, f256mem:$src2, i8imm:$src3),
          "vperm2i128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          [(set VR256:$dst, (X86VPerm2x128 VR256:$src1, (memopv4i64 addr:$src2),
                             (i8 imm:$src3)))]>, VEX_4V, VEX_L;

let Predicates = [HasAVX2] in {
  def : Pat<(v8i32 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
            (VPERM2I128rr VR256:$src1, VR256:$src2, imm:$imm)>;
  def : Pat<(v32i8 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
            (VPERM2I128rr VR256:$src1, VR256:$src2, imm:$imm)>;
  def : Pat<(v16i16 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
            (VPERM2I128rr VR256:$src1, VR256:$src2, imm:$imm)>;

  def : Pat<(v32i8 (X86VPerm2x128 VR256:$src1,
                    (bc_v32i8 (memopv4i64 addr:$src2)), (i8 imm:$imm))),
            (VPERM2I128rm VR256:$src1, addr:$src2, imm:$imm)>;
  def : Pat<(v16i16 (X86VPerm2x128 VR256:$src1,
                     (bc_v16i16 (memopv4i64 addr:$src2)), (i8 imm:$imm))),
            (VPERM2I128rm VR256:$src1, addr:$src2, imm:$imm)>;
  def : Pat<(v8i32 (X86VPerm2x128 VR256:$src1,
                    (bc_v8i32 (memopv4i64 addr:$src2)), (i8 imm:$imm))),
            (VPERM2I128rm VR256:$src1, addr:$src2, imm:$imm)>;
}
//===----------------------------------------------------------------------===//
// VINSERTI128 - Insert packed integer values
//
let neverHasSideEffects = 1 in {
def VINSERTI128rr : AVX2AIi8<0x38, MRMSrcReg, (outs VR256:$dst),
          (ins VR256:$src1, VR128:$src2, i8imm:$src3),
          "vinserti128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, VEX_4V, VEX_L;
let mayLoad = 1 in
def VINSERTI128rm : AVX2AIi8<0x38, MRMSrcMem, (outs VR256:$dst),
          (ins VR256:$src1, i128mem:$src2, i8imm:$src3),
          "vinserti128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, VEX_4V, VEX_L;
}

let Predicates = [HasAVX2] in {
def : Pat<(vinsertf128_insert:$ins (v4i64 VR256:$src1), (v2i64 VR128:$src2),
                                   (iPTR imm)),
          (VINSERTI128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v8i32 VR256:$src1), (v4i32 VR128:$src2),
                                   (iPTR imm)),
          (VINSERTI128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v32i8 VR256:$src1), (v16i8 VR128:$src2),
                                   (iPTR imm)),
          (VINSERTI128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v16i16 VR256:$src1), (v8i16 VR128:$src2),
                                   (iPTR imm)),
          (VINSERTI128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;

def : Pat<(vinsertf128_insert:$ins (v4i64 VR256:$src1), (memopv2i64 addr:$src2),
                                   (iPTR imm)),
          (VINSERTI128rm VR256:$src1, addr:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v8i32 VR256:$src1),
                                   (bc_v4i32 (memopv2i64 addr:$src2)),
                                   (iPTR imm)),
          (VINSERTI128rm VR256:$src1, addr:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v32i8 VR256:$src1),
                                   (bc_v16i8 (memopv2i64 addr:$src2)),
                                   (iPTR imm)),
          (VINSERTI128rm VR256:$src1, addr:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v16i16 VR256:$src1),
                                   (bc_v8i16 (memopv2i64 addr:$src2)),
                                   (iPTR imm)),
          (VINSERTI128rm VR256:$src1, addr:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
}
//===----------------------------------------------------------------------===//
// VEXTRACTI128 - Extract packed integer values
//
def VEXTRACTI128rr : AVX2AIi8<0x39, MRMDestReg, (outs VR128:$dst),
          (ins VR256:$src1, i8imm:$src2),
          "vextracti128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
          [(set VR128:$dst,
            (int_x86_avx2_vextracti128 VR256:$src1, imm:$src2))]>,
          VEX, VEX_L;
let neverHasSideEffects = 1, mayStore = 1 in
def VEXTRACTI128mr : AVX2AIi8<0x39, MRMDestMem, (outs),
          (ins i128mem:$dst, VR256:$src1, i8imm:$src2),
          "vextracti128\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
          VEX, VEX_L;
let Predicates = [HasAVX2] in {
def : Pat<(vextractf128_extract:$ext VR256:$src1, (iPTR imm)),
          (v2i64 (VEXTRACTI128rr
                    (v4i64 VR256:$src1),
                    (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
def : Pat<(vextractf128_extract:$ext VR256:$src1, (iPTR imm)),
          (v4i32 (VEXTRACTI128rr
                    (v8i32 VR256:$src1),
                    (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
def : Pat<(vextractf128_extract:$ext VR256:$src1, (iPTR imm)),
          (v8i16 (VEXTRACTI128rr
                    (v16i16 VR256:$src1),
                    (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
def : Pat<(vextractf128_extract:$ext VR256:$src1, (iPTR imm)),
          (v16i8 (VEXTRACTI128rr
                    (v32i8 VR256:$src1),
                    (EXTRACT_get_vextractf128_imm VR128:$ext)))>;

def : Pat<(alignedstore (v2i64 (vextractf128_extract:$ext (v4i64 VR256:$src1),
                                (iPTR imm))), addr:$dst),
          (VEXTRACTI128mr addr:$dst, VR256:$src1,
                          (EXTRACT_get_vextractf128_imm VR128:$ext))>;
def : Pat<(alignedstore (v4i32 (vextractf128_extract:$ext (v8i32 VR256:$src1),
                                (iPTR imm))), addr:$dst),
          (VEXTRACTI128mr addr:$dst, VR256:$src1,
                          (EXTRACT_get_vextractf128_imm VR128:$ext))>;
def : Pat<(alignedstore (v8i16 (vextractf128_extract:$ext (v16i16 VR256:$src1),
                                (iPTR imm))), addr:$dst),
          (VEXTRACTI128mr addr:$dst, VR256:$src1,
                          (EXTRACT_get_vextractf128_imm VR128:$ext))>;
def : Pat<(alignedstore (v16i8 (vextractf128_extract:$ext (v32i8 VR256:$src1),
                                (iPTR imm))), addr:$dst),
          (VEXTRACTI128mr addr:$dst, VR256:$src1,
                          (EXTRACT_get_vextractf128_imm VR128:$ext))>;
}
//===----------------------------------------------------------------------===//
// VPMASKMOV - Conditional SIMD Integer Packed Loads and Stores
//
multiclass avx2_pmovmask<string OpcodeStr,
                         Intrinsic IntLd128, Intrinsic IntLd256,
                         Intrinsic IntSt128, Intrinsic IntSt256> {
  def rm  : AVX28I<0x8c, MRMSrcMem, (outs VR128:$dst),
             (ins VR128:$src1, i128mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR128:$dst, (IntLd128 addr:$src2, VR128:$src1))]>, VEX_4V;
  def Yrm : AVX28I<0x8c, MRMSrcMem, (outs VR256:$dst),
             (ins VR256:$src1, i256mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR256:$dst, (IntLd256 addr:$src2, VR256:$src1))]>,
             VEX_4V, VEX_L;
  def mr  : AVX28I<0x8e, MRMDestMem, (outs),
             (ins i128mem:$dst, VR128:$src1, VR128:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(IntSt128 addr:$dst, VR128:$src1, VR128:$src2)]>, VEX_4V;
  def Ymr : AVX28I<0x8e, MRMDestMem, (outs),
             (ins i256mem:$dst, VR256:$src1, VR256:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(IntSt256 addr:$dst, VR256:$src1, VR256:$src2)]>, VEX_4V, VEX_L;
}

defm VPMASKMOVD : avx2_pmovmask<"vpmaskmovd",
                                int_x86_avx2_maskload_d,
                                int_x86_avx2_maskload_d_256,
                                int_x86_avx2_maskstore_d,
                                int_x86_avx2_maskstore_d_256>;
defm VPMASKMOVQ : avx2_pmovmask<"vpmaskmovq",
                                int_x86_avx2_maskload_q,
                                int_x86_avx2_maskload_q_256,
                                int_x86_avx2_maskstore_q,
                                int_x86_avx2_maskstore_q_256>, VEX_W;
//===----------------------------------------------------------------------===//
// Variable Bit Shifts
//
multiclass avx2_var_shift<bits<8> opc, string OpcodeStr, SDNode OpNode,
                          ValueType vt128, ValueType vt256> {
  def rr : AVX28I<opc, MRMSrcReg, (outs VR128:$dst),
             (ins VR128:$src1, VR128:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR128:$dst,
               (vt128 (OpNode VR128:$src1, (vt128 VR128:$src2))))]>,
             VEX_4V;
  def rm : AVX28I<opc, MRMSrcMem, (outs VR128:$dst),
             (ins VR128:$src1, i128mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR128:$dst,
               (vt128 (OpNode VR128:$src1,
                       (vt128 (bitconvert (memopv2i64 addr:$src2))))))]>,
             VEX_4V;
  def Yrr : AVX28I<opc, MRMSrcReg, (outs VR256:$dst),
             (ins VR256:$src1, VR256:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR256:$dst,
               (vt256 (OpNode VR256:$src1, (vt256 VR256:$src2))))]>,
             VEX_4V, VEX_L;
  def Yrm : AVX28I<opc, MRMSrcMem, (outs VR256:$dst),
             (ins VR256:$src1, i256mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR256:$dst,
               (vt256 (OpNode VR256:$src1,
                       (vt256 (bitconvert (memopv4i64 addr:$src2))))))]>,
             VEX_4V, VEX_L;
}

defm VPSLLVD : avx2_var_shift<0x47, "vpsllvd", shl, v4i32, v8i32>;
defm VPSLLVQ : avx2_var_shift<0x47, "vpsllvq", shl, v2i64, v4i64>, VEX_W;
defm VPSRLVD : avx2_var_shift<0x45, "vpsrlvd", srl, v4i32, v8i32>;
defm VPSRLVQ : avx2_var_shift<0x45, "vpsrlvq", srl, v2i64, v4i64>, VEX_W;
defm VPSRAVD : avx2_var_shift<0x46, "vpsravd", sra, v4i32, v8i32>;
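// Editor's note (illustrative, not part of the original source): unlike the
// classic shifts, these take an independent shift count per element. A hedged
// C sketch, assuming AVX2 and <immintrin.h>:
//   __m256i shl_per_lane(__m256i vals, __m256i counts) {
//     return _mm256_sllv_epi32(vals, counts);   // each i32 lane shifted by
//   }                                           // its own count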
//===----------------------------------------------------------------------===//
// VGATHER - GATHER Operations
multiclass avx2_gather<bits<8> opc, string OpcodeStr, RegisterClass RC256,
                       X86MemOperand memop128, X86MemOperand memop256> {
  def rm  : AVX28I<opc, MRMSrcMem, (outs VR128:$dst, VR128:$mask_wb),
            (ins VR128:$src1, memop128:$src2, VR128:$mask),
            !strconcat(OpcodeStr,
              "\t{$mask, $src2, $dst|$dst, $src2, $mask}"),
            []>, VEX_4VOp3;
  def Yrm : AVX28I<opc, MRMSrcMem, (outs RC256:$dst, RC256:$mask_wb),
            (ins RC256:$src1, memop256:$src2, RC256:$mask),
            !strconcat(OpcodeStr,
              "\t{$mask, $src2, $dst|$dst, $src2, $mask}"),
            []>, VEX_4VOp3, VEX_L;
}

let mayLoad = 1, Constraints = "$src1 = $dst, $mask = $mask_wb" in {
  defm VGATHERDPD : avx2_gather<0x92, "vgatherdpd", VR256, vx64mem, vx64mem>, VEX_W;
  defm VGATHERQPD : avx2_gather<0x93, "vgatherqpd", VR256, vx64mem, vy64mem>, VEX_W;
  defm VGATHERDPS : avx2_gather<0x92, "vgatherdps", VR256, vx32mem, vy32mem>;
  defm VGATHERQPS : avx2_gather<0x93, "vgatherqps", VR128, vx32mem, vy32mem>;
  defm VPGATHERDQ : avx2_gather<0x90, "vpgatherdq", VR256, vx64mem, vx64mem>, VEX_W;
  defm VPGATHERQQ : avx2_gather<0x91, "vpgatherqq", VR256, vx64mem, vy64mem>, VEX_W;
  defm VPGATHERDD : avx2_gather<0x90, "vpgatherdd", VR256, vx32mem, vy32mem>;
  defm VPGATHERQD : avx2_gather<0x91, "vpgatherqd", VR128, vx32mem, vy32mem>;
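// Editor's note (illustrative, not part of the original source): a gather
// loads each element from base + index*scale, with the mask register written
// back as elements complete (hence the $mask = $mask_wb constraint above). A
// hedged C sketch, assuming AVX2 and <immintrin.h>:
//   __m256i gather8(const int *base, __m256i idx) {
//     return _mm256_i32gather_epi32(base, idx, 4);   // scale = 4 bytes
//   }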