//===-- X86InstrSSE.td - Describe the X86 Instruction Set -*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the X86 SSE instruction set, defining the instructions,
// and the properties of the instructions which are needed for code generation,
// machine code emission, and analysis.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// SSE 1 & 2 Instruction Classes
//===----------------------------------------------------------------------===//
/// sse12_fp_scalar - SSE 1 & 2 scalar instructions class
multiclass sse12_fp_scalar<bits<8> opc, string OpcodeStr, SDNode OpNode,
                           RegisterClass RC, X86MemOperand x86memop,
                           bit Is2Addr = 1> {
  let isCommutable = 1 in {
    def rr : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (OpNode RC:$src1, RC:$src2))]>;
  }
  def rm : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
     !if(Is2Addr,
         !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
         !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
     [(set RC:$dst, (OpNode RC:$src1, (load addr:$src2)))]>;
}
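
// Illustrative only (hypothetical names and opcode, not defs from this file):
// an instantiation along the lines of
//   defm FOO : sse12_fp_scalar<0x58, "foo", fadd, FR32, f32mem>;
// would expand to FOOrr and FOOrm. With the default Is2Addr = 1 the asm is
// the two-operand "foo\t{$src2, $dst|$dst, $src2}" form; the AVX users pass
// Is2Addr = 0 to get the three-operand form instead.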
/// sse12_fp_scalar_int - SSE 1 & 2 scalar instructions intrinsics class
multiclass sse12_fp_scalar_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
                               string asm, string SSEVer, string FPSizeStr,
                               Operand memopr, ComplexPattern mem_cpat,
                               bit Is2Addr = 1> {
  def rr_Int : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (!cast<Intrinsic>(
                 !strconcat("int_x86_sse", SSEVer, "_", OpcodeStr, FPSizeStr))
             RC:$src1, RC:$src2))]>;
  def rm_Int : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, memopr:$src2),
       !if(Is2Addr,
           !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (!cast<Intrinsic>(!strconcat("int_x86_sse",
                 SSEVer, "_", OpcodeStr, FPSizeStr))
             RC:$src1, mem_cpat:$src2))]>;
}
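
// A sketch of the name concatenation above (assumed parameter values, chosen
// to match the intrinsic naming scheme): with SSEVer = "", OpcodeStr = "add"
// and FPSizeStr = "_ss", the !strconcat yields "int_x86_sse_add_ss", so the
// !cast<Intrinsic> resolves to the record for llvm.x86.sse.add.ss. An SSE2
// instantiation with SSEVer = "2" and FPSizeStr = "_sd" would name
// int_x86_sse2_add_sd instead.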
/// sse12_fp_packed - SSE 1 & 2 packed instructions class
multiclass sse12_fp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
                           RegisterClass RC, ValueType vt,
                           X86MemOperand x86memop, PatFrag mem_frag,
                           Domain d, bit Is2Addr = 1> {
  let isCommutable = 1 in
    def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], d>;
  def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
     !if(Is2Addr,
         !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
         !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
     [(set RC:$dst, (OpNode RC:$src1, (mem_frag addr:$src2)))], d>;
}
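
// Illustrative only (hypothetical instantiation, not a def from this file):
//   defm FOO : sse12_fp_packed<0x58, "foo", fadd, VR128, v4f32, f128mem,
//                              memopv4f32, SSEPackedSingle>;
// The Domain argument tags every generated instruction for the SSE execution
// domain pass, which may rewrite an instruction into an equivalent one from
// another domain (see the movaps-for-integer patterns later in this file).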
/// sse12_fp_packed_logical_rm - SSE 1 & 2 packed instructions class
multiclass sse12_fp_packed_logical_rm<bits<8> opc, RegisterClass RC, Domain d,
                                 string OpcodeStr, X86MemOperand x86memop,
                                 list<dag> pat_rr, list<dag> pat_rm,
                                 bit Is2Addr = 1> {
  let isCommutable = 1 in
    def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       pat_rr, d>;
  def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
     !if(Is2Addr,
         !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
         !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
     pat_rm, d>;
}
/// sse12_fp_packed_int - SSE 1 & 2 packed instructions intrinsics class
multiclass sse12_fp_packed_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
                               string asm, string SSEVer, string FPSizeStr,
                               X86MemOperand x86memop, PatFrag mem_frag,
                               Domain d, bit Is2Addr = 1> {
  def rr_Int : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (!cast<Intrinsic>(
                     !strconcat("int_x86_", SSEVer, "_", OpcodeStr, FPSizeStr))
             RC:$src1, RC:$src2))], d>;
  def rm_Int : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
           !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (!cast<Intrinsic>(
                     !strconcat("int_x86_", SSEVer, "_", OpcodeStr, FPSizeStr))
             RC:$src1, (mem_frag addr:$src2)))], d>;
}
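
// Note the prefix difference between the two intrinsic classes: the scalar
// class above concatenates "int_x86_sse" # SSEVer, while this packed class
// concatenates "int_x86_" # SSEVer. So (assumed values for illustration)
// SSEVer = "sse2", OpcodeStr = "max", FPSizeStr = "_pd" would resolve here to
// int_x86_sse2_max_pd, the record for llvm.x86.sse2.max.pd.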
//===----------------------------------------------------------------------===//
// Non-instruction patterns
//===----------------------------------------------------------------------===//

// A vector extract of the first f32 position is a subregister copy.
def : Pat<(f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
          (f32 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;

// A 128-bit subvector extract from the first 256-bit vector position
// is a subregister copy that needs no instruction.
def : Pat<(v4i32 (extract_subvector (v8i32 VR256:$src), (i32 0))),
          (v4i32 (EXTRACT_SUBREG (v8i32 VR256:$src), sub_xmm))>;
def : Pat<(v4f32 (extract_subvector (v8f32 VR256:$src), (i32 0))),
          (v4f32 (EXTRACT_SUBREG (v8f32 VR256:$src), sub_xmm))>;

def : Pat<(v2i64 (extract_subvector (v4i64 VR256:$src), (i32 0))),
          (v2i64 (EXTRACT_SUBREG (v4i64 VR256:$src), sub_xmm))>;
def : Pat<(v2f64 (extract_subvector (v4f64 VR256:$src), (i32 0))),
          (v2f64 (EXTRACT_SUBREG (v4f64 VR256:$src), sub_xmm))>;

def : Pat<(v8i16 (extract_subvector (v16i16 VR256:$src), (i32 0))),
          (v8i16 (EXTRACT_SUBREG (v16i16 VR256:$src), sub_xmm))>;
def : Pat<(v16i8 (extract_subvector (v32i8 VR256:$src), (i32 0))),
          (v16i8 (EXTRACT_SUBREG (v32i8 VR256:$src), sub_xmm))>;
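
// A rough picture of what these catch (illustrative IR, not from this file):
// taking the low half of a 256-bit value, e.g.
//   %lo = shufflevector <8 x i32> %v, <8 x i32> undef,
//                       <4 x i32> <i32 0, i32 1, i32 2, i32 3>
// is typically legalized to an extract_subvector at index 0 and then selected
// as a plain sub_xmm subregister copy, with no vextractf128 emitted.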
// A 128-bit subvector insert to the first 256-bit vector position
// is a subregister copy that needs no instruction.
def : Pat<(insert_subvector undef, (v2i64 VR128:$src), (i32 0)),
          (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
def : Pat<(insert_subvector undef, (v2f64 VR128:$src), (i32 0)),
          (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
def : Pat<(insert_subvector undef, (v4i32 VR128:$src), (i32 0)),
          (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
def : Pat<(insert_subvector undef, (v4f32 VR128:$src), (i32 0)),
          (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
def : Pat<(insert_subvector undef, (v8i16 VR128:$src), (i32 0)),
          (INSERT_SUBREG (v16i16 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
def : Pat<(insert_subvector undef, (v16i8 VR128:$src), (i32 0)),
          (INSERT_SUBREG (v32i8 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
// Implicitly promote a 32-bit scalar to a vector.
def : Pat<(v4f32 (scalar_to_vector FR32:$src)),
          (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src, sub_ss)>;
def : Pat<(v8f32 (scalar_to_vector FR32:$src)),
          (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)), FR32:$src, sub_ss)>;
// Implicitly promote a 64-bit scalar to a vector.
def : Pat<(v2f64 (scalar_to_vector FR64:$src)),
          (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src, sub_sd)>;
def : Pat<(v4f64 (scalar_to_vector FR64:$src)),
          (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)), FR64:$src, sub_sd)>;
// Bitcasts between 128-bit vector types. Return the original type since
// no instruction is needed for the conversion.
let Predicates = [HasXMMInt] in {
  def : Pat<(v2i64 (bitconvert (v4i32 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v2i64 (bitconvert (v8i16 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v2i64 (bitconvert (v16i8 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v2i64 (bitconvert (v2f64 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v2i64 (bitconvert (v4f32 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v2i64 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v8i16 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v16i8 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v2f64 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v4f32 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v2i64 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v4i32 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v16i8 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v2f64 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v4f32 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v2i64 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v4i32 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v8i16 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v2f64 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v4f32 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v2i64 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v4i32 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v8i16 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v16i8 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v2f64 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v2i64 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v4i32 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v8i16 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v16i8 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v4f32 VR128:$src))), (v2f64 VR128:$src)>;
}
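
// In IR terms (illustrative example), a cast such as
//   %f = bitcast <4 x i32> %i to <4 x float>
// only reinterprets the 128 bits already in the register, so these patterns
// let it fold away at instruction selection with no code emitted.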
// Bitcasts between 256-bit vector types. Return the original type since
// no instruction is needed for the conversion.
let Predicates = [HasAVX] in {
  def : Pat<(v4f64  (bitconvert (v8f32 VR256:$src))),  (v4f64 VR256:$src)>;
  def : Pat<(v4f64  (bitconvert (v8i32 VR256:$src))),  (v4f64 VR256:$src)>;
  def : Pat<(v4f64  (bitconvert (v4i64 VR256:$src))),  (v4f64 VR256:$src)>;
  def : Pat<(v4f64  (bitconvert (v16i16 VR256:$src))), (v4f64 VR256:$src)>;
  def : Pat<(v4f64  (bitconvert (v32i8 VR256:$src))),  (v4f64 VR256:$src)>;
  def : Pat<(v8f32  (bitconvert (v8i32 VR256:$src))),  (v8f32 VR256:$src)>;
  def : Pat<(v8f32  (bitconvert (v4i64 VR256:$src))),  (v8f32 VR256:$src)>;
  def : Pat<(v8f32  (bitconvert (v4f64 VR256:$src))),  (v8f32 VR256:$src)>;
  def : Pat<(v8f32  (bitconvert (v32i8 VR256:$src))),  (v8f32 VR256:$src)>;
  def : Pat<(v8f32  (bitconvert (v16i16 VR256:$src))), (v8f32 VR256:$src)>;
  def : Pat<(v4i64  (bitconvert (v8f32 VR256:$src))),  (v4i64 VR256:$src)>;
  def : Pat<(v4i64  (bitconvert (v8i32 VR256:$src))),  (v4i64 VR256:$src)>;
  def : Pat<(v4i64  (bitconvert (v4f64 VR256:$src))),  (v4i64 VR256:$src)>;
  def : Pat<(v4i64  (bitconvert (v32i8 VR256:$src))),  (v4i64 VR256:$src)>;
  def : Pat<(v4i64  (bitconvert (v16i16 VR256:$src))), (v4i64 VR256:$src)>;
  def : Pat<(v32i8  (bitconvert (v4f64 VR256:$src))),  (v32i8 VR256:$src)>;
  def : Pat<(v32i8  (bitconvert (v4i64 VR256:$src))),  (v32i8 VR256:$src)>;
  def : Pat<(v32i8  (bitconvert (v8f32 VR256:$src))),  (v32i8 VR256:$src)>;
  def : Pat<(v32i8  (bitconvert (v8i32 VR256:$src))),  (v32i8 VR256:$src)>;
  def : Pat<(v32i8  (bitconvert (v16i16 VR256:$src))), (v32i8 VR256:$src)>;
  def : Pat<(v8i32  (bitconvert (v32i8 VR256:$src))),  (v8i32 VR256:$src)>;
  def : Pat<(v8i32  (bitconvert (v16i16 VR256:$src))), (v8i32 VR256:$src)>;
  def : Pat<(v8i32  (bitconvert (v8f32 VR256:$src))),  (v8i32 VR256:$src)>;
  def : Pat<(v8i32  (bitconvert (v4i64 VR256:$src))),  (v8i32 VR256:$src)>;
  def : Pat<(v8i32  (bitconvert (v4f64 VR256:$src))),  (v8i32 VR256:$src)>;
  def : Pat<(v16i16 (bitconvert (v8f32 VR256:$src))),  (v16i16 VR256:$src)>;
  def : Pat<(v16i16 (bitconvert (v8i32 VR256:$src))),  (v16i16 VR256:$src)>;
  def : Pat<(v16i16 (bitconvert (v4i64 VR256:$src))),  (v16i16 VR256:$src)>;
  def : Pat<(v16i16 (bitconvert (v4f64 VR256:$src))),  (v16i16 VR256:$src)>;
  def : Pat<(v16i16 (bitconvert (v32i8 VR256:$src))),  (v16i16 VR256:$src)>;
}
//===----------------------------------------------------------------------===//
// AVX & SSE - Zero/One Vectors
//===----------------------------------------------------------------------===//

// Alias instructions that map zero vector to pxor / xorp* for sse.
// We set canFoldAsLoad because this can be converted to a constant-pool
// load of an all-zeros value if folding it would be beneficial.
// FIXME: Change encoding to pseudo! This is blocked right now by the x86
// JIT implementation, it does not expand the instructions below like
// X86MCInstLower does.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
    isCodeGenOnly = 1 in {
def V_SET0PS : PSI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
                   [(set VR128:$dst, (v4f32 immAllZerosV))]>;
def V_SET0PD : PDI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
                   [(set VR128:$dst, (v2f64 immAllZerosV))]>;
let ExeDomain = SSEPackedInt in
def V_SET0PI : PDI<0xEF, MRMInitReg, (outs VR128:$dst), (ins), "",
                   [(set VR128:$dst, (v4i32 immAllZerosV))]>;
}

// The same as done above but for AVX. The 128-bit versions are the
// same, but re-encoded. The 256-bit versions do not support a PI form,
// and don't need one because on Sandy Bridge the register is set to zero
// at the rename stage without using any execution unit, so SET0PSY
// and SET0PDY can be used for vector int instructions without penalty.
// FIXME: Change encoding to pseudo! This is blocked right now by the x86
// JIT implementation, it does not expand the instructions below like
// X86MCInstLower does.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
    isCodeGenOnly = 1, Predicates = [HasAVX] in {
def AVX_SET0PS  : PSI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
                      [(set VR128:$dst, (v4f32 immAllZerosV))]>, VEX_4V;
def AVX_SET0PD  : PDI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
                      [(set VR128:$dst, (v2f64 immAllZerosV))]>, VEX_4V;
def AVX_SET0PSY : PSI<0x57, MRMInitReg, (outs VR256:$dst), (ins), "",
                      [(set VR256:$dst, (v8f32 immAllZerosV))]>, VEX_4V;
def AVX_SET0PDY : PDI<0x57, MRMInitReg, (outs VR256:$dst), (ins), "",
                      [(set VR256:$dst, (v4f64 immAllZerosV))]>, VEX_4V;
let ExeDomain = SSEPackedInt in
def AVX_SET0PI  : PDI<0xEF, MRMInitReg, (outs VR128:$dst), (ins), "",
                      [(set VR128:$dst, (v4i32 immAllZerosV))]>;
}

def : Pat<(v2i64 immAllZerosV), (V_SET0PI)>;
def : Pat<(v8i16 immAllZerosV), (V_SET0PI)>;
def : Pat<(v16i8 immAllZerosV), (V_SET0PI)>;

// AVX has no support for 256-bit integer instructions, but since the 128-bit
// VPXOR instruction writes zero to its upper part, it's safe to use it to
// build zeros.
def : Pat<(v8i32 immAllZerosV), (SUBREG_TO_REG (i32 0), (AVX_SET0PI), sub_xmm)>;
def : Pat<(bc_v8i32 (v8f32 immAllZerosV)),
          (SUBREG_TO_REG (i32 0), (AVX_SET0PI), sub_xmm)>;

def : Pat<(v4i64 immAllZerosV), (SUBREG_TO_REG (i64 0), (AVX_SET0PI), sub_xmm)>;
def : Pat<(bc_v4i64 (v8f32 immAllZerosV)),
          (SUBREG_TO_REG (i64 0), (AVX_SET0PI), sub_xmm)>;
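
// What this buys in practice (illustrative asm, not emitted directly by this
// file): a zeroed register materializes as a zero idiom such as
//   xorps %xmm0, %xmm0     (V_SET0PS)
//   pxor  %xmm0, %xmm0     (V_SET0PI, integer domain)
// which is dependency-breaking on most cores, and with isReMaterializable set
// the register allocator can recreate the zero instead of spilling it.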
//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move FP Scalar Instructions
//
// Move Instructions. Register-to-register movss/movsd is not used for FR32/64
// register copies because it's a partial register update; FsMOVAPSrr/FsMOVAPDrr
// is used instead. Register-to-register movss/movsd is not modeled as an
// INSERT_SUBREG because INSERT_SUBREG requires that the insert be implementable
// in terms of a copy, and, as just mentioned, we don't use movss/movsd for
// copies.
//===----------------------------------------------------------------------===//

class sse12_move_rr<RegisterClass RC, ValueType vt, string asm> :
      SI<0x10, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, RC:$src2), asm,
      [(set (vt VR128:$dst), (movl VR128:$src1, (scalar_to_vector RC:$src2)))]>;

// Loading from memory automatically zeros the upper bits.
class sse12_move_rm<RegisterClass RC, X86MemOperand x86memop,
                    PatFrag mem_pat, string OpcodeStr> :
      SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
         !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
         [(set RC:$dst, (mem_pat addr:$src))]>;
def VMOVSSrr : sse12_move_rr<FR32, v4f32,
                "movss\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XS, VEX_4V;
def VMOVSDrr : sse12_move_rr<FR64, v2f64,
                "movsd\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XD, VEX_4V;

let canFoldAsLoad = 1, isReMaterializable = 1 in {
  def VMOVSSrm : sse12_move_rm<FR32, f32mem, loadf32, "movss">, XS, VEX;
  let AddedComplexity = 20 in
    def VMOVSDrm : sse12_move_rm<FR64, f64mem, loadf64, "movsd">, XD, VEX;
}

def VMOVSSmr : SI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
                  "movss\t{$src, $dst|$dst, $src}",
                  [(store FR32:$src, addr:$dst)]>, XS, VEX;
def VMOVSDmr : SI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
                  "movsd\t{$src, $dst|$dst, $src}",
                  [(store FR64:$src, addr:$dst)]>, XD, VEX;

let Constraints = "$src1 = $dst" in {
  def MOVSSrr : sse12_move_rr<FR32, v4f32,
                  "movss\t{$src2, $dst|$dst, $src2}">, XS;
  def MOVSDrr : sse12_move_rr<FR64, v2f64,
                  "movsd\t{$src2, $dst|$dst, $src2}">, XD;
}

let canFoldAsLoad = 1, isReMaterializable = 1 in {
  def MOVSSrm : sse12_move_rm<FR32, f32mem, loadf32, "movss">, XS;

  let AddedComplexity = 20 in
    def MOVSDrm : sse12_move_rm<FR64, f64mem, loadf64, "movsd">, XD;
}

def MOVSSmr : SSI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
                  "movss\t{$src, $dst|$dst, $src}",
                  [(store FR32:$src, addr:$dst)]>;
def MOVSDmr : SDI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
                  "movsd\t{$src, $dst|$dst, $src}",
                  [(store FR64:$src, addr:$dst)]>;

// Patterns
let Predicates = [HasSSE1] in {
  let AddedComplexity = 15 in {
  // Extract the low 32-bit value from one vector and insert it into another.
  def : Pat<(v4f32 (movl VR128:$src1, VR128:$src2)),
            (MOVSSrr (v4f32 VR128:$src1),
                     (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_ss))>;
  def : Pat<(v4i32 (movl VR128:$src1, VR128:$src2)),
            (MOVSSrr (v4i32 VR128:$src1),
                     (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_ss))>;

  // Move scalar to XMM zero-extended, zeroing a VR128 then do a
  // MOVSS to the lower bits.
  def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32:$src)))),
            (MOVSSrr (v4f32 (V_SET0PS)), FR32:$src)>;
  def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
            (MOVSSrr (v4f32 (V_SET0PS)),
                     (f32 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss)))>;
  def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
            (MOVSSrr (v4i32 (V_SET0PI)),
                     (EXTRACT_SUBREG (v4i32 VR128:$src), sub_ss))>;
  }

  let AddedComplexity = 20 in {
  // MOVSSrm zeros the high parts of the register; represent this
  // with SUBREG_TO_REG.
  def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
            (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
  def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
            (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
  def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
            (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
  }
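
  // Background note on SUBREG_TO_REG here: MOVSSrm writes the full 128-bit
  // register, zeroing everything above bit 31, so the wider v4f32 value is
  // already the zero-extended result. SUBREG_TO_REG merely reinterprets the
  // f32 def as the whole vector register, so each of these load-and-extend
  // patterns costs a single movss and no extra code.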
  // Extract and store.
  def : Pat<(store (f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
                   addr:$dst),
            (MOVSSmr addr:$dst,
                     (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;

  // Shuffle with MOVSS
  def : Pat<(v4f32 (X86Movss VR128:$src1, (scalar_to_vector FR32:$src2))),
            (MOVSSrr VR128:$src1, FR32:$src2)>;
  def : Pat<(v4i32 (X86Movss VR128:$src1, VR128:$src2)),
            (MOVSSrr (v4i32 VR128:$src1),
                     (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_ss))>;
  def : Pat<(v4f32 (X86Movss VR128:$src1, VR128:$src2)),
            (MOVSSrr (v4f32 VR128:$src1),
                     (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_ss))>;
}
let Predicates = [HasSSE2] in {
  let AddedComplexity = 15 in {
  // Extract the low 64-bit value from one vector and insert it into another.
  def : Pat<(v2f64 (movl VR128:$src1, VR128:$src2)),
            (MOVSDrr (v2f64 VR128:$src1),
                     (EXTRACT_SUBREG (v2f64 VR128:$src2), sub_sd))>;
  def : Pat<(v2i64 (movl VR128:$src1, VR128:$src2)),
            (MOVSDrr (v2i64 VR128:$src1),
                     (EXTRACT_SUBREG (v2i64 VR128:$src2), sub_sd))>;

  // vector_shuffle v1, v2 <4, 5, 2, 3> using movsd
  def : Pat<(v4f32 (movlp VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (EXTRACT_SUBREG VR128:$src2, sub_sd))>;
  def : Pat<(v4i32 (movlp VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (EXTRACT_SUBREG VR128:$src2, sub_sd))>;

  // Move scalar to XMM zero-extended, zeroing a VR128 then do a
  // MOVSD to the lower bits.
  def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64:$src)))),
            (MOVSDrr (v2f64 (V_SET0PS)), FR64:$src)>;
  }

  let AddedComplexity = 20 in {
  // MOVSDrm zeros the high parts of the register; represent this
  // with SUBREG_TO_REG.
  def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
            (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
  def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
            (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
  def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
            (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
  def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
            (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
  def : Pat<(v2f64 (X86vzload addr:$src)),
            (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
  }

  // Extract and store.
  def : Pat<(store (f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
                   addr:$dst),
            (MOVSDmr addr:$dst,
                     (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd))>;

  // Shuffle with MOVSD
  def : Pat<(v2f64 (X86Movsd VR128:$src1, (scalar_to_vector FR64:$src2))),
            (MOVSDrr VR128:$src1, FR64:$src2)>;
  def : Pat<(v2i64 (X86Movsd VR128:$src1, VR128:$src2)),
            (MOVSDrr (v2i64 VR128:$src1),
                     (EXTRACT_SUBREG (v2i64 VR128:$src2), sub_sd))>;
  def : Pat<(v2f64 (X86Movsd VR128:$src1, VR128:$src2)),
            (MOVSDrr (v2f64 VR128:$src1),
                     (EXTRACT_SUBREG (v2f64 VR128:$src2), sub_sd))>;
  def : Pat<(v4f32 (X86Movsd VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_sd))>;
  def : Pat<(v4i32 (X86Movsd VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_sd))>;

  // FIXME: Instead of a X86Movlps there should be a X86Movsd here, the problem
  // is during lowering, where it's not possible to recognize the fold because
  // it has two uses through a bitcast. One use disappears at isel time and the
  // fold opportunity reappears.
  def : Pat<(v4f32 (X86Movlps VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_sd))>;
  def : Pat<(v4i32 (X86Movlps VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_sd))>;
}
let Predicates = [HasAVX] in {
  let AddedComplexity = 15 in {
  // Extract the low 32-bit value from one vector and insert it into another.
  def : Pat<(v4f32 (movl VR128:$src1, VR128:$src2)),
            (VMOVSSrr (v4f32 VR128:$src1),
                      (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_ss))>;
  def : Pat<(v4i32 (movl VR128:$src1, VR128:$src2)),
            (VMOVSSrr (v4i32 VR128:$src1),
                      (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_ss))>;

  // Extract the low 64-bit value from one vector and insert it into another.
  def : Pat<(v2f64 (movl VR128:$src1, VR128:$src2)),
            (VMOVSDrr (v2f64 VR128:$src1),
                      (EXTRACT_SUBREG (v2f64 VR128:$src2), sub_sd))>;
  def : Pat<(v2i64 (movl VR128:$src1, VR128:$src2)),
            (VMOVSDrr (v2i64 VR128:$src1),
                      (EXTRACT_SUBREG (v2i64 VR128:$src2), sub_sd))>;

  // vector_shuffle v1, v2 <4, 5, 2, 3> using movsd
  def : Pat<(v4f32 (movlp VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (EXTRACT_SUBREG VR128:$src2, sub_sd))>;
  def : Pat<(v4i32 (movlp VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (EXTRACT_SUBREG VR128:$src2, sub_sd))>;

  // Move scalar to XMM zero-extended, zeroing a VR128 then do a
  // MOVS{S,D} to the lower bits.
  def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32:$src)))),
            (VMOVSSrr (v4f32 (V_SET0PS)), FR32:$src)>;
  def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
            (VMOVSSrr (v4f32 (V_SET0PS)),
                      (f32 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss)))>;
  def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
            (VMOVSSrr (v4i32 (V_SET0PI)),
                      (EXTRACT_SUBREG (v4i32 VR128:$src), sub_ss))>;
  def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64:$src)))),
            (VMOVSDrr (v2f64 (V_SET0PS)), FR64:$src)>;
  }
  let AddedComplexity = 20 in {
  // MOVSSrm zeros the high parts of the register; represent this
  // with SUBREG_TO_REG. The AVX versions also write: DST[255:128] <- 0
  def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
            (SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_ss)>;
  def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
            (SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_ss)>;
  def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
            (SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_ss)>;

  // MOVSDrm zeros the high parts of the register; represent this
  // with SUBREG_TO_REG. The AVX versions also write: DST[255:128] <- 0
  def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
            (SUBREG_TO_REG (i64 0), (VMOVSDrm addr:$src), sub_sd)>;
  def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
            (SUBREG_TO_REG (i64 0), (VMOVSDrm addr:$src), sub_sd)>;
  def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
            (SUBREG_TO_REG (i64 0), (VMOVSDrm addr:$src), sub_sd)>;
  def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
            (SUBREG_TO_REG (i64 0), (VMOVSDrm addr:$src), sub_sd)>;
  def : Pat<(v2f64 (X86vzload addr:$src)),
            (SUBREG_TO_REG (i64 0), (VMOVSDrm addr:$src), sub_sd)>;

  // Represent the same patterns above but in the form they appear for
  // 256-bit types
  def : Pat<(v8f32 (X86vzmovl (insert_subvector undef,
                   (v4f32 (scalar_to_vector (loadf32 addr:$src))), (i32 0)))),
            (SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_ss)>;
  def : Pat<(v4f64 (X86vzmovl (insert_subvector undef,
                   (v2f64 (scalar_to_vector (loadf64 addr:$src))), (i32 0)))),
            (SUBREG_TO_REG (i32 0), (VMOVSDrm addr:$src), sub_sd)>;
  }
  // Extract and store.
  def : Pat<(store (f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
                   addr:$dst),
            (VMOVSSmr addr:$dst,
                      (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
  def : Pat<(store (f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
                   addr:$dst),
            (VMOVSDmr addr:$dst,
                      (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd))>;

  // Shuffle with VMOVSS
  def : Pat<(v4f32 (X86Movss VR128:$src1, (scalar_to_vector FR32:$src2))),
            (VMOVSSrr VR128:$src1, FR32:$src2)>;
  def : Pat<(v4i32 (X86Movss VR128:$src1, VR128:$src2)),
            (VMOVSSrr (v4i32 VR128:$src1),
                      (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_ss))>;
  def : Pat<(v4f32 (X86Movss VR128:$src1, VR128:$src2)),
            (VMOVSSrr (v4f32 VR128:$src1),
                      (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_ss))>;

  // Shuffle with VMOVSD
  def : Pat<(v2f64 (X86Movsd VR128:$src1, (scalar_to_vector FR64:$src2))),
            (VMOVSDrr VR128:$src1, FR64:$src2)>;
  def : Pat<(v2i64 (X86Movsd VR128:$src1, VR128:$src2)),
            (VMOVSDrr (v2i64 VR128:$src1),
                      (EXTRACT_SUBREG (v2i64 VR128:$src2), sub_sd))>;
  def : Pat<(v2f64 (X86Movsd VR128:$src1, VR128:$src2)),
            (VMOVSDrr (v2f64 VR128:$src1),
                      (EXTRACT_SUBREG (v2f64 VR128:$src2), sub_sd))>;
  def : Pat<(v4f32 (X86Movsd VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4f32 VR128:$src2),
                                                   sub_sd))>;
  def : Pat<(v4i32 (X86Movsd VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4i32 VR128:$src2),
                                                   sub_sd))>;

  // FIXME: Instead of a X86Movlps there should be a X86Movsd here, the problem
  // is during lowering, where it's not possible to recognize the fold because
  // it has two uses through a bitcast. One use disappears at isel time and the
  // fold opportunity reappears.
  def : Pat<(v4f32 (X86Movlps VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4f32 VR128:$src2),
                                                   sub_sd))>;
  def : Pat<(v4i32 (X86Movlps VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4i32 VR128:$src2),
                                                   sub_sd))>;
}
//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move Aligned/Unaligned FP Instructions
//===----------------------------------------------------------------------===//

multiclass sse12_mov_packed<bits<8> opc, RegisterClass RC,
                            X86MemOperand x86memop, PatFrag ld_frag,
                            string asm, Domain d,
                            bit IsReMaterializable = 1> {
let neverHasSideEffects = 1 in
  def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
              !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [], d>;
let canFoldAsLoad = 1, isReMaterializable = IsReMaterializable in
  def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
              !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
              [(set RC:$dst, (ld_frag addr:$src))], d>;
}
defm VMOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
                                "movaps", SSEPackedSingle>, TB, VEX;
defm VMOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
                                "movapd", SSEPackedDouble>, TB, OpSize, VEX;
defm VMOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
                                "movups", SSEPackedSingle>, TB, VEX;
defm VMOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
                                "movupd", SSEPackedDouble, 0>, TB, OpSize, VEX;

defm VMOVAPSY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv8f32,
                                 "movaps", SSEPackedSingle>, TB, VEX;
defm VMOVAPDY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv4f64,
                                 "movapd", SSEPackedDouble>, TB, OpSize, VEX;
defm VMOVUPSY : sse12_mov_packed<0x10, VR256, f256mem, loadv8f32,
                                 "movups", SSEPackedSingle>, TB, VEX;
defm VMOVUPDY : sse12_mov_packed<0x10, VR256, f256mem, loadv4f64,
                                 "movupd", SSEPackedDouble, 0>, TB, OpSize, VEX;
defm MOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
                               "movaps", SSEPackedSingle>, TB;
defm MOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
                               "movapd", SSEPackedDouble>, TB, OpSize;
defm MOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
                               "movups", SSEPackedSingle>, TB;
defm MOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
                               "movupd", SSEPackedDouble, 0>, TB, OpSize;

def VMOVAPSmr : VPSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                     "movaps\t{$src, $dst|$dst, $src}",
                     [(alignedstore (v4f32 VR128:$src), addr:$dst)]>, VEX;
def VMOVAPDmr : VPDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                     "movapd\t{$src, $dst|$dst, $src}",
                     [(alignedstore (v2f64 VR128:$src), addr:$dst)]>, VEX;
def VMOVUPSmr : VPSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                     "movups\t{$src, $dst|$dst, $src}",
                     [(store (v4f32 VR128:$src), addr:$dst)]>, VEX;
def VMOVUPDmr : VPDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                     "movupd\t{$src, $dst|$dst, $src}",
                     [(store (v2f64 VR128:$src), addr:$dst)]>, VEX;
def VMOVAPSYmr : VPSI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                      "movaps\t{$src, $dst|$dst, $src}",
                      [(alignedstore (v8f32 VR256:$src), addr:$dst)]>, VEX;
def VMOVAPDYmr : VPDI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                      "movapd\t{$src, $dst|$dst, $src}",
                      [(alignedstore (v4f64 VR256:$src), addr:$dst)]>, VEX;
def VMOVUPSYmr : VPSI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                      "movups\t{$src, $dst|$dst, $src}",
                      [(store (v8f32 VR256:$src), addr:$dst)]>, VEX;
def VMOVUPDYmr : VPDI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                      "movupd\t{$src, $dst|$dst, $src}",
                      [(store (v4f64 VR256:$src), addr:$dst)]>, VEX;

def : Pat<(int_x86_avx_loadu_ps_256 addr:$src), (VMOVUPSYrm addr:$src)>;
def : Pat<(int_x86_avx_storeu_ps_256 addr:$dst, VR256:$src),
          (VMOVUPSYmr addr:$dst, VR256:$src)>;

def : Pat<(int_x86_avx_loadu_pd_256 addr:$src), (VMOVUPDYrm addr:$src)>;
def : Pat<(int_x86_avx_storeu_pd_256 addr:$dst, VR256:$src),
          (VMOVUPDYmr addr:$dst, VR256:$src)>;

def MOVAPSmr : PSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movaps\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v4f32 VR128:$src), addr:$dst)]>;
def MOVAPDmr : PDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movapd\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v2f64 VR128:$src), addr:$dst)]>;
def MOVUPSmr : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movups\t{$src, $dst|$dst, $src}",
                   [(store (v4f32 VR128:$src), addr:$dst)]>;
def MOVUPDmr : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movupd\t{$src, $dst|$dst, $src}",
                   [(store (v2f64 VR128:$src), addr:$dst)]>;

let Predicates = [HasAVX] in {
  def : Pat<(int_x86_sse_storeu_ps addr:$dst, VR128:$src),
            (VMOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(int_x86_sse2_storeu_pd addr:$dst, VR128:$src),
            (VMOVUPDmr addr:$dst, VR128:$src)>;
}

let Predicates = [HasSSE1] in
  def : Pat<(int_x86_sse_storeu_ps addr:$dst, VR128:$src),
            (MOVUPSmr addr:$dst, VR128:$src)>;
let Predicates = [HasSSE2] in
  def : Pat<(int_x86_sse2_storeu_pd addr:$dst, VR128:$src),
            (MOVUPDmr addr:$dst, VR128:$src)>;

// Use movaps / movups for SSE integer load / store (one byte shorter).
// The instructions selected below are then converted to MOVDQA/MOVDQU
// during the SSE domain pass.
let Predicates = [HasSSE1] in {
  def : Pat<(alignedloadv4i32 addr:$src),
            (MOVAPSrm addr:$src)>;
  def : Pat<(loadv4i32 addr:$src),
            (MOVUPSrm addr:$src)>;
  def : Pat<(alignedloadv2i64 addr:$src),
            (MOVAPSrm addr:$src)>;
  def : Pat<(loadv2i64 addr:$src),
            (MOVUPSrm addr:$src)>;

  def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
            (MOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
            (MOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
            (MOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
            (MOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v2i64 VR128:$src), addr:$dst),
            (MOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v4i32 VR128:$src), addr:$dst),
            (MOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v8i16 VR128:$src), addr:$dst),
            (MOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v16i8 VR128:$src), addr:$dst),
            (MOVUPSmr addr:$dst, VR128:$src)>;
}
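
// Size note on the "one byte shorter" claim above: movaps is encoded as
// 0F 28 /r with no prefix, while movdqa is 66 0F 6F /r; the extra 0x66
// operand-size prefix accounts for the one-byte difference. The domain pass
// can still rewrite these back to MOVDQA/MOVDQU when the surrounding code is
// in the integer domain.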

// Use vmovaps/vmovups for AVX integer load/store.
let Predicates = [HasAVX] in {
  // 128-bit load/store
  def : Pat<(alignedloadv4i32 addr:$src),
            (VMOVAPSrm addr:$src)>;
  def : Pat<(loadv4i32 addr:$src),
            (VMOVUPSrm addr:$src)>;
  def : Pat<(alignedloadv2i64 addr:$src),
            (VMOVAPSrm addr:$src)>;
  def : Pat<(loadv2i64 addr:$src),
            (VMOVUPSrm addr:$src)>;

  def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
            (VMOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
            (VMOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
            (VMOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
            (VMOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v2i64 VR128:$src), addr:$dst),
            (VMOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v4i32 VR128:$src), addr:$dst),
            (VMOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v8i16 VR128:$src), addr:$dst),
            (VMOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v16i8 VR128:$src), addr:$dst),
            (VMOVUPSmr addr:$dst, VR128:$src)>;

  // 256-bit load/store
  def : Pat<(alignedloadv4i64 addr:$src),
            (VMOVAPSYrm addr:$src)>;
  def : Pat<(loadv4i64 addr:$src),
            (VMOVUPSYrm addr:$src)>;
  def : Pat<(alignedloadv8i32 addr:$src),
            (VMOVAPSYrm addr:$src)>;
  def : Pat<(loadv8i32 addr:$src),
            (VMOVUPSYrm addr:$src)>;
  def : Pat<(alignedstore (v4i64 VR256:$src), addr:$dst),
            (VMOVAPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(alignedstore (v8i32 VR256:$src), addr:$dst),
            (VMOVAPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(alignedstore (v16i16 VR256:$src), addr:$dst),
            (VMOVAPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(alignedstore (v32i8 VR256:$src), addr:$dst),
            (VMOVAPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(store (v4i64 VR256:$src), addr:$dst),
            (VMOVUPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(store (v8i32 VR256:$src), addr:$dst),
            (VMOVUPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(store (v16i16 VR256:$src), addr:$dst),
            (VMOVUPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(store (v32i8 VR256:$src), addr:$dst),
            (VMOVUPSYmr addr:$dst, VR256:$src)>;
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move Low packed FP Instructions
//===----------------------------------------------------------------------===//

multiclass sse12_mov_hilo_packed<bits<8> opc, RegisterClass RC,
                                 PatFrag mov_frag, string base_opc,
                                 string asm_opr> {
  def PSrm : PI<opc, MRMSrcMem,
         (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
         !strconcat(base_opc, "s", asm_opr),
     [(set VR128:$dst,
       (mov_frag VR128:$src1,
          (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))))],
     SSEPackedSingle>, TB;

  def PDrm : PI<opc, MRMSrcMem,
         (outs RC:$dst), (ins RC:$src1, f64mem:$src2),
         !strconcat(base_opc, "d", asm_opr),
     [(set RC:$dst, (v2f64 (mov_frag RC:$src1,
                            (scalar_to_vector (loadf64 addr:$src2)))))],
     SSEPackedDouble>, TB, OpSize;
}
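
// The "s"/"d" suffix spliced into base_opc above is how one multiclass yields
// both single and double forms: instantiated with base_opc = "movlp" it
// defines movlps/movlpd (and with "movhp", movhps/movhpd below). Note that
// the PSrm pattern loads an f64 and bitcasts it to v4f32, matching the
// 64-bit memory operand both instructions share.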

let AddedComplexity = 20 in {
  defm VMOVL : sse12_mov_hilo_packed<0x12, VR128, movlp, "movlp",
                     "\t{$src2, $src1, $dst|$dst, $src1, $src2}">, VEX_4V;
}
let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
  defm MOVL : sse12_mov_hilo_packed<0x12, VR128, movlp, "movlp",
                                    "\t{$src2, $dst|$dst, $src2}">;
}

def VMOVLPSmr : VPSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
                                 (iPTR 0))), addr:$dst)]>, VEX;
def VMOVLPDmr : VPDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlpd\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract (v2f64 VR128:$src),
                                 (iPTR 0))), addr:$dst)]>, VEX;
def MOVLPSmr : PSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
                                 (iPTR 0))), addr:$dst)]>;
def MOVLPDmr : PDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlpd\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract (v2f64 VR128:$src),
                                 (iPTR 0))), addr:$dst)]>;

let Predicates = [HasAVX] in {
  let AddedComplexity = 20 in {
    // vector_shuffle v1, (load v2) <4, 5, 2, 3> using MOVLPS
    def : Pat<(v4f32 (movlp VR128:$src1, (load addr:$src2))),
              (VMOVLPSrm VR128:$src1, addr:$src2)>;
    def : Pat<(v4i32 (movlp VR128:$src1, (load addr:$src2))),
              (VMOVLPSrm VR128:$src1, addr:$src2)>;
    // vector_shuffle v1, (load v2) <2, 1> using MOVLPS
    def : Pat<(v2f64 (movlp VR128:$src1, (load addr:$src2))),
              (VMOVLPDrm VR128:$src1, addr:$src2)>;
    def : Pat<(v2i64 (movlp VR128:$src1, (load addr:$src2))),
              (VMOVLPDrm VR128:$src1, addr:$src2)>;
  }

  // (store (vector_shuffle (load addr), v2, <4, 5, 2, 3>), addr) using MOVLPS
  def : Pat<(store (v4f32 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
            (VMOVLPSmr addr:$src1, VR128:$src2)>;
  def : Pat<(store (v4i32 (movlp (bc_v4i32 (loadv2i64 addr:$src1)),
                                 VR128:$src2)), addr:$src1),
            (VMOVLPSmr addr:$src1, VR128:$src2)>;

  // (store (vector_shuffle (load addr), v2, <2, 1>), addr) using MOVLPS
  def : Pat<(store (v2f64 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
            (VMOVLPDmr addr:$src1, VR128:$src2)>;
  def : Pat<(store (v2i64 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
            (VMOVLPDmr addr:$src1, VR128:$src2)>;

  // Shuffle with VMOVLPS
  def : Pat<(v4f32 (X86Movlps VR128:$src1, (load addr:$src2))),
            (VMOVLPSrm VR128:$src1, addr:$src2)>;
  def : Pat<(v4i32 (X86Movlps VR128:$src1, (load addr:$src2))),
            (VMOVLPSrm VR128:$src1, addr:$src2)>;
  def : Pat<(X86Movlps VR128:$src1,
                  (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
            (VMOVLPSrm VR128:$src1, addr:$src2)>;

  // Shuffle with VMOVLPD
  def : Pat<(v2f64 (X86Movlpd VR128:$src1, (load addr:$src2))),
            (VMOVLPDrm VR128:$src1, addr:$src2)>;
  def : Pat<(v2i64 (X86Movlpd VR128:$src1, (load addr:$src2))),
            (VMOVLPDrm VR128:$src1, addr:$src2)>;
  def : Pat<(v2f64 (X86Movlpd VR128:$src1,
                              (scalar_to_vector (loadf64 addr:$src2)))),
            (VMOVLPDrm VR128:$src1, addr:$src2)>;

  // Store patterns
  def : Pat<(store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)),
                   addr:$src1),
            (VMOVLPSmr addr:$src1, VR128:$src2)>;
  def : Pat<(store (v4i32 (X86Movlps
                   (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2)), addr:$src1),
            (VMOVLPSmr addr:$src1, VR128:$src2)>;
  def : Pat<(store (v2f64 (X86Movlpd (load addr:$src1), VR128:$src2)),
                   addr:$src1),
            (VMOVLPDmr addr:$src1, VR128:$src2)>;
  def : Pat<(store (v2i64 (X86Movlpd (load addr:$src1), VR128:$src2)),
                   addr:$src1),
            (VMOVLPDmr addr:$src1, VR128:$src2)>;
}

let Predicates = [HasSSE1] in {
  let AddedComplexity = 20 in {
    // vector_shuffle v1, (load v2) <4, 5, 2, 3> using MOVLPS
    def : Pat<(v4f32 (movlp VR128:$src1, (load addr:$src2))),
              (MOVLPSrm VR128:$src1, addr:$src2)>;
    def : Pat<(v4i32 (movlp VR128:$src1, (load addr:$src2))),
              (MOVLPSrm VR128:$src1, addr:$src2)>;
  }

  // (store (vector_shuffle (load addr), v2, <4, 5, 2, 3>), addr) using MOVLPS
  def : Pat<(store (v4f32 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
            (MOVLPSmr addr:$src1, VR128:$src2)>;
  def : Pat<(store (v4i32 (movlp (bc_v4i32 (loadv2i64 addr:$src1)),
                                 VR128:$src2)), addr:$src1),
            (MOVLPSmr addr:$src1, VR128:$src2)>;

  // Shuffle with MOVLPS
  def : Pat<(v4f32 (X86Movlps VR128:$src1, (load addr:$src2))),
            (MOVLPSrm VR128:$src1, addr:$src2)>;
  def : Pat<(v4i32 (X86Movlps VR128:$src1, (load addr:$src2))),
            (MOVLPSrm VR128:$src1, addr:$src2)>;
  def : Pat<(X86Movlps VR128:$src1,
                  (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
            (MOVLPSrm VR128:$src1, addr:$src2)>;

  // Store patterns
  def : Pat<(store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)),
                   addr:$src1),
            (MOVLPSmr addr:$src1, VR128:$src2)>;
  def : Pat<(store (v4i32 (X86Movlps
                   (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2)),
                   addr:$src1),
            (MOVLPSmr addr:$src1, VR128:$src2)>;
}

let Predicates = [HasSSE2] in {
  let AddedComplexity = 20 in {
    // vector_shuffle v1, (load v2) <2, 1> using MOVLPS
    def : Pat<(v2f64 (movlp VR128:$src1, (load addr:$src2))),
              (MOVLPDrm VR128:$src1, addr:$src2)>;
    def : Pat<(v2i64 (movlp VR128:$src1, (load addr:$src2))),
              (MOVLPDrm VR128:$src1, addr:$src2)>;
  }

  // (store (vector_shuffle (load addr), v2, <2, 1>), addr) using MOVLPS
  def : Pat<(store (v2f64 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
            (MOVLPDmr addr:$src1, VR128:$src2)>;
  def : Pat<(store (v2i64 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
            (MOVLPDmr addr:$src1, VR128:$src2)>;

  // Shuffle with MOVLPD
  def : Pat<(v2f64 (X86Movlpd VR128:$src1, (load addr:$src2))),
            (MOVLPDrm VR128:$src1, addr:$src2)>;
  def : Pat<(v2i64 (X86Movlpd VR128:$src1, (load addr:$src2))),
            (MOVLPDrm VR128:$src1, addr:$src2)>;
  def : Pat<(v2f64 (X86Movlpd VR128:$src1,
                              (scalar_to_vector (loadf64 addr:$src2)))),
            (MOVLPDrm VR128:$src1, addr:$src2)>;

  // Store patterns
  def : Pat<(store (v2f64 (X86Movlpd (load addr:$src1), VR128:$src2)),
                   addr:$src1),
            (MOVLPDmr addr:$src1, VR128:$src2)>;
  def : Pat<(store (v2i64 (X86Movlpd (load addr:$src1), VR128:$src2)),
                   addr:$src1),
            (MOVLPDmr addr:$src1, VR128:$src2)>;
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move Hi packed FP Instructions
//===----------------------------------------------------------------------===//

let AddedComplexity = 20 in {
  defm VMOVH : sse12_mov_hilo_packed<0x16, VR128, movlhps, "movhp",
                     "\t{$src2, $src1, $dst|$dst, $src1, $src2}">, VEX_4V;
}
let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
  defm MOVH : sse12_mov_hilo_packed<0x16, VR128, movlhps, "movhp",
                                    "\t{$src2, $dst|$dst, $src2}">;
}

// v2f64 extract element 1 is always custom lowered to unpack high to low
// and extract element 0 so the non-store version isn't too horrible.
def VMOVHPSmr : VPSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract
                                 (unpckh (bc_v2f64 (v4f32 VR128:$src)),
                                         (undef)), (iPTR 0))), addr:$dst)]>,
                   VEX;
def VMOVHPDmr : VPDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhpd\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract
                                 (v2f64 (unpckh VR128:$src, (undef))),
                                 (iPTR 0))), addr:$dst)]>,
                   VEX;
def MOVHPSmr : PSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract
                                 (unpckh (bc_v2f64 (v4f32 VR128:$src)),
                                         (undef)), (iPTR 0))), addr:$dst)]>;
def MOVHPDmr : PDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhpd\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract
                                 (v2f64 (unpckh VR128:$src, (undef))),
                                 (iPTR 0))), addr:$dst)]>;

let Predicates = [HasAVX] in {
  // MOVHPS patterns
  def : Pat<(movlhps VR128:$src1, (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
            (VMOVHPSrm (v4i32 VR128:$src1), addr:$src2)>;
  def : Pat<(X86Movlhps VR128:$src1,
                 (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
            (VMOVHPSrm VR128:$src1, addr:$src2)>;
  def : Pat<(X86Movlhps VR128:$src1,
                 (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
            (VMOVHPSrm VR128:$src1, addr:$src2)>;

  // FIXME: Instead of X86Unpcklpd, there should be a X86Movlhpd here, the
  // problem is during lowering, where it's not possible to recognize the load
  // fold because it has two uses through a bitcast. One use disappears at isel
  // time and the fold opportunity reappears.
  def : Pat<(v2f64 (X86Unpcklpd VR128:$src1,
                    (scalar_to_vector (loadf64 addr:$src2)))),
            (VMOVHPDrm VR128:$src1, addr:$src2)>;

  // FIXME: This should be matched by a X86Movhpd instead. Same as above.
  def : Pat<(v2f64 (X86Movlhpd VR128:$src1,
                    (scalar_to_vector (loadf64 addr:$src2)))),
            (VMOVHPDrm VR128:$src1, addr:$src2)>;

  // Store patterns
  def : Pat<(store (f64 (vector_extract
            (v2f64 (X86Unpckhps VR128:$src, (undef))), (iPTR 0))), addr:$dst),
            (VMOVHPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (f64 (vector_extract
            (v2f64 (X86Unpckhpd VR128:$src, (undef))), (iPTR 0))), addr:$dst),
            (VMOVHPDmr addr:$dst, VR128:$src)>;
}

let Predicates = [HasSSE1] in {
  // MOVHPS patterns
  def : Pat<(movlhps VR128:$src1, (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
            (MOVHPSrm (v4i32 VR128:$src1), addr:$src2)>;
  def : Pat<(X86Movlhps VR128:$src1,
                 (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
            (MOVHPSrm VR128:$src1, addr:$src2)>;
  def : Pat<(X86Movlhps VR128:$src1,
                 (bc_v4f32 (v2i64 (X86vzload addr:$src2)))),
            (MOVHPSrm VR128:$src1, addr:$src2)>;

  // Store patterns
  def : Pat<(store (f64 (vector_extract
            (v2f64 (X86Unpckhps VR128:$src, (undef))), (iPTR 0))), addr:$dst),
            (MOVHPSmr addr:$dst, VR128:$src)>;
}

let Predicates = [HasSSE2] in {
  // FIXME: Instead of X86Unpcklpd, there should be a X86Movlhpd here, the
  // problem is during lowering, where it's not possible to recognize the load
  // fold because it has two uses through a bitcast. One use disappears at isel
  // time and the fold opportunity reappears.
  def : Pat<(v2f64 (X86Unpcklpd VR128:$src1,
                    (scalar_to_vector (loadf64 addr:$src2)))),
            (MOVHPDrm VR128:$src1, addr:$src2)>;

  // FIXME: This should be matched by a X86Movhpd instead. Same as above.
  def : Pat<(v2f64 (X86Movlhpd VR128:$src1,
                    (scalar_to_vector (loadf64 addr:$src2)))),
            (MOVHPDrm VR128:$src1, addr:$src2)>;

  // Store patterns
  def : Pat<(store (f64 (vector_extract
            (v2f64 (X86Unpckhpd VR128:$src, (undef))), (iPTR 0))), addr:$dst),
            (MOVHPDmr addr:$dst, VR128:$src)>;
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move Low to High and High to Low packed FP Instructions
//===----------------------------------------------------------------------===//

let AddedComplexity = 20 in {
  def VMOVLHPSrr : VPSI<0x16, MRMSrcReg, (outs VR128:$dst),
                                       (ins VR128:$src1, VR128:$src2),
                      "movlhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                      [(set VR128:$dst,
                        (v4f32 (movlhps VR128:$src1, VR128:$src2)))]>,
                      VEX_4V;
  def VMOVHLPSrr : VPSI<0x12, MRMSrcReg, (outs VR128:$dst),
                                       (ins VR128:$src1, VR128:$src2),
                      "movhlps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                      [(set VR128:$dst,
                        (v4f32 (movhlps VR128:$src1, VR128:$src2)))]>,
                      VEX_4V;
}
let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
  def MOVLHPSrr : PSI<0x16, MRMSrcReg, (outs VR128:$dst),
                                       (ins VR128:$src1, VR128:$src2),
                      "movlhps\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                        (v4f32 (movlhps VR128:$src1, VR128:$src2)))]>;
  def MOVHLPSrr : PSI<0x12, MRMSrcReg, (outs VR128:$dst),
                                       (ins VR128:$src1, VR128:$src2),
                      "movhlps\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                        (v4f32 (movhlps VR128:$src1, VR128:$src2)))]>;
}

let Predicates = [HasAVX] in {
  // MOVLHPS patterns
  let AddedComplexity = 20 in {
    def : Pat<(v4f32 (movddup VR128:$src, (undef))),
              (VMOVLHPSrr (v4f32 VR128:$src), (v4f32 VR128:$src))>;
    def : Pat<(v2i64 (movddup VR128:$src, (undef))),
              (VMOVLHPSrr (v2i64 VR128:$src), (v2i64 VR128:$src))>;

    // vector_shuffle v1, v2 <0, 1, 4, 5> using MOVLHPS
    def : Pat<(v4i32 (movlhps VR128:$src1, VR128:$src2)),
              (VMOVLHPSrr VR128:$src1, VR128:$src2)>;
  }
  def : Pat<(v4f32 (X86Movlhps VR128:$src1, VR128:$src2)),
            (VMOVLHPSrr VR128:$src1, VR128:$src2)>;
  def : Pat<(v4i32 (X86Movlhps VR128:$src1, VR128:$src2)),
            (VMOVLHPSrr VR128:$src1, VR128:$src2)>;
  def : Pat<(v2i64 (X86Movlhps VR128:$src1, VR128:$src2)),
            (VMOVLHPSrr (v2i64 VR128:$src1), VR128:$src2)>;

  // MOVHLPS patterns
  let AddedComplexity = 20 in {
    // vector_shuffle v1, v2 <6, 7, 2, 3> using MOVHLPS
    def : Pat<(v4i32 (movhlps VR128:$src1, VR128:$src2)),
              (VMOVHLPSrr VR128:$src1, VR128:$src2)>;

    // vector_shuffle v1, undef <2, ?, ?, ?> using MOVHLPS
    def : Pat<(v4f32 (movhlps_undef VR128:$src1, (undef))),
              (VMOVHLPSrr VR128:$src1, VR128:$src1)>;
    def : Pat<(v4i32 (movhlps_undef VR128:$src1, (undef))),
              (VMOVHLPSrr VR128:$src1, VR128:$src1)>;
  }

  def : Pat<(v4f32 (X86Movhlps VR128:$src1, VR128:$src2)),
            (VMOVHLPSrr VR128:$src1, VR128:$src2)>;
  def : Pat<(v4i32 (X86Movhlps VR128:$src1, VR128:$src2)),
            (VMOVHLPSrr VR128:$src1, VR128:$src2)>;
}

let Predicates = [HasSSE1] in {
  // MOVLHPS patterns
  let AddedComplexity = 20 in {
    def : Pat<(v4f32 (movddup VR128:$src, (undef))),
              (MOVLHPSrr (v4f32 VR128:$src), (v4f32 VR128:$src))>;
    def : Pat<(v2i64 (movddup VR128:$src, (undef))),
              (MOVLHPSrr (v2i64 VR128:$src), (v2i64 VR128:$src))>;

    // vector_shuffle v1, v2 <0, 1, 4, 5> using MOVLHPS
    def : Pat<(v4i32 (movlhps VR128:$src1, VR128:$src2)),
              (MOVLHPSrr VR128:$src1, VR128:$src2)>;
  }
  def : Pat<(v4f32 (X86Movlhps VR128:$src1, VR128:$src2)),
            (MOVLHPSrr VR128:$src1, VR128:$src2)>;
  def : Pat<(v4i32 (X86Movlhps VR128:$src1, VR128:$src2)),
            (MOVLHPSrr VR128:$src1, VR128:$src2)>;
  def : Pat<(v2i64 (X86Movlhps VR128:$src1, VR128:$src2)),
            (MOVLHPSrr (v2i64 VR128:$src1), VR128:$src2)>;

  // MOVHLPS patterns
  let AddedComplexity = 20 in {
    // vector_shuffle v1, v2 <6, 7, 2, 3> using MOVHLPS
    def : Pat<(v4i32 (movhlps VR128:$src1, VR128:$src2)),
              (MOVHLPSrr VR128:$src1, VR128:$src2)>;

    // vector_shuffle v1, undef <2, ?, ?, ?> using MOVHLPS
    def : Pat<(v4f32 (movhlps_undef VR128:$src1, (undef))),
              (MOVHLPSrr VR128:$src1, VR128:$src1)>;
    def : Pat<(v4i32 (movhlps_undef VR128:$src1, (undef))),
              (MOVHLPSrr VR128:$src1, VR128:$src1)>;
  }

  def : Pat<(v4f32 (X86Movhlps VR128:$src1, VR128:$src2)),
            (MOVHLPSrr VR128:$src1, VR128:$src2)>;
  def : Pat<(v4i32 (X86Movhlps VR128:$src1, VR128:$src2)),
            (MOVHLPSrr VR128:$src1, VR128:$src2)>;
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Conversion Instructions
//===----------------------------------------------------------------------===//

multiclass sse12_cvt_s<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                       SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
                       string asm> {
  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
              [(set DstRC:$dst, (OpNode SrcRC:$src))]>;
  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
              [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))]>;
}

multiclass sse12_cvt_p<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                       SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
                       string asm, Domain d> {
  def rr : PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
              [(set DstRC:$dst, (OpNode SrcRC:$src))], d>;
  def rm : PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
              [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))], d>;
}

multiclass sse12_vcvt_avx<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                          X86MemOperand x86memop, string asm> {
  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src),
              !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>;
  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
              (ins DstRC:$src1, x86memop:$src),
              !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>;
}
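
// Background note on the extra $src1 operand in sse12_vcvt_avx: the AVX
// scalar converts take a second XMM source whose upper bits are merged into
// the destination, so the instruction carries no false dependency on the
// destination's previous contents. The patterns further below simply feed it
// an (IMPLICIT_DEF) when only the converted scalar matters.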

defm VCVTTSS2SI   : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
                      "cvttss2si\t{$src, $dst|$dst, $src}">, XS, VEX;
defm VCVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
                      "cvttss2si\t{$src, $dst|$dst, $src}">, XS, VEX,
                      VEX_W;
defm VCVTTSD2SI   : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
                      "cvttsd2si\t{$src, $dst|$dst, $src}">, XD, VEX;
defm VCVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
                      "cvttsd2si\t{$src, $dst|$dst, $src}">, XD,
                      VEX, VEX_W;

// The assembler can recognize rr 64-bit instructions by seeing an rxx
// register, but the same isn't true when only using memory operands, so we
// provide other assembly "l" and "q" forms to address this explicitly
// where appropriate to do so.
defm VCVTSI2SS   : sse12_vcvt_avx<0x2A, GR32, FR32, i32mem, "cvtsi2ss">, XS,
                   VEX_4V;
defm VCVTSI2SS64 : sse12_vcvt_avx<0x2A, GR64, FR32, i64mem, "cvtsi2ss{q}">, XS,
                   VEX_4V, VEX_W;
defm VCVTSI2SD   : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sd">, XD,
                   VEX_4V;
defm VCVTSI2SDL  : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sd{l}">, XD,
                   VEX_4V;
defm VCVTSI2SD64 : sse12_vcvt_avx<0x2A, GR64, FR64, i64mem, "cvtsi2sd{q}">, XD,
                   VEX_4V, VEX_W;

let Predicates = [HasAVX] in {
  def : Pat<(f32 (sint_to_fp (loadi32 addr:$src))),
            (VCVTSI2SSrm (f32 (IMPLICIT_DEF)), addr:$src)>;
  def : Pat<(f32 (sint_to_fp (loadi64 addr:$src))),
            (VCVTSI2SS64rm (f32 (IMPLICIT_DEF)), addr:$src)>;
  def : Pat<(f64 (sint_to_fp (loadi32 addr:$src))),
            (VCVTSI2SDrm (f64 (IMPLICIT_DEF)), addr:$src)>;
  def : Pat<(f64 (sint_to_fp (loadi64 addr:$src))),
            (VCVTSI2SD64rm (f64 (IMPLICIT_DEF)), addr:$src)>;

  def : Pat<(f32 (sint_to_fp GR32:$src)),
            (VCVTSI2SSrr (f32 (IMPLICIT_DEF)), GR32:$src)>;
  def : Pat<(f32 (sint_to_fp GR64:$src)),
            (VCVTSI2SS64rr (f32 (IMPLICIT_DEF)), GR64:$src)>;
  def : Pat<(f64 (sint_to_fp GR32:$src)),
            (VCVTSI2SDrr (f64 (IMPLICIT_DEF)), GR32:$src)>;
  def : Pat<(f64 (sint_to_fp GR64:$src)),
            (VCVTSI2SD64rr (f64 (IMPLICIT_DEF)), GR64:$src)>;
}

defm CVTTSS2SI   : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
                      "cvttss2si\t{$src, $dst|$dst, $src}">, XS;
defm CVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
                      "cvttss2si{q}\t{$src, $dst|$dst, $src}">, XS, REX_W;
defm CVTTSD2SI   : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
                      "cvttsd2si\t{$src, $dst|$dst, $src}">, XD;
defm CVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
                      "cvttsd2si{q}\t{$src, $dst|$dst, $src}">, XD, REX_W;
defm CVTSI2SS    : sse12_cvt_s<0x2A, GR32, FR32, sint_to_fp, i32mem, loadi32,
                      "cvtsi2ss\t{$src, $dst|$dst, $src}">, XS;
defm CVTSI2SS64  : sse12_cvt_s<0x2A, GR64, FR32, sint_to_fp, i64mem, loadi64,
                      "cvtsi2ss{q}\t{$src, $dst|$dst, $src}">, XS, REX_W;
defm CVTSI2SD    : sse12_cvt_s<0x2A, GR32, FR64, sint_to_fp, i32mem, loadi32,
                      "cvtsi2sd\t{$src, $dst|$dst, $src}">, XD;
defm CVTSI2SD64  : sse12_cvt_s<0x2A, GR64, FR64, sint_to_fp, i64mem, loadi64,
                      "cvtsi2sd{q}\t{$src, $dst|$dst, $src}">, XD, REX_W;
1272 // Conversion Instructions Intrinsics - Match intrinsics which expect MM
1273 // and/or XMM operand(s).
1275 multiclass sse12_cvt_sint<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
1276 Intrinsic Int, X86MemOperand x86memop, PatFrag ld_frag,
1277 string asm> {
1278 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
1279 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
1280 [(set DstRC:$dst, (Int SrcRC:$src))]>;
1281 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
1282 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
1283 [(set DstRC:$dst, (Int (ld_frag addr:$src)))]>;
1284 }
1286 multiclass sse12_cvt_sint_3addr<bits<8> opc, RegisterClass SrcRC,
1287 RegisterClass DstRC, Intrinsic Int, X86MemOperand x86memop,
1288 PatFrag ld_frag, string asm, bit Is2Addr = 1> {
1289 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src2),
1290 !if(Is2Addr,
1291 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
1292 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
1293 [(set DstRC:$dst, (Int DstRC:$src1, SrcRC:$src2))]>;
1294 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
1295 (ins DstRC:$src1, x86memop:$src2),
1296 !if(Is2Addr,
1297 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
1298 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
1299 [(set DstRC:$dst, (Int DstRC:$src1, (ld_frag addr:$src2)))]>;
1300 }
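// With Is2Addr = 1 the SSE form ties $src1 to $dst and prints two operands,
// while the AVX (Is2Addr = 0) form prints three; e.g. the same multiclass
// yields roughly "cvtsi2ss %eax, %xmm0" vs. "vcvtsi2ss %eax, %xmm1, %xmm0"
// (illustrative operands).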
1302 defm Int_VCVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
1303 f128mem, load, "cvtsd2si">, XD, VEX;
1304 defm Int_VCVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64,
1305 int_x86_sse2_cvtsd2si64, f128mem, load, "cvtsd2si">,
1306 XD, VEX, VEX_W;
1308 // FIXME: The asm matcher has a hack to ignore instructions with _Int and Int_
1309 // prefixes. Get rid of this hack or rename the intrinsics; there are several
1310 // instructions that only match in the intrinsic form. Why create duplicates
1311 // just to let the assembler recognize them?
1312 let Pattern = []<dag> in {
1313 defm VCVTSD2SI : sse12_cvt_s<0x2D, FR64, GR32, undef, f64mem, load,
1314 "cvtsd2si\t{$src, $dst|$dst, $src}">, XD, VEX;
1315 defm VCVTSD2SI64 : sse12_cvt_s<0x2D, FR64, GR64, undef, f64mem, load,
1316 "cvtsd2si\t{$src, $dst|$dst, $src}">, XD, VEX, VEX_W;
1317 }
1318 defm CVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
1319 f128mem, load, "cvtsd2si{l}">, XD;
1320 defm CVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse2_cvtsd2si64,
1321 f128mem, load, "cvtsd2si{q}">, XD, REX_W;
1324 defm Int_VCVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
1325 int_x86_sse_cvtsi2ss, i32mem, loadi32, "cvtsi2ss", 0>, XS, VEX_4V;
1326 defm Int_VCVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
1327 int_x86_sse_cvtsi642ss, i64mem, loadi64, "cvtsi2ss", 0>, XS, VEX_4V,
1328 VEX_W;
1329 defm Int_VCVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
1330 int_x86_sse2_cvtsi2sd, i32mem, loadi32, "cvtsi2sd", 0>, XD, VEX_4V;
1331 defm Int_VCVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
1332 int_x86_sse2_cvtsi642sd, i64mem, loadi64, "cvtsi2sd", 0>, XD,
1333 VEX_4V, VEX_W;
1335 let Constraints = "$src1 = $dst" in {
1336 defm Int_CVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
1337 int_x86_sse_cvtsi2ss, i32mem, loadi32,
1338 "cvtsi2ss">, XS;
1339 defm Int_CVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
1340 int_x86_sse_cvtsi642ss, i64mem, loadi64,
1341 "cvtsi2ss{q}">, XS, REX_W;
1342 defm Int_CVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
1343 int_x86_sse2_cvtsi2sd, i32mem, loadi32,
1344 "cvtsi2sd">, XD;
1345 defm Int_CVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
1346 int_x86_sse2_cvtsi642sd, i64mem, loadi64,
1347 "cvtsi2sd">, XD, REX_W;
1348 }
1352 // Aliases for intrinsics
1353 defm Int_VCVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
1354 f32mem, load, "cvttss2si">, XS, VEX;
1355 defm Int_VCVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
1356 int_x86_sse_cvttss2si64, f32mem, load,
1357 "cvttss2si">, XS, VEX, VEX_W;
1358 defm Int_VCVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
1359 f128mem, load, "cvttsd2si">, XD, VEX;
1360 defm Int_VCVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
1361 int_x86_sse2_cvttsd2si64, f128mem, load,
1362 "cvttsd2si">, XD, VEX, VEX_W;
1363 defm Int_CVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
1364 f32mem, load, "cvttss2si">, XS;
1365 defm Int_CVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
1366 int_x86_sse_cvttss2si64, f32mem, load,
1367 "cvttss2si{q}">, XS, REX_W;
1368 defm Int_CVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
1369 f128mem, load, "cvttsd2si">, XD;
1370 defm Int_CVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
1371 int_x86_sse2_cvttsd2si64, f128mem, load,
1372 "cvttsd2si{q}">, XD, REX_W;
1374 let Pattern = []<dag> in {
1375 defm VCVTSS2SI : sse12_cvt_s<0x2D, FR32, GR32, undef, f32mem, load,
1376 "cvtss2si{l}\t{$src, $dst|$dst, $src}">, XS, VEX;
1377 defm VCVTSS2SI64 : sse12_cvt_s<0x2D, FR32, GR64, undef, f32mem, load,
1378 "cvtss2si\t{$src, $dst|$dst, $src}">, XS, VEX,
1379 VEX_W;
1380 defm VCVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, undef, i128mem, load,
1381 "cvtdq2ps\t{$src, $dst|$dst, $src}",
1382 SSEPackedSingle>, TB, VEX;
1383 defm VCVTDQ2PSY : sse12_cvt_p<0x5B, VR256, VR256, undef, i256mem, load,
1384 "cvtdq2ps\t{$src, $dst|$dst, $src}",
1385 SSEPackedSingle>, TB, VEX;
1386 }
1388 let Pattern = []<dag> in {
1389 defm CVTSS2SI : sse12_cvt_s<0x2D, FR32, GR32, undef, f32mem, load /*dummy*/,
1390 "cvtss2si{l}\t{$src, $dst|$dst, $src}">, XS;
1391 defm CVTSS2SI64 : sse12_cvt_s<0x2D, FR32, GR64, undef, f32mem, load /*dummy*/,
1392 "cvtss2si{q}\t{$src, $dst|$dst, $src}">, XS, REX_W;
1393 defm CVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, undef, i128mem, load /*dummy*/,
1394 "cvtdq2ps\t{$src, $dst|$dst, $src}",
1395 SSEPackedSingle>, TB; /* PD SSE3 form is available */
1396 }
1398 let Predicates = [HasSSE1] in {
1399 def : Pat<(int_x86_sse_cvtss2si VR128:$src),
1400 (CVTSS2SIrr (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
1401 def : Pat<(int_x86_sse_cvtss2si (load addr:$src)),
1402 (CVTSS2SIrm addr:$src)>;
1403 def : Pat<(int_x86_sse_cvtss2si64 VR128:$src),
1404 (CVTSS2SI64rr (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
1405 def : Pat<(int_x86_sse_cvtss2si64 (load addr:$src)),
1406 (CVTSS2SI64rm addr:$src)>;
1407 }
1409 let Predicates = [HasAVX] in {
1410 def : Pat<(int_x86_sse_cvtss2si VR128:$src),
1411 (VCVTSS2SIrr (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
1412 def : Pat<(int_x86_sse_cvtss2si (load addr:$src)),
1413 (VCVTSS2SIrm addr:$src)>;
1414 def : Pat<(int_x86_sse_cvtss2si64 VR128:$src),
1415 (VCVTSS2SI64rr (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
1416 def : Pat<(int_x86_sse_cvtss2si64 (load addr:$src)),
1417 (VCVTSS2SI64rm addr:$src)>;
1418 }
1422 // Convert scalar double to scalar single
1423 def VCVTSD2SSrr : VSDI<0x5A, MRMSrcReg, (outs FR32:$dst),
1424 (ins FR64:$src1, FR64:$src2),
1425 "cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
1426 VEX_4V;
1427 def VCVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst),
1428 (ins FR64:$src1, f64mem:$src2),
1429 "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1430 []>, XD, Requires<[HasAVX, OptForSize]>, VEX_4V;
1431 def : Pat<(f32 (fround FR64:$src)), (VCVTSD2SSrr FR64:$src, FR64:$src)>,
1432 Requires<[HasAVX]>;
1434 def CVTSD2SSrr : SDI<0x5A, MRMSrcReg, (outs FR32:$dst), (ins FR64:$src),
1435 "cvtsd2ss\t{$src, $dst|$dst, $src}",
1436 [(set FR32:$dst, (fround FR64:$src))]>;
1437 def CVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst), (ins f64mem:$src),
1438 "cvtsd2ss\t{$src, $dst|$dst, $src}",
1439 [(set FR32:$dst, (fround (loadf64 addr:$src)))]>, XD,
1440 Requires<[HasSSE2, OptForSize]>;
1442 defm Int_VCVTSD2SS: sse12_cvt_sint_3addr<0x5A, VR128, VR128,
1443 int_x86_sse2_cvtsd2ss, f64mem, load, "cvtsd2ss", 0>,
1444 XS, VEX_4V;
1445 let Constraints = "$src1 = $dst" in
1446 defm Int_CVTSD2SS: sse12_cvt_sint_3addr<0x5A, VR128, VR128,
1447 int_x86_sse2_cvtsd2ss, f64mem, load, "cvtsd2ss">, XS;
1449 // Convert scalar single to scalar double
1450 // SSE2 instructions with XS prefix
1451 def VCVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst),
1452 (ins FR32:$src1, FR32:$src2),
1453 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1454 []>, XS, Requires<[HasAVX]>, VEX_4V;
1455 def VCVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst),
1456 (ins FR32:$src1, f32mem:$src2),
1457 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1458 []>, XS, VEX_4V, Requires<[HasAVX, OptForSize]>;
1460 let Predicates = [HasAVX] in {
1461 def : Pat<(f64 (fextend FR32:$src)),
1462 (VCVTSS2SDrr FR32:$src, FR32:$src)>;
1463 def : Pat<(fextend (loadf32 addr:$src)),
1464 (VCVTSS2SDrm (f32 (IMPLICIT_DEF)), addr:$src)>;
1465 def : Pat<(extloadf32 addr:$src),
1466 (VCVTSS2SDrm (f32 (IMPLICIT_DEF)), addr:$src)>;
1467 }
1469 def CVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst), (ins FR32:$src),
1470 "cvtss2sd\t{$src, $dst|$dst, $src}",
1471 [(set FR64:$dst, (fextend FR32:$src))]>, XS,
1472 Requires<[HasSSE2]>;
1473 def CVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst), (ins f32mem:$src),
1474 "cvtss2sd\t{$src, $dst|$dst, $src}",
1475 [(set FR64:$dst, (extloadf32 addr:$src))]>, XS,
1476 Requires<[HasSSE2, OptForSize]>;
1478 def Int_VCVTSS2SDrr: I<0x5A, MRMSrcReg,
1479 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1480 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1481 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
1482 VR128:$src2))]>, XS, VEX_4V,
1483 Requires<[HasAVX]>;
1484 def Int_VCVTSS2SDrm: I<0x5A, MRMSrcMem,
1485 (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2),
1486 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1487 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
1488 (load addr:$src2)))]>, XS, VEX_4V,
1489 Requires<[HasAVX]>;
1490 let Constraints = "$src1 = $dst" in { // SSE2 instructions with XS prefix
1491 def Int_CVTSS2SDrr: I<0x5A, MRMSrcReg,
1492 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1493 "cvtss2sd\t{$src2, $dst|$dst, $src2}",
1494 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
1495 VR128:$src2))]>, XS,
1496 Requires<[HasSSE2]>;
1497 def Int_CVTSS2SDrm: I<0x5A, MRMSrcMem,
1498 (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2),
1499 "cvtss2sd\t{$src2, $dst|$dst, $src2}",
1500 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
1501 (load addr:$src2)))]>, XS,
1502 Requires<[HasSSE2]>;
1503 }
1505 def : Pat<(extloadf32 addr:$src),
1506 (CVTSS2SDrr (MOVSSrm addr:$src))>,
1507 Requires<[HasSSE2, OptForSpeed]>;
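// i.e. when optimizing for speed the f32 extending load is split into a
// plain load plus a register-register convert, roughly
//   movss (%rdi), %xmm0
//   cvtss2sd %xmm0, %xmm0
// rather than the folded "cvtss2sd (%rdi), %xmm0" used under OptForSize
// (illustrative operands).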
1509 // Convert doubleword to packed single/double fp
1510 // SSE2 instructions without OpSize prefix
1511 def Int_VCVTDQ2PSrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1512 "vcvtdq2ps\t{$src, $dst|$dst, $src}",
1513 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))]>,
1514 TB, VEX, Requires<[HasAVX]>;
1515 def Int_VCVTDQ2PSrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
1516 "vcvtdq2ps\t{$src, $dst|$dst, $src}",
1517 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps
1518 (bitconvert (memopv2i64 addr:$src))))]>,
1519 TB, VEX, Requires<[HasAVX]>;
1520 def Int_CVTDQ2PSrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1521 "cvtdq2ps\t{$src, $dst|$dst, $src}",
1522 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))]>,
1523 TB, Requires<[HasSSE2]>;
1524 def Int_CVTDQ2PSrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
1525 "cvtdq2ps\t{$src, $dst|$dst, $src}",
1526 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps
1527 (bitconvert (memopv2i64 addr:$src))))]>,
1528 TB, Requires<[HasSSE2]>;
1530 // FIXME: why is the non-intrinsic version described as SSE3?
1531 // SSE2 instructions with XS prefix
1532 def Int_VCVTDQ2PDrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1533 "vcvtdq2pd\t{$src, $dst|$dst, $src}",
1534 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))]>,
1535 XS, VEX, Requires<[HasAVX]>;
1536 def Int_VCVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
1537 "vcvtdq2pd\t{$src, $dst|$dst, $src}",
1538 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd
1539 (bitconvert (memopv2i64 addr:$src))))]>,
1540 XS, VEX, Requires<[HasAVX]>;
1541 def Int_CVTDQ2PDrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1542 "cvtdq2pd\t{$src, $dst|$dst, $src}",
1543 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))]>,
1544 XS, Requires<[HasSSE2]>;
1545 def Int_CVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
1546 "cvtdq2pd\t{$src, $dst|$dst, $src}",
1547 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd
1548 (bitconvert (memopv2i64 addr:$src))))]>,
1549 XS, Requires<[HasSSE2]>;
1552 // Convert packed single/double fp to doubleword
1553 def VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1554 "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
1555 def VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1556 "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
1557 def VCVTPS2DQYrr : VPDI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
1558 "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
1559 def VCVTPS2DQYrm : VPDI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
1560 "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
1561 def CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1562 "cvtps2dq\t{$src, $dst|$dst, $src}", []>;
1563 def CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1564 "cvtps2dq\t{$src, $dst|$dst, $src}", []>;
1566 def Int_VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1567 "cvtps2dq\t{$src, $dst|$dst, $src}",
1568 [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))]>,
1569 VEX;
1570 def Int_VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst),
1571 (ins f128mem:$src),
1572 "cvtps2dq\t{$src, $dst|$dst, $src}",
1573 [(set VR128:$dst, (int_x86_sse2_cvtps2dq
1574 (memop addr:$src)))]>, VEX;
1575 def Int_CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1576 "cvtps2dq\t{$src, $dst|$dst, $src}",
1577 [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))]>;
1578 def Int_CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1579 "cvtps2dq\t{$src, $dst|$dst, $src}",
1580 [(set VR128:$dst, (int_x86_sse2_cvtps2dq
1581 (memop addr:$src)))]>;
1583 // SSE2 packed instructions with XD prefix
1584 def Int_VCVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1585 "vcvtpd2dq\t{$src, $dst|$dst, $src}",
1586 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
1587 XD, VEX, Requires<[HasAVX]>;
1588 def Int_VCVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1589 "vcvtpd2dq\t{$src, $dst|$dst, $src}",
1590 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq
1591 (memop addr:$src)))]>,
1592 XD, VEX, Requires<[HasAVX]>;
1593 def Int_CVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1594 "cvtpd2dq\t{$src, $dst|$dst, $src}",
1595 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
1596 XD, Requires<[HasSSE2]>;
1597 def Int_CVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1598 "cvtpd2dq\t{$src, $dst|$dst, $src}",
1599 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq
1600 (memop addr:$src)))]>,
1601 XD, Requires<[HasSSE2]>;
1604 // Convert packed single/double fp to doubleword with truncation
1605 // SSE2 packed instructions with XS prefix
1606 def VCVTTPS2DQrr : VSSI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1607 "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
1608 def VCVTTPS2DQrm : VSSI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1609 "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
1610 def VCVTTPS2DQYrr : VSSI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
1611 "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
1612 def VCVTTPS2DQYrm : VSSI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
1613 "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
1614 def CVTTPS2DQrr : SSI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1615 "cvttps2dq\t{$src, $dst|$dst, $src}",
1616 [(set VR128:$dst,
1617 (int_x86_sse2_cvttps2dq VR128:$src))]>;
1618 def CVTTPS2DQrm : SSI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1619 "cvttps2dq\t{$src, $dst|$dst, $src}",
1620 [(set VR128:$dst,
1621 (int_x86_sse2_cvttps2dq (memop addr:$src)))]>;
1623 def Int_VCVTTPS2DQrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1624 "vcvttps2dq\t{$src, $dst|$dst, $src}",
1625 [(set VR128:$dst,
1626 (int_x86_sse2_cvttps2dq VR128:$src))]>,
1627 XS, VEX, Requires<[HasAVX]>;
1628 def Int_VCVTTPS2DQrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1629 "vcvttps2dq\t{$src, $dst|$dst, $src}",
1630 [(set VR128:$dst, (int_x86_sse2_cvttps2dq
1631 (memop addr:$src)))]>,
1632 XS, VEX, Requires<[HasAVX]>;
1634 def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
1635 (Int_CVTDQ2PSrr VR128:$src)>, Requires<[HasSSE2]>;
1636 def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
1637 (CVTTPS2DQrr VR128:$src)>, Requires<[HasSSE2]>;
1639 def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
1640 (Int_VCVTDQ2PSrr VR128:$src)>, Requires<[HasAVX]>;
1641 def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
1642 (VCVTTPS2DQrr VR128:$src)>, Requires<[HasAVX]>;
1643 def : Pat<(v8f32 (sint_to_fp (v8i32 VR256:$src))),
1644 (VCVTDQ2PSYrr VR256:$src)>, Requires<[HasAVX]>;
1645 def : Pat<(v8i32 (fp_to_sint (v8f32 VR256:$src))),
1646 (VCVTTPS2DQYrr VR256:$src)>, Requires<[HasAVX]>;
1648 def Int_VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst),
1649 (ins VR128:$src),
1650 "cvttpd2dq\t{$src, $dst|$dst, $src}",
1651 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))]>,
1652 VEX;
1653 def Int_VCVTTPD2DQrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst),
1654 (ins f128mem:$src),
1655 "cvttpd2dq\t{$src, $dst|$dst, $src}",
1656 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
1657 (memop addr:$src)))]>, VEX;
1658 def CVTTPD2DQrr : PDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1659 "cvttpd2dq\t{$src, $dst|$dst, $src}",
1660 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))]>;
1661 def CVTTPD2DQrm : PDI<0xE6, MRMSrcMem, (outs VR128:$dst),(ins f128mem:$src),
1662 "cvttpd2dq\t{$src, $dst|$dst, $src}",
1663 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
1664 (memop addr:$src)))]>;
1666 // The assembler can recognize rr 256-bit instructions by seeing a ymm
1667 // register, but the same isn't true when using memory operands instead.
1668 // Provide other assembly rr and rm forms to address this explicitly.
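// For example (illustrative operands): "vcvttpd2dq %ymm1, %xmm0" is
// unambiguous, but with a memory source the suffixed mnemonics pick the
// size: "vcvttpd2dqx (%rdi), %xmm0" (128-bit) vs.
// "vcvttpd2dqy (%rdi), %xmm0" (256-bit).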
1669 def VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1670 "cvttpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
1671 def VCVTTPD2DQXrYr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
1672 "cvttpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
1674 // XMM only
1675 def VCVTTPD2DQXrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1676 "cvttpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
1677 def VCVTTPD2DQXrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1678 "cvttpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
1680 // YMM only
1681 def VCVTTPD2DQYrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
1682 "cvttpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX;
1683 def VCVTTPD2DQYrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
1684 "cvttpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
1686 // Convert packed single to packed double
1687 let Predicates = [HasAVX] in {
1688 // SSE2 instructions without OpSize prefix
1689 def VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1690 "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, TB, VEX;
1691 def VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
1692 "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, TB, VEX;
1693 def VCVTPS2PDYrr : I<0x5A, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
1694 "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, TB, VEX;
1695 def VCVTPS2PDYrm : I<0x5A, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
1696 "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, TB, VEX;
1697 }
1698 def CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1699 "cvtps2pd\t{$src, $dst|$dst, $src}", []>, TB;
1700 def CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
1701 "cvtps2pd\t{$src, $dst|$dst, $src}", []>, TB;
1703 def Int_VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1704 "vcvtps2pd\t{$src, $dst|$dst, $src}",
1705 [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>,
1706 TB, VEX, Requires<[HasAVX]>;
1707 def Int_VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
1708 "vcvtps2pd\t{$src, $dst|$dst, $src}",
1709 [(set VR128:$dst, (int_x86_sse2_cvtps2pd
1710 (load addr:$src)))]>,
1711 TB, VEX, Requires<[HasAVX]>;
1712 def Int_CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1713 "cvtps2pd\t{$src, $dst|$dst, $src}",
1714 [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>,
1715 TB, Requires<[HasSSE2]>;
1716 def Int_CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
1717 "cvtps2pd\t{$src, $dst|$dst, $src}",
1718 [(set VR128:$dst, (int_x86_sse2_cvtps2pd
1719 (load addr:$src)))]>,
1720 TB, Requires<[HasSSE2]>;
1722 // Convert packed double to packed single
1723 // The assembler can recognize rr 256-bit instructions by seeing a ymm
1724 // register, but the same isn't true when using memory operands instead.
1725 // Provide other assembly rr and rm forms to address this explicitly.
1726 def VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1727 "cvtpd2ps\t{$src, $dst|$dst, $src}", []>, VEX;
1728 def VCVTPD2PSXrYr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
1729 "cvtpd2ps\t{$src, $dst|$dst, $src}", []>, VEX;
1731 // XMM only
1732 def VCVTPD2PSXrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1733 "cvtpd2psx\t{$src, $dst|$dst, $src}", []>, VEX;
1734 def VCVTPD2PSXrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1735 "cvtpd2psx\t{$src, $dst|$dst, $src}", []>, VEX;
1737 // YMM only
1738 def VCVTPD2PSYrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
1739 "cvtpd2psy\t{$src, $dst|$dst, $src}", []>, VEX;
1740 def VCVTPD2PSYrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
1741 "cvtpd2psy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
1742 def CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1743 "cvtpd2ps\t{$src, $dst|$dst, $src}", []>;
1744 def CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1745 "cvtpd2ps\t{$src, $dst|$dst, $src}", []>;
1748 def Int_VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1749 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1750 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>;
1751 def Int_VCVTPD2PSrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst),
1752 (ins f128mem:$src),
1753 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1754 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps
1755 (memop addr:$src)))]>;
1756 def Int_CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1757 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1758 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>;
1759 def Int_CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1760 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1761 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps
1762 (memop addr:$src)))]>;
1764 // AVX 256-bit register conversion intrinsics
1765 // FIXME: Migrate SSE conversion intrinsics matching to use patterns as below
1766 // whenever possible to avoid declaring two versions of each one.
1767 def : Pat<(int_x86_avx_cvtdq2_ps_256 VR256:$src),
1768 (VCVTDQ2PSYrr VR256:$src)>;
1769 def : Pat<(int_x86_avx_cvtdq2_ps_256 (memopv8i32 addr:$src)),
1770 (VCVTDQ2PSYrm addr:$src)>;
1772 def : Pat<(int_x86_avx_cvt_pd2_ps_256 VR256:$src),
1773 (VCVTPD2PSYrr VR256:$src)>;
1774 def : Pat<(int_x86_avx_cvt_pd2_ps_256 (memopv4f64 addr:$src)),
1775 (VCVTPD2PSYrm addr:$src)>;
1777 def : Pat<(int_x86_avx_cvt_ps2dq_256 VR256:$src),
1778 (VCVTPS2DQYrr VR256:$src)>;
1779 def : Pat<(int_x86_avx_cvt_ps2dq_256 (memopv8f32 addr:$src)),
1780 (VCVTPS2DQYrm addr:$src)>;
1782 def : Pat<(int_x86_avx_cvt_ps2_pd_256 VR128:$src),
1783 (VCVTPS2PDYrr VR128:$src)>;
1784 def : Pat<(int_x86_avx_cvt_ps2_pd_256 (memopv4f32 addr:$src)),
1785 (VCVTPS2PDYrm addr:$src)>;
1787 def : Pat<(int_x86_avx_cvtt_pd2dq_256 VR256:$src),
1788 (VCVTTPD2DQYrr VR256:$src)>;
1789 def : Pat<(int_x86_avx_cvtt_pd2dq_256 (memopv4f64 addr:$src)),
1790 (VCVTTPD2DQYrm addr:$src)>;
1792 def : Pat<(int_x86_avx_cvtt_ps2dq_256 VR256:$src),
1793 (VCVTTPS2DQYrr VR256:$src)>;
1794 def : Pat<(int_x86_avx_cvtt_ps2dq_256 (memopv8f32 addr:$src)),
1795 (VCVTTPS2DQYrm addr:$src)>;
1797 // Match fround and fextend for 128/256-bit conversions
1798 def : Pat<(v4f32 (fround (v4f64 VR256:$src))),
1799 (VCVTPD2PSYrr VR256:$src)>;
1800 def : Pat<(v4f32 (fround (loadv4f64 addr:$src))),
1801 (VCVTPD2PSYrm addr:$src)>;
1803 def : Pat<(v4f64 (fextend (v4f32 VR128:$src))),
1804 (VCVTPS2PDYrr VR128:$src)>;
1805 def : Pat<(v4f64 (fextend (loadv4f32 addr:$src))),
1806 (VCVTPS2PDYrm addr:$src)>;
1808 //===----------------------------------------------------------------------===//
1809 // SSE 1 & 2 - Compare Instructions
1810 //===----------------------------------------------------------------------===//
1812 // sse12_cmp_scalar - sse 1 & 2 compare scalar instructions
1813 multiclass sse12_cmp_scalar<RegisterClass RC, X86MemOperand x86memop,
1814 string asm, string asm_alt> {
1815 let isAsmParserOnly = 1 in {
1816 def rr : SIi8<0xC2, MRMSrcReg,
1817 (outs RC:$dst), (ins RC:$src1, RC:$src, SSECC:$cc),
1818 asm, []>;
1820 def rm : SIi8<0xC2, MRMSrcMem,
1821 (outs RC:$dst), (ins RC:$src1, x86memop:$src, SSECC:$cc),
1822 asm, []>;
1825 // Accept explicit immediate argument form instead of comparison code.
1826 def rr_alt : SIi8<0xC2, MRMSrcReg,
1827 (outs RC:$dst), (ins RC:$src1, RC:$src, i8imm:$src2),
1828 asm_alt, []>;
1830 def rm_alt : SIi8<0xC2, MRMSrcMem,
1831 (outs RC:$dst), (ins RC:$src1, x86memop:$src, i8imm:$src2),
1832 asm_alt, []>;
1833 }
1834 }
1835 let neverHasSideEffects = 1 in {
1836 defm VCMPSS : sse12_cmp_scalar<FR32, f32mem,
1837 "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}",
1838 "cmpss\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}">,
1839 XS, VEX_4V;
1840 defm VCMPSD : sse12_cmp_scalar<FR64, f64mem,
1841 "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}",
1842 "cmpsd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}">,
1843 XD, VEX_4V;
1844 }
1846 let Constraints = "$src1 = $dst" in {
1847 def CMPSSrr : SIi8<0xC2, MRMSrcReg,
1848 (outs FR32:$dst), (ins FR32:$src1, FR32:$src2, SSECC:$cc),
1849 "cmp${cc}ss\t{$src2, $dst|$dst, $src2}",
1850 [(set FR32:$dst, (X86cmpss (f32 FR32:$src1), FR32:$src2, imm:$cc))]>, XS;
1851 def CMPSSrm : SIi8<0xC2, MRMSrcMem,
1852 (outs FR32:$dst), (ins FR32:$src1, f32mem:$src2, SSECC:$cc),
1853 "cmp${cc}ss\t{$src2, $dst|$dst, $src2}",
1854 [(set FR32:$dst, (X86cmpss (f32 FR32:$src1), (loadf32 addr:$src2), imm:$cc))]>, XS;
1855 def CMPSDrr : SIi8<0xC2, MRMSrcReg,
1856 (outs FR64:$dst), (ins FR64:$src1, FR64:$src2, SSECC:$cc),
1857 "cmp${cc}sd\t{$src2, $dst|$dst, $src2}",
1858 [(set FR64:$dst, (X86cmpsd (f64 FR64:$src1), FR64:$src2, imm:$cc))]>, XD;
1859 def CMPSDrm : SIi8<0xC2, MRMSrcMem,
1860 (outs FR64:$dst), (ins FR64:$src1, f64mem:$src2, SSECC:$cc),
1861 "cmp${cc}sd\t{$src2, $dst|$dst, $src2}",
1862 [(set FR64:$dst, (X86cmpsd (f64 FR64:$src1), (loadf64 addr:$src2), imm:$cc))]>, XD;
1863 }
1864 let Constraints = "$src1 = $dst", neverHasSideEffects = 1 in {
1865 def CMPSSrr_alt : SIi8<0xC2, MRMSrcReg,
1866 (outs FR32:$dst), (ins FR32:$src1, FR32:$src, i8imm:$src2),
1867 "cmpss\t{$src2, $src, $dst|$dst, $src, $src2}", []>, XS;
1868 def CMPSSrm_alt : SIi8<0xC2, MRMSrcMem,
1869 (outs FR32:$dst), (ins FR32:$src1, f32mem:$src, i8imm:$src2),
1870 "cmpss\t{$src2, $src, $dst|$dst, $src, $src2}", []>, XS;
1871 def CMPSDrr_alt : SIi8<0xC2, MRMSrcReg,
1872 (outs FR64:$dst), (ins FR64:$src1, FR64:$src, i8imm:$src2),
1873 "cmpsd\t{$src2, $src, $dst|$dst, $src, $src2}", []>, XD;
1874 def CMPSDrm_alt : SIi8<0xC2, MRMSrcMem,
1875 (outs FR64:$dst), (ins FR64:$src1, f64mem:$src, i8imm:$src2),
1876 "cmpsd\t{$src2, $src, $dst|$dst, $src, $src2}", []>, XD;
1877 }
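// For example, the explicit-immediate form "cmpss $7, %xmm1, %xmm0" encodes
// the same instruction as "cmpordss %xmm1, %xmm0", since $cc == 7 selects
// the "ordered" predicate (illustrative operands).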
1879 multiclass sse12_cmp_scalar_int<RegisterClass RC, X86MemOperand x86memop,
1880 Intrinsic Int, string asm> {
1881 def rr : SIi8<0xC2, MRMSrcReg, (outs VR128:$dst),
1882 (ins VR128:$src1, VR128:$src, SSECC:$cc), asm,
1883 [(set VR128:$dst, (Int VR128:$src1,
1884 VR128:$src, imm:$cc))]>;
1885 def rm : SIi8<0xC2, MRMSrcMem, (outs VR128:$dst),
1886 (ins VR128:$src1, f32mem:$src, SSECC:$cc), asm,
1887 [(set VR128:$dst, (Int VR128:$src1,
1888 (load addr:$src), imm:$cc))]>;
1889 }
1891 // Aliases to match intrinsics which expect XMM operand(s).
1892 defm Int_VCMPSS : sse12_cmp_scalar_int<VR128, f32mem, int_x86_sse_cmp_ss,
1893 "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}">,
1894 XS, VEX_4V;
1895 defm Int_VCMPSD : sse12_cmp_scalar_int<VR128, f64mem, int_x86_sse2_cmp_sd,
1896 "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}">,
1897 XD, VEX_4V;
1898 let Constraints = "$src1 = $dst" in {
1899 defm Int_CMPSS : sse12_cmp_scalar_int<VR128, f32mem, int_x86_sse_cmp_ss,
1900 "cmp${cc}ss\t{$src, $dst|$dst, $src}">, XS;
1901 defm Int_CMPSD : sse12_cmp_scalar_int<VR128, f64mem, int_x86_sse2_cmp_sd,
1902 "cmp${cc}sd\t{$src, $dst|$dst, $src}">, XD;
1903 }
1906 // sse12_ord_cmp - Unordered/Ordered scalar fp compare and set EFLAGS
1907 multiclass sse12_ord_cmp<bits<8> opc, RegisterClass RC, SDNode OpNode,
1908 ValueType vt, X86MemOperand x86memop,
1909 PatFrag ld_frag, string OpcodeStr, Domain d> {
1910 def rr: PI<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
1911 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
1912 [(set EFLAGS, (OpNode (vt RC:$src1), RC:$src2))], d>;
1913 def rm: PI<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
1914 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
1915 [(set EFLAGS, (OpNode (vt RC:$src1),
1916 (ld_frag addr:$src2)))], d>;
1917 }
1919 let Defs = [EFLAGS] in {
1920 defm VUCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
1921 "ucomiss", SSEPackedSingle>, TB, VEX;
1922 defm VUCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
1923 "ucomisd", SSEPackedDouble>, TB, OpSize, VEX;
1924 let Pattern = []<dag> in {
1925 defm VCOMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
1926 "comiss", SSEPackedSingle>, TB, VEX;
1927 defm VCOMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
1928 "comisd", SSEPackedDouble>, TB, OpSize, VEX;
1929 }
1931 defm Int_VUCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
1932 load, "ucomiss", SSEPackedSingle>, TB, VEX;
1933 defm Int_VUCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
1934 load, "ucomisd", SSEPackedDouble>, TB, OpSize, VEX;
1936 defm Int_VCOMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem,
1937 load, "comiss", SSEPackedSingle>, TB, VEX;
1938 defm Int_VCOMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem,
1939 load, "comisd", SSEPackedDouble>, TB, OpSize, VEX;
1940 defm UCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
1941 "ucomiss", SSEPackedSingle>, TB;
1942 defm UCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
1943 "ucomisd", SSEPackedDouble>, TB, OpSize;
1945 let Pattern = []<dag> in {
1946 defm COMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
1947 "comiss", SSEPackedSingle>, TB;
1948 defm COMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
1949 "comisd", SSEPackedDouble>, TB, OpSize;
1950 }
1952 defm Int_UCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
1953 load, "ucomiss", SSEPackedSingle>, TB;
1954 defm Int_UCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
1955 load, "ucomisd", SSEPackedDouble>, TB, OpSize;
1957 defm Int_COMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem, load,
1958 "comiss", SSEPackedSingle>, TB;
1959 defm Int_COMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem, load,
1960 "comisd", SSEPackedDouble>, TB, OpSize;
1961 } // Defs = [EFLAGS]
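// Reminder: the ucomis* variants differ from comis* only in not signalling
// on quiet NaNs; both set ZF/PF/CF in EFLAGS, e.g. (illustrative)
//   ucomiss %xmm1, %xmm0
//   ja .Lgreater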
1963 // sse12_cmp_packed - sse 1 & 2 compare packed instructions
1964 multiclass sse12_cmp_packed<RegisterClass RC, X86MemOperand x86memop,
1965 Intrinsic Int, string asm, string asm_alt,
1966 Domain d> {
1967 let isAsmParserOnly = 1 in {
1968 def rri : PIi8<0xC2, MRMSrcReg,
1969 (outs RC:$dst), (ins RC:$src1, RC:$src, SSECC:$cc), asm,
1970 [(set RC:$dst, (Int RC:$src1, RC:$src, imm:$cc))], d>;
1971 def rmi : PIi8<0xC2, MRMSrcMem,
1972 (outs RC:$dst), (ins RC:$src1, f128mem:$src, SSECC:$cc), asm,
1973 [(set RC:$dst, (Int RC:$src1, (memop addr:$src), imm:$cc))], d>;
1974 }
1976 // Accept explicit immediate argument form instead of comparison code.
1977 def rri_alt : PIi8<0xC2, MRMSrcReg,
1978 (outs RC:$dst), (ins RC:$src1, RC:$src, i8imm:$src2),
1979 asm_alt, [], d>;
1980 def rmi_alt : PIi8<0xC2, MRMSrcMem,
1981 (outs RC:$dst), (ins RC:$src1, f128mem:$src, i8imm:$src2),
1982 asm_alt, [], d>;
1983 }
1985 defm VCMPPS : sse12_cmp_packed<VR128, f128mem, int_x86_sse_cmp_ps,
1986 "cmp${cc}ps\t{$src, $src1, $dst|$dst, $src1, $src}",
1987 "cmpps\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1988 SSEPackedSingle>, TB, VEX_4V;
1989 defm VCMPPD : sse12_cmp_packed<VR128, f128mem, int_x86_sse2_cmp_pd,
1990 "cmp${cc}pd\t{$src, $src1, $dst|$dst, $src1, $src}",
1991 "cmppd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1992 SSEPackedDouble>, TB, OpSize, VEX_4V;
1993 defm VCMPPSY : sse12_cmp_packed<VR256, f256mem, int_x86_avx_cmp_ps_256,
1994 "cmp${cc}ps\t{$src, $src1, $dst|$dst, $src1, $src}",
1995 "cmpps\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1996 SSEPackedSingle>, TB, VEX_4V;
1997 defm VCMPPDY : sse12_cmp_packed<VR256, f256mem, int_x86_avx_cmp_pd_256,
1998 "cmp${cc}pd\t{$src, $src1, $dst|$dst, $src1, $src}",
1999 "cmppd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
2000 SSEPackedDouble>, TB, OpSize, VEX_4V;
2001 let Constraints = "$src1 = $dst" in {
2002 defm CMPPS : sse12_cmp_packed<VR128, f128mem, int_x86_sse_cmp_ps,
2003 "cmp${cc}ps\t{$src, $dst|$dst, $src}",
2004 "cmpps\t{$src2, $src, $dst|$dst, $src, $src2}",
2005 SSEPackedSingle>, TB;
2006 defm CMPPD : sse12_cmp_packed<VR128, f128mem, int_x86_sse2_cmp_pd,
2007 "cmp${cc}pd\t{$src, $dst|$dst, $src}",
2008 "cmppd\t{$src2, $src, $dst|$dst, $src, $src2}",
2009 SSEPackedDouble>, TB, OpSize;
2010 }
2012 let Predicates = [HasSSE1] in {
2013 def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
2014 (CMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>;
2015 def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), (memop addr:$src2), imm:$cc)),
2016 (CMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>;
2017 }
2019 let Predicates = [HasSSE2] in {
2020 def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
2021 (CMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
2022 def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), (memop addr:$src2), imm:$cc)),
2023 (CMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;
2024 }
2026 let Predicates = [HasAVX] in {
2027 def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
2028 (VCMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>;
2029 def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), (memop addr:$src2), imm:$cc)),
2030 (VCMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>;
2031 def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
2032 (VCMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
2033 def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), (memop addr:$src2), imm:$cc)),
2034 (VCMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;
2036 def : Pat<(v8i32 (X86cmpps (v8f32 VR256:$src1), VR256:$src2, imm:$cc)),
2037 (VCMPPSYrri (v8f32 VR256:$src1), (v8f32 VR256:$src2), imm:$cc)>;
2038 def : Pat<(v8i32 (X86cmpps (v8f32 VR256:$src1), (memop addr:$src2), imm:$cc)),
2039 (VCMPPSYrmi (v8f32 VR256:$src1), addr:$src2, imm:$cc)>;
2040 def : Pat<(v4i64 (X86cmppd (v4f64 VR256:$src1), VR256:$src2, imm:$cc)),
2041 (VCMPPDYrri VR256:$src1, VR256:$src2, imm:$cc)>;
2042 def : Pat<(v4i64 (X86cmppd (v4f64 VR256:$src1), (memop addr:$src2), imm:$cc)),
2043 (VCMPPDYrmi VR256:$src1, addr:$src2, imm:$cc)>;
2044 }
2046 //===----------------------------------------------------------------------===//
2047 // SSE 1 & 2 - Shuffle Instructions
2048 //===----------------------------------------------------------------------===//
2050 /// sse12_shuffle - sse 1 & 2 shuffle instructions
2051 multiclass sse12_shuffle<RegisterClass RC, X86MemOperand x86memop,
2052 ValueType vt, string asm, PatFrag mem_frag,
2053 Domain d, bit IsConvertibleToThreeAddress = 0> {
2054 def rmi : PIi8<0xC6, MRMSrcMem, (outs RC:$dst),
2055 (ins RC:$src1, f128mem:$src2, i8imm:$src3), asm,
2056 [(set RC:$dst, (vt (shufp:$src3
2057 RC:$src1, (mem_frag addr:$src2))))], d>;
2058 let isConvertibleToThreeAddress = IsConvertibleToThreeAddress in
2059 def rri : PIi8<0xC6, MRMSrcReg, (outs RC:$dst),
2060 (ins RC:$src1, RC:$src2, i8imm:$src3), asm,
2061 [(set RC:$dst,
2062 (vt (shufp:$src3 RC:$src1, RC:$src2)))], d>;
2063 }
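// The 8-bit immediate selects two elements of $src1 for the low half of the
// result and two of $src2 for the high half; e.g. for shufps, imm 0x4e
// (0b01001110) gives dst = <src1[2], src1[3], src2[0], src2[1]>
// (illustrative immediate).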
2065 defm VSHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
2066 "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
2067 memopv4f32, SSEPackedSingle>, TB, VEX_4V;
2068 defm VSHUFPSY : sse12_shuffle<VR256, f256mem, v8f32,
2069 "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
2070 memopv8f32, SSEPackedSingle>, TB, VEX_4V;
2071 defm VSHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
2072 "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
2073 memopv2f64, SSEPackedDouble>, TB, OpSize, VEX_4V;
2074 defm VSHUFPDY : sse12_shuffle<VR256, f256mem, v4f64,
2075 "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
2076 memopv4f64, SSEPackedDouble>, TB, OpSize, VEX_4V;
2078 let Constraints = "$src1 = $dst" in {
2079 defm SHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
2080 "shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}",
2081 memopv4f32, SSEPackedSingle, 1 /* cvt to pshufd */>,
2082 TB;
2083 defm SHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
2084 "shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}",
2085 memopv2f64, SSEPackedDouble>, TB, OpSize;
2086 }
2088 let Predicates = [HasSSE1] in {
2089 def : Pat<(v4f32 (X86Shufps VR128:$src1,
2090 (memopv4f32 addr:$src2), (i8 imm:$imm))),
2091 (SHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
2092 def : Pat<(v4f32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
2093 (SHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
2094 def : Pat<(v4i32 (X86Shufps VR128:$src1,
2095 (bc_v4i32 (memopv2i64 addr:$src2)), (i8 imm:$imm))),
2096 (SHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
2097 def : Pat<(v4i32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
2098 (SHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
2099 // vector_shuffle v1, v2 <4, 5, 2, 3> using SHUFPSrri (we prefer movsd, but
2100 // fall back to this for SSE1)
2101 def : Pat<(v4f32 (movlp:$src3 VR128:$src1, (v4f32 VR128:$src2))),
2102 (SHUFPSrri VR128:$src2, VR128:$src1,
2103 (SHUFFLE_get_shuf_imm VR128:$src3))>;
2104 // Special unary SHUFPSrri case.
2105 def : Pat<(v4f32 (pshufd:$src3 VR128:$src1, (undef))),
2106 (SHUFPSrri VR128:$src1, VR128:$src1,
2107 (SHUFFLE_get_shuf_imm VR128:$src3))>;
2108 }
2110 let Predicates = [HasSSE2] in {
2111 // Special binary v4i32 shuffle cases with SHUFPS.
2112 def : Pat<(v4i32 (shufp:$src3 VR128:$src1, (v4i32 VR128:$src2))),
2113 (SHUFPSrri VR128:$src1, VR128:$src2,
2114 (SHUFFLE_get_shuf_imm VR128:$src3))>;
2115 def : Pat<(v4i32 (shufp:$src3 VR128:$src1,
2116 (bc_v4i32 (memopv2i64 addr:$src2)))),
2117 (SHUFPSrmi VR128:$src1, addr:$src2,
2118 (SHUFFLE_get_shuf_imm VR128:$src3))>;
2119 // Special unary SHUFPDrri cases.
2120 def : Pat<(v2i64 (pshufd:$src3 VR128:$src1, (undef))),
2121 (SHUFPDrri VR128:$src1, VR128:$src1,
2122 (SHUFFLE_get_shuf_imm VR128:$src3))>;
2123 def : Pat<(v2f64 (pshufd:$src3 VR128:$src1, (undef))),
2124 (SHUFPDrri VR128:$src1, VR128:$src1,
2125 (SHUFFLE_get_shuf_imm VR128:$src3))>;
2126 // Special binary v2i64 shuffle cases using SHUFPDrri.
2127 def : Pat<(v2i64 (shufp:$src3 VR128:$src1, VR128:$src2)),
2128 (SHUFPDrri VR128:$src1, VR128:$src2,
2129 (SHUFFLE_get_shuf_imm VR128:$src3))>;
2130 // Generic SHUFPD patterns
2131 def : Pat<(v2f64 (X86Shufps VR128:$src1,
2132 (memopv2f64 addr:$src2), (i8 imm:$imm))),
2133 (SHUFPDrmi VR128:$src1, addr:$src2, imm:$imm)>;
2134 def : Pat<(v2i64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
2135 (SHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;
2136 def : Pat<(v2f64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
2137 (SHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;
2138 }
2140 let Predicates = [HasAVX] in {
2141 def : Pat<(v4f32 (X86Shufps VR128:$src1,
2142 (memopv4f32 addr:$src2), (i8 imm:$imm))),
2143 (VSHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
2144 def : Pat<(v4f32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
2145 (VSHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
2146 def : Pat<(v4i32 (X86Shufps VR128:$src1,
2147 (bc_v4i32 (memopv2i64 addr:$src2)), (i8 imm:$imm))),
2148 (VSHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
2149 def : Pat<(v4i32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
2150 (VSHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
2151 // vector_shuffle v1, v2 <4, 5, 2, 3> using SHUFPSrri (we prefer movsd, but
2152 // fall back to this for SSE1)
2153 def : Pat<(v4f32 (movlp:$src3 VR128:$src1, (v4f32 VR128:$src2))),
2154 (VSHUFPSrri VR128:$src2, VR128:$src1,
2155 (SHUFFLE_get_shuf_imm VR128:$src3))>;
2156 // Special unary SHUFPSrri case.
2157 def : Pat<(v4f32 (pshufd:$src3 VR128:$src1, (undef))),
2158 (VSHUFPSrri VR128:$src1, VR128:$src1,
2159 (SHUFFLE_get_shuf_imm VR128:$src3))>;
2160 // Special binary v4i32 shuffle cases with SHUFPS.
2161 def : Pat<(v4i32 (shufp:$src3 VR128:$src1, (v4i32 VR128:$src2))),
2162 (VSHUFPSrri VR128:$src1, VR128:$src2,
2163 (SHUFFLE_get_shuf_imm VR128:$src3))>;
2164 def : Pat<(v4i32 (shufp:$src3 VR128:$src1,
2165 (bc_v4i32 (memopv2i64 addr:$src2)))),
2166 (VSHUFPSrmi VR128:$src1, addr:$src2,
2167 (SHUFFLE_get_shuf_imm VR128:$src3))>;
2168 // Special unary SHUFPDrri cases.
2169 def : Pat<(v2i64 (pshufd:$src3 VR128:$src1, (undef))),
2170 (VSHUFPDrri VR128:$src1, VR128:$src1,
2171 (SHUFFLE_get_shuf_imm VR128:$src3))>;
2172 def : Pat<(v2f64 (pshufd:$src3 VR128:$src1, (undef))),
2173 (VSHUFPDrri VR128:$src1, VR128:$src1,
2174 (SHUFFLE_get_shuf_imm VR128:$src3))>;
2175 // Special binary v2i64 shuffle cases using SHUFPDrri.
2176 def : Pat<(v2i64 (shufp:$src3 VR128:$src1, VR128:$src2)),
2177 (VSHUFPDrri VR128:$src1, VR128:$src2,
2178 (SHUFFLE_get_shuf_imm VR128:$src3))>;
2180 def : Pat<(v2f64 (X86Shufps VR128:$src1,
2181 (memopv2f64 addr:$src2), (i8 imm:$imm))),
2182 (VSHUFPDrmi VR128:$src1, addr:$src2, imm:$imm)>;
2183 def : Pat<(v2i64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
2184 (VSHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;
2185 def : Pat<(v2f64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
2186 (VSHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;
2189 def : Pat<(v8i32 (X86Shufps VR256:$src1, VR256:$src2, (i8 imm:$imm))),
2190 (VSHUFPSYrri VR256:$src1, VR256:$src2, imm:$imm)>;
2191 def : Pat<(v8i32 (X86Shufps VR256:$src1,
2192 (bc_v8i32 (memopv4i64 addr:$src2)), (i8 imm:$imm))),
2193 (VSHUFPSYrmi VR256:$src1, addr:$src2, imm:$imm)>;
2195 def : Pat<(v8f32 (X86Shufps VR256:$src1, VR256:$src2, (i8 imm:$imm))),
2196 (VSHUFPSYrri VR256:$src1, VR256:$src2, imm:$imm)>;
2197 def : Pat<(v8f32 (X86Shufps VR256:$src1,
2198 (memopv8f32 addr:$src2), (i8 imm:$imm))),
2199 (VSHUFPSYrmi VR256:$src1, addr:$src2, imm:$imm)>;
2201 def : Pat<(v4i64 (X86Shufpd VR256:$src1, VR256:$src2, (i8 imm:$imm))),
2202 (VSHUFPDYrri VR256:$src1, VR256:$src2, imm:$imm)>;
2203 def : Pat<(v4i64 (X86Shufpd VR256:$src1,
2204 (memopv4i64 addr:$src2), (i8 imm:$imm))),
2205 (VSHUFPDYrmi VR256:$src1, addr:$src2, imm:$imm)>;
2207 def : Pat<(v4f64 (X86Shufpd VR256:$src1, VR256:$src2, (i8 imm:$imm))),
2208 (VSHUFPDYrri VR256:$src1, VR256:$src2, imm:$imm)>;
2209 def : Pat<(v4f64 (X86Shufpd VR256:$src1,
2210 (memopv4f64 addr:$src2), (i8 imm:$imm))),
2211 (VSHUFPDYrmi VR256:$src1, addr:$src2, imm:$imm)>;
2212 }
2214 //===----------------------------------------------------------------------===//
2215 // SSE 1 & 2 - Unpack Instructions
2216 //===----------------------------------------------------------------------===//
2218 /// sse12_unpack_interleave - sse 1 & 2 unpack and interleave
2219 multiclass sse12_unpack_interleave<bits<8> opc, PatFrag OpNode, ValueType vt,
2220 PatFrag mem_frag, RegisterClass RC,
2221 X86MemOperand x86memop, string asm,
2222 Domain d> {
2223 def rr : PI<opc, MRMSrcReg,
2224 (outs RC:$dst), (ins RC:$src1, RC:$src2),
2225 asm, [(set RC:$dst,
2226 (vt (OpNode RC:$src1, RC:$src2)))], d>;
2227 def rm : PI<opc, MRMSrcMem,
2228 (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
2229 asm, [(set RC:$dst,
2230 (vt (OpNode RC:$src1,
2231 (mem_frag addr:$src2))))], d>;
2232 }
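// For the v4f32 forms the interleaving is (elements listed low to high):
//   unpcklps: dst = <src1[0], src2[0], src1[1], src2[1]>
//   unpckhps: dst = <src1[2], src2[2], src1[3], src2[3]>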
2234 let AddedComplexity = 10 in {
2235 defm VUNPCKHPS: sse12_unpack_interleave<0x15, unpckh, v4f32, memopv4f32,
2236 VR128, f128mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2237 SSEPackedSingle>, TB, VEX_4V;
2238 defm VUNPCKHPD: sse12_unpack_interleave<0x15, unpckh, v2f64, memopv2f64,
2239 VR128, f128mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2240 SSEPackedDouble>, TB, OpSize, VEX_4V;
2241 defm VUNPCKLPS: sse12_unpack_interleave<0x14, unpckl, v4f32, memopv4f32,
2242 VR128, f128mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2243 SSEPackedSingle>, TB, VEX_4V;
2244 defm VUNPCKLPD: sse12_unpack_interleave<0x14, unpckl, v2f64, memopv2f64,
2245 VR128, f128mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2246 SSEPackedDouble>, TB, OpSize, VEX_4V;
2248 defm VUNPCKHPSY: sse12_unpack_interleave<0x15, unpckh, v8f32, memopv8f32,
2249 VR256, f256mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2250 SSEPackedSingle>, TB, VEX_4V;
2251 defm VUNPCKHPDY: sse12_unpack_interleave<0x15, unpckh, v4f64, memopv4f64,
2252 VR256, f256mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2253 SSEPackedDouble>, TB, OpSize, VEX_4V;
2254 defm VUNPCKLPSY: sse12_unpack_interleave<0x14, unpckl, v8f32, memopv8f32,
2255 VR256, f256mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2256 SSEPackedSingle>, TB, VEX_4V;
2257 defm VUNPCKLPDY: sse12_unpack_interleave<0x14, unpckl, v4f64, memopv4f64,
2258 VR256, f256mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2259 SSEPackedDouble>, TB, OpSize, VEX_4V;
2261 let Constraints = "$src1 = $dst" in {
2262 defm UNPCKHPS: sse12_unpack_interleave<0x15, unpckh, v4f32, memopv4f32,
2263 VR128, f128mem, "unpckhps\t{$src2, $dst|$dst, $src2}",
2264 SSEPackedSingle>, TB;
2265 defm UNPCKHPD: sse12_unpack_interleave<0x15, unpckh, v2f64, memopv2f64,
2266 VR128, f128mem, "unpckhpd\t{$src2, $dst|$dst, $src2}",
2267 SSEPackedDouble>, TB, OpSize;
2268 defm UNPCKLPS: sse12_unpack_interleave<0x14, unpckl, v4f32, memopv4f32,
2269 VR128, f128mem, "unpcklps\t{$src2, $dst|$dst, $src2}",
2270 SSEPackedSingle>, TB;
2271 defm UNPCKLPD: sse12_unpack_interleave<0x14, unpckl, v2f64, memopv2f64,
2272 VR128, f128mem, "unpcklpd\t{$src2, $dst|$dst, $src2}",
2273 SSEPackedDouble>, TB, OpSize;
2274 } // Constraints = "$src1 = $dst"
2275 } // AddedComplexity
2277 let Predicates = [HasSSE1] in {
2278 def : Pat<(v4f32 (X86Unpcklps VR128:$src1, (memopv4f32 addr:$src2))),
2279 (UNPCKLPSrm VR128:$src1, addr:$src2)>;
2280 def : Pat<(v4f32 (X86Unpcklps VR128:$src1, VR128:$src2)),
2281 (UNPCKLPSrr VR128:$src1, VR128:$src2)>;
2282 def : Pat<(v4f32 (X86Unpckhps VR128:$src1, (memopv4f32 addr:$src2))),
2283 (UNPCKHPSrm VR128:$src1, addr:$src2)>;
2284 def : Pat<(v4f32 (X86Unpckhps VR128:$src1, VR128:$src2)),
2285 (UNPCKHPSrr VR128:$src1, VR128:$src2)>;
2286 }
2288 let Predicates = [HasSSE2] in {
2289 def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, (memopv2f64 addr:$src2))),
2290 (UNPCKLPDrm VR128:$src1, addr:$src2)>;
2291 def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, VR128:$src2)),
2292 (UNPCKLPDrr VR128:$src1, VR128:$src2)>;
2293 def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, (memopv2f64 addr:$src2))),
2294 (UNPCKHPDrm VR128:$src1, addr:$src2)>;
2295 def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, VR128:$src2)),
2296 (UNPCKHPDrr VR128:$src1, VR128:$src2)>;
2298 // FIXME: Instead of X86Movddup, there should be an X86Unpcklpd here; the
2299 // problem is during lowering, where it's not possible to recognize the load
2300 // fold because it has two uses through a bitcast. One use disappears at isel
2301 // time and the fold opportunity reappears.
2302 def : Pat<(v2f64 (X86Movddup VR128:$src)),
2303 (UNPCKLPDrr VR128:$src, VR128:$src)>;
2305 let AddedComplexity = 10 in
2306 def : Pat<(splat_lo (v2f64 VR128:$src), (undef)),
2307 (UNPCKLPDrr VR128:$src, VR128:$src)>;
2308 }
2310 let Predicates = [HasAVX] in {
2311 def : Pat<(v4f32 (X86Unpcklps VR128:$src1, (memopv4f32 addr:$src2))),
2312 (VUNPCKLPSrm VR128:$src1, addr:$src2)>;
2313 def : Pat<(v4f32 (X86Unpcklps VR128:$src1, VR128:$src2)),
2314 (VUNPCKLPSrr VR128:$src1, VR128:$src2)>;
2315 def : Pat<(v4f32 (X86Unpckhps VR128:$src1, (memopv4f32 addr:$src2))),
2316 (VUNPCKHPSrm VR128:$src1, addr:$src2)>;
2317 def : Pat<(v4f32 (X86Unpckhps VR128:$src1, VR128:$src2)),
2318 (VUNPCKHPSrr VR128:$src1, VR128:$src2)>;
2320 def : Pat<(v8f32 (X86Unpcklpsy VR256:$src1, (memopv8f32 addr:$src2))),
2321 (VUNPCKLPSYrm VR256:$src1, addr:$src2)>;
2322 def : Pat<(v8f32 (X86Unpcklpsy VR256:$src1, VR256:$src2)),
2323 (VUNPCKLPSYrr VR256:$src1, VR256:$src2)>;
2324 def : Pat<(v8i32 (X86Unpcklpsy VR256:$src1, VR256:$src2)),
2325 (VUNPCKLPSYrr VR256:$src1, VR256:$src2)>;
2326 def : Pat<(v8i32 (X86Unpcklpsy VR256:$src1, (memopv8i32 addr:$src2))),
2327 (VUNPCKLPSYrm VR256:$src1, addr:$src2)>;
2328 def : Pat<(v8f32 (X86Unpckhpsy VR256:$src1, (memopv8f32 addr:$src2))),
2329 (VUNPCKHPSYrm VR256:$src1, addr:$src2)>;
2330 def : Pat<(v8f32 (X86Unpckhpsy VR256:$src1, VR256:$src2)),
2331 (VUNPCKHPSYrr VR256:$src1, VR256:$src2)>;
2332 def : Pat<(v8i32 (X86Unpckhpsy VR256:$src1, (memopv8i32 addr:$src2))),
2333 (VUNPCKHPSYrm VR256:$src1, addr:$src2)>;
2334 def : Pat<(v8i32 (X86Unpckhpsy VR256:$src1, VR256:$src2)),
2335 (VUNPCKHPSYrr VR256:$src1, VR256:$src2)>;
2337 def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, (memopv2f64 addr:$src2))),
2338 (VUNPCKLPDrm VR128:$src1, addr:$src2)>;
2339 def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, VR128:$src2)),
2340 (VUNPCKLPDrr VR128:$src1, VR128:$src2)>;
2341 def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, (memopv2f64 addr:$src2))),
2342 (VUNPCKHPDrm VR128:$src1, addr:$src2)>;
2343 def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, VR128:$src2)),
2344 (VUNPCKHPDrr VR128:$src1, VR128:$src2)>;
2346 def : Pat<(v4f64 (X86Unpcklpdy VR256:$src1, (memopv4f64 addr:$src2))),
2347 (VUNPCKLPDYrm VR256:$src1, addr:$src2)>;
2348 def : Pat<(v4f64 (X86Unpcklpdy VR256:$src1, VR256:$src2)),
2349 (VUNPCKLPDYrr VR256:$src1, VR256:$src2)>;
2350 def : Pat<(v4i64 (X86Unpcklpdy VR256:$src1, (memopv4i64 addr:$src2))),
2351 (VUNPCKLPDYrm VR256:$src1, addr:$src2)>;
2352 def : Pat<(v4i64 (X86Unpcklpdy VR256:$src1, VR256:$src2)),
2353 (VUNPCKLPDYrr VR256:$src1, VR256:$src2)>;
2354 def : Pat<(v4f64 (X86Unpckhpdy VR256:$src1, (memopv4f64 addr:$src2))),
2355 (VUNPCKHPDYrm VR256:$src1, addr:$src2)>;
2356 def : Pat<(v4f64 (X86Unpckhpdy VR256:$src1, VR256:$src2)),
2357 (VUNPCKHPDYrr VR256:$src1, VR256:$src2)>;
2358 def : Pat<(v4i64 (X86Unpckhpdy VR256:$src1, (memopv4i64 addr:$src2))),
2359 (VUNPCKHPDYrm VR256:$src1, addr:$src2)>;
2360 def : Pat<(v4i64 (X86Unpckhpdy VR256:$src1, VR256:$src2)),
2361 (VUNPCKHPDYrr VR256:$src1, VR256:$src2)>;
2363 // FIXME: Instead of X86Movddup, there should be an X86Unpcklpd here; the
2364 // problem is during lowering, where it's not possible to recognize the load
2365 // fold because it has two uses through a bitcast. One use disappears at isel
2366 // time and the fold opportunity reappears.
2367 def : Pat<(v2f64 (X86Movddup VR128:$src)),
2368 (VUNPCKLPDrr VR128:$src, VR128:$src)>;
2369 let AddedComplexity = 10 in
2370 def : Pat<(splat_lo (v2f64 VR128:$src), (undef)),
2371 (VUNPCKLPDrr VR128:$src, VR128:$src)>;
2372 }
2374 //===----------------------------------------------------------------------===//
2375 // SSE 1 & 2 - Extract Floating-Point Sign Mask
2376 //===----------------------------------------------------------------------===//
2378 /// sse12_extr_sign_mask - sse 1 & 2 sign mask extraction
2379 multiclass sse12_extr_sign_mask<RegisterClass RC, Intrinsic Int, string asm,
2380 Domain d> {
2381 def rr32 : PI<0x50, MRMSrcReg, (outs GR32:$dst), (ins RC:$src),
2382 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
2383 [(set GR32:$dst, (Int RC:$src))], d>;
2384 def rr64 : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins RC:$src),
2385 !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [], d>, REX_W;
2386 }
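// e.g. movmskps packs the four single-precision sign bits into bits 3:0 of
// the GR32 result (upper bits zeroed); movmskpd likewise uses bits 1:0.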
2388 defm MOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps, "movmskps",
2389 SSEPackedSingle>, TB;
2390 defm MOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd, "movmskpd",
2391 SSEPackedDouble>, TB, OpSize;
2393 def : Pat<(i32 (X86fgetsign FR32:$src)),
2394 (MOVMSKPSrr32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src,
2395 sub_ss))>, Requires<[HasSSE1]>;
2396 def : Pat<(i64 (X86fgetsign FR32:$src)),
2397 (MOVMSKPSrr64 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src,
2398 sub_ss))>, Requires<[HasSSE1]>;
2399 def : Pat<(i32 (X86fgetsign FR64:$src)),
2400 (MOVMSKPDrr32 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src,
2401 sub_sd))>, Requires<[HasSSE2]>;
2402 def : Pat<(i64 (X86fgetsign FR64:$src)),
2403 (MOVMSKPDrr64 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src,
2404 sub_sd))>, Requires<[HasSSE2]>;
2406 let Predicates = [HasAVX] in {
2407 defm VMOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps,
2408 "movmskps", SSEPackedSingle>, TB, VEX;
2409 defm VMOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd,
2410 "movmskpd", SSEPackedDouble>, TB, OpSize,
2411 VEX;
2412 defm VMOVMSKPSY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_ps_256,
2413 "movmskps", SSEPackedSingle>, TB, VEX;
2414 defm VMOVMSKPDY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_pd_256,
2415 "movmskpd", SSEPackedDouble>, TB, OpSize,
2416 VEX;
2418 def : Pat<(i32 (X86fgetsign FR32:$src)),
2419 (VMOVMSKPSrr32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src,
2420 sub_ss))>;
2421 def : Pat<(i64 (X86fgetsign FR32:$src)),
2422 (VMOVMSKPSrr64 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src,
2423 sub_ss))>;
2424 def : Pat<(i32 (X86fgetsign FR64:$src)),
2425 (VMOVMSKPDrr32 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src,
2426 sub_sd))>;
2427 def : Pat<(i64 (X86fgetsign FR64:$src)),
2428 (VMOVMSKPDrr64 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src,
2429 sub_sd))>;
2432 def VMOVMSKPSr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
2433 "movmskps\t{$src, $dst|$dst, $src}", [], SSEPackedSingle>, TB, VEX;
2434 def VMOVMSKPDr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
2435 "movmskpd\t{$src, $dst|$dst, $src}", [], SSEPackedDouble>, TB, OpSize,
2436 VEX;
2437 def VMOVMSKPSYr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
2438 "movmskps\t{$src, $dst|$dst, $src}", [], SSEPackedSingle>, TB, VEX;
2439 def VMOVMSKPDYr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
2440 "movmskpd\t{$src, $dst|$dst, $src}", [], SSEPackedDouble>, TB, OpSize,
2441 VEX;
2442 }
2444 //===----------------------------------------------------------------------===//
2445 // SSE 1 & 2 - Misc aliasing of packed SSE 1 & 2 instructions
2446 //===----------------------------------------------------------------------===//
2448 // Aliases of packed SSE1 & SSE2 instructions for scalar use. These all have
2449 // names that start with 'Fs'.
2451 // Alias instructions that map fld0 to pxor for sse.
2452 let isReMaterializable = 1, isAsCheapAsAMove = 1, isCodeGenOnly = 1,
2453 canFoldAsLoad = 1 in {
2454 // FIXME: Set encoding to pseudo!
2455 def FsFLD0SS : I<0xEF, MRMInitReg, (outs FR32:$dst), (ins), "",
2456 [(set FR32:$dst, fp32imm0)]>,
2457 Requires<[HasSSE1]>, TB, OpSize;
2458 def FsFLD0SD : I<0xEF, MRMInitReg, (outs FR64:$dst), (ins), "",
2459 [(set FR64:$dst, fpimm0)]>,
2460 Requires<[HasSSE2]>, TB, OpSize;
2461 def VFsFLD0SS : I<0xEF, MRMInitReg, (outs FR32:$dst), (ins), "",
2462 [(set FR32:$dst, fp32imm0)]>,
2463 Requires<[HasAVX]>, TB, OpSize, VEX_4V;
2464 def VFsFLD0SD : I<0xEF, MRMInitReg, (outs FR64:$dst), (ins), "",
2465 [(set FR64:$dst, fpimm0)]>,
2466 Requires<[HasAVX]>, TB, OpSize, VEX_4V;
2467 }
2469 // Alias instruction to do FR32 or FR64 reg-to-reg copy using movaps. Upper
2470 // bits are disregarded.
2471 let neverHasSideEffects = 1 in {
2472 def FsMOVAPSrr : PSI<0x28, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
2473 "movaps\t{$src, $dst|$dst, $src}", []>;
2474 def FsMOVAPDrr : PDI<0x28, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
2475 "movapd\t{$src, $dst|$dst, $src}", []>;
2476 }
2478 // Alias instructions to load FR32 or FR64 from f128mem using movaps or
2479 // movapd. The upper bits are disregarded.
2480 let canFoldAsLoad = 1, isReMaterializable = 1 in {
2481 def FsMOVAPSrm : PSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
2482 "movaps\t{$src, $dst|$dst, $src}",
2483 [(set FR32:$dst, (alignedloadfsf32 addr:$src))]>;
2484 def FsMOVAPDrm : PDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
2485 "movapd\t{$src, $dst|$dst, $src}",
2486 [(set FR64:$dst, (alignedloadfsf64 addr:$src))]>;
2487 }
2489 //===----------------------------------------------------------------------===//
2490 // SSE 1 & 2 - Logical Instructions
2491 //===----------------------------------------------------------------------===//
2493 /// sse12_fp_alias_pack_logical - SSE 1 & 2 aliased packed FP logical ops
2495 multiclass sse12_fp_alias_pack_logical<bits<8> opc, string OpcodeStr,
2496 SDNode OpNode> {
2497 defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
2498 FR32, f32, f128mem, memopfsf32, SSEPackedSingle, 0>, TB, VEX_4V;
2500 defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
2501 FR64, f64, f128mem, memopfsf64, SSEPackedDouble, 0>, TB, OpSize, VEX_4V;
2503 let Constraints = "$src1 = $dst" in {
2504 defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, FR32,
2505 f32, f128mem, memopfsf32, SSEPackedSingle>, TB;
2507 defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, FR64,
2508 f64, f128mem, memopfsf64, SSEPackedDouble>, TB, OpSize;
2509 }
2510 }
2512 // Alias bitwise logical operations using SSE logical ops on packed FP values.
2513 let mayLoad = 0 in {
2514 defm FsAND : sse12_fp_alias_pack_logical<0x54, "and", X86fand>;
2515 defm FsOR : sse12_fp_alias_pack_logical<0x56, "or", X86for>;
2516 defm FsXOR : sse12_fp_alias_pack_logical<0x57, "xor", X86fxor>;
2517 }
2519 let neverHasSideEffects = 1, Pattern = []<dag>, isCommutable = 0 in
2520 defm FsANDN : sse12_fp_alias_pack_logical<0x55, "andn", undef>;
2522 /// sse12_fp_packed_logical - SSE 1 & 2 packed FP logical ops
2524 multiclass sse12_fp_packed_logical<bits<8> opc, string OpcodeStr,
2525 SDNode OpNode> {
2526 // In AVX there is no need to add a pattern for the 128-bit logical rr ps
2527 // form, because those operations are all promoted to v2i64 and the patterns
2528 // are covered by the int version. The extra pattern is needed only for SSE,
2529 // where v2i64 is not supported on SSE1 but only on SSE2.
2530 defm V#NAME#PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
2531 !strconcat(OpcodeStr, "ps"), f128mem, [],
2532 [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
2533 (memopv2i64 addr:$src2)))], 0>, TB, VEX_4V;
2535 defm V#NAME#PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
2536 !strconcat(OpcodeStr, "pd"), f128mem,
2537 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
2538 (bc_v2i64 (v2f64 VR128:$src2))))],
2539 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
2540 (memopv2i64 addr:$src2)))], 0>,
2541 TB, OpSize, VEX_4V;
2542 let Constraints = "$src1 = $dst" in {
2543 defm PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
2544 !strconcat(OpcodeStr, "ps"), f128mem,
2545 [(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))],
2546 [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
2547 (memopv2i64 addr:$src2)))]>, TB;
2549 defm PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
2550 !strconcat(OpcodeStr, "pd"), f128mem,
2551 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
2552 (bc_v2i64 (v2f64 VR128:$src2))))],
2553 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
2554 (memopv2i64 addr:$src2)))]>, TB, OpSize;
2555 }
2556 }
2558 /// sse12_fp_packed_logical_y - AVX 256-bit SSE 1 & 2 logical ops forms
2560 multiclass sse12_fp_packed_logical_y<bits<8> opc, string OpcodeStr,
2561 SDNode OpNode> {
2562 defm PSY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedSingle,
2563 !strconcat(OpcodeStr, "ps"), f256mem,
2564 [(set VR256:$dst, (v4i64 (OpNode VR256:$src1, VR256:$src2)))],
2565 [(set VR256:$dst, (OpNode (bc_v4i64 (v8f32 VR256:$src1)),
2566 (memopv4i64 addr:$src2)))], 0>, TB, VEX_4V;
2568 defm PDY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedDouble,
2569 !strconcat(OpcodeStr, "pd"), f256mem,
2570 [(set VR256:$dst, (OpNode (bc_v4i64 (v4f64 VR256:$src1)),
2571 (bc_v4i64 (v4f64 VR256:$src2))))],
2572 [(set VR256:$dst, (OpNode (bc_v4i64 (v4f64 VR256:$src1)),
2573 (memopv4i64 addr:$src2)))], 0>,
2574 TB, OpSize, VEX_4V;
2575 }
2577 // AVX 256-bit packed logical ops forms
2578 defm VAND : sse12_fp_packed_logical_y<0x54, "and", and>;
2579 defm VOR : sse12_fp_packed_logical_y<0x56, "or", or>;
2580 defm VXOR : sse12_fp_packed_logical_y<0x57, "xor", xor>;
2581 defm VANDN : sse12_fp_packed_logical_y<0x55, "andn", X86andnp>;
2583 defm AND : sse12_fp_packed_logical<0x54, "and", and>;
2584 defm OR : sse12_fp_packed_logical<0x56, "or", or>;
2585 defm XOR : sse12_fp_packed_logical<0x57, "xor", xor>;
2586 let isCommutable = 0 in
2587 defm ANDN : sse12_fp_packed_logical<0x55, "andn", X86andnp>;
2589 //===----------------------------------------------------------------------===//
2590 // SSE 1 & 2 - Arithmetic Instructions
2591 //===----------------------------------------------------------------------===//
2593 /// basic_sse12_fp_binop_xxx - SSE 1 & 2 binops come in both scalar and
2594 /// vector forms.
2596 /// In addition, we also have a special variant of the scalar form here to
2597 /// represent the associated intrinsic operation. This form is unlike the
2598 /// plain scalar form, in that it takes an entire vector (instead of a scalar)
2599 /// and leaves the top elements unmodified (therefore these cannot be commuted).
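/// (For example, "addss %xmm1, %xmm0" computes only the low f32 element;
/// the upper three elements of the destination pass through unchanged, so
/// swapping the two sources would change the result.)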
2601 /// These three forms can each be reg+reg or reg+mem.
2604 /// FIXME: once all 256-bit intrinsics are matched, clean up and refactor those
2605 /// classes.
2606 multiclass basic_sse12_fp_binop_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
2607 bit Is2Addr = 1> {
2608 defm SS : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "ss"),
2609 OpNode, FR32, f32mem, Is2Addr>, XS;
2610 defm SD : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "sd"),
2611 OpNode, FR64, f64mem, Is2Addr>, XD;
2612 }
2614 multiclass basic_sse12_fp_binop_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
2615 bit Is2Addr = 1> {
2616 let mayLoad = 0 in {
2617 defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR128,
2618 v4f32, f128mem, memopv4f32, SSEPackedSingle, Is2Addr>, TB;
2619 defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR128,
2620 v2f64, f128mem, memopv2f64, SSEPackedDouble, Is2Addr>, TB, OpSize;
2621 }
2622 }
2624 multiclass basic_sse12_fp_binop_p_y<bits<8> opc, string OpcodeStr,
2625 SDNode OpNode> {
2626 let mayLoad = 0 in {
2627 defm PSY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR256,
2628 v8f32, f256mem, memopv8f32, SSEPackedSingle, 0>, TB;
2629 defm PDY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR256,
2630 v4f64, f256mem, memopv4f64, SSEPackedDouble, 0>, TB, OpSize;
2631 }
2632 }
2634 multiclass basic_sse12_fp_binop_s_int<bits<8> opc, string OpcodeStr,
2635 bit Is2Addr = 1> {
2636 defm SS : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
2637 !strconcat(OpcodeStr, "ss"), "", "_ss", ssmem, sse_load_f32, Is2Addr>, XS;
2638 defm SD : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
2639 !strconcat(OpcodeStr, "sd"), "2", "_sd", sdmem, sse_load_f64, Is2Addr>, XD;
2642 multiclass basic_sse12_fp_binop_p_int<bits<8> opc, string OpcodeStr,
2643 bit Is2Addr = 1> {
2644 defm PS : sse12_fp_packed_int<opc, OpcodeStr, VR128,
2645 !strconcat(OpcodeStr, "ps"), "sse", "_ps", f128mem, memopv4f32,
2646 SSEPackedSingle, Is2Addr>, TB;
2648 defm PD : sse12_fp_packed_int<opc, OpcodeStr, VR128,
2649 !strconcat(OpcodeStr, "pd"), "sse2", "_pd", f128mem, memopv2f64,
2650 SSEPackedDouble, Is2Addr>, TB, OpSize;
2651 }
2653 multiclass basic_sse12_fp_binop_p_y_int<bits<8> opc, string OpcodeStr> {
2654 defm PSY : sse12_fp_packed_int<opc, OpcodeStr, VR256,
2655 !strconcat(OpcodeStr, "ps"), "avx", "_ps_256", f256mem, memopv8f32,
2656 SSEPackedSingle, 0>, TB;
2658 defm PDY : sse12_fp_packed_int<opc, OpcodeStr, VR256,
2659 !strconcat(OpcodeStr, "pd"), "avx", "_pd_256", f256mem, memopv4f64,
2660 SSEPackedDouble, 0>, TB, OpSize;
2661 }
2663 // Binary Arithmetic instructions
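// The VEX_4V variants below are the AVX three-operand, non-destructive forms
// (the second source register travels in the VEX.vvvv field), while the SSE
// versions further down are two-address, tying $src1 to $dst.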
2664 defm VADD : basic_sse12_fp_binop_s<0x58, "add", fadd, 0>,
2665 basic_sse12_fp_binop_s_int<0x58, "add", 0>,
2666 basic_sse12_fp_binop_p<0x58, "add", fadd, 0>,
2667 basic_sse12_fp_binop_p_y<0x58, "add", fadd>, VEX_4V;
2668 defm VMUL : basic_sse12_fp_binop_s<0x59, "mul", fmul, 0>,
2669 basic_sse12_fp_binop_s_int<0x59, "mul", 0>,
2670 basic_sse12_fp_binop_p<0x59, "mul", fmul, 0>,
2671 basic_sse12_fp_binop_p_y<0x59, "mul", fmul>, VEX_4V;
2673 let isCommutable = 0 in {
2674 defm VSUB : basic_sse12_fp_binop_s<0x5C, "sub", fsub, 0>,
2675 basic_sse12_fp_binop_s_int<0x5C, "sub", 0>,
2676 basic_sse12_fp_binop_p<0x5C, "sub", fsub, 0>,
2677 basic_sse12_fp_binop_p_y<0x5C, "sub", fsub>, VEX_4V;
2678 defm VDIV : basic_sse12_fp_binop_s<0x5E, "div", fdiv, 0>,
2679 basic_sse12_fp_binop_s_int<0x5E, "div", 0>,
2680 basic_sse12_fp_binop_p<0x5E, "div", fdiv, 0>,
2681 basic_sse12_fp_binop_p_y<0x5E, "div", fdiv>, VEX_4V;
2682 defm VMAX : basic_sse12_fp_binop_s<0x5F, "max", X86fmax, 0>,
2683 basic_sse12_fp_binop_s_int<0x5F, "max", 0>,
2684 basic_sse12_fp_binop_p<0x5F, "max", X86fmax, 0>,
2685 basic_sse12_fp_binop_p_int<0x5F, "max", 0>,
2686 basic_sse12_fp_binop_p_y<0x5F, "max", X86fmax>,
2687 basic_sse12_fp_binop_p_y_int<0x5F, "max">, VEX_4V;
2688 defm VMIN : basic_sse12_fp_binop_s<0x5D, "min", X86fmin, 0>,
2689 basic_sse12_fp_binop_s_int<0x5D, "min", 0>,
2690 basic_sse12_fp_binop_p<0x5D, "min", X86fmin, 0>,
2691 basic_sse12_fp_binop_p_int<0x5D, "min", 0>,
2692 basic_sse12_fp_binop_p_y_int<0x5D, "min">,
2693 basic_sse12_fp_binop_p_y<0x5D, "min", X86fmin>, VEX_4V;
2694 }
2696 let Constraints = "$src1 = $dst" in {
2697 defm ADD : basic_sse12_fp_binop_s<0x58, "add", fadd>,
2698 basic_sse12_fp_binop_p<0x58, "add", fadd>,
2699 basic_sse12_fp_binop_s_int<0x58, "add">;
2700 defm MUL : basic_sse12_fp_binop_s<0x59, "mul", fmul>,
2701 basic_sse12_fp_binop_p<0x59, "mul", fmul>,
2702 basic_sse12_fp_binop_s_int<0x59, "mul">;
2704 let isCommutable = 0 in {
2705 defm SUB : basic_sse12_fp_binop_s<0x5C, "sub", fsub>,
2706 basic_sse12_fp_binop_p<0x5C, "sub", fsub>,
2707 basic_sse12_fp_binop_s_int<0x5C, "sub">;
2708 defm DIV : basic_sse12_fp_binop_s<0x5E, "div", fdiv>,
2709 basic_sse12_fp_binop_p<0x5E, "div", fdiv>,
2710 basic_sse12_fp_binop_s_int<0x5E, "div">;
2711 defm MAX : basic_sse12_fp_binop_s<0x5F, "max", X86fmax>,
2712 basic_sse12_fp_binop_p<0x5F, "max", X86fmax>,
2713 basic_sse12_fp_binop_s_int<0x5F, "max">,
2714 basic_sse12_fp_binop_p_int<0x5F, "max">;
2715 defm MIN : basic_sse12_fp_binop_s<0x5D, "min", X86fmin>,
2716 basic_sse12_fp_binop_p<0x5D, "min", X86fmin>,
2717 basic_sse12_fp_binop_s_int<0x5D, "min">,
2718 basic_sse12_fp_binop_p_int<0x5D, "min">;
2719 }
2720 }
2722 /// Unop Arithmetic
2723 /// In addition, we also have a special variant of the scalar form here to
2724 /// represent the associated intrinsic operation. This form is unlike the
2725 /// plain scalar form, in that it takes an entire vector (instead of a
2726 /// scalar) and leaves the top elements undefined.
2728 /// And, we have a special variant form for a full-vector intrinsic form.
2730 /// sse1_fp_unop_s - SSE1 unops in scalar form.
2731 multiclass sse1_fp_unop_s<bits<8> opc, string OpcodeStr,
2732 SDNode OpNode, Intrinsic F32Int> {
2733 def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
2734 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
2735 [(set FR32:$dst, (OpNode FR32:$src))]>;
2736 // For scalar unary operations, fold a load into the operation
2737 // only in OptForSize mode. Folding eliminates an instruction, but it also
2738 // eliminates the whole-register write the separate load would have done, so
2739 // it introduces a partial-register-update stall hazard.
2740 def SSm : I<opc, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
2741 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
2742 [(set FR32:$dst, (OpNode (load addr:$src)))]>, XS,
2743 Requires<[HasSSE1, OptForSize]>;
2744 def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2745 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
2746 [(set VR128:$dst, (F32Int VR128:$src))]>;
2747 def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins ssmem:$src),
2748 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
2749 [(set VR128:$dst, (F32Int sse_load_f32:$src))]>;
2750 }
2752 /// sse1_fp_unop_s_avx - AVX SSE1 unops in scalar form.
2753 multiclass sse1_fp_unop_s_avx<bits<8> opc, string OpcodeStr> {
2754 def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
2755 !strconcat(OpcodeStr,
2756 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
2757 def SSm : SSI<opc, MRMSrcMem, (outs FR32:$dst), (ins FR32:$src1,f32mem:$src2),
2758 !strconcat(OpcodeStr,
2759 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
2760 def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst),
2761 (ins ssmem:$src1, VR128:$src2),
2762 !strconcat(OpcodeStr,
2763 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
2766 /// sse1_fp_unop_p - SSE1 unops in packed form.
2767 multiclass sse1_fp_unop_p<bits<8> opc, string OpcodeStr, SDNode OpNode> {
2768 def PSr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2769 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
2770 [(set VR128:$dst, (v4f32 (OpNode VR128:$src)))]>;
2771 def PSm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2772 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
2773 [(set VR128:$dst, (OpNode (memopv4f32 addr:$src)))]>;
2774 }
2776 /// sse1_fp_unop_p_y - AVX 256-bit SSE1 unops in packed form.
2777 multiclass sse1_fp_unop_p_y<bits<8> opc, string OpcodeStr, SDNode OpNode> {
2778 def PSYr : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
2779 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
2780 [(set VR256:$dst, (v8f32 (OpNode VR256:$src)))]>;
2781 def PSYm : PSI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
2782 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
2783 [(set VR256:$dst, (OpNode (memopv8f32 addr:$src)))]>;
2784 }
2786 /// sse1_fp_unop_p_int - SSE1 intrinsics unops in packed forms.
2787 multiclass sse1_fp_unop_p_int<bits<8> opc, string OpcodeStr,
2788 Intrinsic V4F32Int> {
2789 def PSr_Int : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2790 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
2791 [(set VR128:$dst, (V4F32Int VR128:$src))]>;
2792 def PSm_Int : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2793 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
2794 [(set VR128:$dst, (V4F32Int (memopv4f32 addr:$src)))]>;
2795 }
2797 /// sse1_fp_unop_p_y_int - AVX 256-bit intrinsics unops in packed forms.
2798 multiclass sse1_fp_unop_p_y_int<bits<8> opc, string OpcodeStr,
2799 Intrinsic V4F32Int> {
2800 def PSYr_Int : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
2801 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
2802 [(set VR256:$dst, (V4F32Int VR256:$src))]>;
2803 def PSYm_Int : PSI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
2804 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
2805 [(set VR256:$dst, (V4F32Int (memopv8f32 addr:$src)))]>;
2806 }
2808 /// sse2_fp_unop_s - SSE2 unops in scalar form.
2809 multiclass sse2_fp_unop_s<bits<8> opc, string OpcodeStr,
2810 SDNode OpNode, Intrinsic F64Int> {
2811 def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
2812 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
2813 [(set FR64:$dst, (OpNode FR64:$src))]>;
2814 // See the comments in sse1_fp_unop_s for why this is OptForSize.
2815 def SDm : I<opc, MRMSrcMem, (outs FR64:$dst), (ins f64mem:$src),
2816 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
2817 [(set FR64:$dst, (OpNode (load addr:$src)))]>, XD,
2818 Requires<[HasSSE2, OptForSize]>;
2819 def SDr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2820 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
2821 [(set VR128:$dst, (F64Int VR128:$src))]>;
2822 def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins sdmem:$src),
2823 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
2824 [(set VR128:$dst, (F64Int sse_load_f64:$src))]>;
2825 }
2827 /// sse2_fp_unop_s_avx - AVX SSE2 unops in scalar form.
2828 multiclass sse2_fp_unop_s_avx<bits<8> opc, string OpcodeStr> {
2829 def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
2830 !strconcat(OpcodeStr,
2831 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
2832 def SDm : SDI<opc, MRMSrcMem, (outs FR64:$dst), (ins FR64:$src1,f64mem:$src2),
2833 !strconcat(OpcodeStr,
2834 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
2835 def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst),
2836 (ins VR128:$src1, sdmem:$src2),
2837 !strconcat(OpcodeStr,
2838 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
2841 /// sse2_fp_unop_p - SSE2 unops in vector forms.
2842 multiclass sse2_fp_unop_p<bits<8> opc, string OpcodeStr,
2843 SDNode OpNode> {
2844 def PDr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2845 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
2846 [(set VR128:$dst, (v2f64 (OpNode VR128:$src)))]>;
2847 def PDm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2848 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
2849 [(set VR128:$dst, (OpNode (memopv2f64 addr:$src)))]>;
2850 }
2852 /// sse2_fp_unop_p_y - AVX SSE2 256-bit unops in vector forms.
2853 multiclass sse2_fp_unop_p_y<bits<8> opc, string OpcodeStr, SDNode OpNode> {
2854 def PDYr : PDI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
2855 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
2856 [(set VR256:$dst, (v4f64 (OpNode VR256:$src)))]>;
2857 def PDYm : PDI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
2858 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
2859 [(set VR256:$dst, (OpNode (memopv4f64 addr:$src)))]>;
2860 }
2862 /// sse2_fp_unop_p_int - SSE2 intrinsic unops in vector forms.
2863 multiclass sse2_fp_unop_p_int<bits<8> opc, string OpcodeStr,
2864 Intrinsic V2F64Int> {
2865 def PDr_Int : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2866 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
2867 [(set VR128:$dst, (V2F64Int VR128:$src))]>;
2868 def PDm_Int : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2869 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
2870 [(set VR128:$dst, (V2F64Int (memopv2f64 addr:$src)))]>;
2871 }
2873 /// sse2_fp_unop_p_y_int - AVX 256-bit intrinsic unops in vector forms.
2874 multiclass sse2_fp_unop_p_y_int<bits<8> opc, string OpcodeStr,
2875 Intrinsic V2F64Int> {
2876 def PDYr_Int : PDI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
2877 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
2878 [(set VR256:$dst, (V2F64Int VR256:$src))]>;
2879 def PDYm_Int : PDI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
2880 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
2881 [(set VR256:$dst, (V2F64Int (memopv4f64 addr:$src)))]>;
2882 }
2884 let Predicates = [HasAVX] in {
2885 // Square root.
2886 defm VSQRT : sse1_fp_unop_s_avx<0x51, "vsqrt">,
2887 sse2_fp_unop_s_avx<0x51, "vsqrt">, VEX_4V;
2889 defm VSQRT : sse1_fp_unop_p<0x51, "vsqrt", fsqrt>,
2890 sse2_fp_unop_p<0x51, "vsqrt", fsqrt>,
2891 sse1_fp_unop_p_y<0x51, "vsqrt", fsqrt>,
2892 sse2_fp_unop_p_y<0x51, "vsqrt", fsqrt>,
2893 sse1_fp_unop_p_int<0x51, "vsqrt", int_x86_sse_sqrt_ps>,
2894 sse2_fp_unop_p_int<0x51, "vsqrt", int_x86_sse2_sqrt_pd>,
2895 sse1_fp_unop_p_y_int<0x51, "vsqrt", int_x86_avx_sqrt_ps_256>,
2896 sse2_fp_unop_p_y_int<0x51, "vsqrt", int_x86_avx_sqrt_pd_256>,
2897 VEX;
2899 // Reciprocal approximations. Note that these typically require refinement
2900 // in order to obtain suitable precision.
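// rcpps/rsqrtps deliver only about 12 bits of precision, so callers that need
// more typically apply a Newton-Raphson step on top of the hardware estimate,
// e.g. x1 = x0 * (2 - a * x0) for the reciprocal.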
2901 defm VRSQRT : sse1_fp_unop_s_avx<0x52, "vrsqrt">, VEX_4V;
2902 defm VRSQRT : sse1_fp_unop_p<0x52, "vrsqrt", X86frsqrt>,
2903 sse1_fp_unop_p_y<0x52, "vrsqrt", X86frsqrt>,
2904 sse1_fp_unop_p_y_int<0x52, "vrsqrt", int_x86_avx_rsqrt_ps_256>,
2905 sse1_fp_unop_p_int<0x52, "vrsqrt", int_x86_sse_rsqrt_ps>, VEX;
2907 defm VRCP : sse1_fp_unop_s_avx<0x53, "vrcp">, VEX_4V;
2908 defm VRCP : sse1_fp_unop_p<0x53, "vrcp", X86frcp>,
2909 sse1_fp_unop_p_y<0x53, "vrcp", X86frcp>,
2910 sse1_fp_unop_p_y_int<0x53, "vrcp", int_x86_avx_rcp_ps_256>,
2911 sse1_fp_unop_p_int<0x53, "vrcp", int_x86_sse_rcp_ps>, VEX;
2912 }
2914 def : Pat<(f32 (fsqrt FR32:$src)),
2915 (VSQRTSSr (f32 (IMPLICIT_DEF)), FR32:$src)>, Requires<[HasAVX]>;
2916 def : Pat<(f32 (fsqrt (load addr:$src))),
2917 (VSQRTSSm (f32 (IMPLICIT_DEF)), addr:$src)>,
2918 Requires<[HasAVX, OptForSize]>;
2919 def : Pat<(f64 (fsqrt FR64:$src)),
2920 (VSQRTSDr (f64 (IMPLICIT_DEF)), FR64:$src)>, Requires<[HasAVX]>;
2921 def : Pat<(f64 (fsqrt (load addr:$src))),
2922 (VSQRTSDm (f64 (IMPLICIT_DEF)), addr:$src)>,
2923 Requires<[HasAVX, OptForSize]>;
2925 def : Pat<(f32 (X86frsqrt FR32:$src)),
2926 (VRSQRTSSr (f32 (IMPLICIT_DEF)), FR32:$src)>, Requires<[HasAVX]>;
2927 def : Pat<(f32 (X86frsqrt (load addr:$src))),
2928 (VRSQRTSSm (f32 (IMPLICIT_DEF)), addr:$src)>,
2929 Requires<[HasAVX, OptForSize]>;
2931 def : Pat<(f32 (X86frcp FR32:$src)),
2932 (VRCPSSr (f32 (IMPLICIT_DEF)), FR32:$src)>, Requires<[HasAVX]>;
2933 def : Pat<(f32 (X86frcp (load addr:$src))),
2934 (VRCPSSm (f32 (IMPLICIT_DEF)), addr:$src)>,
2935 Requires<[HasAVX, OptForSize]>;
2937 let Predicates = [HasAVX] in {
2938 def : Pat<(int_x86_sse_sqrt_ss VR128:$src),
2939 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)),
2940 (VSQRTSSr (f32 (IMPLICIT_DEF)),
2941 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss)),
2942 sub_ss)>;
2943 def : Pat<(int_x86_sse_sqrt_ss sse_load_f32:$src),
2944 (VSQRTSSm_Int (v4f32 (IMPLICIT_DEF)), sse_load_f32:$src)>;
2946 def : Pat<(int_x86_sse2_sqrt_sd VR128:$src),
2947 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)),
2948 (VSQRTSDr (f64 (IMPLICIT_DEF)),
2949 (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd)),
2950 sub_sd)>;
2951 def : Pat<(int_x86_sse2_sqrt_sd sse_load_f64:$src),
2952 (VSQRTSDm_Int (v2f64 (IMPLICIT_DEF)), sse_load_f64:$src)>;
2954 def : Pat<(int_x86_sse_rsqrt_ss VR128:$src),
2955 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)),
2956 (VRSQRTSSr (f32 (IMPLICIT_DEF)),
2957 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss)),
2958 sub_ss)>;
2959 def : Pat<(int_x86_sse_rsqrt_ss sse_load_f32:$src),
2960 (VRSQRTSSm_Int (v4f32 (IMPLICIT_DEF)), sse_load_f32:$src)>;
2962 def : Pat<(int_x86_sse_rcp_ss VR128:$src),
2963 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)),
2964 (VRCPSSr (f32 (IMPLICIT_DEF)),
2965 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss)),
2966 sub_ss)>;
2967 def : Pat<(int_x86_sse_rcp_ss sse_load_f32:$src),
2968 (VRCPSSm_Int (v4f32 (IMPLICIT_DEF)), sse_load_f32:$src)>;
2969 }
2971 // Square root.
2972 defm SQRT : sse1_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse_sqrt_ss>,
2973 sse1_fp_unop_p<0x51, "sqrt", fsqrt>,
2974 sse1_fp_unop_p_int<0x51, "sqrt", int_x86_sse_sqrt_ps>,
2975 sse2_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse2_sqrt_sd>,
2976 sse2_fp_unop_p<0x51, "sqrt", fsqrt>,
2977 sse2_fp_unop_p_int<0x51, "sqrt", int_x86_sse2_sqrt_pd>;
2979 // Reciprocal approximations. Note that these typically require refinement
2980 // in order to obtain suitable precision.
2981 defm RSQRT : sse1_fp_unop_s<0x52, "rsqrt", X86frsqrt, int_x86_sse_rsqrt_ss>,
2982 sse1_fp_unop_p<0x52, "rsqrt", X86frsqrt>,
2983 sse1_fp_unop_p_int<0x52, "rsqrt", int_x86_sse_rsqrt_ps>;
2984 defm RCP : sse1_fp_unop_s<0x53, "rcp", X86frcp, int_x86_sse_rcp_ss>,
2985 sse1_fp_unop_p<0x53, "rcp", X86frcp>,
2986 sse1_fp_unop_p_int<0x53, "rcp", int_x86_sse_rcp_ps>;
2988 // There is no f64 version of the reciprocal approximation instructions.
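// (rcpsd/rsqrtsd were never added to the ISA; double-precision code that
// needs a reciprocal has to pay for a full-precision divsd/sqrtsd instead.)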
2990 //===----------------------------------------------------------------------===//
2991 // SSE 1 & 2 - Non-temporal stores
2992 //===----------------------------------------------------------------------===//
2994 let AddedComplexity = 400 in { // Prefer non-temporal versions
2995 def VMOVNTPSmr : VPSI<0x2B, MRMDestMem, (outs),
2996 (ins f128mem:$dst, VR128:$src),
2997 "movntps\t{$src, $dst|$dst, $src}",
2998 [(alignednontemporalstore (v4f32 VR128:$src),
2999 addr:$dst)]>, VEX;
3000 def VMOVNTPDmr : VPDI<0x2B, MRMDestMem, (outs),
3001 (ins f128mem:$dst, VR128:$src),
3002 "movntpd\t{$src, $dst|$dst, $src}",
3003 [(alignednontemporalstore (v2f64 VR128:$src),
3004 addr:$dst)]>, VEX;
3005 def VMOVNTDQ_64mr : VPDI<0xE7, MRMDestMem, (outs),
3006 (ins f128mem:$dst, VR128:$src),
3007 "movntdq\t{$src, $dst|$dst, $src}",
3008 [(alignednontemporalstore (v2f64 VR128:$src),
3009 addr:$dst)]>, VEX;
3011 let ExeDomain = SSEPackedInt in
3012 def VMOVNTDQmr : VPDI<0xE7, MRMDestMem, (outs),
3013 (ins f128mem:$dst, VR128:$src),
3014 "movntdq\t{$src, $dst|$dst, $src}",
3015 [(alignednontemporalstore (v4f32 VR128:$src),
3016 addr:$dst)]>, VEX;
3018 def : Pat<(alignednontemporalstore (v2i64 VR128:$src), addr:$dst),
3019 (VMOVNTDQmr addr:$dst, VR128:$src)>, Requires<[HasAVX]>;
3021 def VMOVNTPSYmr : VPSI<0x2B, MRMDestMem, (outs),
3022 (ins f256mem:$dst, VR256:$src),
3023 "movntps\t{$src, $dst|$dst, $src}",
3024 [(alignednontemporalstore (v8f32 VR256:$src),
3025 addr:$dst)]>, VEX;
3026 def VMOVNTPDYmr : VPDI<0x2B, MRMDestMem, (outs),
3027 (ins f256mem:$dst, VR256:$src),
3028 "movntpd\t{$src, $dst|$dst, $src}",
3029 [(alignednontemporalstore (v4f64 VR256:$src),
3030 addr:$dst)]>, VEX;
3031 def VMOVNTDQY_64mr : VPDI<0xE7, MRMDestMem, (outs),
3032 (ins f256mem:$dst, VR256:$src),
3033 "movntdq\t{$src, $dst|$dst, $src}",
3034 [(alignednontemporalstore (v4f64 VR256:$src),
3035 addr:$dst)]>, VEX;
3036 let ExeDomain = SSEPackedInt in
3037 def VMOVNTDQYmr : VPDI<0xE7, MRMDestMem, (outs),
3038 (ins f256mem:$dst, VR256:$src),
3039 "movntdq\t{$src, $dst|$dst, $src}",
3040 [(alignednontemporalstore (v8f32 VR256:$src),
3041 addr:$dst)]>, VEX;
3042 }
3044 def : Pat<(int_x86_avx_movnt_dq_256 addr:$dst, VR256:$src),
3045 (VMOVNTDQYmr addr:$dst, VR256:$src)>;
3046 def : Pat<(int_x86_avx_movnt_pd_256 addr:$dst, VR256:$src),
3047 (VMOVNTPDYmr addr:$dst, VR256:$src)>;
3048 def : Pat<(int_x86_avx_movnt_ps_256 addr:$dst, VR256:$src),
3049 (VMOVNTPSYmr addr:$dst, VR256:$src)>;
3051 let AddedComplexity = 400 in { // Prefer non-temporal versions
3052 def MOVNTPSmr : PSI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
3053 "movntps\t{$src, $dst|$dst, $src}",
3054 [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)]>;
3055 def MOVNTPDmr : PDI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
3056 "movntpd\t{$src, $dst|$dst, $src}",
3057 [(alignednontemporalstore (v2f64 VR128:$src), addr:$dst)]>;
3059 def MOVNTDQ_64mr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
3060 "movntdq\t{$src, $dst|$dst, $src}",
3061 [(alignednontemporalstore (v2f64 VR128:$src), addr:$dst)]>;
3063 let ExeDomain = SSEPackedInt in
3064 def MOVNTDQmr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
3065 "movntdq\t{$src, $dst|$dst, $src}",
3066 [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)]>;
3068 def : Pat<(alignednontemporalstore (v2i64 VR128:$src), addr:$dst),
3069 (MOVNTDQmr addr:$dst, VR128:$src)>;
3071 // There is no AVX form for instructions below this point
3072 def MOVNTImr : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
3073 "movnti{l}\t{$src, $dst|$dst, $src}",
3074 [(nontemporalstore (i32 GR32:$src), addr:$dst)]>,
3075 TB, Requires<[HasSSE2]>;
3076 def MOVNTI_64mr : RI<0xC3, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
3077 "movnti{q}\t{$src, $dst|$dst, $src}",
3078 [(nontemporalstore (i64 GR64:$src), addr:$dst)]>,
3079 TB, Requires<[HasSSE2]>;
3080 }
3082 //===----------------------------------------------------------------------===//
3083 // SSE 1 & 2 - Prefetch and memory fence
3084 //===----------------------------------------------------------------------===//
3086 // Prefetch intrinsic.
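// The (prefetch addr, rw, locality, type) operands map as follows: the
// locality hint picks the instruction, from 3 (prefetcht0, keep close to the
// core) down to 0 (prefetchnta, non-temporal), and the trailing (i32 1)
// requests a data rather than instruction prefetch.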
3087 def PREFETCHT0 : PSI<0x18, MRM1m, (outs), (ins i8mem:$src),
3088 "prefetcht0\t$src", [(prefetch addr:$src, imm, (i32 3), (i32 1))]>;
3089 def PREFETCHT1 : PSI<0x18, MRM2m, (outs), (ins i8mem:$src),
3090 "prefetcht1\t$src", [(prefetch addr:$src, imm, (i32 2), (i32 1))]>;
3091 def PREFETCHT2 : PSI<0x18, MRM3m, (outs), (ins i8mem:$src),
3092 "prefetcht2\t$src", [(prefetch addr:$src, imm, (i32 1), (i32 1))]>;
3093 def PREFETCHNTA : PSI<0x18, MRM0m, (outs), (ins i8mem:$src),
3094 "prefetchnta\t$src", [(prefetch addr:$src, imm, (i32 0), (i32 1))]>;
3096 // Load, store, and memory fence
3097 def SFENCE : I<0xAE, MRM_F8, (outs), (ins), "sfence", [(int_x86_sse_sfence)]>,
3098 TB, Requires<[HasSSE1]>;
3099 def : Pat<(X86SFence), (SFENCE)>;
3101 //===----------------------------------------------------------------------===//
3102 // SSE 1 & 2 - Load/Store MXCSR register
3103 //===----------------------------------------------------------------------===//
3105 def VLDMXCSR : VPSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
3106 "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>, VEX;
3107 def VSTMXCSR : VPSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
3108 "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>, VEX;
3110 def LDMXCSR : PSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
3111 "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>;
3112 def STMXCSR : PSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
3113 "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>;
3115 //===---------------------------------------------------------------------===//
3116 // SSE2 - Move Aligned/Unaligned Packed Integer Instructions
3117 //===---------------------------------------------------------------------===//
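// The ExeDomain annotations below feed the SSE execution-domain fixup pass,
// which tries to keep an instruction's producers and consumers in the same
// (integer vs. floating-point) domain to avoid cross-domain bypass delays.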
3119 let ExeDomain = SSEPackedInt in { // SSE integer instructions
3121 let neverHasSideEffects = 1 in {
3122 def VMOVDQArr : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3123 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
3124 def VMOVDQAYrr : VPDI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
3125 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
3127 def VMOVDQUrr : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3128 "movdqu\t{$src, $dst|$dst, $src}", []>, XS, VEX;
3129 def VMOVDQUYrr : VPDI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
3130 "movdqu\t{$src, $dst|$dst, $src}", []>, XS, VEX;
3132 let canFoldAsLoad = 1, mayLoad = 1 in {
3133 def VMOVDQArm : VPDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3134 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
3135 def VMOVDQAYrm : VPDI<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
3136 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
3137 let Predicates = [HasAVX] in {
3138 def VMOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3139 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
3140 def VMOVDQUYrm : I<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
3141 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
3145 let mayStore = 1 in {
3146 def VMOVDQAmr : VPDI<0x7F, MRMDestMem, (outs),
3147 (ins i128mem:$dst, VR128:$src),
3148 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
3149 def VMOVDQAYmr : VPDI<0x7F, MRMDestMem, (outs),
3150 (ins i256mem:$dst, VR256:$src),
3151 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
3152 let Predicates = [HasAVX] in {
3153 def VMOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
3154 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
3155 def VMOVDQUYmr : I<0x7F, MRMDestMem, (outs), (ins i256mem:$dst, VR256:$src),
3156 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
3160 let neverHasSideEffects = 1 in
3161 def MOVDQArr : PDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3162 "movdqa\t{$src, $dst|$dst, $src}", []>;
3164 def MOVDQUrr : I<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3165 "movdqu\t{$src, $dst|$dst, $src}",
3166 []>, XS, Requires<[HasSSE2]>;
3168 let canFoldAsLoad = 1, mayLoad = 1 in {
3169 def MOVDQArm : PDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3170 "movdqa\t{$src, $dst|$dst, $src}",
3171 [/*(set VR128:$dst, (alignedloadv2i64 addr:$src))*/]>;
3172 def MOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3173 "movdqu\t{$src, $dst|$dst, $src}",
3174 [/*(set VR128:$dst, (loadv2i64 addr:$src))*/]>,
3175 XS, Requires<[HasSSE2]>;
3176 }
3178 let mayStore = 1 in {
3179 def MOVDQAmr : PDI<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
3180 "movdqa\t{$src, $dst|$dst, $src}",
3181 [/*(alignedstore (v2i64 VR128:$src), addr:$dst)*/]>;
3182 def MOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
3183 "movdqu\t{$src, $dst|$dst, $src}",
3184 [/*(store (v2i64 VR128:$src), addr:$dst)*/]>,
3185 XS, Requires<[HasSSE2]>;
3186 }
3188 // Intrinsic forms of MOVDQU store
3189 def VMOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
3190 "vmovdqu\t{$src, $dst|$dst, $src}",
3191 [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
3192 XS, VEX, Requires<[HasAVX]>;
3194 def MOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
3195 "movdqu\t{$src, $dst|$dst, $src}",
3196 [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
3197 XS, Requires<[HasSSE2]>;
3199 } // ExeDomain = SSEPackedInt
3201 def : Pat<(int_x86_avx_loadu_dq_256 addr:$src), (VMOVDQUYrm addr:$src)>;
3202 def : Pat<(int_x86_avx_storeu_dq_256 addr:$dst, VR256:$src),
3203 (VMOVDQUYmr addr:$dst, VR256:$src)>;
3205 //===---------------------------------------------------------------------===//
3206 // SSE2 - Packed Integer Arithmetic Instructions
3207 //===---------------------------------------------------------------------===//
3209 let ExeDomain = SSEPackedInt in { // SSE integer instructions
3211 multiclass PDI_binop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId,
3212 bit IsCommutable = 0, bit Is2Addr = 1> {
3213 let isCommutable = IsCommutable in
3214 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
3215 (ins VR128:$src1, VR128:$src2),
3216 !if(Is2Addr,
3217 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3218 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3219 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
3220 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
3221 (ins VR128:$src1, i128mem:$src2),
3222 !if(Is2Addr,
3223 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3224 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3225 [(set VR128:$dst, (IntId VR128:$src1,
3226 (bitconvert (memopv2i64 addr:$src2))))]>;
3227 }
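// PDI_binop_rmi_int - shifts that exist with both an XMM and an immediate
// count: IntId is the intrinsic whose count is taken from the low quadword
// of an XMM register (rr/rm forms), IntId2 the one taking an immediate (ri).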
3229 multiclass PDI_binop_rmi_int<bits<8> opc, bits<8> opc2, Format ImmForm,
3230 string OpcodeStr, Intrinsic IntId,
3231 Intrinsic IntId2, bit Is2Addr = 1> {
3232 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
3233 (ins VR128:$src1, VR128:$src2),
3234 !if(Is2Addr,
3235 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3236 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3237 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
3238 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
3239 (ins VR128:$src1, i128mem:$src2),
3240 !if(Is2Addr,
3241 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3242 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3243 [(set VR128:$dst, (IntId VR128:$src1,
3244 (bitconvert (memopv2i64 addr:$src2))))]>;
3245 def ri : PDIi8<opc2, ImmForm, (outs VR128:$dst),
3246 (ins VR128:$src1, i32i8imm:$src2),
3247 !if(Is2Addr,
3248 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3249 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3250 [(set VR128:$dst, (IntId2 VR128:$src1, (i32 imm:$src2)))]>;
3251 }
3253 /// PDI_binop_rm - Simple SSE2 binary operator.
3254 multiclass PDI_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
3255 ValueType OpVT, bit IsCommutable = 0, bit Is2Addr = 1> {
3256 let isCommutable = IsCommutable in
3257 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
3258 (ins VR128:$src1, VR128:$src2),
3259 !if(Is2Addr,
3260 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3261 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3262 [(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]>;
3263 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
3264 (ins VR128:$src1, i128mem:$src2),
3265 !if(Is2Addr,
3266 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3267 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3268 [(set VR128:$dst, (OpVT (OpNode VR128:$src1,
3269 (bitconvert (memopv2i64 addr:$src2)))))]>;
3270 }
3272 /// PDI_binop_rm_v2i64 - Simple SSE2 binary operator whose type is v2i64.
3274 /// FIXME: we could eliminate this and use PDI_binop_rm instead if tblgen knew
3275 /// to collapse (bitconvert VT to VT) into its operand.
3277 multiclass PDI_binop_rm_v2i64<bits<8> opc, string OpcodeStr, SDNode OpNode,
3278 bit IsCommutable = 0, bit Is2Addr = 1> {
3279 let isCommutable = IsCommutable in
3280 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
3281 (ins VR128:$src1, VR128:$src2),
3282 !if(Is2Addr,
3283 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3284 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3285 [(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))]>;
3286 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
3287 (ins VR128:$src1, i128mem:$src2),
3288 !if(Is2Addr,
3289 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3290 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3291 [(set VR128:$dst, (OpNode VR128:$src1, (memopv2i64 addr:$src2)))]>;
3292 }
3294 } // ExeDomain = SSEPackedInt
3296 // 128-bit Integer Arithmetic
3298 let Predicates = [HasAVX] in {
3299 defm VPADDB : PDI_binop_rm<0xFC, "vpaddb", add, v16i8, 1, 0 /*3addr*/>, VEX_4V;
3300 defm VPADDW : PDI_binop_rm<0xFD, "vpaddw", add, v8i16, 1, 0>, VEX_4V;
3301 defm VPADDD : PDI_binop_rm<0xFE, "vpaddd", add, v4i32, 1, 0>, VEX_4V;
3302 defm VPADDQ : PDI_binop_rm_v2i64<0xD4, "vpaddq", add, 1, 0>, VEX_4V;
3303 defm VPMULLW : PDI_binop_rm<0xD5, "vpmullw", mul, v8i16, 1, 0>, VEX_4V;
3304 defm VPSUBB : PDI_binop_rm<0xF8, "vpsubb", sub, v16i8, 0, 0>, VEX_4V;
3305 defm VPSUBW : PDI_binop_rm<0xF9, "vpsubw", sub, v8i16, 0, 0>, VEX_4V;
3306 defm VPSUBD : PDI_binop_rm<0xFA, "vpsubd", sub, v4i32, 0, 0>, VEX_4V;
3307 defm VPSUBQ : PDI_binop_rm_v2i64<0xFB, "vpsubq", sub, 0, 0>, VEX_4V;
3309 // Intrinsic forms
3310 defm VPSUBSB : PDI_binop_rm_int<0xE8, "vpsubsb" , int_x86_sse2_psubs_b, 0, 0>,
3311 VEX_4V;
3312 defm VPSUBSW : PDI_binop_rm_int<0xE9, "vpsubsw" , int_x86_sse2_psubs_w, 0, 0>,
3313 VEX_4V;
3314 defm VPSUBUSB : PDI_binop_rm_int<0xD8, "vpsubusb", int_x86_sse2_psubus_b, 0, 0>,
3315 VEX_4V;
3316 defm VPSUBUSW : PDI_binop_rm_int<0xD9, "vpsubusw", int_x86_sse2_psubus_w, 0, 0>,
3317 VEX_4V;
3318 defm VPADDSB : PDI_binop_rm_int<0xEC, "vpaddsb" , int_x86_sse2_padds_b, 1, 0>,
3319 VEX_4V;
3320 defm VPADDSW : PDI_binop_rm_int<0xED, "vpaddsw" , int_x86_sse2_padds_w, 1, 0>,
3321 VEX_4V;
3322 defm VPADDUSB : PDI_binop_rm_int<0xDC, "vpaddusb", int_x86_sse2_paddus_b, 1, 0>,
3323 VEX_4V;
3324 defm VPADDUSW : PDI_binop_rm_int<0xDD, "vpaddusw", int_x86_sse2_paddus_w, 1, 0>,
3325 VEX_4V;
3326 defm VPMULHUW : PDI_binop_rm_int<0xE4, "vpmulhuw", int_x86_sse2_pmulhu_w, 1, 0>,
3327 VEX_4V;
3328 defm VPMULHW : PDI_binop_rm_int<0xE5, "vpmulhw" , int_x86_sse2_pmulh_w, 1, 0>,
3329 VEX_4V;
3330 defm VPMULUDQ : PDI_binop_rm_int<0xF4, "vpmuludq", int_x86_sse2_pmulu_dq, 1, 0>,
3331 VEX_4V;
3332 defm VPMADDWD : PDI_binop_rm_int<0xF5, "vpmaddwd", int_x86_sse2_pmadd_wd, 1, 0>,
3333 VEX_4V;
3334 defm VPAVGB : PDI_binop_rm_int<0xE0, "vpavgb", int_x86_sse2_pavg_b, 1, 0>,
3335 VEX_4V;
3336 defm VPAVGW : PDI_binop_rm_int<0xE3, "vpavgw", int_x86_sse2_pavg_w, 1, 0>,
3337 VEX_4V;
3338 defm VPMINUB : PDI_binop_rm_int<0xDA, "vpminub", int_x86_sse2_pminu_b, 1, 0>,
3339 VEX_4V;
3340 defm VPMINSW : PDI_binop_rm_int<0xEA, "vpminsw", int_x86_sse2_pmins_w, 1, 0>,
3341 VEX_4V;
3342 defm VPMAXUB : PDI_binop_rm_int<0xDE, "vpmaxub", int_x86_sse2_pmaxu_b, 1, 0>,
3343 VEX_4V;
3344 defm VPMAXSW : PDI_binop_rm_int<0xEE, "vpmaxsw", int_x86_sse2_pmaxs_w, 1, 0>,
3345 VEX_4V;
3346 defm VPSADBW : PDI_binop_rm_int<0xF6, "vpsadbw", int_x86_sse2_psad_bw, 1, 0>,
3347 VEX_4V;
3348 }
3350 let Constraints = "$src1 = $dst" in {
3351 defm PADDB : PDI_binop_rm<0xFC, "paddb", add, v16i8, 1>;
3352 defm PADDW : PDI_binop_rm<0xFD, "paddw", add, v8i16, 1>;
3353 defm PADDD : PDI_binop_rm<0xFE, "paddd", add, v4i32, 1>;
3354 defm PADDQ : PDI_binop_rm_v2i64<0xD4, "paddq", add, 1>;
3355 defm PMULLW : PDI_binop_rm<0xD5, "pmullw", mul, v8i16, 1>;
3356 defm PSUBB : PDI_binop_rm<0xF8, "psubb", sub, v16i8>;
3357 defm PSUBW : PDI_binop_rm<0xF9, "psubw", sub, v8i16>;
3358 defm PSUBD : PDI_binop_rm<0xFA, "psubd", sub, v4i32>;
3359 defm PSUBQ : PDI_binop_rm_v2i64<0xFB, "psubq", sub>;
3361 // Intrinsic forms
3362 defm PSUBSB : PDI_binop_rm_int<0xE8, "psubsb" , int_x86_sse2_psubs_b>;
3363 defm PSUBSW : PDI_binop_rm_int<0xE9, "psubsw" , int_x86_sse2_psubs_w>;
3364 defm PSUBUSB : PDI_binop_rm_int<0xD8, "psubusb", int_x86_sse2_psubus_b>;
3365 defm PSUBUSW : PDI_binop_rm_int<0xD9, "psubusw", int_x86_sse2_psubus_w>;
3366 defm PADDSB : PDI_binop_rm_int<0xEC, "paddsb" , int_x86_sse2_padds_b, 1>;
3367 defm PADDSW : PDI_binop_rm_int<0xED, "paddsw" , int_x86_sse2_padds_w, 1>;
3368 defm PADDUSB : PDI_binop_rm_int<0xDC, "paddusb", int_x86_sse2_paddus_b, 1>;
3369 defm PADDUSW : PDI_binop_rm_int<0xDD, "paddusw", int_x86_sse2_paddus_w, 1>;
3370 defm PMULHUW : PDI_binop_rm_int<0xE4, "pmulhuw", int_x86_sse2_pmulhu_w, 1>;
3371 defm PMULHW : PDI_binop_rm_int<0xE5, "pmulhw" , int_x86_sse2_pmulh_w, 1>;
3372 defm PMULUDQ : PDI_binop_rm_int<0xF4, "pmuludq", int_x86_sse2_pmulu_dq, 1>;
3373 defm PMADDWD : PDI_binop_rm_int<0xF5, "pmaddwd", int_x86_sse2_pmadd_wd, 1>;
3374 defm PAVGB : PDI_binop_rm_int<0xE0, "pavgb", int_x86_sse2_pavg_b, 1>;
3375 defm PAVGW : PDI_binop_rm_int<0xE3, "pavgw", int_x86_sse2_pavg_w, 1>;
3376 defm PMINUB : PDI_binop_rm_int<0xDA, "pminub", int_x86_sse2_pminu_b, 1>;
3377 defm PMINSW : PDI_binop_rm_int<0xEA, "pminsw", int_x86_sse2_pmins_w, 1>;
3378 defm PMAXUB : PDI_binop_rm_int<0xDE, "pmaxub", int_x86_sse2_pmaxu_b, 1>;
3379 defm PMAXSW : PDI_binop_rm_int<0xEE, "pmaxsw", int_x86_sse2_pmaxs_w, 1>;
3380 defm PSADBW : PDI_binop_rm_int<0xF6, "psadbw", int_x86_sse2_psad_bw, 1>;
3382 } // Constraints = "$src1 = $dst"
3384 //===---------------------------------------------------------------------===//
3385 // SSE2 - Packed Integer Logical Instructions
3386 //===---------------------------------------------------------------------===//
3388 let Predicates = [HasAVX] in {
3389 defm VPSLLW : PDI_binop_rmi_int<0xF1, 0x71, MRM6r, "vpsllw",
3390 int_x86_sse2_psll_w, int_x86_sse2_pslli_w, 0>,
3391 VEX_4V;
3392 defm VPSLLD : PDI_binop_rmi_int<0xF2, 0x72, MRM6r, "vpslld",
3393 int_x86_sse2_psll_d, int_x86_sse2_pslli_d, 0>,
3394 VEX_4V;
3395 defm VPSLLQ : PDI_binop_rmi_int<0xF3, 0x73, MRM6r, "vpsllq",
3396 int_x86_sse2_psll_q, int_x86_sse2_pslli_q, 0>,
3397 VEX_4V;
3399 defm VPSRLW : PDI_binop_rmi_int<0xD1, 0x71, MRM2r, "vpsrlw",
3400 int_x86_sse2_psrl_w, int_x86_sse2_psrli_w, 0>,
3401 VEX_4V;
3402 defm VPSRLD : PDI_binop_rmi_int<0xD2, 0x72, MRM2r, "vpsrld",
3403 int_x86_sse2_psrl_d, int_x86_sse2_psrli_d, 0>,
3404 VEX_4V;
3405 defm VPSRLQ : PDI_binop_rmi_int<0xD3, 0x73, MRM2r, "vpsrlq",
3406 int_x86_sse2_psrl_q, int_x86_sse2_psrli_q, 0>,
3407 VEX_4V;
3409 defm VPSRAW : PDI_binop_rmi_int<0xE1, 0x71, MRM4r, "vpsraw",
3410 int_x86_sse2_psra_w, int_x86_sse2_psrai_w, 0>,
3411 VEX_4V;
3412 defm VPSRAD : PDI_binop_rmi_int<0xE2, 0x72, MRM4r, "vpsrad",
3413 int_x86_sse2_psra_d, int_x86_sse2_psrai_d, 0>,
3414 VEX_4V;
3416 defm VPAND : PDI_binop_rm_v2i64<0xDB, "vpand", and, 1, 0>, VEX_4V;
3417 defm VPOR : PDI_binop_rm_v2i64<0xEB, "vpor" , or, 1, 0>, VEX_4V;
3418 defm VPXOR : PDI_binop_rm_v2i64<0xEF, "vpxor", xor, 1, 0>, VEX_4V;
3420 let ExeDomain = SSEPackedInt in {
3421 let neverHasSideEffects = 1 in {
3422 // 128-bit logical shifts.
3423 def VPSLLDQri : PDIi8<0x73, MRM7r,
3424 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
3425 "vpslldq\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
3427 def VPSRLDQri : PDIi8<0x73, MRM3r,
3428 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
3429 "vpsrldq\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
3431 // PSRADQri doesn't exist in SSE[1-3].
3432 }
3433 def VPANDNrr : PDI<0xDF, MRMSrcReg,
3434 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
3435 "vpandn\t{$src2, $src1, $dst|$dst, $src1, $src2}",
3437 (v2i64 (X86andnp VR128:$src1, VR128:$src2)))]>,VEX_4V;
3439 def VPANDNrm : PDI<0xDF, MRMSrcMem,
3440 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
3441 "vpandn\t{$src2, $src1, $dst|$dst, $src1, $src2}",
3442 [(set VR128:$dst, (X86andnp VR128:$src1,
3443 (memopv2i64 addr:$src2)))]>, VEX_4V;
3444 }
3445 }
3447 let Constraints = "$src1 = $dst" in {
3448 defm PSLLW : PDI_binop_rmi_int<0xF1, 0x71, MRM6r, "psllw",
3449 int_x86_sse2_psll_w, int_x86_sse2_pslli_w>;
3450 defm PSLLD : PDI_binop_rmi_int<0xF2, 0x72, MRM6r, "pslld",
3451 int_x86_sse2_psll_d, int_x86_sse2_pslli_d>;
3452 defm PSLLQ : PDI_binop_rmi_int<0xF3, 0x73, MRM6r, "psllq",
3453 int_x86_sse2_psll_q, int_x86_sse2_pslli_q>;
3455 defm PSRLW : PDI_binop_rmi_int<0xD1, 0x71, MRM2r, "psrlw",
3456 int_x86_sse2_psrl_w, int_x86_sse2_psrli_w>;
3457 defm PSRLD : PDI_binop_rmi_int<0xD2, 0x72, MRM2r, "psrld",
3458 int_x86_sse2_psrl_d, int_x86_sse2_psrli_d>;
3459 defm PSRLQ : PDI_binop_rmi_int<0xD3, 0x73, MRM2r, "psrlq",
3460 int_x86_sse2_psrl_q, int_x86_sse2_psrli_q>;
3462 defm PSRAW : PDI_binop_rmi_int<0xE1, 0x71, MRM4r, "psraw",
3463 int_x86_sse2_psra_w, int_x86_sse2_psrai_w>;
3464 defm PSRAD : PDI_binop_rmi_int<0xE2, 0x72, MRM4r, "psrad",
3465 int_x86_sse2_psra_d, int_x86_sse2_psrai_d>;
3467 defm PAND : PDI_binop_rm_v2i64<0xDB, "pand", and, 1>;
3468 defm POR : PDI_binop_rm_v2i64<0xEB, "por" , or, 1>;
3469 defm PXOR : PDI_binop_rm_v2i64<0xEF, "pxor", xor, 1>;
3471 let ExeDomain = SSEPackedInt in {
3472 let neverHasSideEffects = 1 in {
3473 // 128-bit logical shifts.
3474 def PSLLDQri : PDIi8<0x73, MRM7r,
3475 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
3476 "pslldq\t{$src2, $dst|$dst, $src2}", []>;
3477 def PSRLDQri : PDIi8<0x73, MRM3r,
3478 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
3479 "psrldq\t{$src2, $dst|$dst, $src2}", []>;
3480 // PSRADQri doesn't exist in SSE[1-3].
3481 }
3482 def PANDNrr : PDI<0xDF, MRMSrcReg,
3483 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
3484 "pandn\t{$src2, $dst|$dst, $src2}", []>;
3486 def PANDNrm : PDI<0xDF, MRMSrcMem,
3487 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
3488 "pandn\t{$src2, $dst|$dst, $src2}", []>;
3490 } // Constraints = "$src1 = $dst"
3492 let Predicates = [HasAVX] in {
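// BYTE_imm is assumed here to rescale the immediate from bits to bytes
// (imm >> 3): the psll_dq/psrl_dq intrinsics take a bit count, while
// pslldq/psrldq shift whole bytes; the _bs ("byte shift") variants already
// take a byte count, so their immediate is passed through unchanged.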
3493 def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
3494 (v2i64 (VPSLLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
3495 def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
3496 (v2i64 (VPSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
3497 def : Pat<(int_x86_sse2_psll_dq_bs VR128:$src1, imm:$src2),
3498 (v2i64 (VPSLLDQri VR128:$src1, imm:$src2))>;
3499 def : Pat<(int_x86_sse2_psrl_dq_bs VR128:$src1, imm:$src2),
3500 (v2i64 (VPSRLDQri VR128:$src1, imm:$src2))>;
3501 def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
3502 (v2f64 (VPSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
3504 // Shift up / down and insert zeros.
3505 def : Pat<(v2i64 (X86vshl VR128:$src, (i8 imm:$amt))),
3506 (v2i64 (VPSLLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
3507 def : Pat<(v2i64 (X86vshr VR128:$src, (i8 imm:$amt))),
3508 (v2i64 (VPSRLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
3509 }
3511 let Predicates = [HasSSE2] in {
3512 def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
3513 (v2i64 (PSLLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
3514 def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
3515 (v2i64 (PSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
3516 def : Pat<(int_x86_sse2_psll_dq_bs VR128:$src1, imm:$src2),
3517 (v2i64 (PSLLDQri VR128:$src1, imm:$src2))>;
3518 def : Pat<(int_x86_sse2_psrl_dq_bs VR128:$src1, imm:$src2),
3519 (v2i64 (PSRLDQri VR128:$src1, imm:$src2))>;
3520 def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
3521 (v2f64 (PSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
3523 // Shift up / down and insert zeros.
3524 def : Pat<(v2i64 (X86vshl VR128:$src, (i8 imm:$amt))),
3525 (v2i64 (PSLLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
3526 def : Pat<(v2i64 (X86vshr VR128:$src, (i8 imm:$amt))),
3527 (v2i64 (PSRLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
3528 }
3530 //===---------------------------------------------------------------------===//
3531 // SSE2 - Packed Integer Comparison Instructions
3532 //===---------------------------------------------------------------------===//
3534 let Predicates = [HasAVX] in {
3535 defm VPCMPEQB : PDI_binop_rm_int<0x74, "vpcmpeqb", int_x86_sse2_pcmpeq_b, 1,
3536 0>, VEX_4V;
3537 defm VPCMPEQW : PDI_binop_rm_int<0x75, "vpcmpeqw", int_x86_sse2_pcmpeq_w, 1,
3538 0>, VEX_4V;
3539 defm VPCMPEQD : PDI_binop_rm_int<0x76, "vpcmpeqd", int_x86_sse2_pcmpeq_d, 1,
3540 0>, VEX_4V;
3541 defm VPCMPGTB : PDI_binop_rm_int<0x64, "vpcmpgtb", int_x86_sse2_pcmpgt_b, 0,
3542 0>, VEX_4V;
3543 defm VPCMPGTW : PDI_binop_rm_int<0x65, "vpcmpgtw", int_x86_sse2_pcmpgt_w, 0,
3544 0>, VEX_4V;
3545 defm VPCMPGTD : PDI_binop_rm_int<0x66, "vpcmpgtd", int_x86_sse2_pcmpgt_d, 0,
3546 0>, VEX_4V;
3548 def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, VR128:$src2)),
3549 (VPCMPEQBrr VR128:$src1, VR128:$src2)>;
3550 def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, (memop addr:$src2))),
3551 (VPCMPEQBrm VR128:$src1, addr:$src2)>;
3552 def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, VR128:$src2)),
3553 (VPCMPEQWrr VR128:$src1, VR128:$src2)>;
3554 def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, (memop addr:$src2))),
3555 (VPCMPEQWrm VR128:$src1, addr:$src2)>;
3556 def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, VR128:$src2)),
3557 (VPCMPEQDrr VR128:$src1, VR128:$src2)>;
3558 def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, (memop addr:$src2))),
3559 (VPCMPEQDrm VR128:$src1, addr:$src2)>;
3561 def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, VR128:$src2)),
3562 (VPCMPGTBrr VR128:$src1, VR128:$src2)>;
3563 def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, (memop addr:$src2))),
3564 (VPCMPGTBrm VR128:$src1, addr:$src2)>;
3565 def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, VR128:$src2)),
3566 (VPCMPGTWrr VR128:$src1, VR128:$src2)>;
3567 def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, (memop addr:$src2))),
3568 (VPCMPGTWrm VR128:$src1, addr:$src2)>;
3569 def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, VR128:$src2)),
3570 (VPCMPGTDrr VR128:$src1, VR128:$src2)>;
3571 def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, (memop addr:$src2))),
3572 (VPCMPGTDrm VR128:$src1, addr:$src2)>;
3573 }
3575 let Constraints = "$src1 = $dst" in {
3576 defm PCMPEQB : PDI_binop_rm_int<0x74, "pcmpeqb", int_x86_sse2_pcmpeq_b, 1>;
3577 defm PCMPEQW : PDI_binop_rm_int<0x75, "pcmpeqw", int_x86_sse2_pcmpeq_w, 1>;
3578 defm PCMPEQD : PDI_binop_rm_int<0x76, "pcmpeqd", int_x86_sse2_pcmpeq_d, 1>;
3579 defm PCMPGTB : PDI_binop_rm_int<0x64, "pcmpgtb", int_x86_sse2_pcmpgt_b>;
3580 defm PCMPGTW : PDI_binop_rm_int<0x65, "pcmpgtw", int_x86_sse2_pcmpgt_w>;
3581 defm PCMPGTD : PDI_binop_rm_int<0x66, "pcmpgtd", int_x86_sse2_pcmpgt_d>;
3582 } // Constraints = "$src1 = $dst"
3584 def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, VR128:$src2)),
3585 (PCMPEQBrr VR128:$src1, VR128:$src2)>;
3586 def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, (memop addr:$src2))),
3587 (PCMPEQBrm VR128:$src1, addr:$src2)>;
3588 def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, VR128:$src2)),
3589 (PCMPEQWrr VR128:$src1, VR128:$src2)>;
3590 def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, (memop addr:$src2))),
3591 (PCMPEQWrm VR128:$src1, addr:$src2)>;
3592 def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, VR128:$src2)),
3593 (PCMPEQDrr VR128:$src1, VR128:$src2)>;
3594 def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, (memop addr:$src2))),
3595 (PCMPEQDrm VR128:$src1, addr:$src2)>;
3597 def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, VR128:$src2)),
3598 (PCMPGTBrr VR128:$src1, VR128:$src2)>;
3599 def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, (memop addr:$src2))),
3600 (PCMPGTBrm VR128:$src1, addr:$src2)>;
3601 def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, VR128:$src2)),
3602 (PCMPGTWrr VR128:$src1, VR128:$src2)>;
3603 def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, (memop addr:$src2))),
3604 (PCMPGTWrm VR128:$src1, addr:$src2)>;
3605 def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, VR128:$src2)),
3606 (PCMPGTDrr VR128:$src1, VR128:$src2)>;
3607 def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, (memop addr:$src2))),
3608 (PCMPGTDrm VR128:$src1, addr:$src2)>;
3610 //===---------------------------------------------------------------------===//
3611 // SSE2 - Packed Integer Pack Instructions
3612 //===---------------------------------------------------------------------===//
3614 let Predicates = [HasAVX] in {
3615 defm VPACKSSWB : PDI_binop_rm_int<0x63, "vpacksswb", int_x86_sse2_packsswb_128,
3616 0, 0>, VEX_4V;
3617 defm VPACKSSDW : PDI_binop_rm_int<0x6B, "vpackssdw", int_x86_sse2_packssdw_128,
3618 0, 0>, VEX_4V;
3619 defm VPACKUSWB : PDI_binop_rm_int<0x67, "vpackuswb", int_x86_sse2_packuswb_128,
3620 0, 0>, VEX_4V;
3621 }
3623 let Constraints = "$src1 = $dst" in {
3624 defm PACKSSWB : PDI_binop_rm_int<0x63, "packsswb", int_x86_sse2_packsswb_128>;
3625 defm PACKSSDW : PDI_binop_rm_int<0x6B, "packssdw", int_x86_sse2_packssdw_128>;
3626 defm PACKUSWB : PDI_binop_rm_int<0x67, "packuswb", int_x86_sse2_packuswb_128>;
3627 } // Constraints = "$src1 = $dst"
3629 //===---------------------------------------------------------------------===//
3630 // SSE2 - Packed Integer Shuffle Instructions
3631 //===---------------------------------------------------------------------===//
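// The imm8 operand encodes one 2-bit source-element index per destination
// element: pshufd shuffles all four dwords, while pshufhw and pshuflw shuffle
// only the high or low four words and copy the other half through unchanged.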
3633 let ExeDomain = SSEPackedInt in {
3634 multiclass sse2_pshuffle<string OpcodeStr, ValueType vt, PatFrag pshuf_frag,
3635 PatFrag bc_frag> {
3636 def ri : Ii8<0x70, MRMSrcReg,
3637 (outs VR128:$dst), (ins VR128:$src1, i8imm:$src2),
3638 !strconcat(OpcodeStr,
3639 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3640 [(set VR128:$dst, (vt (pshuf_frag:$src2 VR128:$src1,
3641 (undef))))]>;
3642 def mi : Ii8<0x70, MRMSrcMem,
3643 (outs VR128:$dst), (ins i128mem:$src1, i8imm:$src2),
3644 !strconcat(OpcodeStr,
3645 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3646 [(set VR128:$dst, (vt (pshuf_frag:$src2
3647 (bc_frag (memopv2i64 addr:$src1)),
3648 (undef))))]>;
3649 }
3650 } // ExeDomain = SSEPackedInt
3652 let Predicates = [HasAVX] in {
3653 let AddedComplexity = 5 in
3654 defm VPSHUFD : sse2_pshuffle<"vpshufd", v4i32, pshufd, bc_v4i32>, TB, OpSize,
3655 VEX;
3657 // SSE2 with ImmT == Imm8 and XS prefix.
3658 defm VPSHUFHW : sse2_pshuffle<"vpshufhw", v8i16, pshufhw, bc_v8i16>, XS,
3659 VEX;
3661 // SSE2 with ImmT == Imm8 and XD prefix.
3662 defm VPSHUFLW : sse2_pshuffle<"vpshuflw", v8i16, pshuflw, bc_v8i16>, XD,
3663 VEX;
3665 let AddedComplexity = 5 in
3666 def : Pat<(v4f32 (pshufd:$src2 VR128:$src1, (undef))),
3667 (VPSHUFDri VR128:$src1, (SHUFFLE_get_shuf_imm VR128:$src2))>;
3668 // Unary v4f32 shuffle with VPSHUF* in order to fold a load.
3669 def : Pat<(pshufd:$src2 (bc_v4i32 (memopv4f32 addr:$src1)), (undef)),
3670 (VPSHUFDmi addr:$src1, (SHUFFLE_get_shuf_imm VR128:$src2))>;
3672 def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv2i64 addr:$src1)),
3673 (i8 imm:$imm))),
3674 (VPSHUFDmi addr:$src1, imm:$imm)>;
3675 def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv4f32 addr:$src1)),
3676 (i8 imm:$imm))),
3677 (VPSHUFDmi addr:$src1, imm:$imm)>;
3678 def : Pat<(v4f32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
3679 (VPSHUFDri VR128:$src1, imm:$imm)>;
3680 def : Pat<(v4i32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
3681 (VPSHUFDri VR128:$src1, imm:$imm)>;
3682 def : Pat<(v8i16 (X86PShufhw VR128:$src, (i8 imm:$imm))),
3683 (VPSHUFHWri VR128:$src, imm:$imm)>;
3684 def : Pat<(v8i16 (X86PShufhw (bc_v8i16 (memopv2i64 addr:$src)),
3685 (i8 imm:$imm))),
3686 (VPSHUFHWmi addr:$src, imm:$imm)>;
3687 def : Pat<(v8i16 (X86PShuflw VR128:$src, (i8 imm:$imm))),
3688 (VPSHUFLWri VR128:$src, imm:$imm)>;
3689 def : Pat<(v8i16 (X86PShuflw (bc_v8i16 (memopv2i64 addr:$src)),
3690 (i8 imm:$imm))),
3691 (VPSHUFLWmi addr:$src, imm:$imm)>;
3692 }
3694 let Predicates = [HasSSE2] in {
3695 let AddedComplexity = 5 in
3696 defm PSHUFD : sse2_pshuffle<"pshufd", v4i32, pshufd, bc_v4i32>, TB, OpSize;
3698 // SSE2 with ImmT == Imm8 and XS prefix.
3699 defm PSHUFHW : sse2_pshuffle<"pshufhw", v8i16, pshufhw, bc_v8i16>, XS;
3701 // SSE2 with ImmT == Imm8 and XD prefix.
3702 defm PSHUFLW : sse2_pshuffle<"pshuflw", v8i16, pshuflw, bc_v8i16>, XD;
3704 let AddedComplexity = 5 in
3705 def : Pat<(v4f32 (pshufd:$src2 VR128:$src1, (undef))),
3706 (PSHUFDri VR128:$src1, (SHUFFLE_get_shuf_imm VR128:$src2))>;
3707 // Unary v4f32 shuffle with PSHUF* in order to fold a load.
3708 def : Pat<(pshufd:$src2 (bc_v4i32 (memopv4f32 addr:$src1)), (undef)),
3709 (PSHUFDmi addr:$src1, (SHUFFLE_get_shuf_imm VR128:$src2))>;
3711 def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv2i64 addr:$src1)),
3712 (i8 imm:$imm))),
3713 (PSHUFDmi addr:$src1, imm:$imm)>;
3714 def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv4f32 addr:$src1)),
3715 (i8 imm:$imm))),
3716 (PSHUFDmi addr:$src1, imm:$imm)>;
3717 def : Pat<(v4f32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
3718 (PSHUFDri VR128:$src1, imm:$imm)>;
3719 def : Pat<(v4i32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
3720 (PSHUFDri VR128:$src1, imm:$imm)>;
3721 def : Pat<(v8i16 (X86PShufhw VR128:$src, (i8 imm:$imm))),
3722 (PSHUFHWri VR128:$src, imm:$imm)>;
3723 def : Pat<(v8i16 (X86PShufhw (bc_v8i16 (memopv2i64 addr:$src)),
3724 (i8 imm:$imm))),
3725 (PSHUFHWmi addr:$src, imm:$imm)>;
3726 def : Pat<(v8i16 (X86PShuflw VR128:$src, (i8 imm:$imm))),
3727 (PSHUFLWri VR128:$src, imm:$imm)>;
3728 def : Pat<(v8i16 (X86PShuflw (bc_v8i16 (memopv2i64 addr:$src)),
3729 (i8 imm:$imm))),
3730 (PSHUFLWmi addr:$src, imm:$imm)>;
3731 }
3733 //===---------------------------------------------------------------------===//
3734 // SSE2 - Packed Integer Unpack Instructions
3735 //===---------------------------------------------------------------------===//
3737 let ExeDomain = SSEPackedInt in {
3738 multiclass sse2_unpack<bits<8> opc, string OpcodeStr, ValueType vt,
3739 SDNode OpNode, PatFrag bc_frag, bit Is2Addr = 1> {
3740 def rr : PDI<opc, MRMSrcReg,
3741 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
3742 !if(Is2Addr,
3743 !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
3744 !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3745 [(set VR128:$dst, (vt (OpNode VR128:$src1, VR128:$src2)))]>;
3746 def rm : PDI<opc, MRMSrcMem,
3747 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
3748 !if(Is2Addr,
3749 !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
3750 !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3751 [(set VR128:$dst, (OpNode VR128:$src1,
3752 (bc_frag (memopv2i64
3753 addr:$src2))))]>;
3754 }
let Predicates = [HasAVX] in {
  defm VPUNPCKLBW  : sse2_unpack<0x60, "vpunpcklbw", v16i8, X86Punpcklbw,
                                 bc_v16i8, 0>, VEX_4V;
  defm VPUNPCKLWD  : sse2_unpack<0x61, "vpunpcklwd", v8i16, X86Punpcklwd,
                                 bc_v8i16, 0>, VEX_4V;
  defm VPUNPCKLDQ  : sse2_unpack<0x62, "vpunpckldq", v4i32, X86Punpckldq,
                                 bc_v4i32, 0>, VEX_4V;

  /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
  /// knew to collapse (bitconvert VT to VT) into its operand.
  def VPUNPCKLQDQrr : PDI<0x6C, MRMSrcReg,
                         (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                         "vpunpcklqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                         [(set VR128:$dst, (v2i64 (X86Punpcklqdq VR128:$src1,
                                                   VR128:$src2)))]>, VEX_4V;
  def VPUNPCKLQDQrm : PDI<0x6C, MRMSrcMem,
                         (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                         "vpunpcklqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                         [(set VR128:$dst, (v2i64 (X86Punpcklqdq VR128:$src1,
                                          (memopv2i64 addr:$src2))))]>, VEX_4V;
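  // What the FIXME above would allow, roughly (hypothetical sketch, assuming a
  // bc_v2i64 fragment that tblgen could fold away as a no-op bitconvert):
  //   defm VPUNPCKLQDQ : sse2_unpack<0x6C, "vpunpcklqdq", v2i64,
  //                                  X86Punpcklqdq, bc_v2i64, 0>, VEX_4V;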
  defm VPUNPCKHBW  : sse2_unpack<0x68, "vpunpckhbw", v16i8, X86Punpckhbw,
                                 bc_v16i8, 0>, VEX_4V;
  defm VPUNPCKHWD  : sse2_unpack<0x69, "vpunpckhwd", v8i16, X86Punpckhwd,
                                 bc_v8i16, 0>, VEX_4V;
  defm VPUNPCKHDQ  : sse2_unpack<0x6A, "vpunpckhdq", v4i32, X86Punpckhdq,
                                 bc_v4i32, 0>, VEX_4V;

  /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
  /// knew to collapse (bitconvert VT to VT) into its operand.
  def VPUNPCKHQDQrr : PDI<0x6D, MRMSrcReg,
                         (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                         "vpunpckhqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                         [(set VR128:$dst, (v2i64 (X86Punpckhqdq VR128:$src1,
                                                   VR128:$src2)))]>, VEX_4V;
  def VPUNPCKHQDQrm : PDI<0x6D, MRMSrcMem,
                         (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                         "vpunpckhqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                         [(set VR128:$dst, (v2i64 (X86Punpckhqdq VR128:$src1,
                                          (memopv2i64 addr:$src2))))]>, VEX_4V;
}
let Constraints = "$src1 = $dst" in {
  defm PUNPCKLBW  : sse2_unpack<0x60, "punpcklbw", v16i8, X86Punpcklbw, bc_v16i8>;
  defm PUNPCKLWD  : sse2_unpack<0x61, "punpcklwd", v8i16, X86Punpcklwd, bc_v8i16>;
  defm PUNPCKLDQ  : sse2_unpack<0x62, "punpckldq", v4i32, X86Punpckldq, bc_v4i32>;

  /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
  /// knew to collapse (bitconvert VT to VT) into its operand.
  def PUNPCKLQDQrr : PDI<0x6C, MRMSrcReg,
                         (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                         "punpcklqdq\t{$src2, $dst|$dst, $src2}",
                         [(set VR128:$dst,
                           (v2i64 (X86Punpcklqdq VR128:$src1, VR128:$src2)))]>;
  def PUNPCKLQDQrm : PDI<0x6C, MRMSrcMem,
                         (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                         "punpcklqdq\t{$src2, $dst|$dst, $src2}",
                         [(set VR128:$dst,
                           (v2i64 (X86Punpcklqdq VR128:$src1,
                                                 (memopv2i64 addr:$src2))))]>;

  defm PUNPCKHBW  : sse2_unpack<0x68, "punpckhbw", v16i8, X86Punpckhbw, bc_v16i8>;
  defm PUNPCKHWD  : sse2_unpack<0x69, "punpckhwd", v8i16, X86Punpckhwd, bc_v8i16>;
  defm PUNPCKHDQ  : sse2_unpack<0x6A, "punpckhdq", v4i32, X86Punpckhdq, bc_v4i32>;

  /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
  /// knew to collapse (bitconvert VT to VT) into its operand.
  def PUNPCKHQDQrr : PDI<0x6D, MRMSrcReg,
                         (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                         "punpckhqdq\t{$src2, $dst|$dst, $src2}",
                         [(set VR128:$dst,
                           (v2i64 (X86Punpckhqdq VR128:$src1, VR128:$src2)))]>;
  def PUNPCKHQDQrm : PDI<0x6D, MRMSrcMem,
                         (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                         "punpckhqdq\t{$src2, $dst|$dst, $src2}",
                         [(set VR128:$dst,
                           (v2i64 (X86Punpckhqdq VR128:$src1,
                                                 (memopv2i64 addr:$src2))))]>;
}

} // ExeDomain = SSEPackedInt
//===---------------------------------------------------------------------===//
// SSE2 - Packed Integer Extract and Insert
//===---------------------------------------------------------------------===//

let ExeDomain = SSEPackedInt in {
multiclass sse2_pinsrw<bit Is2Addr = 1> {
  def rri : Ii8<0xC4, MRMSrcReg,
       (outs VR128:$dst), (ins VR128:$src1,
        GR32:$src2, i32i8imm:$src3),
       !if(Is2Addr,
           "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
           "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
       [(set VR128:$dst,
         (X86pinsrw VR128:$src1, GR32:$src2, imm:$src3))]>;
  def rmi : Ii8<0xC4, MRMSrcMem,
       (outs VR128:$dst), (ins VR128:$src1,
        i16mem:$src2, i32i8imm:$src3),
       !if(Is2Addr,
           "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
           "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
       [(set VR128:$dst,
         (X86pinsrw VR128:$src1, (extloadi16 addr:$src2),
                    imm:$src3))]>;
}

// Extract
let Predicates = [HasAVX] in
def VPEXTRWri : Ii8<0xC5, MRMSrcReg,
                    (outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
                    "vpextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
                                                imm:$src2))]>, TB, OpSize, VEX;
def PEXTRWri : PDIi8<0xC5, MRMSrcReg,
                    (outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
                    "pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
                                                imm:$src2))]>;

// Insert
let Predicates = [HasAVX] in {
  defm VPINSRW : sse2_pinsrw<0>, TB, OpSize, VEX_4V;
  def VPINSRWrr64i : Ii8<0xC4, MRMSrcReg, (outs VR128:$dst),
       (ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
       "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
       []>, TB, OpSize, VEX_4V;
}

let Constraints = "$src1 = $dst" in
  defm PINSRW : sse2_pinsrw, TB, OpSize, Requires<[HasSSE2]>;

} // ExeDomain = SSEPackedInt
//===---------------------------------------------------------------------===//
// SSE2 - Packed Mask Creation
//===---------------------------------------------------------------------===//

let ExeDomain = SSEPackedInt in {

def VPMOVMSKBrr  : VPDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
           "pmovmskb\t{$src, $dst|$dst, $src}",
           [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>, VEX;
def VPMOVMSKBr64r : VPDI<0xD7, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
           "pmovmskb\t{$src, $dst|$dst, $src}", []>, VEX;
def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
           "pmovmskb\t{$src, $dst|$dst, $src}",
           [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>;

} // ExeDomain = SSEPackedInt
//===---------------------------------------------------------------------===//
// SSE2 - Conditional Store
//===---------------------------------------------------------------------===//

let ExeDomain = SSEPackedInt in {

let Uses = [EDI] in
def VMASKMOVDQU : VPDI<0xF7, MRMSrcReg, (outs),
           (ins VR128:$src, VR128:$mask),
           "maskmovdqu\t{$mask, $src|$src, $mask}",
           [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>, VEX;
let Uses = [RDI] in
def VMASKMOVDQU64 : VPDI<0xF7, MRMSrcReg, (outs),
           (ins VR128:$src, VR128:$mask),
           "maskmovdqu\t{$mask, $src|$src, $mask}",
           [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)]>, VEX;

let Uses = [EDI] in
def MASKMOVDQU : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
           "maskmovdqu\t{$mask, $src|$src, $mask}",
           [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>;
let Uses = [RDI] in
def MASKMOVDQU64 : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
           "maskmovdqu\t{$mask, $src|$src, $mask}",
           [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)]>;
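// Semantics, for reference: maskmovdqu stores each byte of $src whose
// corresponding $mask byte has its most significant bit set, at the address
// held in the implicit EDI (32-bit) or RDI (64-bit) register.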
} // ExeDomain = SSEPackedInt
//===---------------------------------------------------------------------===//
// SSE2 - Move Doubleword
//===---------------------------------------------------------------------===//

//===---------------------------------------------------------------------===//
// Move Int Doubleword to Packed Double Int
//
def VMOVDI2PDIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                        (v4i32 (scalar_to_vector GR32:$src)))]>, VEX;
def VMOVDI2PDIrm : VPDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                        (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>,
                      VEX;
def VMOV64toPQIrr : VRPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
                        "mov{d|q}\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                          (v2i64 (scalar_to_vector GR64:$src)))]>, VEX;
def VMOV64toSDrr : VRPDI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
                       "mov{d|q}\t{$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (bitconvert GR64:$src))]>, VEX;

def MOVDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                        (v4i32 (scalar_to_vector GR32:$src)))]>;
def MOVDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                        (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>;
def MOV64toPQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
                        "mov{d|q}\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                          (v2i64 (scalar_to_vector GR64:$src)))]>;
def MOV64toSDrr : RPDI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
                       "mov{d|q}\t{$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (bitconvert GR64:$src))]>;
//===---------------------------------------------------------------------===//
// Move Int Doubleword to Single Scalar
//
def VMOVDI2SSrr  : VPDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set FR32:$dst, (bitconvert GR32:$src))]>, VEX;

def VMOVDI2SSrm  : VPDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>,
                      VEX;
def MOVDI2SSrr  : PDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set FR32:$dst, (bitconvert GR32:$src))]>;

def MOVDI2SSrm  : PDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>;
//===---------------------------------------------------------------------===//
// Move Packed Doubleword Int to Packed Double Int
//
def VMOVPDI2DIrr  : VPDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
                       "movd\t{$src, $dst|$dst, $src}",
                       [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
                                        (iPTR 0)))]>, VEX;
def VMOVPDI2DImr  : VPDI<0x7E, MRMDestMem, (outs),
                       (ins i32mem:$dst, VR128:$src),
                       "movd\t{$src, $dst|$dst, $src}",
                       [(store (i32 (vector_extract (v4i32 VR128:$src),
                                     (iPTR 0))), addr:$dst)]>, VEX;
def MOVPDI2DIrr  : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
                       "movd\t{$src, $dst|$dst, $src}",
                       [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
                                        (iPTR 0)))]>;
def MOVPDI2DImr  : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR128:$src),
                       "movd\t{$src, $dst|$dst, $src}",
                       [(store (i32 (vector_extract (v4i32 VR128:$src),
                                     (iPTR 0))), addr:$dst)]>;

def MOVPQIto64rr  : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
                        "mov{d|q}\t{$src, $dst|$dst, $src}",
                        [(set GR64:$dst, (vector_extract (v2i64 VR128:$src),
                                         (iPTR 0)))]>;
def MOV64toSDrm : S3SI<0x7E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
                       "movq\t{$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))]>;

def MOVSDto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
                       "mov{d|q}\t{$src, $dst|$dst, $src}",
                       [(set GR64:$dst, (bitconvert FR64:$src))]>;
def MOVSDto64mr : RPDI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
                       "movq\t{$src, $dst|$dst, $src}",
                       [(store (i64 (bitconvert FR64:$src)), addr:$dst)]>;
//===---------------------------------------------------------------------===//
// Move Scalar Single to Double Int
//
def VMOVSS2DIrr  : VPDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set GR32:$dst, (bitconvert FR32:$src))]>, VEX;
def VMOVSS2DImr  : VPDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(store (i32 (bitconvert FR32:$src)), addr:$dst)]>, VEX;
def MOVSS2DIrr  : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set GR32:$dst, (bitconvert FR32:$src))]>;
def MOVSS2DImr  : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(store (i32 (bitconvert FR32:$src)), addr:$dst)]>;
//===---------------------------------------------------------------------===//
// Patterns and instructions to describe movd/movq to XMM register zero-extends
//
let AddedComplexity = 15 in {
def VMOVZDI2PDIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
                       "movd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (v4i32 (X86vzmovl
                                      (v4i32 (scalar_to_vector GR32:$src)))))]>,
                       VEX;
def VMOVZQI2PQIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
                       "mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only
                       [(set VR128:$dst, (v2i64 (X86vzmovl
                                      (v2i64 (scalar_to_vector GR64:$src)))))]>,
                       VEX, VEX_W;
}
let AddedComplexity = 15 in {
def MOVZDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
                       "movd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (v4i32 (X86vzmovl
                                      (v4i32 (scalar_to_vector GR32:$src)))))]>;
def MOVZQI2PQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
                       "mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only
                       [(set VR128:$dst, (v2i64 (X86vzmovl
                                      (v2i64 (scalar_to_vector GR64:$src)))))]>;
}

let AddedComplexity = 20 in {
def VMOVZDI2PDIrm : VPDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
                       "movd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst,
                         (v4i32 (X86vzmovl (v4i32 (scalar_to_vector
                                                   (loadi32 addr:$src))))))]>,
                       VEX;
def MOVZDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
                       "movd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst,
                         (v4i32 (X86vzmovl (v4i32 (scalar_to_vector
                                                   (loadi32 addr:$src))))))]>;

def : Pat<(v4i32 (X86vzmovl (loadv4i32 addr:$src))),
            (MOVZDI2PDIrm addr:$src)>;
def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
            (MOVZDI2PDIrm addr:$src)>;
def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
            (MOVZDI2PDIrm addr:$src)>;
}

// AVX 128-bit movd/movq instructions write zeros in the high 128-bit part.
// Use regular 128-bit instructions to match 256-bit scalar_to_vec+zext.
def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
                              (v4i32 (scalar_to_vector GR32:$src)), (i32 0)))),
          (SUBREG_TO_REG (i32 0), (VMOVZDI2PDIrr GR32:$src), sub_xmm)>;
def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
                              (v2i64 (scalar_to_vector GR64:$src)), (i32 0)))),
          (SUBREG_TO_REG (i64 0), (VMOVZQI2PQIrr GR64:$src), sub_xmm)>;
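// Concretely (illustration only): "vmovd %eax, %xmm0" already zeroes
// ymm0[255:32] under the VEX zeroing rules, so the 128-bit instruction plus
// SUBREG_TO_REG is sufficient; no explicit 256-bit zeroing is needed.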
// These are the correct encodings of the instructions so that we know how to
// read correct assembly, even though we continue to emit the wrong ones for
// compatibility with Darwin's buggy assembler.
def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
                (MOV64toPQIrr VR128:$dst, GR64:$src), 0>;
def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
                (MOV64toSDrr FR64:$dst, GR64:$src), 0>;
def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
                (MOVPQIto64rr GR64:$dst, VR128:$src), 0>;
def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
                (MOVSDto64rr GR64:$dst, FR64:$src), 0>;
def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
                (VMOVZQI2PQIrr VR128:$dst, GR64:$src), 0>;
def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
                (MOVZQI2PQIrr VR128:$dst, GR64:$src), 0>;
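// Note (behavior of the trailing 0 operand): it clears the alias' Emit bit,
// so the parser accepts the "movq" spelling while the printer keeps producing
// the Darwin-compatible "movd" form from the instruction definitions.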
//===---------------------------------------------------------------------===//
// SSE2 - Move Quadword
//===---------------------------------------------------------------------===//

//===---------------------------------------------------------------------===//
// Move Quadword Int to Packed Quadword Int
//
def VMOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                    "vmovq\t{$src, $dst|$dst, $src}",
                    [(set VR128:$dst,
                      (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
                    VEX, Requires<[HasAVX]>;
def MOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                    "movq\t{$src, $dst|$dst, $src}",
                    [(set VR128:$dst,
                      (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
                    Requires<[HasSSE2]>; // SSE2 instruction with XS Prefix

//===---------------------------------------------------------------------===//
// Move Packed Quadword Int to Quadword Int
//
def VMOVPQI2QImr : VPDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
                      "movq\t{$src, $dst|$dst, $src}",
                      [(store (i64 (vector_extract (v2i64 VR128:$src),
                                    (iPTR 0))), addr:$dst)]>, VEX;
def MOVPQI2QImr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
                      "movq\t{$src, $dst|$dst, $src}",
                      [(store (i64 (vector_extract (v2i64 VR128:$src),
                                    (iPTR 0))), addr:$dst)]>;
def : Pat<(f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
          (f64 (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd))>;

//===---------------------------------------------------------------------===//
// Store / copy the lower 64 bits of an XMM register.
//
def VMOVLQ128mr : VPDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
                     "movq\t{$src, $dst|$dst, $src}",
                     [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>, VEX;
def MOVLQ128mr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
                     "movq\t{$src, $dst|$dst, $src}",
                     [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>;

let AddedComplexity = 20 in
def VMOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                     "vmovq\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst,
                       (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
                                                 (loadi64 addr:$src))))))]>,
                     XS, VEX, Requires<[HasAVX]>;

let AddedComplexity = 20 in {
def MOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                     "movq\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst,
                       (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
                                                 (loadi64 addr:$src))))))]>,
                     XS, Requires<[HasSSE2]>;

def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
            (MOVZQI2PQIrm addr:$src)>;
def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4f32 addr:$src)))),
            (MOVZQI2PQIrm addr:$src)>;
def : Pat<(v2i64 (X86vzload addr:$src)), (MOVZQI2PQIrm addr:$src)>;
}
//===---------------------------------------------------------------------===//
// Moving from XMM to XMM and clearing the upper 64 bits. Note: the IA-32
// documentation has a bug here; movq xmm1, xmm2 does clear the high bits.
//
let AddedComplexity = 15 in
def VMOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "vmovq\t{$src, $dst|$dst, $src}",
                    [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
                      XS, VEX, Requires<[HasAVX]>;
let AddedComplexity = 15 in
def MOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "movq\t{$src, $dst|$dst, $src}",
                    [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
                      XS, Requires<[HasSSE2]>;

let AddedComplexity = 20 in
def VMOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                        "vmovq\t{$src, $dst|$dst, $src}",
                    [(set VR128:$dst, (v2i64 (X86vzmovl
                                             (loadv2i64 addr:$src))))]>,
                      XS, VEX, Requires<[HasAVX]>;
let AddedComplexity = 20 in {
def MOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                        "movq\t{$src, $dst|$dst, $src}",
                    [(set VR128:$dst, (v2i64 (X86vzmovl
                                             (loadv2i64 addr:$src))))]>,
                      XS, Requires<[HasSSE2]>;

def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4i32 addr:$src)))),
            (MOVZPQILo2PQIrm addr:$src)>;
}
// Instructions to match in the assembler
def VMOVQs64rr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
                      "movq\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
def VMOVQd64rr : VPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
                      "movq\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
// Recognize "movd" with GR64 destination, but encode as a "movq"
def VMOVQd64rr_alt : VPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
                          "movd\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;

// Instructions for the disassembler
// xr = XMM register
// xm = mem64

let Predicates = [HasAVX] in
def VMOVQxrxr: I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                 "vmovq\t{$src, $dst|$dst, $src}", []>, VEX, XS;
def MOVQxrxr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                 "movq\t{$src, $dst|$dst, $src}", []>, XS;
//===---------------------------------------------------------------------===//
// SSE2 - Misc Instructions
//===---------------------------------------------------------------------===//

// Flush cache
def CLFLUSH : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
               "clflush\t$src", [(int_x86_sse2_clflush addr:$src)]>,
              TB, Requires<[HasSSE2]>;

// Load, store, and memory fence
def LFENCE : I<0xAE, MRM_E8, (outs), (ins),
               "lfence", [(int_x86_sse2_lfence)]>, TB, Requires<[HasSSE2]>;
def MFENCE : I<0xAE, MRM_F0, (outs), (ins),
               "mfence", [(int_x86_sse2_mfence)]>, TB, Requires<[HasSSE2]>;
def : Pat<(X86LFence), (LFENCE)>;
def : Pat<(X86MFence), (MFENCE)>;

// Pause. This "instruction" is encoded as "rep; nop", so even though it
// was introduced with SSE2, it's backward compatible.
def PAUSE : I<0x90, RawFrm, (outs), (ins), "pause", []>, REP;
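// Encoding illustration: the REP modifier prepends F3 to opcode 0x90, so
// PAUSE assembles to F3 90; pre-SSE2 processors simply execute it as a NOP.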
// Alias instructions that map an all-ones vector to pcmpeqd for sse.
// We set canFoldAsLoad because this can be converted to a constant-pool
// load of an all-ones value if folding it would be beneficial.
// FIXME: Change encoding to pseudo! This is blocked right now by the x86
// JIT implementation, it does not expand the instructions below like
// X86MCInstLower does.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
    isCodeGenOnly = 1, ExeDomain = SSEPackedInt in
  def V_SETALLONES : PDI<0x76, MRMInitReg, (outs VR128:$dst), (ins), "",
                         [(set VR128:$dst, (v4i32 immAllOnesV))]>;
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
    isCodeGenOnly = 1, ExeDomain = SSEPackedInt, Predicates = [HasAVX] in
  def AVX_SETALLONES : PDI<0x76, MRMInitReg, (outs VR128:$dst), (ins), "",
                           [(set VR128:$dst, (v4i32 immAllOnesV))]>, VEX_4V;
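// Why opcode 0x76 works here: with identical source and destination,
// "pcmpeqd %xmmN, %xmmN" compares every element with itself, so each lane
// compares equal and the register fills with all-ones without any load.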
//===---------------------------------------------------------------------===//
// SSE3 - Conversion Instructions
//===---------------------------------------------------------------------===//

// Convert Packed Double FP to Packed DW Integers
let Predicates = [HasAVX] in {
// The assembler can recognize rr 256-bit instructions by seeing a ymm
// register, but the same isn't true when using memory operands instead.
// Provide other assembly rr and rm forms to address this explicitly.
def VCVTPD2DQrr  : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "vcvtpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPD2DQXrYr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
                       "vcvtpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;

// XMM only
def VCVTPD2DQXrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                      "vcvtpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPD2DQXrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                      "vcvtpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;

// YMM only
def VCVTPD2DQYrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
                      "vcvtpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPD2DQYrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
                      "vcvtpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
}
def CVTPD2DQrm  : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                       "cvtpd2dq\t{$src, $dst|$dst, $src}", []>;
def CVTPD2DQrr  : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtpd2dq\t{$src, $dst|$dst, $src}", []>;

def : Pat<(v4i32 (fp_to_sint (v4f64 VR256:$src))),
          (VCVTPD2DQYrr VR256:$src)>;
def : Pat<(v4i32 (fp_to_sint (memopv4f64 addr:$src))),
          (VCVTPD2DQYrm addr:$src)>;
// Convert Packed DW Integers to Packed Double FP
let Predicates = [HasAVX] in {
def VCVTDQ2PDrm  : S3SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                     "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTDQ2PDrr  : S3SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                     "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTDQ2PDYrm : S3SI<0xE6, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
                     "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTDQ2PDYrr : S3SI<0xE6, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
                     "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
}

def CVTDQ2PDrm  : S3SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                       "cvtdq2pd\t{$src, $dst|$dst, $src}", []>;
def CVTDQ2PDrr  : S3SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtdq2pd\t{$src, $dst|$dst, $src}", []>;

// AVX 256-bit register conversion intrinsics
def : Pat<(int_x86_avx_cvtdq2_pd_256 VR128:$src),
          (VCVTDQ2PDYrr VR128:$src)>;
def : Pat<(int_x86_avx_cvtdq2_pd_256 (memopv4i32 addr:$src)),
          (VCVTDQ2PDYrm addr:$src)>;

def : Pat<(int_x86_avx_cvt_pd2dq_256 VR256:$src),
          (VCVTPD2DQYrr VR256:$src)>;
def : Pat<(int_x86_avx_cvt_pd2dq_256 (memopv4f64 addr:$src)),
          (VCVTPD2DQYrm addr:$src)>;

def : Pat<(v4f64 (sint_to_fp (v4i32 VR128:$src))),
          (VCVTDQ2PDYrr VR128:$src)>;
def : Pat<(v4f64 (sint_to_fp (memopv4i32 addr:$src))),
          (VCVTDQ2PDYrm addr:$src)>;
//===---------------------------------------------------------------------===//
// SSE3 - Replicate Single FP - MOVSHDUP and MOVSLDUP
//===---------------------------------------------------------------------===//
multiclass sse3_replicate_sfp<bits<8> op, SDNode OpNode, string OpcodeStr,
                              ValueType vt, RegisterClass RC, PatFrag mem_frag,
                              X86MemOperand x86memop> {
def rr : S3SI<op, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
              !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
              [(set RC:$dst, (vt (OpNode RC:$src)))]>;
def rm : S3SI<op, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
              !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
              [(set RC:$dst, (OpNode (mem_frag addr:$src)))]>;
}

let Predicates = [HasAVX] in {
  defm VMOVSHDUP  : sse3_replicate_sfp<0x16, X86Movshdup, "vmovshdup",
                                       v4f32, VR128, memopv4f32, f128mem>, VEX;
  defm VMOVSLDUP  : sse3_replicate_sfp<0x12, X86Movsldup, "vmovsldup",
                                       v4f32, VR128, memopv4f32, f128mem>, VEX;
  defm VMOVSHDUPY : sse3_replicate_sfp<0x16, X86Movshdup, "vmovshdup",
                                       v8f32, VR256, memopv8f32, f256mem>, VEX;
  defm VMOVSLDUPY : sse3_replicate_sfp<0x12, X86Movsldup, "vmovsldup",
                                       v8f32, VR256, memopv8f32, f256mem>, VEX;
}
defm MOVSHDUP : sse3_replicate_sfp<0x16, X86Movshdup, "movshdup", v4f32, VR128,
                                   memopv4f32, f128mem>;
defm MOVSLDUP : sse3_replicate_sfp<0x12, X86Movsldup, "movsldup", v4f32, VR128,
                                   memopv4f32, f128mem>;
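// Lane semantics, for reference: with x = <a, b, c, d>,
//   movshdup x -> <b, b, d, d>   (duplicates the odd elements)
//   movsldup x -> <a, a, c, c>   (duplicates the even elements)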
let Predicates = [HasSSE3] in {
  def : Pat<(v4i32 (X86Movshdup VR128:$src)),
            (MOVSHDUPrr VR128:$src)>;
  def : Pat<(v4i32 (X86Movshdup (bc_v4i32 (memopv2i64 addr:$src)))),
            (MOVSHDUPrm addr:$src)>;
  def : Pat<(v4i32 (X86Movsldup VR128:$src)),
            (MOVSLDUPrr VR128:$src)>;
  def : Pat<(v4i32 (X86Movsldup (bc_v4i32 (memopv2i64 addr:$src)))),
            (MOVSLDUPrm addr:$src)>;
}

let Predicates = [HasAVX] in {
  def : Pat<(v4i32 (X86Movshdup VR128:$src)),
            (VMOVSHDUPrr VR128:$src)>;
  def : Pat<(v4i32 (X86Movshdup (bc_v4i32 (memopv2i64 addr:$src)))),
            (VMOVSHDUPrm addr:$src)>;
  def : Pat<(v4i32 (X86Movsldup VR128:$src)),
            (VMOVSLDUPrr VR128:$src)>;
  def : Pat<(v4i32 (X86Movsldup (bc_v4i32 (memopv2i64 addr:$src)))),
            (VMOVSLDUPrm addr:$src)>;
  def : Pat<(v8i32 (X86Movshdup VR256:$src)),
            (VMOVSHDUPYrr VR256:$src)>;
  def : Pat<(v8i32 (X86Movshdup (bc_v8i32 (memopv4i64 addr:$src)))),
            (VMOVSHDUPYrm addr:$src)>;
  def : Pat<(v8i32 (X86Movsldup VR256:$src)),
            (VMOVSLDUPYrr VR256:$src)>;
  def : Pat<(v8i32 (X86Movsldup (bc_v8i32 (memopv4i64 addr:$src)))),
            (VMOVSLDUPYrm addr:$src)>;
}
//===---------------------------------------------------------------------===//
// SSE3 - Replicate Double FP - MOVDDUP
//===---------------------------------------------------------------------===//

multiclass sse3_replicate_dfp<string OpcodeStr> {
def rr  : S3DI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
               !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
               [(set VR128:$dst,(v2f64 (movddup VR128:$src, (undef))))]>;
def rm  : S3DI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
               !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
               [(set VR128:$dst,
                 (v2f64 (movddup (scalar_to_vector (loadf64 addr:$src)),
                                 (undef))))]>;
}

// FIXME: Merge with the above class when there are patterns for the ymm version
multiclass sse3_replicate_dfp_y<string OpcodeStr> {
let Predicates = [HasAVX] in {
  def rr  : S3DI<0x12, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                 []>, VEX;
  def rm  : S3DI<0x12, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                 []>, VEX;
}
}

defm MOVDDUP   : sse3_replicate_dfp<"movddup">;
defm VMOVDDUP  : sse3_replicate_dfp<"vmovddup">, VEX;
defm VMOVDDUPY : sse3_replicate_dfp_y<"vmovddup">, VEX;
let Predicates = [HasSSE3] in {
  def : Pat<(movddup (bc_v2f64 (v2i64 (scalar_to_vector (loadi64 addr:$src)))),
                     (undef)),
            (MOVDDUPrm addr:$src)>;
  let AddedComplexity = 5 in {
  def : Pat<(movddup (memopv2f64 addr:$src), (undef)), (MOVDDUPrm addr:$src)>;
  def : Pat<(movddup (bc_v4f32 (memopv2f64 addr:$src)), (undef)),
            (MOVDDUPrm addr:$src)>;
  def : Pat<(movddup (memopv2i64 addr:$src), (undef)), (MOVDDUPrm addr:$src)>;
  def : Pat<(movddup (bc_v4i32 (memopv2i64 addr:$src)), (undef)),
            (MOVDDUPrm addr:$src)>;
  }
  def : Pat<(X86Movddup (memopv2f64 addr:$src)),
            (MOVDDUPrm addr:$src)>;
  def : Pat<(X86Movddup (bc_v2f64 (memopv4f32 addr:$src))),
            (MOVDDUPrm addr:$src)>;
  def : Pat<(X86Movddup (bc_v2f64 (memopv2i64 addr:$src))),
            (MOVDDUPrm addr:$src)>;
  def : Pat<(X86Movddup (v2f64 (scalar_to_vector (loadf64 addr:$src)))),
            (MOVDDUPrm addr:$src)>;
  def : Pat<(X86Movddup (bc_v2f64
                             (v2i64 (scalar_to_vector (loadi64 addr:$src))))),
            (MOVDDUPrm addr:$src)>;
}
let Predicates = [HasAVX] in {
  def : Pat<(movddup (bc_v2f64 (v2i64 (scalar_to_vector (loadi64 addr:$src)))),
                     (undef)),
            (VMOVDDUPrm addr:$src)>;
  let AddedComplexity = 5 in {
  def : Pat<(movddup (memopv2f64 addr:$src), (undef)), (VMOVDDUPrm addr:$src)>;
  def : Pat<(movddup (bc_v4f32 (memopv2f64 addr:$src)), (undef)),
            (VMOVDDUPrm addr:$src)>;
  def : Pat<(movddup (memopv2i64 addr:$src), (undef)), (VMOVDDUPrm addr:$src)>;
  def : Pat<(movddup (bc_v4i32 (memopv2i64 addr:$src)), (undef)),
            (VMOVDDUPrm addr:$src)>;
  }
  def : Pat<(X86Movddup (memopv2f64 addr:$src)),
            (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
  def : Pat<(X86Movddup (bc_v2f64 (memopv4f32 addr:$src))),
            (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
  def : Pat<(X86Movddup (bc_v2f64 (memopv2i64 addr:$src))),
            (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
  def : Pat<(X86Movddup (v2f64 (scalar_to_vector (loadf64 addr:$src)))),
            (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
  def : Pat<(X86Movddup (bc_v2f64
                             (v2i64 (scalar_to_vector (loadi64 addr:$src))))),
            (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;

  // 256-bit version
  def : Pat<(X86Movddup (memopv4f64 addr:$src)),
            (VMOVDDUPYrm addr:$src)>;
  def : Pat<(X86Movddup (memopv4i64 addr:$src)),
            (VMOVDDUPYrm addr:$src)>;
  def : Pat<(X86Movddup (v4f64 (scalar_to_vector (loadf64 addr:$src)))),
            (VMOVDDUPYrm addr:$src)>;
  def : Pat<(X86Movddup (v4i64 (scalar_to_vector (loadi64 addr:$src)))),
            (VMOVDDUPYrm addr:$src)>;
  def : Pat<(X86Movddup (v4f64 VR256:$src)),
            (VMOVDDUPYrr VR256:$src)>;
  def : Pat<(X86Movddup (v4i64 VR256:$src)),
            (VMOVDDUPYrr VR256:$src)>;
}
//===---------------------------------------------------------------------===//
// SSE3 - Move Unaligned Integer
//===---------------------------------------------------------------------===//

let Predicates = [HasAVX] in {
  def VLDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                   "vlddqu\t{$src, $dst|$dst, $src}",
                   [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>, VEX;
  def VLDDQUYrm : S3DI<0xF0, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
                   "vlddqu\t{$src, $dst|$dst, $src}",
                   [(set VR256:$dst, (int_x86_avx_ldu_dq_256 addr:$src))]>, VEX;
}
def LDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                   "lddqu\t{$src, $dst|$dst, $src}",
                   [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>;
//===---------------------------------------------------------------------===//
// SSE3 - Arithmetic
//===---------------------------------------------------------------------===//

multiclass sse3_addsub<Intrinsic Int, string OpcodeStr, RegisterClass RC,
                       X86MemOperand x86memop, bit Is2Addr = 1> {
  def rr : I<0xD0, MRMSrcReg,
       (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (Int RC:$src1, RC:$src2))]>;
  def rm : I<0xD0, MRMSrcMem,
       (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (Int RC:$src1, (memop addr:$src2)))]>;
}

let Predicates = [HasAVX],
  ExeDomain = SSEPackedDouble in {
  defm VADDSUBPS  : sse3_addsub<int_x86_sse3_addsub_ps, "vaddsubps", VR128,
                                f128mem, 0>, TB, XD, VEX_4V;
  defm VADDSUBPD  : sse3_addsub<int_x86_sse3_addsub_pd, "vaddsubpd", VR128,
                                f128mem, 0>, TB, OpSize, VEX_4V;
  defm VADDSUBPSY : sse3_addsub<int_x86_avx_addsub_ps_256, "vaddsubps", VR256,
                                f256mem, 0>, TB, XD, VEX_4V;
  defm VADDSUBPDY : sse3_addsub<int_x86_avx_addsub_pd_256, "vaddsubpd", VR256,
                                f256mem, 0>, TB, OpSize, VEX_4V;
}
let Constraints = "$src1 = $dst", Predicates = [HasSSE3],
  ExeDomain = SSEPackedDouble in {
  defm ADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "addsubps", VR128,
                              f128mem>, TB, XD;
  defm ADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "addsubpd", VR128,
                              f128mem>, TB, OpSize;
}
//===---------------------------------------------------------------------===//
// SSE3 Instructions
//===---------------------------------------------------------------------===//

// Horizontal ops
multiclass S3D_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
                   X86MemOperand x86memop, Intrinsic IntId, bit Is2Addr = 1> {
  def rr : S3DI<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (vt (IntId RC:$src1, RC:$src2)))]>;

  def rm : S3DI<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (vt (IntId RC:$src1, (memop addr:$src2))))]>;
}
multiclass S3_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
                  X86MemOperand x86memop, Intrinsic IntId, bit Is2Addr = 1> {
  def rr : S3I<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (vt (IntId RC:$src1, RC:$src2)))]>;

  def rm : S3I<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (vt (IntId RC:$src1, (memop addr:$src2))))]>;
}

let Predicates = [HasAVX] in {
  defm VHADDPS  : S3D_Int<0x7C, "vhaddps", v4f32, VR128, f128mem,
                          int_x86_sse3_hadd_ps, 0>, VEX_4V;
  defm VHADDPD  : S3_Int <0x7C, "vhaddpd", v2f64, VR128, f128mem,
                          int_x86_sse3_hadd_pd, 0>, VEX_4V;
  defm VHSUBPS  : S3D_Int<0x7D, "vhsubps", v4f32, VR128, f128mem,
                          int_x86_sse3_hsub_ps, 0>, VEX_4V;
  defm VHSUBPD  : S3_Int <0x7D, "vhsubpd", v2f64, VR128, f128mem,
                          int_x86_sse3_hsub_pd, 0>, VEX_4V;
  defm VHADDPSY : S3D_Int<0x7C, "vhaddps", v8f32, VR256, f256mem,
                          int_x86_avx_hadd_ps_256, 0>, VEX_4V;
  defm VHADDPDY : S3_Int <0x7C, "vhaddpd", v4f64, VR256, f256mem,
                          int_x86_avx_hadd_pd_256, 0>, VEX_4V;
  defm VHSUBPSY : S3D_Int<0x7D, "vhsubps", v8f32, VR256, f256mem,
                          int_x86_avx_hsub_ps_256, 0>, VEX_4V;
  defm VHSUBPDY : S3_Int <0x7D, "vhsubpd", v4f64, VR256, f256mem,
                          int_x86_avx_hsub_pd_256, 0>, VEX_4V;
}

let Constraints = "$src1 = $dst" in {
  defm HADDPS : S3D_Int<0x7C, "haddps", v4f32, VR128, f128mem,
                        int_x86_sse3_hadd_ps>;
  defm HADDPD : S3_Int<0x7C, "haddpd", v2f64, VR128, f128mem,
                       int_x86_sse3_hadd_pd>;
  defm HSUBPS : S3D_Int<0x7D, "hsubps", v4f32, VR128, f128mem,
                        int_x86_sse3_hsub_ps>;
  defm HSUBPD : S3_Int<0x7D, "hsubpd", v2f64, VR128, f128mem,
                       int_x86_sse3_hsub_pd>;
}
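// Semantics sketch: "haddps %xmm1, %xmm0" computes
//   xmm0 = <x0+x1, x2+x3, y0+y1, y2+y3>
// where x = xmm0 and y = xmm1; hsubps instead subtracts the odd lane from the
// even lane of each pair.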
//===---------------------------------------------------------------------===//
// SSSE3 - Packed Absolute Instructions
//===---------------------------------------------------------------------===//

/// SS3I_unop_rm_int - Simple SSSE3 unary op whose type can be v*{i8,i16,i32}.
multiclass SS3I_unop_rm_int<bits<8> opc, string OpcodeStr,
                            PatFrag mem_frag128, Intrinsic IntId128> {
  def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
                    (ins VR128:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (IntId128 VR128:$src))]>,
                    OpSize;

  def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
                    (ins i128mem:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst,
                      (IntId128
                       (bitconvert (mem_frag128 addr:$src))))]>, OpSize;
}

let Predicates = [HasAVX] in {
  defm VPABSB : SS3I_unop_rm_int<0x1C, "vpabsb", memopv16i8,
                                 int_x86_ssse3_pabs_b_128>, VEX;
  defm VPABSW : SS3I_unop_rm_int<0x1D, "vpabsw", memopv8i16,
                                 int_x86_ssse3_pabs_w_128>, VEX;
  defm VPABSD : SS3I_unop_rm_int<0x1E, "vpabsd", memopv4i32,
                                 int_x86_ssse3_pabs_d_128>, VEX;
}

defm PABSB : SS3I_unop_rm_int<0x1C, "pabsb", memopv16i8,
                              int_x86_ssse3_pabs_b_128>;
defm PABSW : SS3I_unop_rm_int<0x1D, "pabsw", memopv8i16,
                              int_x86_ssse3_pabs_w_128>;
defm PABSD : SS3I_unop_rm_int<0x1E, "pabsd", memopv4i32,
                              int_x86_ssse3_pabs_d_128>;
//===---------------------------------------------------------------------===//
// SSSE3 - Packed Binary Operator Instructions
//===---------------------------------------------------------------------===//

/// SS3I_binop_rm_int - Simple SSSE3 bin op whose type can be v*{i8,i16,i32}.
multiclass SS3I_binop_rm_int<bits<8> opc, string OpcodeStr,
                             PatFrag mem_frag128, Intrinsic IntId128,
                             bit Is2Addr = 1> {
  let isCommutable = 1 in
  def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
       (ins VR128:$src1, VR128:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
       OpSize;
  def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
       (ins VR128:$src1, i128mem:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst,
         (IntId128 VR128:$src1,
          (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
}

let Predicates = [HasAVX] in {
let isCommutable = 0 in {
  defm VPHADDW    : SS3I_binop_rm_int<0x01, "vphaddw", memopv8i16,
                                      int_x86_ssse3_phadd_w_128, 0>, VEX_4V;
  defm VPHADDD    : SS3I_binop_rm_int<0x02, "vphaddd", memopv4i32,
                                      int_x86_ssse3_phadd_d_128, 0>, VEX_4V;
  defm VPHADDSW   : SS3I_binop_rm_int<0x03, "vphaddsw", memopv8i16,
                                      int_x86_ssse3_phadd_sw_128, 0>, VEX_4V;
  defm VPHSUBW    : SS3I_binop_rm_int<0x05, "vphsubw", memopv8i16,
                                      int_x86_ssse3_phsub_w_128, 0>, VEX_4V;
  defm VPHSUBD    : SS3I_binop_rm_int<0x06, "vphsubd", memopv4i32,
                                      int_x86_ssse3_phsub_d_128, 0>, VEX_4V;
  defm VPHSUBSW   : SS3I_binop_rm_int<0x07, "vphsubsw", memopv8i16,
                                      int_x86_ssse3_phsub_sw_128, 0>, VEX_4V;
  defm VPMADDUBSW : SS3I_binop_rm_int<0x04, "vpmaddubsw", memopv16i8,
                                      int_x86_ssse3_pmadd_ub_sw_128, 0>, VEX_4V;
  defm VPSHUFB    : SS3I_binop_rm_int<0x00, "vpshufb", memopv16i8,
                                      int_x86_ssse3_pshuf_b_128, 0>, VEX_4V;
  defm VPSIGNB    : SS3I_binop_rm_int<0x08, "vpsignb", memopv16i8,
                                      int_x86_ssse3_psign_b_128, 0>, VEX_4V;
  defm VPSIGNW    : SS3I_binop_rm_int<0x09, "vpsignw", memopv8i16,
                                      int_x86_ssse3_psign_w_128, 0>, VEX_4V;
  defm VPSIGND    : SS3I_binop_rm_int<0x0A, "vpsignd", memopv4i32,
                                      int_x86_ssse3_psign_d_128, 0>, VEX_4V;
}
  defm VPMULHRSW  : SS3I_binop_rm_int<0x0B, "vpmulhrsw", memopv8i16,
                                      int_x86_ssse3_pmul_hr_sw_128, 0>, VEX_4V;
}

// None of these have i8 immediate fields.
let ImmT = NoImm, Constraints = "$src1 = $dst" in {
let isCommutable = 0 in {
  defm PHADDW    : SS3I_binop_rm_int<0x01, "phaddw", memopv8i16,
                                     int_x86_ssse3_phadd_w_128>;
  defm PHADDD    : SS3I_binop_rm_int<0x02, "phaddd", memopv4i32,
                                     int_x86_ssse3_phadd_d_128>;
  defm PHADDSW   : SS3I_binop_rm_int<0x03, "phaddsw", memopv8i16,
                                     int_x86_ssse3_phadd_sw_128>;
  defm PHSUBW    : SS3I_binop_rm_int<0x05, "phsubw", memopv8i16,
                                     int_x86_ssse3_phsub_w_128>;
  defm PHSUBD    : SS3I_binop_rm_int<0x06, "phsubd", memopv4i32,
                                     int_x86_ssse3_phsub_d_128>;
  defm PHSUBSW   : SS3I_binop_rm_int<0x07, "phsubsw", memopv8i16,
                                     int_x86_ssse3_phsub_sw_128>;
  defm PMADDUBSW : SS3I_binop_rm_int<0x04, "pmaddubsw", memopv16i8,
                                     int_x86_ssse3_pmadd_ub_sw_128>;
  defm PSHUFB    : SS3I_binop_rm_int<0x00, "pshufb", memopv16i8,
                                     int_x86_ssse3_pshuf_b_128>;
  defm PSIGNB    : SS3I_binop_rm_int<0x08, "psignb", memopv16i8,
                                     int_x86_ssse3_psign_b_128>;
  defm PSIGNW    : SS3I_binop_rm_int<0x09, "psignw", memopv8i16,
                                     int_x86_ssse3_psign_w_128>;
  defm PSIGND    : SS3I_binop_rm_int<0x0A, "psignd", memopv4i32,
                                     int_x86_ssse3_psign_d_128>;
}
  defm PMULHRSW  : SS3I_binop_rm_int<0x0B, "pmulhrsw", memopv8i16,
                                     int_x86_ssse3_pmul_hr_sw_128>;
}
def : Pat<(X86pshufb VR128:$src, VR128:$mask),
          (PSHUFBrr128 VR128:$src, VR128:$mask)>, Requires<[HasSSSE3]>;
def : Pat<(X86pshufb VR128:$src, (bc_v16i8 (memopv2i64 addr:$mask))),
          (PSHUFBrm128 VR128:$src, addr:$mask)>, Requires<[HasSSSE3]>;

def : Pat<(X86psignb VR128:$src1, VR128:$src2),
          (PSIGNBrr128 VR128:$src1, VR128:$src2)>, Requires<[HasSSSE3]>;
def : Pat<(X86psignw VR128:$src1, VR128:$src2),
          (PSIGNWrr128 VR128:$src1, VR128:$src2)>, Requires<[HasSSSE3]>;
def : Pat<(X86psignd VR128:$src1, VR128:$src2),
          (PSIGNDrr128 VR128:$src1, VR128:$src2)>, Requires<[HasSSSE3]>;
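// psign semantics, for reference: each element of $src1 is negated where the
// corresponding $src2 element is negative, zeroed where it is zero, and left
// unchanged where it is positive.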
//===---------------------------------------------------------------------===//
// SSSE3 - Packed Align Instruction Patterns
//===---------------------------------------------------------------------===//

multiclass ssse3_palign<string asm, bit Is2Addr = 1> {
  def R128rr : SS3AI<0x0F, MRMSrcReg, (outs VR128:$dst),
      (ins VR128:$src1, VR128:$src2, i8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                  "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      []>, OpSize;
  def R128rm : SS3AI<0x0F, MRMSrcMem, (outs VR128:$dst),
      (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                  "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      []>, OpSize;
}

let Predicates = [HasAVX] in
  defm VPALIGN : ssse3_palign<"vpalignr", 0>, VEX_4V;
let Constraints = "$src1 = $dst", Predicates = [HasSSSE3] in
  defm PALIGN : ssse3_palign<"palignr">;

let Predicates = [HasSSSE3] in {
def : Pat<(v4i32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
def : Pat<(v4f32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
def : Pat<(v8i16 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
def : Pat<(v16i8 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
}

let Predicates = [HasAVX] in {
def : Pat<(v4i32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (VPALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
def : Pat<(v4f32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (VPALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
def : Pat<(v8i16 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (VPALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
def : Pat<(v16i8 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (VPALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
}
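// palignr, for reference: the two sources are concatenated (first operand
// high, second low), the 32-byte composite is shifted right by $imm bytes,
// and the low 16 bytes are kept; the patterns above swap the operands to
// match the order expected by the X86PAlign node.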
//===---------------------------------------------------------------------===//
// SSSE3 Misc Instructions
//===---------------------------------------------------------------------===//

// Thread synchronization
let usesCustomInserter = 1 in {
def MONITOR : PseudoI<(outs), (ins i32mem:$src1, GR32:$src2, GR32:$src3),
                [(int_x86_sse3_monitor addr:$src1, GR32:$src2, GR32:$src3)]>;
def MWAIT : PseudoI<(outs), (ins GR32:$src1, GR32:$src2),
                [(int_x86_sse3_mwait GR32:$src1, GR32:$src2)]>;
}

let Uses = [EAX, ECX, EDX] in
def MONITORrrr : I<0x01, MRM_C8, (outs), (ins), "monitor", []>, TB,
                Requires<[HasSSE3]>;
let Uses = [ECX, EAX] in
def MWAITrr : I<0x01, MRM_C9, (outs), (ins), "mwait", []>, TB,
                Requires<[HasSSE3]>;

def : InstAlias<"mwait %eax, %ecx", (MWAITrr)>, Requires<[In32BitMode]>;
def : InstAlias<"mwait %rax, %rcx", (MWAITrr)>, Requires<[In64BitMode]>;

def : InstAlias<"monitor %eax, %ecx, %edx", (MONITORrrr)>,
      Requires<[In32BitMode]>;
def : InstAlias<"monitor %rax, %rcx, %rdx", (MONITORrrr)>,
      Requires<[In64BitMode]>;
// extload f32 -> f64. This matches load+fextend because we have a hack in
// the isel (PreprocessForFPConvert) that can introduce loads after dag
// combine.
// Since these loads aren't folded into the fextend, we have to match it
// explicitly here.
let Predicates = [HasSSE2] in
 def : Pat<(fextend (loadf32 addr:$src)),
           (CVTSS2SDrm addr:$src)>;
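// E.g. "double d = *pf;" for a float *pf selects to a single
// "cvtss2sd (%mem), %xmm0" with the load folded into the conversion.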
// Splat v2f64 / v2i64
let AddedComplexity = 10 in {
def : Pat<(splat_lo (v2i64 VR128:$src), (undef)),
          (PUNPCKLQDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
}

// Set lowest element and zero upper elements.
def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
          (MOVZPQILo2PQIrr VR128:$src)>, Requires<[HasSSE2]>;
//===----------------------------------------------------------------------===//
// SSE4.1 - Packed Move with Sign/Zero Extend
//===----------------------------------------------------------------------===//

multiclass SS41I_binop_rm_int8<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
  def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;

  def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
       [(set VR128:$dst,
         (IntId (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))]>,
       OpSize;
}

let Predicates = [HasAVX] in {
defm VPMOVSXBW : SS41I_binop_rm_int8<0x20, "vpmovsxbw", int_x86_sse41_pmovsxbw>,
                                     VEX;
defm VPMOVSXWD : SS41I_binop_rm_int8<0x23, "vpmovsxwd", int_x86_sse41_pmovsxwd>,
                                     VEX;
defm VPMOVSXDQ : SS41I_binop_rm_int8<0x25, "vpmovsxdq", int_x86_sse41_pmovsxdq>,
                                     VEX;
defm VPMOVZXBW : SS41I_binop_rm_int8<0x30, "vpmovzxbw", int_x86_sse41_pmovzxbw>,
                                     VEX;
defm VPMOVZXWD : SS41I_binop_rm_int8<0x33, "vpmovzxwd", int_x86_sse41_pmovzxwd>,
                                     VEX;
defm VPMOVZXDQ : SS41I_binop_rm_int8<0x35, "vpmovzxdq", int_x86_sse41_pmovzxdq>,
                                     VEX;
}

defm PMOVSXBW : SS41I_binop_rm_int8<0x20, "pmovsxbw", int_x86_sse41_pmovsxbw>;
defm PMOVSXWD : SS41I_binop_rm_int8<0x23, "pmovsxwd", int_x86_sse41_pmovsxwd>;
defm PMOVSXDQ : SS41I_binop_rm_int8<0x25, "pmovsxdq", int_x86_sse41_pmovsxdq>;
defm PMOVZXBW : SS41I_binop_rm_int8<0x30, "pmovzxbw", int_x86_sse41_pmovzxbw>;
defm PMOVZXWD : SS41I_binop_rm_int8<0x33, "pmovzxwd", int_x86_sse41_pmovzxwd>;
defm PMOVZXDQ : SS41I_binop_rm_int8<0x35, "pmovzxdq", int_x86_sse41_pmovzxdq>;
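// Why the memory forms use i64mem: these ops widen their elements 2x, so only
// the low 8 source bytes are needed to fill the 128-bit destination (e.g.
// pmovsxbw reads 8 bytes and produces 8 sign-extended words).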
// Common patterns involving scalar load.
def : Pat<(int_x86_sse41_pmovsxbw (vzmovl_v2i64 addr:$src)),
          (PMOVSXBWrm addr:$src)>, Requires<[HasSSE41]>;
def : Pat<(int_x86_sse41_pmovsxbw (vzload_v2i64 addr:$src)),
          (PMOVSXBWrm addr:$src)>, Requires<[HasSSE41]>;

def : Pat<(int_x86_sse41_pmovsxwd (vzmovl_v2i64 addr:$src)),
          (PMOVSXWDrm addr:$src)>, Requires<[HasSSE41]>;
def : Pat<(int_x86_sse41_pmovsxwd (vzload_v2i64 addr:$src)),
          (PMOVSXWDrm addr:$src)>, Requires<[HasSSE41]>;

def : Pat<(int_x86_sse41_pmovsxdq (vzmovl_v2i64 addr:$src)),
          (PMOVSXDQrm addr:$src)>, Requires<[HasSSE41]>;
def : Pat<(int_x86_sse41_pmovsxdq (vzload_v2i64 addr:$src)),
          (PMOVSXDQrm addr:$src)>, Requires<[HasSSE41]>;

def : Pat<(int_x86_sse41_pmovzxbw (vzmovl_v2i64 addr:$src)),
          (PMOVZXBWrm addr:$src)>, Requires<[HasSSE41]>;
def : Pat<(int_x86_sse41_pmovzxbw (vzload_v2i64 addr:$src)),
          (PMOVZXBWrm addr:$src)>, Requires<[HasSSE41]>;

def : Pat<(int_x86_sse41_pmovzxwd (vzmovl_v2i64 addr:$src)),
          (PMOVZXWDrm addr:$src)>, Requires<[HasSSE41]>;
def : Pat<(int_x86_sse41_pmovzxwd (vzload_v2i64 addr:$src)),
          (PMOVZXWDrm addr:$src)>, Requires<[HasSSE41]>;

def : Pat<(int_x86_sse41_pmovzxdq (vzmovl_v2i64 addr:$src)),
          (PMOVZXDQrm addr:$src)>, Requires<[HasSSE41]>;
def : Pat<(int_x86_sse41_pmovzxdq (vzload_v2i64 addr:$src)),
          (PMOVZXDQrm addr:$src)>, Requires<[HasSSE41]>;
multiclass SS41I_binop_rm_int4<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
  def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;

  def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
       [(set VR128:$dst,
         (IntId (bitconvert (v4i32 (scalar_to_vector (loadi32 addr:$src))))))]>,
       OpSize;
}

let Predicates = [HasAVX] in {
defm VPMOVSXBD : SS41I_binop_rm_int4<0x21, "vpmovsxbd", int_x86_sse41_pmovsxbd>,
                                     VEX;
defm VPMOVSXWQ : SS41I_binop_rm_int4<0x24, "vpmovsxwq", int_x86_sse41_pmovsxwq>,
                                     VEX;
defm VPMOVZXBD : SS41I_binop_rm_int4<0x31, "vpmovzxbd", int_x86_sse41_pmovzxbd>,
                                     VEX;
defm VPMOVZXWQ : SS41I_binop_rm_int4<0x34, "vpmovzxwq", int_x86_sse41_pmovzxwq>,
                                     VEX;
}

defm PMOVSXBD : SS41I_binop_rm_int4<0x21, "pmovsxbd", int_x86_sse41_pmovsxbd>;
defm PMOVSXWQ : SS41I_binop_rm_int4<0x24, "pmovsxwq", int_x86_sse41_pmovsxwq>;
defm PMOVZXBD : SS41I_binop_rm_int4<0x31, "pmovzxbd", int_x86_sse41_pmovzxbd>;
defm PMOVZXWQ : SS41I_binop_rm_int4<0x34, "pmovzxwq", int_x86_sse41_pmovzxwq>;

// Common patterns involving scalar load
def : Pat<(int_x86_sse41_pmovsxbd (vzmovl_v4i32 addr:$src)),
          (PMOVSXBDrm addr:$src)>, Requires<[HasSSE41]>;
def : Pat<(int_x86_sse41_pmovsxwq (vzmovl_v4i32 addr:$src)),
          (PMOVSXWQrm addr:$src)>, Requires<[HasSSE41]>;

def : Pat<(int_x86_sse41_pmovzxbd (vzmovl_v4i32 addr:$src)),
          (PMOVZXBDrm addr:$src)>, Requires<[HasSSE41]>;
def : Pat<(int_x86_sse41_pmovzxwq (vzmovl_v4i32 addr:$src)),
          (PMOVZXWQrm addr:$src)>, Requires<[HasSSE41]>;
multiclass SS41I_binop_rm_int2<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
  def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;

  // Expecting an i16 load any-extended to an i32 value.
  def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i16mem:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                 [(set VR128:$dst, (IntId (bitconvert
                     (v4i32 (scalar_to_vector (loadi16_anyext addr:$src))))))]>,
                 OpSize;
}

let Predicates = [HasAVX] in {
defm VPMOVSXBQ : SS41I_binop_rm_int2<0x22, "vpmovsxbq", int_x86_sse41_pmovsxbq>,
                                     VEX;
defm VPMOVZXBQ : SS41I_binop_rm_int2<0x32, "vpmovzxbq", int_x86_sse41_pmovzxbq>,
                                     VEX;
}

defm PMOVSXBQ : SS41I_binop_rm_int2<0x22, "pmovsxbq", int_x86_sse41_pmovsxbq>;
defm PMOVZXBQ : SS41I_binop_rm_int2<0x32, "pmovzxbq", int_x86_sse41_pmovzxbq>;

// Common patterns involving scalar load
def : Pat<(int_x86_sse41_pmovsxbq
            (bitconvert (v4i32 (X86vzmovl
                             (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
          (PMOVSXBQrm addr:$src)>, Requires<[HasSSE41]>;

def : Pat<(int_x86_sse41_pmovzxbq
            (bitconvert (v4i32 (X86vzmovl
                             (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
          (PMOVZXBQrm addr:$src)>, Requires<[HasSSE41]>;
//===----------------------------------------------------------------------===//
// SSE4.1 - Extract Instructions
//===----------------------------------------------------------------------===//

/// SS41I_extract8 - SSE 4.1 extract 8 bits to 32 bit reg or 8 bit mem
multiclass SS41I_extract8<bits<8> opc, string OpcodeStr> {
  def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
                 (ins VR128:$src1, i32i8imm:$src2),
                 !strconcat(OpcodeStr,
                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 [(set GR32:$dst, (X86pextrb (v16i8 VR128:$src1), imm:$src2))]>,
                 OpSize;
  def mr : SS4AIi8<opc, MRMDestMem, (outs),
                 (ins i8mem:$dst, VR128:$src1, i32i8imm:$src2),
                 !strconcat(OpcodeStr,
                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 []>, OpSize;
// FIXME:
// There's an AssertZext in the way of writing the store pattern
// (store (i8 (trunc (X86pextrb (v16i8 VR128:$src1), imm:$src2))), addr:$dst)
}

let Predicates = [HasAVX] in {
  defm VPEXTRB : SS41I_extract8<0x14, "vpextrb">, VEX;
  def VPEXTRBrr64 : SS4AIi8<0x14, MRMDestReg, (outs GR64:$dst),
         (ins VR128:$src1, i32i8imm:$src2),
         "vpextrb\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>, OpSize, VEX;
}

defm PEXTRB : SS41I_extract8<0x14, "pextrb">;
/// SS41I_extract16 - SSE 4.1 extract 16 bits to memory destination
multiclass SS41I_extract16<bits<8> opc, string OpcodeStr> {
  def mr : SS4AIi8<opc, MRMDestMem, (outs),
                 (ins i16mem:$dst, VR128:$src1, i32i8imm:$src2),
                 !strconcat(OpcodeStr,
                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 []>, OpSize;
// FIXME:
// There's an AssertZext in the way of writing the store pattern
// (store (i16 (trunc (X86pextrw (v16i8 VR128:$src1), imm:$src2))), addr:$dst)
}

let Predicates = [HasAVX] in
  defm VPEXTRW : SS41I_extract16<0x15, "vpextrw">, VEX;

defm PEXTRW : SS41I_extract16<0x15, "pextrw">;
/// SS41I_extract32 - SSE 4.1 extract 32 bits to int reg or memory destination
multiclass SS41I_extract32<bits<8> opc, string OpcodeStr> {
  def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
                 (ins VR128:$src1, i32i8imm:$src2),
                 !strconcat(OpcodeStr,
                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 [(set GR32:$dst,
                  (extractelt (v4i32 VR128:$src1), imm:$src2))]>, OpSize;
  def mr : SS4AIi8<opc, MRMDestMem, (outs),
                 (ins i32mem:$dst, VR128:$src1, i32i8imm:$src2),
                 !strconcat(OpcodeStr,
                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 [(store (extractelt (v4i32 VR128:$src1), imm:$src2),
                          addr:$dst)]>, OpSize;
}

let Predicates = [HasAVX] in
  defm VPEXTRD : SS41I_extract32<0x16, "vpextrd">, VEX;

defm PEXTRD : SS41I_extract32<0x16, "pextrd">;
/// SS41I_extract64 - SSE 4.1 extract 64 bits to int reg or memory destination
multiclass SS41I_extract64<bits<8> opc, string OpcodeStr> {
  def rr : SS4AIi8<opc, MRMDestReg, (outs GR64:$dst),
                 (ins VR128:$src1, i32i8imm:$src2),
                 !strconcat(OpcodeStr,
                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 [(set GR64:$dst,
                  (extractelt (v2i64 VR128:$src1), imm:$src2))]>, OpSize, REX_W;
  def mr : SS4AIi8<opc, MRMDestMem, (outs),
                 (ins i64mem:$dst, VR128:$src1, i32i8imm:$src2),
                 !strconcat(OpcodeStr,
                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 [(store (extractelt (v2i64 VR128:$src1), imm:$src2),
                          addr:$dst)]>, OpSize, REX_W;
}

let Predicates = [HasAVX] in
  defm VPEXTRQ : SS41I_extract64<0x16, "vpextrq">, VEX, VEX_W;

defm PEXTRQ : SS41I_extract64<0x16, "pextrq">;
5079 /// SS41I_extractf32 - SSE 4.1 extract 32 bits fp value to int reg or memory
5081 multiclass SS41I_extractf32<bits<8> opc, string OpcodeStr> {
5082 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
5083 (ins VR128:$src1, i32i8imm:$src2),
5084 !strconcat(OpcodeStr,
5085 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5087 (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2))]>,
5089 def mr : SS4AIi8<opc, MRMDestMem, (outs),
5090 (ins f32mem:$dst, VR128:$src1, i32i8imm:$src2),
5091 !strconcat(OpcodeStr,
5092 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5093 [(store (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2),
5094 addr:$dst)]>, OpSize;
5097 let Predicates = [HasAVX] in {
5098 defm VEXTRACTPS : SS41I_extractf32<0x17, "vextractps">, VEX;
5099 def VEXTRACTPSrr64 : SS4AIi8<0x17, MRMDestReg, (outs GR64:$dst),
5100 (ins VR128:$src1, i32i8imm:$src2),
5101 "vextractps \t{$src2, $src1, $dst|$dst, $src1, $src2}",
5104 defm EXTRACTPS : SS41I_extractf32<0x17, "extractps">;

// Also match an EXTRACTPS store when the store is done as f32 instead of i32.
def : Pat<(store (f32 (bitconvert (extractelt (bc_v4i32 (v4f32 VR128:$src1)),
                                              imm:$src2))),
                 addr:$dst),
          (EXTRACTPSmr addr:$dst, VR128:$src1, imm:$src2)>,
          Requires<[HasSSE41]>;

//===----------------------------------------------------------------------===//
// SSE4.1 - Insert Instructions
//===----------------------------------------------------------------------===//

multiclass SS41I_insert8<bits<8> opc, string asm, bit Is2Addr = 1> {
  def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
      (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                   "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst,
        (X86pinsrb VR128:$src1, GR32:$src2, imm:$src3))]>, OpSize;
  def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
      (ins VR128:$src1, i8mem:$src2, i32i8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                   "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst,
        (X86pinsrb VR128:$src1, (extloadi8 addr:$src2),
                   imm:$src3))]>, OpSize;
}

let Predicates = [HasAVX] in
  defm VPINSRB : SS41I_insert8<0x20, "vpinsrb", 0>, VEX_4V;
let Constraints = "$src1 = $dst" in
  defm PINSRB : SS41I_insert8<0x20, "pinsrb">;

multiclass SS41I_insert32<bits<8> opc, string asm, bit Is2Addr = 1> {
  def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
      (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                   "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst,
        (v4i32 (insertelt VR128:$src1, GR32:$src2, imm:$src3)))]>,
      OpSize;
  def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
      (ins VR128:$src1, i32mem:$src2, i32i8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                   "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst,
        (v4i32 (insertelt VR128:$src1, (loadi32 addr:$src2),
                          imm:$src3)))]>, OpSize;
}

let Predicates = [HasAVX] in
  defm VPINSRD : SS41I_insert32<0x22, "vpinsrd", 0>, VEX_4V;
let Constraints = "$src1 = $dst" in
  defm PINSRD : SS41I_insert32<0x22, "pinsrd">;

multiclass SS41I_insert64<bits<8> opc, string asm, bit Is2Addr = 1> {
  def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
      (ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                   "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst,
        (v2i64 (insertelt VR128:$src1, GR64:$src2, imm:$src3)))]>,
      OpSize;
  def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
      (ins VR128:$src1, i64mem:$src2, i32i8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                   "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst,
        (v2i64 (insertelt VR128:$src1, (loadi64 addr:$src2),
                          imm:$src3)))]>, OpSize;
}

let Predicates = [HasAVX] in
  defm VPINSRQ : SS41I_insert64<0x22, "vpinsrq", 0>, VEX_4V, VEX_W;
let Constraints = "$src1 = $dst" in
  defm PINSRQ : SS41I_insert64<0x22, "pinsrq">, REX_W;
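
// As a rough illustration of what the PINSRD/PINSRQ forms expose at the
// source level (SSE4.1 intrinsic; the function name is illustrative only):
//   #include <smmintrin.h>
//   __m128i set_lane1(__m128i v, int x) {
//     return _mm_insert_epi32(v, x, 1);   // pinsrd $1, %edi, %xmm0
//   }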

// insertps has a few different modes. The first two below are optimized
// inserts that won't zero arbitrary elements in the destination vector; the
// intrinsic patterns that follow them may zero arbitrary elements in the
// target vector.
multiclass SS41I_insertf32<bits<8> opc, string asm, bit Is2Addr = 1> {
  def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
      (ins VR128:$src1, VR128:$src2, u32u8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                   "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst,
        (X86insrtps VR128:$src1, VR128:$src2, imm:$src3))]>,
      OpSize;
  def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
      (ins VR128:$src1, f32mem:$src2, u32u8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                   "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst,
        (X86insrtps VR128:$src1,
                    (v4f32 (scalar_to_vector (loadf32 addr:$src2))),
                    imm:$src3))]>, OpSize;
}

let Constraints = "$src1 = $dst" in
  defm INSERTPS : SS41I_insertf32<0x21, "insertps">;
let Predicates = [HasAVX] in
  defm VINSERTPS : SS41I_insertf32<0x21, "vinsertps", 0>, VEX_4V;

def : Pat<(int_x86_sse41_insertps VR128:$src1, VR128:$src2, imm:$src3),
          (VINSERTPSrr VR128:$src1, VR128:$src2, imm:$src3)>,
          Requires<[HasAVX]>;
def : Pat<(int_x86_sse41_insertps VR128:$src1, VR128:$src2, imm:$src3),
          (INSERTPSrr VR128:$src1, VR128:$src2, imm:$src3)>,
          Requires<[HasSSE41]>;
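
// The insertps immediate packs three fields: bits [7:6] pick the source lane
// of $src2, bits [5:4] pick the destination lane, and bits [3:0] are a zero
// mask. A small C sketch (the function name is illustrative only):
//   #include <smmintrin.h>
//   __m128 put_b0_into_a2(__m128 a, __m128 b) {
//     // source lane 0, dest lane 2, no zeroing: imm = (0<<6)|(2<<4)|0 = 0x20
//     return _mm_insert_ps(a, b, 0x20);
//   }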

//===----------------------------------------------------------------------===//
// SSE4.1 - Round Instructions
//===----------------------------------------------------------------------===//

multiclass sse41_fp_unop_rm<bits<8> opcps, bits<8> opcpd, string OpcodeStr,
                            X86MemOperand x86memop, RegisterClass RC,
                            PatFrag mem_frag32, PatFrag mem_frag64,
                            Intrinsic V4F32Int, Intrinsic V2F64Int> {
  // Vector intrinsic operation, reg
  def PSr : SS4AIi8<opcps, MRMSrcReg,
                    (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
                    !strconcat(OpcodeStr,
                    "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    [(set RC:$dst, (V4F32Int RC:$src1, imm:$src2))]>,
                    OpSize;

  // Vector intrinsic operation, mem
  def PSm : Ii8<opcps, MRMSrcMem,
                    (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
                    !strconcat(OpcodeStr,
                    "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    [(set RC:$dst,
                          (V4F32Int (mem_frag32 addr:$src1), imm:$src2))]>,
                    TA, OpSize,
                    Requires<[HasSSE41]>;

  // Vector intrinsic operation, reg
  def PDr : SS4AIi8<opcpd, MRMSrcReg,
                    (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
                    !strconcat(OpcodeStr,
                    "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    [(set RC:$dst, (V2F64Int RC:$src1, imm:$src2))]>,
                    OpSize;

  // Vector intrinsic operation, mem
  def PDm : SS4AIi8<opcpd, MRMSrcMem,
                    (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
                    !strconcat(OpcodeStr,
                    "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    [(set RC:$dst,
                          (V2F64Int (mem_frag64 addr:$src1), imm:$src2))]>,
                    OpSize;
}

multiclass sse41_fp_unop_rm_avx_p<bits<8> opcps, bits<8> opcpd,
                RegisterClass RC, X86MemOperand x86memop, string OpcodeStr> {
  // Vector intrinsic operation, reg
  def PSr_AVX : SS4AIi8<opcps, MRMSrcReg,
                    (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
                    !strconcat(OpcodeStr,
                    "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    []>, OpSize;

  // Vector intrinsic operation, mem
  def PSm_AVX : Ii8<opcps, MRMSrcMem,
                    (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
                    !strconcat(OpcodeStr,
                    "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    []>, TA, OpSize, Requires<[HasSSE41]>;

  // Vector intrinsic operation, reg
  def PDr_AVX : SS4AIi8<opcpd, MRMSrcReg,
                    (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
                    !strconcat(OpcodeStr,
                    "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    []>, OpSize;

  // Vector intrinsic operation, mem
  def PDm_AVX : SS4AIi8<opcpd, MRMSrcMem,
                    (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
                    !strconcat(OpcodeStr,
                    "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    []>, OpSize;
}

multiclass sse41_fp_binop_rm<bits<8> opcss, bits<8> opcsd,
                             string OpcodeStr,
                             Intrinsic F32Int,
                             Intrinsic F64Int, bit Is2Addr = 1> {
  // Intrinsic operation, reg.
  def SSr : SS4AIi8<opcss, MRMSrcReg,
        (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
        !if(Is2Addr,
            !strconcat(OpcodeStr,
                "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
            !strconcat(OpcodeStr,
                "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
        [(set VR128:$dst, (F32Int VR128:$src1, VR128:$src2, imm:$src3))]>,
        OpSize;

  // Intrinsic operation, mem.
  def SSm : SS4AIi8<opcss, MRMSrcMem,
        (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2, i32i8imm:$src3),
        !if(Is2Addr,
            !strconcat(OpcodeStr,
                "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
            !strconcat(OpcodeStr,
                "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
        [(set VR128:$dst,
             (F32Int VR128:$src1, sse_load_f32:$src2, imm:$src3))]>,
        OpSize;

  // Intrinsic operation, reg.
  def SDr : SS4AIi8<opcsd, MRMSrcReg,
        (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
        !if(Is2Addr,
            !strconcat(OpcodeStr,
                "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
            !strconcat(OpcodeStr,
                "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
        [(set VR128:$dst, (F64Int VR128:$src1, VR128:$src2, imm:$src3))]>,
        OpSize;

  // Intrinsic operation, mem.
  def SDm : SS4AIi8<opcsd, MRMSrcMem,
        (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2, i32i8imm:$src3),
        !if(Is2Addr,
            !strconcat(OpcodeStr,
                "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
            !strconcat(OpcodeStr,
                "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
        [(set VR128:$dst,
              (F64Int VR128:$src1, sse_load_f64:$src2, imm:$src3))]>,
        OpSize;
}

multiclass sse41_fp_binop_rm_avx_s<bits<8> opcss, bits<8> opcsd,
                                   string OpcodeStr> {
  // Intrinsic operation, reg.
  def SSr_AVX : SS4AIi8<opcss, MRMSrcReg,
        (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
        !strconcat(OpcodeStr,
                "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
        []>, OpSize;

  // Intrinsic operation, mem.
  def SSm_AVX : SS4AIi8<opcss, MRMSrcMem,
        (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2, i32i8imm:$src3),
        !strconcat(OpcodeStr,
                "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
        []>, OpSize;

  // Intrinsic operation, reg.
  def SDr_AVX : SS4AIi8<opcsd, MRMSrcReg,
        (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
        !strconcat(OpcodeStr,
                "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
        []>, OpSize;

  // Intrinsic operation, mem.
  def SDm_AVX : SS4AIi8<opcsd, MRMSrcMem,
        (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2, i32i8imm:$src3),
        !strconcat(OpcodeStr,
                "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
        []>, OpSize;
}

// FP round - roundss, roundps, roundsd, roundpd
let Predicates = [HasAVX] in {
  // Intrinsic form
  defm VROUND  : sse41_fp_unop_rm<0x08, 0x09, "vround", f128mem, VR128,
                                  memopv4f32, memopv2f64,
                                  int_x86_sse41_round_ps,
                                  int_x86_sse41_round_pd>, VEX;
  defm VROUNDY : sse41_fp_unop_rm<0x08, 0x09, "vround", f256mem, VR256,
                                  memopv8f32, memopv4f64,
                                  int_x86_avx_round_ps_256,
                                  int_x86_avx_round_pd_256>, VEX;
  defm VROUND  : sse41_fp_binop_rm<0x0A, 0x0B, "vround",
                                   int_x86_sse41_round_ss,
                                   int_x86_sse41_round_sd, 0>, VEX_4V;

  // Instructions for the assembler
  defm VROUND  : sse41_fp_unop_rm_avx_p<0x08, 0x09, VR128, f128mem, "vround">,
                 VEX;
  defm VROUNDY : sse41_fp_unop_rm_avx_p<0x08, 0x09, VR256, f256mem, "vround">,
                 VEX;
  defm VROUND  : sse41_fp_binop_rm_avx_s<0x0A, 0x0B, "vround">, VEX_4V;
}

defm ROUND  : sse41_fp_unop_rm<0x08, 0x09, "round", f128mem, VR128,
                               memopv4f32, memopv2f64,
                               int_x86_sse41_round_ps, int_x86_sse41_round_pd>;
let Constraints = "$src1 = $dst" in
defm ROUND  : sse41_fp_binop_rm<0x0A, 0x0B, "round",
                                int_x86_sse41_round_ss, int_x86_sse41_round_sd>;
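
// For reference, a minimal C-level sketch of the rounding these map to
// (SSE4.1 intrinsic; the immediate selects the rounding mode):
//   #include <smmintrin.h>
//   __m128 floor4(__m128 v) {
//     return _mm_round_ps(v, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC);
//   }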

//===----------------------------------------------------------------------===//
// SSE4.1 - Packed Bit Test
//===----------------------------------------------------------------------===//

// The ptest instruction is lowered to this node in X86ISelLowering, primarily
// from the Intel intrinsic that corresponds to it.
let Defs = [EFLAGS], Predicates = [HasAVX] in {
def VPTESTrr  : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
                "vptest\t{$src2, $src1|$src1, $src2}",
                [(set EFLAGS, (X86ptest VR128:$src1, (v4f32 VR128:$src2)))]>,
                OpSize, VEX;
def VPTESTrm  : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
                "vptest\t{$src2, $src1|$src1, $src2}",
                [(set EFLAGS,(X86ptest VR128:$src1, (memopv4f32 addr:$src2)))]>,
                OpSize, VEX;

def VPTESTYrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR256:$src1, VR256:$src2),
                "vptest\t{$src2, $src1|$src1, $src2}",
                [(set EFLAGS, (X86ptest VR256:$src1, (v4i64 VR256:$src2)))]>,
                OpSize, VEX;
def VPTESTYrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR256:$src1, i256mem:$src2),
                "vptest\t{$src2, $src1|$src1, $src2}",
                [(set EFLAGS,(X86ptest VR256:$src1, (memopv4i64 addr:$src2)))]>,
                OpSize, VEX;
}

let Defs = [EFLAGS] in {
def PTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
              "ptest \t{$src2, $src1|$src1, $src2}",
              [(set EFLAGS, (X86ptest VR128:$src1, (v4f32 VR128:$src2)))]>,
              OpSize;
def PTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
              "ptest \t{$src2, $src1|$src1, $src2}",
              [(set EFLAGS, (X86ptest VR128:$src1, (memopv4f32 addr:$src2)))]>,
              OpSize;
}

// The bit test instructions below are AVX only
multiclass avx_bittest<bits<8> opc, string OpcodeStr, RegisterClass RC,
                       X86MemOperand x86memop, PatFrag mem_frag, ValueType vt> {
  def rr : SS48I<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
            !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
            [(set EFLAGS, (X86testp RC:$src1, (vt RC:$src2)))]>, OpSize, VEX;
  def rm : SS48I<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
            !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
            [(set EFLAGS, (X86testp RC:$src1, (mem_frag addr:$src2)))]>,
            OpSize, VEX;
}

let Defs = [EFLAGS], Predicates = [HasAVX] in {
defm VTESTPS  : avx_bittest<0x0E, "vtestps", VR128, f128mem, memopv4f32, v4f32>;
defm VTESTPSY : avx_bittest<0x0E, "vtestps", VR256, f256mem, memopv8f32, v8f32>;
defm VTESTPD  : avx_bittest<0x0F, "vtestpd", VR128, f128mem, memopv2f64, v2f64>;
defm VTESTPDY : avx_bittest<0x0F, "vtestpd", VR256, f256mem, memopv4f64, v4f64>;
}
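
// For reference, a minimal C-level sketch of a PTEST use (SSE4.1 intrinsic;
// returns ZF, i.e. whether all selected bits are zero):
//   #include <smmintrin.h>
//   int all_zero(__m128i v) {
//     return _mm_testz_si128(v, v);   // ptest %xmm0, %xmm0; sete
//   }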

//===----------------------------------------------------------------------===//
// SSE4.1 - Misc Instructions
//===----------------------------------------------------------------------===//

def POPCNT16rr : I<0xB8, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
                   "popcnt{w}\t{$src, $dst|$dst, $src}",
                   [(set GR16:$dst, (ctpop GR16:$src))]>, OpSize, XS;
def POPCNT16rm : I<0xB8, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
                   "popcnt{w}\t{$src, $dst|$dst, $src}",
                   [(set GR16:$dst, (ctpop (loadi16 addr:$src)))]>, OpSize, XS;

def POPCNT32rr : I<0xB8, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
                   "popcnt{l}\t{$src, $dst|$dst, $src}",
                   [(set GR32:$dst, (ctpop GR32:$src))]>, XS;
def POPCNT32rm : I<0xB8, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
                   "popcnt{l}\t{$src, $dst|$dst, $src}",
                   [(set GR32:$dst, (ctpop (loadi32 addr:$src)))]>, XS;

def POPCNT64rr : RI<0xB8, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
                    "popcnt{q}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (ctpop GR64:$src))]>, XS;
def POPCNT64rm : RI<0xB8, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
                    "popcnt{q}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (ctpop (loadi64 addr:$src)))]>, XS;
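
// For reference, a minimal C-level sketch of POPCNT (the ctpop node above):
//   #include <nmmintrin.h>
//   unsigned bits_set(unsigned x) {
//     return _mm_popcnt_u32(x);   // popcntl %edi, %eax
//   }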

// SS41I_unop_rm_int_v16 - SSE 4.1 unary operator whose type is v8i16.
multiclass SS41I_unop_rm_int_v16<bits<8> opc, string OpcodeStr,
                                 Intrinsic IntId128> {
  def rr128 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
                    (ins VR128:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (IntId128 VR128:$src))]>, OpSize;
  def rm128 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
                    (ins i128mem:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst,
                      (IntId128
                       (bitconvert (memopv8i16 addr:$src))))]>, OpSize;
}

let Predicates = [HasAVX] in
defm VPHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "vphminposuw",
                                          int_x86_sse41_phminposuw>, VEX;
defm PHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "phminposuw",
                                         int_x86_sse41_phminposuw>;

/// SS41I_binop_rm_int - Simple SSE 4.1 binary operator
multiclass SS41I_binop_rm_int<bits<8> opc, string OpcodeStr,
                              Intrinsic IntId128, bit Is2Addr = 1> {
  let isCommutable = 1 in
  def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
       (ins VR128:$src1, VR128:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>, OpSize;
  def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
       (ins VR128:$src1, i128mem:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst,
         (IntId128 VR128:$src1,
          (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
}

let Predicates = [HasAVX] in {
  let isCommutable = 0 in
  defm VPACKUSDW : SS41I_binop_rm_int<0x2B, "vpackusdw", int_x86_sse41_packusdw,
                                      0>, VEX_4V;
  defm VPCMPEQQ  : SS41I_binop_rm_int<0x29, "vpcmpeqq", int_x86_sse41_pcmpeqq,
                                      0>, VEX_4V;
  defm VPMINSB   : SS41I_binop_rm_int<0x38, "vpminsb", int_x86_sse41_pminsb,
                                      0>, VEX_4V;
  defm VPMINSD   : SS41I_binop_rm_int<0x39, "vpminsd", int_x86_sse41_pminsd,
                                      0>, VEX_4V;
  defm VPMINUD   : SS41I_binop_rm_int<0x3B, "vpminud", int_x86_sse41_pminud,
                                      0>, VEX_4V;
  defm VPMINUW   : SS41I_binop_rm_int<0x3A, "vpminuw", int_x86_sse41_pminuw,
                                      0>, VEX_4V;
  defm VPMAXSB   : SS41I_binop_rm_int<0x3C, "vpmaxsb", int_x86_sse41_pmaxsb,
                                      0>, VEX_4V;
  defm VPMAXSD   : SS41I_binop_rm_int<0x3D, "vpmaxsd", int_x86_sse41_pmaxsd,
                                      0>, VEX_4V;
  defm VPMAXUD   : SS41I_binop_rm_int<0x3F, "vpmaxud", int_x86_sse41_pmaxud,
                                      0>, VEX_4V;
  defm VPMAXUW   : SS41I_binop_rm_int<0x3E, "vpmaxuw", int_x86_sse41_pmaxuw,
                                      0>, VEX_4V;
  defm VPMULDQ   : SS41I_binop_rm_int<0x28, "vpmuldq", int_x86_sse41_pmuldq,
                                      0>, VEX_4V;

  def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, VR128:$src2)),
            (VPCMPEQQrr VR128:$src1, VR128:$src2)>;
  def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, (memop addr:$src2))),
            (VPCMPEQQrm VR128:$src1, addr:$src2)>;
}

let Constraints = "$src1 = $dst" in {
  let isCommutable = 0 in
  defm PACKUSDW : SS41I_binop_rm_int<0x2B, "packusdw", int_x86_sse41_packusdw>;
  defm PCMPEQQ  : SS41I_binop_rm_int<0x29, "pcmpeqq",  int_x86_sse41_pcmpeqq>;
  defm PMINSB   : SS41I_binop_rm_int<0x38, "pminsb",   int_x86_sse41_pminsb>;
  defm PMINSD   : SS41I_binop_rm_int<0x39, "pminsd",   int_x86_sse41_pminsd>;
  defm PMINUD   : SS41I_binop_rm_int<0x3B, "pminud",   int_x86_sse41_pminud>;
  defm PMINUW   : SS41I_binop_rm_int<0x3A, "pminuw",   int_x86_sse41_pminuw>;
  defm PMAXSB   : SS41I_binop_rm_int<0x3C, "pmaxsb",   int_x86_sse41_pmaxsb>;
  defm PMAXSD   : SS41I_binop_rm_int<0x3D, "pmaxsd",   int_x86_sse41_pmaxsd>;
  defm PMAXUD   : SS41I_binop_rm_int<0x3F, "pmaxud",   int_x86_sse41_pmaxud>;
  defm PMAXUW   : SS41I_binop_rm_int<0x3E, "pmaxuw",   int_x86_sse41_pmaxuw>;
  defm PMULDQ   : SS41I_binop_rm_int<0x28, "pmuldq",   int_x86_sse41_pmuldq>;
}

def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, VR128:$src2)),
          (PCMPEQQrr VR128:$src1, VR128:$src2)>;
def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, (memop addr:$src2))),
          (PCMPEQQrm VR128:$src1, addr:$src2)>;

/// SS48I_binop_rm - Simple SSE41 binary operator.
multiclass SS48I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
                          ValueType OpVT, bit Is2Addr = 1> {
  let isCommutable = 1 in
  def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
       (ins VR128:$src1, VR128:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]>,
       OpSize;
  def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
       (ins VR128:$src1, i128mem:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (OpNode VR128:$src1,
                                 (bc_v4i32 (memopv2i64 addr:$src2))))]>,
       OpSize;
}

let Predicates = [HasAVX] in
  defm VPMULLD : SS48I_binop_rm<0x40, "vpmulld", mul, v4i32, 0>, VEX_4V;
let Constraints = "$src1 = $dst" in
  defm PMULLD : SS48I_binop_rm<0x40, "pmulld", mul, v4i32>;
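
// For reference, a minimal C-level sketch of PMULLD (the `mul` node above;
// SSE4.1 intrinsic):
//   #include <smmintrin.h>
//   __m128i mul32(__m128i a, __m128i b) {
//     return _mm_mullo_epi32(a, b);   // pmulld: low 32 bits of each product
//   }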

/// SS41I_binop_rmi_int - SSE 4.1 binary operator with 8-bit immediate
multiclass SS41I_binop_rmi_int<bits<8> opc, string OpcodeStr,
                 Intrinsic IntId, RegisterClass RC, PatFrag memop_frag,
                 X86MemOperand x86memop, bit Is2Addr = 1> {
  let isCommutable = 1 in
  def rri : SS4AIi8<opc, MRMSrcReg, (outs RC:$dst),
        (ins RC:$src1, RC:$src2, u32u8imm:$src3),
        !if(Is2Addr,
            !strconcat(OpcodeStr,
                "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
            !strconcat(OpcodeStr,
                "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
        [(set RC:$dst, (IntId RC:$src1, RC:$src2, imm:$src3))]>,
        OpSize;
  def rmi : SS4AIi8<opc, MRMSrcMem, (outs RC:$dst),
        (ins RC:$src1, x86memop:$src2, u32u8imm:$src3),
        !if(Is2Addr,
            !strconcat(OpcodeStr,
                "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
            !strconcat(OpcodeStr,
                "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
        [(set RC:$dst,
          (IntId RC:$src1,
           (bitconvert (memop_frag addr:$src2)), imm:$src3))]>,
        OpSize;
}

let Predicates = [HasAVX] in {
  let isCommutable = 0 in {
  defm VBLENDPS : SS41I_binop_rmi_int<0x0C, "vblendps", int_x86_sse41_blendps,
                                      VR128, memopv16i8, i128mem, 0>, VEX_4V;
  defm VBLENDPD : SS41I_binop_rmi_int<0x0D, "vblendpd", int_x86_sse41_blendpd,
                                      VR128, memopv16i8, i128mem, 0>, VEX_4V;
  defm VBLENDPSY : SS41I_binop_rmi_int<0x0C, "vblendps",
            int_x86_avx_blend_ps_256, VR256, memopv32i8, i256mem, 0>, VEX_4V;
  defm VBLENDPDY : SS41I_binop_rmi_int<0x0D, "vblendpd",
            int_x86_avx_blend_pd_256, VR256, memopv32i8, i256mem, 0>, VEX_4V;
  defm VPBLENDW : SS41I_binop_rmi_int<0x0E, "vpblendw", int_x86_sse41_pblendw,
                                      VR128, memopv16i8, i128mem, 0>, VEX_4V;
  defm VMPSADBW : SS41I_binop_rmi_int<0x42, "vmpsadbw", int_x86_sse41_mpsadbw,
                                      VR128, memopv16i8, i128mem, 0>, VEX_4V;
  }
  defm VDPPS : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_sse41_dpps,
                                   VR128, memopv16i8, i128mem, 0>, VEX_4V;
  defm VDPPD : SS41I_binop_rmi_int<0x41, "vdppd", int_x86_sse41_dppd,
                                   VR128, memopv16i8, i128mem, 0>, VEX_4V;
  defm VDPPSY : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_avx_dp_ps_256,
                                    VR256, memopv32i8, i256mem, 0>, VEX_4V;
}

let Constraints = "$src1 = $dst" in {
  let isCommutable = 0 in {
  defm BLENDPS : SS41I_binop_rmi_int<0x0C, "blendps", int_x86_sse41_blendps,
                                     VR128, memopv16i8, i128mem>;
  defm BLENDPD : SS41I_binop_rmi_int<0x0D, "blendpd", int_x86_sse41_blendpd,
                                     VR128, memopv16i8, i128mem>;
  defm PBLENDW : SS41I_binop_rmi_int<0x0E, "pblendw", int_x86_sse41_pblendw,
                                     VR128, memopv16i8, i128mem>;
  defm MPSADBW : SS41I_binop_rmi_int<0x42, "mpsadbw", int_x86_sse41_mpsadbw,
                                     VR128, memopv16i8, i128mem>;
  }
  defm DPPS : SS41I_binop_rmi_int<0x40, "dpps", int_x86_sse41_dpps,
                                  VR128, memopv16i8, i128mem>;
  defm DPPD : SS41I_binop_rmi_int<0x41, "dppd", int_x86_sse41_dppd,
                                  VR128, memopv16i8, i128mem>;
}
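
// For reference, a small C sketch of DPPS: the high nibble of the immediate
// selects which lanes are multiplied, the low nibble selects which result
// lanes receive the sum (SSE4.1 intrinsic):
//   #include <smmintrin.h>
//   float dot4(__m128 a, __m128 b) {
//     // multiply all four lanes, write the sum into lane 0 only
//     return _mm_cvtss_f32(_mm_dp_ps(a, b, 0xF1));
//   }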

/// SS41I_quaternary_int_avx - AVX SSE 4.1 with 4 operands
let Predicates = [HasAVX] in {
multiclass SS41I_quaternary_int_avx<bits<8> opc, string OpcodeStr,
                                    RegisterClass RC, X86MemOperand x86memop,
                                    PatFrag mem_frag, Intrinsic IntId> {
  def rr : I<opc, MRMSrcReg, (outs RC:$dst),
                  (ins RC:$src1, RC:$src2, RC:$src3),
                  !strconcat(OpcodeStr,
                    "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
                  [(set RC:$dst, (IntId RC:$src1, RC:$src2, RC:$src3))],
                  SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM;

  def rm : I<opc, MRMSrcMem, (outs RC:$dst),
                  (ins RC:$src1, x86memop:$src2, RC:$src3),
                  !strconcat(OpcodeStr,
                    "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
                  [(set RC:$dst,
                        (IntId RC:$src1, (bitconvert (mem_frag addr:$src2)),
                               RC:$src3))],
                  SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM;
}
}

defm VBLENDVPD  : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR128, i128mem,
                                           memopv16i8, int_x86_sse41_blendvpd>;
defm VBLENDVPS  : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR128, i128mem,
                                           memopv16i8, int_x86_sse41_blendvps>;
defm VPBLENDVB  : SS41I_quaternary_int_avx<0x4C, "vpblendvb", VR128, i128mem,
                                           memopv16i8, int_x86_sse41_pblendvb>;
defm VBLENDVPDY : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR256, i256mem,
                                         memopv32i8, int_x86_avx_blendv_pd_256>;
defm VBLENDVPSY : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR256, i256mem,
                                         memopv32i8, int_x86_avx_blendv_ps_256>;

/// SS41I_ternary_int - SSE 4.1 ternary operator
let Uses = [XMM0], Constraints = "$src1 = $dst" in {
  multiclass SS41I_ternary_int<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
    def rr0 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
                    (ins VR128:$src1, VR128:$src2),
                    !strconcat(OpcodeStr,
                     "\t{$src2, $dst|$dst, $src2}"),
                    [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2, XMM0))]>,
                    OpSize;

    def rm0 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
                    (ins VR128:$src1, i128mem:$src2),
                    !strconcat(OpcodeStr,
                     "\t{$src2, $dst|$dst, $src2}"),
                    [(set VR128:$dst,
                      (IntId VR128:$src1,
                       (bitconvert (memopv16i8 addr:$src2)), XMM0))]>, OpSize;
  }
}

defm BLENDVPD : SS41I_ternary_int<0x15, "blendvpd", int_x86_sse41_blendvpd>;
defm BLENDVPS : SS41I_ternary_int<0x14, "blendvps", int_x86_sse41_blendvps>;
defm PBLENDVB : SS41I_ternary_int<0x10, "pblendvb", int_x86_sse41_pblendvb>;

def : Pat<(X86pblendv VR128:$src1, VR128:$src2, XMM0),
          (PBLENDVBrr0 VR128:$src1, VR128:$src2)>;
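
// For reference, a minimal C-level sketch of the blend-variable family (the
// non-VEX forms read the blend mask from the implicit XMM0 shown above):
//   #include <smmintrin.h>
//   __m128i select_bytes(__m128i a, __m128i b, __m128i mask) {
//     // lanes whose mask sign bit is set come from b, the rest from a
//     return _mm_blendv_epi8(a, b, mask);
//   }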

let Predicates = [HasAVX] in
def VMOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                        "vmovntdqa\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
                        OpSize, VEX;
def MOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                       "movntdqa\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
                       OpSize;

//===----------------------------------------------------------------------===//
// SSE4.2 - Compare Instructions
//===----------------------------------------------------------------------===//

/// SS42I_binop_rm_int - Simple SSE 4.2 binary operator
multiclass SS42I_binop_rm_int<bits<8> opc, string OpcodeStr,
                              Intrinsic IntId128, bit Is2Addr = 1> {
  def rr : SS428I<opc, MRMSrcReg, (outs VR128:$dst),
       (ins VR128:$src1, VR128:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
       OpSize;
  def rm : SS428I<opc, MRMSrcMem, (outs VR128:$dst),
       (ins VR128:$src1, i128mem:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst,
         (IntId128 VR128:$src1,
          (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
}

let Predicates = [HasAVX] in {
  defm VPCMPGTQ : SS42I_binop_rm_int<0x37, "vpcmpgtq", int_x86_sse42_pcmpgtq,
                                     0>, VEX_4V;

  def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, VR128:$src2)),
            (VPCMPGTQrr VR128:$src1, VR128:$src2)>;
  def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, (memop addr:$src2))),
            (VPCMPGTQrm VR128:$src1, addr:$src2)>;
}

let Constraints = "$src1 = $dst" in
  defm PCMPGTQ : SS42I_binop_rm_int<0x37, "pcmpgtq", int_x86_sse42_pcmpgtq>;

def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, VR128:$src2)),
          (PCMPGTQrr VR128:$src1, VR128:$src2)>;
def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, (memop addr:$src2))),
          (PCMPGTQrm VR128:$src1, addr:$src2)>;

//===----------------------------------------------------------------------===//
// SSE4.2 - String/text Processing Instructions
//===----------------------------------------------------------------------===//

// Packed Compare Implicit Length Strings, Return Mask
multiclass pseudo_pcmpistrm<string asm> {
  def REG : PseudoI<(outs VR128:$dst),
                    (ins VR128:$src1, VR128:$src2, i8imm:$src3),
    [(set VR128:$dst, (int_x86_sse42_pcmpistrm128 VR128:$src1, VR128:$src2,
                                                  imm:$src3))]>;
  def MEM : PseudoI<(outs VR128:$dst),
                    (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
    [(set VR128:$dst, (int_x86_sse42_pcmpistrm128
                       VR128:$src1, (load addr:$src2), imm:$src3))]>;
}

let Defs = [EFLAGS], usesCustomInserter = 1 in {
  defm PCMPISTRM128 : pseudo_pcmpistrm<"#PCMPISTRM128">, Requires<[HasSSE42]>;
  defm VPCMPISTRM128 : pseudo_pcmpistrm<"#VPCMPISTRM128">, Requires<[HasAVX]>;
}

let Defs = [XMM0, EFLAGS], Predicates = [HasAVX] in {
  def VPCMPISTRM128rr : SS42AI<0x62, MRMSrcReg, (outs),
      (ins VR128:$src1, VR128:$src2, i8imm:$src3),
      "vpcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize, VEX;
  def VPCMPISTRM128rm : SS42AI<0x62, MRMSrcMem, (outs),
      (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
      "vpcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize, VEX;
}

let Defs = [XMM0, EFLAGS] in {
  def PCMPISTRM128rr : SS42AI<0x62, MRMSrcReg, (outs),
      (ins VR128:$src1, VR128:$src2, i8imm:$src3),
      "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize;
  def PCMPISTRM128rm : SS42AI<0x62, MRMSrcMem, (outs),
      (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
      "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize;
}

// Packed Compare Explicit Length Strings, Return Mask
multiclass pseudo_pcmpestrm<string asm> {
  def REG : PseudoI<(outs VR128:$dst),
                    (ins VR128:$src1, VR128:$src3, i8imm:$src5),
    [(set VR128:$dst, (int_x86_sse42_pcmpestrm128
                       VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5))]>;
  def MEM : PseudoI<(outs VR128:$dst),
                    (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
    [(set VR128:$dst, (int_x86_sse42_pcmpestrm128
                       VR128:$src1, EAX, (load addr:$src3), EDX, imm:$src5))]>;
}

let Defs = [EFLAGS], Uses = [EAX, EDX], usesCustomInserter = 1 in {
  defm PCMPESTRM128 : pseudo_pcmpestrm<"#PCMPESTRM128">, Requires<[HasSSE42]>;
  defm VPCMPESTRM128 : pseudo_pcmpestrm<"#VPCMPESTRM128">, Requires<[HasAVX]>;
}

let Predicates = [HasAVX],
    Defs = [XMM0, EFLAGS], Uses = [EAX, EDX] in {
  def VPCMPESTRM128rr : SS42AI<0x60, MRMSrcReg, (outs),
      (ins VR128:$src1, VR128:$src3, i8imm:$src5),
      "vpcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize, VEX;
  def VPCMPESTRM128rm : SS42AI<0x60, MRMSrcMem, (outs),
      (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
      "vpcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize, VEX;
}

let Defs = [XMM0, EFLAGS], Uses = [EAX, EDX] in {
  def PCMPESTRM128rr : SS42AI<0x60, MRMSrcReg, (outs),
      (ins VR128:$src1, VR128:$src3, i8imm:$src5),
      "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize;
  def PCMPESTRM128rm : SS42AI<0x60, MRMSrcMem, (outs),
      (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
      "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize;
}

// Packed Compare Implicit Length Strings, Return Index
let Defs = [ECX, EFLAGS] in {
  multiclass SS42AI_pcmpistri<Intrinsic IntId128, string asm = "pcmpistri"> {
    def rr : SS42AI<0x63, MRMSrcReg, (outs),
      (ins VR128:$src1, VR128:$src2, i8imm:$src3),
      !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
      [(set ECX, (IntId128 VR128:$src1, VR128:$src2, imm:$src3)),
       (implicit EFLAGS)]>, OpSize;
    def rm : SS42AI<0x63, MRMSrcMem, (outs),
      (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
      !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
      [(set ECX, (IntId128 VR128:$src1, (load addr:$src2), imm:$src3)),
       (implicit EFLAGS)]>, OpSize;
  }
}

let Predicates = [HasAVX] in {
defm VPCMPISTRI  : SS42AI_pcmpistri<int_x86_sse42_pcmpistri128, "vpcmpistri">,
                   VEX;
defm VPCMPISTRIA : SS42AI_pcmpistri<int_x86_sse42_pcmpistria128, "vpcmpistri">,
                   VEX;
defm VPCMPISTRIC : SS42AI_pcmpistri<int_x86_sse42_pcmpistric128, "vpcmpistri">,
                   VEX;
defm VPCMPISTRIO : SS42AI_pcmpistri<int_x86_sse42_pcmpistrio128, "vpcmpistri">,
                   VEX;
defm VPCMPISTRIS : SS42AI_pcmpistri<int_x86_sse42_pcmpistris128, "vpcmpistri">,
                   VEX;
defm VPCMPISTRIZ : SS42AI_pcmpistri<int_x86_sse42_pcmpistriz128, "vpcmpistri">,
                   VEX;
}

defm PCMPISTRI  : SS42AI_pcmpistri<int_x86_sse42_pcmpistri128>;
defm PCMPISTRIA : SS42AI_pcmpistri<int_x86_sse42_pcmpistria128>;
defm PCMPISTRIC : SS42AI_pcmpistri<int_x86_sse42_pcmpistric128>;
defm PCMPISTRIO : SS42AI_pcmpistri<int_x86_sse42_pcmpistrio128>;
defm PCMPISTRIS : SS42AI_pcmpistri<int_x86_sse42_pcmpistris128>;
defm PCMPISTRIZ : SS42AI_pcmpistri<int_x86_sse42_pcmpistriz128>;

// Packed Compare Explicit Length Strings, Return Index
let Defs = [ECX, EFLAGS], Uses = [EAX, EDX] in {
  multiclass SS42AI_pcmpestri<Intrinsic IntId128, string asm = "pcmpestri"> {
    def rr : SS42AI<0x61, MRMSrcReg, (outs),
      (ins VR128:$src1, VR128:$src3, i8imm:$src5),
      !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
      [(set ECX, (IntId128 VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5)),
       (implicit EFLAGS)]>, OpSize;
    def rm : SS42AI<0x61, MRMSrcMem, (outs),
      (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
      !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
      [(set ECX,
        (IntId128 VR128:$src1, EAX, (load addr:$src3), EDX, imm:$src5)),
       (implicit EFLAGS)]>, OpSize;
  }
}

let Predicates = [HasAVX] in {
defm VPCMPESTRI  : SS42AI_pcmpestri<int_x86_sse42_pcmpestri128, "vpcmpestri">,
                   VEX;
defm VPCMPESTRIA : SS42AI_pcmpestri<int_x86_sse42_pcmpestria128, "vpcmpestri">,
                   VEX;
defm VPCMPESTRIC : SS42AI_pcmpestri<int_x86_sse42_pcmpestric128, "vpcmpestri">,
                   VEX;
defm VPCMPESTRIO : SS42AI_pcmpestri<int_x86_sse42_pcmpestrio128, "vpcmpestri">,
                   VEX;
defm VPCMPESTRIS : SS42AI_pcmpestri<int_x86_sse42_pcmpestris128, "vpcmpestri">,
                   VEX;
defm VPCMPESTRIZ : SS42AI_pcmpestri<int_x86_sse42_pcmpestriz128, "vpcmpestri">,
                   VEX;
}

defm PCMPESTRI  : SS42AI_pcmpestri<int_x86_sse42_pcmpestri128>;
defm PCMPESTRIA : SS42AI_pcmpestri<int_x86_sse42_pcmpestria128>;
defm PCMPESTRIC : SS42AI_pcmpestri<int_x86_sse42_pcmpestric128>;
defm PCMPESTRIO : SS42AI_pcmpestri<int_x86_sse42_pcmpestrio128>;
defm PCMPESTRIS : SS42AI_pcmpestri<int_x86_sse42_pcmpestris128>;
defm PCMPESTRIZ : SS42AI_pcmpestri<int_x86_sse42_pcmpestriz128>;
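
// For reference, a minimal C-level sketch of PCMPISTRI (implicit-length form;
// ECX receives the index and EFLAGS the match summary):
//   #include <nmmintrin.h>
//   int find_any(__m128i haystack, __m128i needles) {
//     return _mm_cmpistri(needles, haystack,
//                         _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY);
//   }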

//===----------------------------------------------------------------------===//
// SSE4.2 - CRC Instructions
//===----------------------------------------------------------------------===//

// No CRC instructions have AVX equivalents

// crc intrinsic instruction
// This set of instructions is r/m only; the forms differ only in the size
// of r and m.
let Constraints = "$src1 = $dst" in {
  def CRC32r32m8  : SS42FI<0xF0, MRMSrcMem, (outs GR32:$dst),
                      (ins GR32:$src1, i8mem:$src2),
                      "crc32{b} \t{$src2, $src1|$src1, $src2}",
                       [(set GR32:$dst,
                         (int_x86_sse42_crc32_32_8 GR32:$src1,
                         (load addr:$src2)))]>;
  def CRC32r32r8  : SS42FI<0xF0, MRMSrcReg, (outs GR32:$dst),
                      (ins GR32:$src1, GR8:$src2),
                      "crc32{b} \t{$src2, $src1|$src1, $src2}",
                       [(set GR32:$dst,
                         (int_x86_sse42_crc32_32_8 GR32:$src1, GR8:$src2))]>;
  def CRC32r32m16 : SS42FI<0xF1, MRMSrcMem, (outs GR32:$dst),
                      (ins GR32:$src1, i16mem:$src2),
                      "crc32{w} \t{$src2, $src1|$src1, $src2}",
                       [(set GR32:$dst,
                         (int_x86_sse42_crc32_32_16 GR32:$src1,
                         (load addr:$src2)))]>,
                         OpSize;
  def CRC32r32r16 : SS42FI<0xF1, MRMSrcReg, (outs GR32:$dst),
                      (ins GR32:$src1, GR16:$src2),
                      "crc32{w} \t{$src2, $src1|$src1, $src2}",
                       [(set GR32:$dst,
                         (int_x86_sse42_crc32_32_16 GR32:$src1, GR16:$src2))]>,
                         OpSize;
  def CRC32r32m32 : SS42FI<0xF1, MRMSrcMem, (outs GR32:$dst),
                      (ins GR32:$src1, i32mem:$src2),
                      "crc32{l} \t{$src2, $src1|$src1, $src2}",
                       [(set GR32:$dst,
                         (int_x86_sse42_crc32_32_32 GR32:$src1,
                         (load addr:$src2)))]>;
  def CRC32r32r32 : SS42FI<0xF1, MRMSrcReg, (outs GR32:$dst),
                      (ins GR32:$src1, GR32:$src2),
                      "crc32{l} \t{$src2, $src1|$src1, $src2}",
                       [(set GR32:$dst,
                         (int_x86_sse42_crc32_32_32 GR32:$src1, GR32:$src2))]>;
  def CRC32r64m8  : SS42FI<0xF0, MRMSrcMem, (outs GR64:$dst),
                      (ins GR64:$src1, i8mem:$src2),
                      "crc32{b} \t{$src2, $src1|$src1, $src2}",
                       [(set GR64:$dst,
                         (int_x86_sse42_crc32_64_8 GR64:$src1,
                         (load addr:$src2)))]>,
                         REX_W;
  def CRC32r64r8  : SS42FI<0xF0, MRMSrcReg, (outs GR64:$dst),
                      (ins GR64:$src1, GR8:$src2),
                      "crc32{b} \t{$src2, $src1|$src1, $src2}",
                       [(set GR64:$dst,
                         (int_x86_sse42_crc32_64_8 GR64:$src1, GR8:$src2))]>,
                         REX_W;
  def CRC32r64m64 : SS42FI<0xF1, MRMSrcMem, (outs GR64:$dst),
                      (ins GR64:$src1, i64mem:$src2),
                      "crc32{q} \t{$src2, $src1|$src1, $src2}",
                       [(set GR64:$dst,
                         (int_x86_sse42_crc32_64_64 GR64:$src1,
                         (load addr:$src2)))]>,
                         REX_W;
  def CRC32r64r64 : SS42FI<0xF1, MRMSrcReg, (outs GR64:$dst),
                      (ins GR64:$src1, GR64:$src2),
                      "crc32{q} \t{$src2, $src1|$src1, $src2}",
                       [(set GR64:$dst,
                         (int_x86_sse42_crc32_64_64 GR64:$src1, GR64:$src2))]>,
                         REX_W;
}
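
// For reference, a minimal C-level sketch of the CRC32 accumulation these
// intrinsics expose (CRC-32C polynomial):
//   #include <nmmintrin.h>
//   #include <stddef.h>
//   unsigned crc32c(const unsigned char *p, size_t n, unsigned crc) {
//     for (size_t i = 0; i < n; ++i)
//       crc = _mm_crc32_u8(crc, p[i]);   // crc32b: fold in one byte
//     return crc;
//   }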

//===----------------------------------------------------------------------===//
// AES-NI Instructions
//===----------------------------------------------------------------------===//

multiclass AESI_binop_rm_int<bits<8> opc, string OpcodeStr,
                             Intrinsic IntId128, bit Is2Addr = 1> {
  def rr : AES8I<opc, MRMSrcReg, (outs VR128:$dst),
       (ins VR128:$src1, VR128:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
       OpSize;
  def rm : AES8I<opc, MRMSrcMem, (outs VR128:$dst),
       (ins VR128:$src1, i128mem:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst,
         (IntId128 VR128:$src1,
          (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
}

// Perform One Round of an AES Encryption/Decryption Flow
let Predicates = [HasAVX, HasAES] in {
  defm VAESENC          : AESI_binop_rm_int<0xDC, "vaesenc",
                            int_x86_aesni_aesenc, 0>, VEX_4V;
  defm VAESENCLAST      : AESI_binop_rm_int<0xDD, "vaesenclast",
                            int_x86_aesni_aesenclast, 0>, VEX_4V;
  defm VAESDEC          : AESI_binop_rm_int<0xDE, "vaesdec",
                            int_x86_aesni_aesdec, 0>, VEX_4V;
  defm VAESDECLAST      : AESI_binop_rm_int<0xDF, "vaesdeclast",
                            int_x86_aesni_aesdeclast, 0>, VEX_4V;
}

let Constraints = "$src1 = $dst" in {
  defm AESENC          : AESI_binop_rm_int<0xDC, "aesenc",
                           int_x86_aesni_aesenc>;
  defm AESENCLAST      : AESI_binop_rm_int<0xDD, "aesenclast",
                           int_x86_aesni_aesenclast>;
  defm AESDEC          : AESI_binop_rm_int<0xDE, "aesdec",
                           int_x86_aesni_aesdec>;
  defm AESDECLAST      : AESI_binop_rm_int<0xDF, "aesdeclast",
                           int_x86_aesni_aesdeclast>;
}

def : Pat<(v2i64 (int_x86_aesni_aesenc VR128:$src1, VR128:$src2)),
          (AESENCrr VR128:$src1, VR128:$src2)>;
def : Pat<(v2i64 (int_x86_aesni_aesenc VR128:$src1, (memop addr:$src2))),
          (AESENCrm VR128:$src1, addr:$src2)>;
def : Pat<(v2i64 (int_x86_aesni_aesenclast VR128:$src1, VR128:$src2)),
          (AESENCLASTrr VR128:$src1, VR128:$src2)>;
def : Pat<(v2i64 (int_x86_aesni_aesenclast VR128:$src1, (memop addr:$src2))),
          (AESENCLASTrm VR128:$src1, addr:$src2)>;
def : Pat<(v2i64 (int_x86_aesni_aesdec VR128:$src1, VR128:$src2)),
          (AESDECrr VR128:$src1, VR128:$src2)>;
def : Pat<(v2i64 (int_x86_aesni_aesdec VR128:$src1, (memop addr:$src2))),
          (AESDECrm VR128:$src1, addr:$src2)>;
def : Pat<(v2i64 (int_x86_aesni_aesdeclast VR128:$src1, VR128:$src2)),
          (AESDECLASTrr VR128:$src1, VR128:$src2)>;
def : Pat<(v2i64 (int_x86_aesni_aesdeclast VR128:$src1, (memop addr:$src2))),
          (AESDECLASTrm VR128:$src1, addr:$src2)>;

// Perform the AES InvMixColumn Transformation
let Predicates = [HasAVX, HasAES] in {
  def VAESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
      (ins VR128:$src1),
      "vaesimc\t{$src1, $dst|$dst, $src1}",
      [(set VR128:$dst,
        (int_x86_aesni_aesimc VR128:$src1))]>,
      OpSize, VEX;
  def VAESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
      (ins i128mem:$src1),
      "vaesimc\t{$src1, $dst|$dst, $src1}",
      [(set VR128:$dst,
        (int_x86_aesni_aesimc (bitconvert (memopv2i64 addr:$src1))))]>,
      OpSize, VEX;
}
def AESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
  (ins VR128:$src1),
  "aesimc\t{$src1, $dst|$dst, $src1}",
  [(set VR128:$dst,
    (int_x86_aesni_aesimc VR128:$src1))]>,
  OpSize;
def AESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
  (ins i128mem:$src1),
  "aesimc\t{$src1, $dst|$dst, $src1}",
  [(set VR128:$dst,
    (int_x86_aesni_aesimc (bitconvert (memopv2i64 addr:$src1))))]>,
  OpSize;

// AES Round Key Generation Assist
let Predicates = [HasAVX, HasAES] in {
  def VAESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
      (ins VR128:$src1, i8imm:$src2),
      "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
      [(set VR128:$dst,
        (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
      OpSize, VEX;
  def VAESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
      (ins i128mem:$src1, i8imm:$src2),
      "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
      [(set VR128:$dst,
        (int_x86_aesni_aeskeygenassist (bitconvert (memopv2i64 addr:$src1)),
                                       imm:$src2))]>,
      OpSize, VEX;
}
def AESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
  (ins VR128:$src1, i8imm:$src2),
  "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
  [(set VR128:$dst,
    (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
  OpSize;
def AESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
  (ins i128mem:$src1, i8imm:$src2),
  "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
  [(set VR128:$dst,
    (int_x86_aesni_aeskeygenassist (bitconvert (memopv2i64 addr:$src1)),
                                   imm:$src2))]>,
  OpSize;

//===----------------------------------------------------------------------===//
// CLMUL Instructions
//===----------------------------------------------------------------------===//

// Carry-less Multiplication instructions
let Constraints = "$src1 = $dst" in {
def PCLMULQDQrr : CLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
           (ins VR128:$src1, VR128:$src2, i8imm:$src3),
           "pclmulqdq\t{$src3, $src2, $dst|$dst, $src2, $src3}",
           []>;

def PCLMULQDQrm : CLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
           (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
           "pclmulqdq\t{$src3, $src2, $dst|$dst, $src2, $src3}",
           []>;
}

// AVX carry-less Multiplication instructions
def VPCLMULQDQrr : AVXCLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
           (ins VR128:$src1, VR128:$src2, i8imm:$src3),
           "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
           []>;

def VPCLMULQDQrm : AVXCLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
           (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
           "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
           []>;

multiclass pclmul_alias<string asm, int immop> {
  def : InstAlias<!strconcat("pclmul", asm,
                             "dq {$src, $dst|$dst, $src}"),
                  (PCLMULQDQrr VR128:$dst, VR128:$src, immop)>;

  def : InstAlias<!strconcat("pclmul", asm,
                             "dq {$src, $dst|$dst, $src}"),
                  (PCLMULQDQrm VR128:$dst, i128mem:$src, immop)>;

  def : InstAlias<!strconcat("vpclmul", asm,
                             "dq {$src2, $src1, $dst|$dst, $src1, $src2}"),
                  (VPCLMULQDQrr VR128:$dst, VR128:$src1, VR128:$src2, immop)>;

  def : InstAlias<!strconcat("vpclmul", asm,
                             "dq {$src2, $src1, $dst|$dst, $src1, $src2}"),
                  (VPCLMULQDQrm VR128:$dst, VR128:$src1, i128mem:$src2, immop)>;
}
defm : pclmul_alias<"hqhq", 0x11>;
defm : pclmul_alias<"hqlq", 0x01>;
defm : pclmul_alias<"lqhq", 0x10>;
defm : pclmul_alias<"lqlq", 0x00>;
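
// For reference, a small C sketch of PCLMULQDQ: bits 0 and 4 of the immediate
// pick which 64-bit half of each source enters the carry-less multiply:
//   #include <wmmintrin.h>
//   __m128i clmul_low_halves(__m128i a, __m128i b) {
//     return _mm_clmulepi64_si128(a, b, 0x00);   // pclmullqlqdq alias above
//   }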

//===----------------------------------------------------------------------===//
// AVX Instructions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// VBROADCAST - Load from memory and broadcast to all elements of the
//              destination operand
//===----------------------------------------------------------------------===//
class avx_broadcast<bits<8> opc, string OpcodeStr, RegisterClass RC,
                    X86MemOperand x86memop, Intrinsic Int> :
  AVX8I<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
        !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
        [(set RC:$dst, (Int addr:$src))]>, VEX;

def VBROADCASTSS   : avx_broadcast<0x18, "vbroadcastss", VR128, f32mem,
                                   int_x86_avx_vbroadcastss>;
def VBROADCASTSSY  : avx_broadcast<0x18, "vbroadcastss", VR256, f32mem,
                                   int_x86_avx_vbroadcastss_256>;
def VBROADCASTSD   : avx_broadcast<0x19, "vbroadcastsd", VR256, f64mem,
                                   int_x86_avx_vbroadcast_sd_256>;
def VBROADCASTF128 : avx_broadcast<0x1A, "vbroadcastf128", VR256, f128mem,
                                   int_x86_avx_vbroadcastf128_pd_256>;

def : Pat<(int_x86_avx_vbroadcastf128_ps_256 addr:$src),
          (VBROADCASTF128 addr:$src)>;

def : Pat<(v8i32 (X86VBroadcast (loadi32 addr:$src))),
          (VBROADCASTSSY addr:$src)>;
def : Pat<(v4i64 (X86VBroadcast (loadi64 addr:$src))),
          (VBROADCASTSD addr:$src)>;
def : Pat<(v8f32 (X86VBroadcast (loadf32 addr:$src))),
          (VBROADCASTSSY addr:$src)>;
def : Pat<(v4f64 (X86VBroadcast (loadf64 addr:$src))),
          (VBROADCASTSD addr:$src)>;

def : Pat<(v4f32 (X86VBroadcast (loadf32 addr:$src))),
          (VBROADCASTSS addr:$src)>;
def : Pat<(v4i32 (X86VBroadcast (loadi32 addr:$src))),
          (VBROADCASTSS addr:$src)>;
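
// For reference, a minimal C-level sketch of VBROADCASTSS (AVX intrinsic;
// loads one float and splats it across all lanes):
//   #include <immintrin.h>
//   __m256 splat8(const float *p) {
//     return _mm256_broadcast_ss(p);   // vbroadcastss (%rdi), %ymm0
//   }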

//===----------------------------------------------------------------------===//
// VINSERTF128 - Insert packed floating-point values
//===----------------------------------------------------------------------===//
def VINSERTF128rr : AVXAIi8<0x18, MRMSrcReg, (outs VR256:$dst),
          (ins VR256:$src1, VR128:$src2, i8imm:$src3),
          "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, VEX_4V;
def VINSERTF128rm : AVXAIi8<0x18, MRMSrcMem, (outs VR256:$dst),
          (ins VR256:$src1, f128mem:$src2, i8imm:$src3),
          "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, VEX_4V;

def : Pat<(int_x86_avx_vinsertf128_pd_256 VR256:$src1, VR128:$src2, imm:$src3),
          (VINSERTF128rr VR256:$src1, VR128:$src2, imm:$src3)>;
def : Pat<(int_x86_avx_vinsertf128_ps_256 VR256:$src1, VR128:$src2, imm:$src3),
          (VINSERTF128rr VR256:$src1, VR128:$src2, imm:$src3)>;
def : Pat<(int_x86_avx_vinsertf128_si_256 VR256:$src1, VR128:$src2, imm:$src3),
          (VINSERTF128rr VR256:$src1, VR128:$src2, imm:$src3)>;

def : Pat<(vinsertf128_insert:$ins (v8f32 VR256:$src1), (v4f32 VR128:$src2),
                                   (i32 imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v4f64 VR256:$src1), (v2f64 VR128:$src2),
                                   (i32 imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v8i32 VR256:$src1), (v4i32 VR128:$src2),
                                   (i32 imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v4i64 VR256:$src1), (v2i64 VR128:$src2),
                                   (i32 imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v32i8 VR256:$src1), (v16i8 VR128:$src2),
                                   (i32 imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v16i16 VR256:$src1), (v8i16 VR128:$src2),
                                   (i32 imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;

//===----------------------------------------------------------------------===//
// VEXTRACTF128 - Extract packed floating-point values
//===----------------------------------------------------------------------===//
def VEXTRACTF128rr : AVXAIi8<0x19, MRMDestReg, (outs VR128:$dst),
          (ins VR256:$src1, i8imm:$src2),
          "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
          []>, VEX;
def VEXTRACTF128mr : AVXAIi8<0x19, MRMDestMem, (outs),
          (ins f128mem:$dst, VR256:$src1, i8imm:$src2),
          "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
          []>, VEX;

def : Pat<(int_x86_avx_vextractf128_pd_256 VR256:$src1, imm:$src2),
          (VEXTRACTF128rr VR256:$src1, imm:$src2)>;
def : Pat<(int_x86_avx_vextractf128_ps_256 VR256:$src1, imm:$src2),
          (VEXTRACTF128rr VR256:$src1, imm:$src2)>;
def : Pat<(int_x86_avx_vextractf128_si_256 VR256:$src1, imm:$src2),
          (VEXTRACTF128rr VR256:$src1, imm:$src2)>;

def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
          (v4f32 (VEXTRACTF128rr
                    (v8f32 VR256:$src1),
                    (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
          (v2f64 (VEXTRACTF128rr
                    (v4f64 VR256:$src1),
                    (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
          (v4i32 (VEXTRACTF128rr
                    (v8i32 VR256:$src1),
                    (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
          (v2i64 (VEXTRACTF128rr
                    (v4i64 VR256:$src1),
                    (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
          (v8i16 (VEXTRACTF128rr
                    (v16i16 VR256:$src1),
                    (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
          (v16i8 (VEXTRACTF128rr
                    (v32i8 VR256:$src1),
                    (EXTRACT_get_vextractf128_imm VR128:$ext)))>;

//===----------------------------------------------------------------------===//
// VMASKMOV - Conditional SIMD Packed Loads and Stores
//===----------------------------------------------------------------------===//
multiclass avx_movmask_rm<bits<8> opc_rm, bits<8> opc_mr, string OpcodeStr,
                          Intrinsic IntLd, Intrinsic IntLd256,
                          Intrinsic IntSt, Intrinsic IntSt256,
                          PatFrag pf128, PatFrag pf256> {
  def rm  : AVX8I<opc_rm, MRMSrcMem, (outs VR128:$dst),
             (ins VR128:$src1, f128mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR128:$dst, (IntLd addr:$src2, VR128:$src1))]>,
             VEX_4V;
  def Yrm : AVX8I<opc_rm, MRMSrcMem, (outs VR256:$dst),
             (ins VR256:$src1, f256mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR256:$dst, (IntLd256 addr:$src2, VR256:$src1))]>,
             VEX_4V;
  def mr  : AVX8I<opc_mr, MRMDestMem, (outs),
             (ins f128mem:$dst, VR128:$src1, VR128:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(IntSt addr:$dst, VR128:$src1, VR128:$src2)]>, VEX_4V;
  def Ymr : AVX8I<opc_mr, MRMDestMem, (outs),
             (ins f256mem:$dst, VR256:$src1, VR256:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(IntSt256 addr:$dst, VR256:$src1, VR256:$src2)]>, VEX_4V;
}

defm VMASKMOVPS : avx_movmask_rm<0x2C, 0x2E, "vmaskmovps",
                                 int_x86_avx_maskload_ps,
                                 int_x86_avx_maskload_ps_256,
                                 int_x86_avx_maskstore_ps,
                                 int_x86_avx_maskstore_ps_256,
                                 memopv4f32, memopv8f32>;
defm VMASKMOVPD : avx_movmask_rm<0x2D, 0x2F, "vmaskmovpd",
                                 int_x86_avx_maskload_pd,
                                 int_x86_avx_maskload_pd_256,
                                 int_x86_avx_maskstore_pd,
                                 int_x86_avx_maskstore_pd_256,
                                 memopv2f64, memopv4f64>;

//===----------------------------------------------------------------------===//
// VPERMIL - Permute Single and Double Floating-Point Values
//===----------------------------------------------------------------------===//
multiclass avx_permil<bits<8> opc_rm, bits<8> opc_rmi, string OpcodeStr,
                      RegisterClass RC, X86MemOperand x86memop_f,
                      X86MemOperand x86memop_i, PatFrag f_frag, PatFrag i_frag,
                      Intrinsic IntVar, Intrinsic IntImm> {
  def rr  : AVX8I<opc_rm, MRMSrcReg, (outs RC:$dst),
             (ins RC:$src1, RC:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst, (IntVar RC:$src1, RC:$src2))]>, VEX_4V;
  def rm  : AVX8I<opc_rm, MRMSrcMem, (outs RC:$dst),
             (ins RC:$src1, x86memop_i:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst, (IntVar RC:$src1, (i_frag addr:$src2)))]>, VEX_4V;

  def ri  : AVXAIi8<opc_rmi, MRMSrcReg, (outs RC:$dst),
             (ins RC:$src1, i8imm:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst, (IntImm RC:$src1, imm:$src2))]>, VEX;
  def mi  : AVXAIi8<opc_rmi, MRMSrcMem, (outs RC:$dst),
             (ins x86memop_f:$src1, i8imm:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst, (IntImm (f_frag addr:$src1), imm:$src2))]>, VEX;
}

defm VPERMILPS  : avx_permil<0x0C, 0x04, "vpermilps", VR128, f128mem, i128mem,
                             memopv4f32, memopv4i32,
                             int_x86_avx_vpermilvar_ps,
                             int_x86_avx_vpermil_ps>;
defm VPERMILPSY : avx_permil<0x0C, 0x04, "vpermilps", VR256, f256mem, i256mem,
                             memopv8f32, memopv8i32,
                             int_x86_avx_vpermilvar_ps_256,
                             int_x86_avx_vpermil_ps_256>;
defm VPERMILPD  : avx_permil<0x0D, 0x05, "vpermilpd", VR128, f128mem, i128mem,
                             memopv2f64, memopv2i64,
                             int_x86_avx_vpermilvar_pd,
                             int_x86_avx_vpermil_pd>;
defm VPERMILPDY : avx_permil<0x0D, 0x05, "vpermilpd", VR256, f256mem, i256mem,
                             memopv4f64, memopv4i64,
                             int_x86_avx_vpermilvar_pd_256,
                             int_x86_avx_vpermil_pd_256>;

def : Pat<(v8f32 (X86VPermilpsy VR256:$src1, (i8 imm:$imm))),
          (VPERMILPSYri VR256:$src1, imm:$imm)>;
def : Pat<(v4f64 (X86VPermilpdy VR256:$src1, (i8 imm:$imm))),
          (VPERMILPDYri VR256:$src1, imm:$imm)>;
def : Pat<(v8i32 (X86VPermilpsy VR256:$src1, (i8 imm:$imm))),
          (VPERMILPSYri VR256:$src1, imm:$imm)>;
def : Pat<(v4i64 (X86VPermilpdy VR256:$src1, (i8 imm:$imm))),
          (VPERMILPDYri VR256:$src1, imm:$imm)>;

//===----------------------------------------------------------------------===//
// VPERM2F128 - Permute Floating-Point Values in 128-bit chunks
//===----------------------------------------------------------------------===//
def VPERM2F128rr : AVXAIi8<0x06, MRMSrcReg, (outs VR256:$dst),
          (ins VR256:$src1, VR256:$src2, i8imm:$src3),
          "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, VEX_4V;
def VPERM2F128rm : AVXAIi8<0x06, MRMSrcMem, (outs VR256:$dst),
          (ins VR256:$src1, f256mem:$src2, i8imm:$src3),
          "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, VEX_4V;

def : Pat<(int_x86_avx_vperm2f128_ps_256 VR256:$src1, VR256:$src2, imm:$src3),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$src3)>;
def : Pat<(int_x86_avx_vperm2f128_pd_256 VR256:$src1, VR256:$src2, imm:$src3),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$src3)>;
def : Pat<(int_x86_avx_vperm2f128_si_256 VR256:$src1, VR256:$src2, imm:$src3),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$src3)>;

def : Pat<(int_x86_avx_vperm2f128_ps_256
                  VR256:$src1, (memopv8f32 addr:$src2), imm:$src3),
          (VPERM2F128rm VR256:$src1, addr:$src2, imm:$src3)>;
def : Pat<(int_x86_avx_vperm2f128_pd_256
                  VR256:$src1, (memopv4f64 addr:$src2), imm:$src3),
          (VPERM2F128rm VR256:$src1, addr:$src2, imm:$src3)>;
def : Pat<(int_x86_avx_vperm2f128_si_256
                  VR256:$src1, (memopv8i32 addr:$src2), imm:$src3),
          (VPERM2F128rm VR256:$src1, addr:$src2, imm:$src3)>;

def : Pat<(v8f32 (X86VPerm2f128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v8i32 (X86VPerm2f128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v4i64 (X86VPerm2f128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v4f64 (X86VPerm2f128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v32i8 (X86VPerm2f128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v16i16 (X86VPerm2f128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;

//===----------------------------------------------------------------------===//
// VZERO - Zero YMM registers
//===----------------------------------------------------------------------===//
let Defs = [YMM0, YMM1, YMM2, YMM3, YMM4, YMM5, YMM6, YMM7,
            YMM8, YMM9, YMM10, YMM11, YMM12, YMM13, YMM14, YMM15] in {
  // Zero All YMM registers
  def VZEROALL : I<0x77, RawFrm, (outs), (ins), "vzeroall",
                   [(int_x86_avx_vzeroall)]>, TB, VEX, VEX_L, Requires<[HasAVX]>;

  // Zero Upper bits of YMM registers
  def VZEROUPPER : I<0x77, RawFrm, (outs), (ins), "vzeroupper",
                     [(int_x86_avx_vzeroupper)]>, TB, VEX, Requires<[HasAVX]>;