//====- X86InstrSSE.td - Describe the X86 Instruction Set --*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the X86 SSE instruction set, defining the instructions,
// and properties of the instructions which are needed for code generation,
// machine code emission, and analysis.
//
//===----------------------------------------------------------------------===//


//===----------------------------------------------------------------------===//
// SSE 1 & 2 Instructions Classes
//===----------------------------------------------------------------------===//

/// sse12_fp_scalar - SSE 1 & 2 scalar instructions class
multiclass sse12_fp_scalar<bits<8> opc, string OpcodeStr, SDNode OpNode,
                           RegisterClass RC, X86MemOperand x86memop,
                           bit Is2Addr = 1> {
  let isCommutable = 1 in {
    def rr : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (OpNode RC:$src1, RC:$src2))]>;
  }
  def rm : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (OpNode RC:$src1, (load addr:$src2)))]>;
}
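
// Illustrative sketch only (a hypothetical instantiation, not one defined at
// this point in the file):
//   defm FOO : sse12_fp_scalar<0x58, "addss", fadd, FR32, f32mem>;
// would produce FOOrr and FOOrm records, with the asm string selected by
// Is2Addr: the two-operand "$src2, $dst" SSE form when Is2Addr = 1, or the
// three-operand "$src2, $src1, $dst" AVX form when Is2Addr = 0.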

/// sse12_fp_scalar_int - SSE 1 & 2 scalar instructions intrinsics class
multiclass sse12_fp_scalar_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
                               string asm, string SSEVer, string FPSizeStr,
                               Operand memopr, ComplexPattern mem_cpat,
                               bit Is2Addr = 1> {
  def rr_Int : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (!cast<Intrinsic>(
                 !strconcat("int_x86_sse", SSEVer, "_", OpcodeStr, FPSizeStr))
             RC:$src1, RC:$src2))]>;
  def rm_Int : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, memopr:$src2),
       !if(Is2Addr,
           !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (!cast<Intrinsic>(!strconcat("int_x86_sse",
                                          SSEVer, "_", OpcodeStr, FPSizeStr))
             RC:$src1, mem_cpat:$src2))]>;
}
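
// Note how the intrinsic is looked up with !strconcat plus !cast<Intrinsic>:
// with hypothetical arguments SSEVer = "2", OpcodeStr = "mul" and
// FPSizeStr = "_sd", the pattern above would resolve to int_x86_sse2_mul_sd.
// The name pieces come from the defm site, so this class never hard-codes a
// particular intrinsic.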

/// sse12_fp_packed - SSE 1 & 2 packed instructions class
multiclass sse12_fp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
                           RegisterClass RC, ValueType vt,
                           X86MemOperand x86memop, PatFrag mem_frag,
                           Domain d, bit Is2Addr = 1> {
  let isCommutable = 1 in
    def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], d>;

  def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (OpNode RC:$src1, (mem_frag addr:$src2)))], d>;
}

/// sse12_fp_packed_logical_rm - SSE 1 & 2 packed instructions class
multiclass sse12_fp_packed_logical_rm<bits<8> opc, RegisterClass RC, Domain d,
                                      string OpcodeStr, X86MemOperand x86memop,
                                      list<dag> pat_rr, list<dag> pat_rm,
                                      bit Is2Addr = 1> {
  let isCommutable = 1 in
    def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       pat_rr, d>;
  def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       pat_rm, d>;
}

/// sse12_fp_packed_int - SSE 1 & 2 packed instructions intrinsics class
multiclass sse12_fp_packed_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
                               string asm, string SSEVer, string FPSizeStr,
                               X86MemOperand x86memop, PatFrag mem_frag,
                               Domain d, bit Is2Addr = 1> {
  def rr_Int : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (!cast<Intrinsic>(
                     !strconcat("int_x86_", SSEVer, "_", OpcodeStr, FPSizeStr))
             RC:$src1, RC:$src2))], d>;
  def rm_Int : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1,x86memop:$src2),
       !if(Is2Addr,
           !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (!cast<Intrinsic>(
                     !strconcat("int_x86_", SSEVer, "_", OpcodeStr, FPSizeStr))
             RC:$src1, (mem_frag addr:$src2)))], d>;
}
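
// A sketch of how the two packed classes divide the work (the example values
// below are hypothetical, not definitions from this file): sse12_fp_packed
// matches a generic SDNode such as fadd on v4f32/v2f64, while
// sse12_fp_packed_int matches an explicit target intrinsic whose name is
// assembled from SSEVer/OpcodeStr/FPSizeStr, e.g. "int_x86_" # "sse" # "_" #
// "max" # "_ps" -> int_x86_sse_max_ps, so front-end builtins that must map to
// one exact instruction are still selected even when no generic node exists.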

//===----------------------------------------------------------------------===//
// Non-instruction patterns
//===----------------------------------------------------------------------===//

def : Pat<(f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
          (f32 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;

// Implicitly promote a 32-bit scalar to a vector.
def : Pat<(v4f32 (scalar_to_vector FR32:$src)),
          (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src, sub_ss)>;
def : Pat<(v8f32 (scalar_to_vector FR32:$src)),
          (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)), FR32:$src, sub_ss)>;
// Implicitly promote a 64-bit scalar to a vector.
def : Pat<(v2f64 (scalar_to_vector FR64:$src)),
          (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src, sub_sd)>;
def : Pat<(v4f64 (scalar_to_vector FR64:$src)),
          (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)), FR64:$src, sub_sd)>;

// Bitcasts between 128-bit vector types. Return the original type since
// no instruction is needed for the conversion.
let Predicates = [HasXMMInt] in {
  def : Pat<(v2i64 (bitconvert (v4i32 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v2i64 (bitconvert (v8i16 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v2i64 (bitconvert (v16i8 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v2i64 (bitconvert (v2f64 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v2i64 (bitconvert (v4f32 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v2i64 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v8i16 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v16i8 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v2f64 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v4f32 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v2i64 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v4i32 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v16i8 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v2f64 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v4f32 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v2i64 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v4i32 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v8i16 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v2f64 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v4f32 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v2i64 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v4i32 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v8i16 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v16i8 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v2f64 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v2i64 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v4i32 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v8i16 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v16i8 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v4f32 VR128:$src))), (v2f64 VR128:$src)>;
}

// Bitcasts between 256-bit vector types. Return the original type since
// no instruction is needed for the conversion.
let Predicates = [HasAVX] in {
  def : Pat<(v4f64 (bitconvert (v8f32 VR256:$src))), (v4f64 VR256:$src)>;
  def : Pat<(v4f64 (bitconvert (v8i32 VR256:$src))), (v4f64 VR256:$src)>;
  def : Pat<(v4f64 (bitconvert (v4i64 VR256:$src))), (v4f64 VR256:$src)>;
  def : Pat<(v4f64 (bitconvert (v16i16 VR256:$src))), (v4f64 VR256:$src)>;
  def : Pat<(v4f64 (bitconvert (v32i8 VR256:$src))), (v4f64 VR256:$src)>;
  def : Pat<(v8f32 (bitconvert (v8i32 VR256:$src))), (v8f32 VR256:$src)>;
  def : Pat<(v8f32 (bitconvert (v4i64 VR256:$src))), (v8f32 VR256:$src)>;
  def : Pat<(v8f32 (bitconvert (v4f64 VR256:$src))), (v8f32 VR256:$src)>;
  def : Pat<(v8f32 (bitconvert (v32i8 VR256:$src))), (v8f32 VR256:$src)>;
  def : Pat<(v8f32 (bitconvert (v16i16 VR256:$src))), (v8f32 VR256:$src)>;
  def : Pat<(v4i64 (bitconvert (v8f32 VR256:$src))), (v4i64 VR256:$src)>;
  def : Pat<(v4i64 (bitconvert (v8i32 VR256:$src))), (v4i64 VR256:$src)>;
  def : Pat<(v4i64 (bitconvert (v4f64 VR256:$src))), (v4i64 VR256:$src)>;
  def : Pat<(v4i64 (bitconvert (v32i8 VR256:$src))), (v4i64 VR256:$src)>;
  def : Pat<(v4i64 (bitconvert (v16i16 VR256:$src))), (v4i64 VR256:$src)>;
  def : Pat<(v32i8 (bitconvert (v4f64 VR256:$src))), (v32i8 VR256:$src)>;
  def : Pat<(v32i8 (bitconvert (v4i64 VR256:$src))), (v32i8 VR256:$src)>;
  def : Pat<(v32i8 (bitconvert (v8f32 VR256:$src))), (v32i8 VR256:$src)>;
  def : Pat<(v32i8 (bitconvert (v8i32 VR256:$src))), (v32i8 VR256:$src)>;
  def : Pat<(v32i8 (bitconvert (v16i16 VR256:$src))), (v32i8 VR256:$src)>;
  def : Pat<(v8i32 (bitconvert (v32i8 VR256:$src))), (v8i32 VR256:$src)>;
  def : Pat<(v8i32 (bitconvert (v16i16 VR256:$src))), (v8i32 VR256:$src)>;
  def : Pat<(v8i32 (bitconvert (v8f32 VR256:$src))), (v8i32 VR256:$src)>;
  def : Pat<(v8i32 (bitconvert (v4i64 VR256:$src))), (v8i32 VR256:$src)>;
  def : Pat<(v8i32 (bitconvert (v4f64 VR256:$src))), (v8i32 VR256:$src)>;
  def : Pat<(v16i16 (bitconvert (v8f32 VR256:$src))), (v16i16 VR256:$src)>;
  def : Pat<(v16i16 (bitconvert (v8i32 VR256:$src))), (v16i16 VR256:$src)>;
  def : Pat<(v16i16 (bitconvert (v4i64 VR256:$src))), (v16i16 VR256:$src)>;
  def : Pat<(v16i16 (bitconvert (v4f64 VR256:$src))), (v16i16 VR256:$src)>;
  def : Pat<(v16i16 (bitconvert (v32i8 VR256:$src))), (v16i16 VR256:$src)>;
}
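
// These Pat<> entries turn a (bitconvert ...) node into its operand with no
// code emitted: for example, an IR "bitcast <4 x i32> to <2 x i64>" selected
// under HasXMMInt simply reuses the same XMM register.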

//===----------------------------------------------------------------------===//
// AVX & SSE - Zero/One Vectors
//===----------------------------------------------------------------------===//

// Alias instructions that map zero vector to pxor / xorp* for SSE.
// We set canFoldAsLoad because this can be converted to a constant-pool
// load of an all-zeros value if folding it would be beneficial.
// FIXME: Change encoding to pseudo! This is blocked right now by the x86
// JIT implementation; it does not expand the instructions below like
// X86MCInstLower does.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
    isCodeGenOnly = 1 in {
def V_SET0PS : PSI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
                   [(set VR128:$dst, (v4f32 immAllZerosV))]>;
def V_SET0PD : PDI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
                   [(set VR128:$dst, (v2f64 immAllZerosV))]>;
let ExeDomain = SSEPackedInt in
def V_SET0PI : PDI<0xEF, MRMInitReg, (outs VR128:$dst), (ins), "",
                   [(set VR128:$dst, (v4i32 immAllZerosV))]>;
}

// The same as done above but for AVX. The 128-bit versions are the
// same, but re-encoded. The 256-bit versions do not support a PI form, and
// do not need one: on Sandy Bridge the register is set to zero
// at the rename stage without using any execution unit, so SET0PSY
// and SET0PDY can be used for vector int instructions without penalty.
// FIXME: Change encoding to pseudo! This is blocked right now by the x86
// JIT implementation; it does not expand the instructions below like
// X86MCInstLower does.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
    isCodeGenOnly = 1, Predicates = [HasAVX] in {
def AVX_SET0PS  : PSI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
                      [(set VR128:$dst, (v4f32 immAllZerosV))]>, VEX_4V;
def AVX_SET0PD  : PDI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
                      [(set VR128:$dst, (v2f64 immAllZerosV))]>, VEX_4V;
def AVX_SET0PSY : PSI<0x57, MRMInitReg, (outs VR256:$dst), (ins), "",
                      [(set VR256:$dst, (v8f32 immAllZerosV))]>, VEX_4V;
def AVX_SET0PDY : PDI<0x57, MRMInitReg, (outs VR256:$dst), (ins), "",
                      [(set VR256:$dst, (v4f64 immAllZerosV))]>, VEX_4V;
let ExeDomain = SSEPackedInt in
def AVX_SET0PI  : PDI<0xEF, MRMInitReg, (outs VR128:$dst), (ins), "",
                      [(set VR128:$dst, (v4i32 immAllZerosV))]>;
}

def : Pat<(v2i64 immAllZerosV), (V_SET0PI)>;
def : Pat<(v8i16 immAllZerosV), (V_SET0PI)>;
def : Pat<(v16i8 immAllZerosV), (V_SET0PI)>;

// AVX has no support for 256-bit integer instructions, but since the 128-bit
// VPXOR instruction writes zero to its upper part, it's safe to build zeros.
def : Pat<(v8i32 immAllZerosV), (SUBREG_TO_REG (i32 0), (AVX_SET0PI), sub_xmm)>;
def : Pat<(bc_v8i32 (v8f32 immAllZerosV)),
          (SUBREG_TO_REG (i32 0), (AVX_SET0PI), sub_xmm)>;

def : Pat<(v4i64 immAllZerosV), (SUBREG_TO_REG (i64 0), (AVX_SET0PI), sub_xmm)>;
def : Pat<(bc_v4i64 (v8f32 immAllZerosV)),
          (SUBREG_TO_REG (i64 0), (AVX_SET0PI), sub_xmm)>;
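
// The SUBREG_TO_REG wrapper records that writing the 128-bit zero with
// AVX_SET0PI also leaves bits 255:128 of the enclosing YMM register zeroed (a
// VEX encoding guarantee), so a 256-bit zero needs no additional instruction.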

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move Instructions
//===----------------------------------------------------------------------===//

class sse12_move_rr<RegisterClass RC, ValueType vt, string asm> :
      SI<0x10, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, RC:$src2), asm,
      [(set (vt VR128:$dst), (movl VR128:$src1, (scalar_to_vector RC:$src2)))]>;

// Loading from memory automatically zeroes the upper bits.
class sse12_move_rm<RegisterClass RC, X86MemOperand x86memop,
                    PatFrag mem_pat, string OpcodeStr> :
      SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
         !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
         [(set RC:$dst, (mem_pat addr:$src))]>;

// Move Instructions. Register-to-register movss/movsd is not used for FR32/64
// register copies because it's a partial register update; FsMOVAPSrr/FsMOVAPDrr
// is used instead. Register-to-register movss/movsd is not modeled as an
// INSERT_SUBREG because INSERT_SUBREG requires that the insert be implementable
// in terms of a copy, and, as just mentioned, we don't use movss/movsd for
// copies.
def VMOVSSrr : sse12_move_rr<FR32, v4f32,
                "movss\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XS, VEX_4V;
def VMOVSDrr : sse12_move_rr<FR64, v2f64,
                "movsd\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XD, VEX_4V;

let canFoldAsLoad = 1, isReMaterializable = 1 in {
  def VMOVSSrm : sse12_move_rm<FR32, f32mem, loadf32, "movss">, XS, VEX;

  let AddedComplexity = 20 in
    def VMOVSDrm : sse12_move_rm<FR64, f64mem, loadf64, "movsd">, XD, VEX;
}

let Constraints = "$src1 = $dst" in {
  def MOVSSrr : sse12_move_rr<FR32, v4f32,
                  "movss\t{$src2, $dst|$dst, $src2}">, XS;
  def MOVSDrr : sse12_move_rr<FR64, v2f64,
                  "movsd\t{$src2, $dst|$dst, $src2}">, XD;
}

let canFoldAsLoad = 1, isReMaterializable = 1 in {
  def MOVSSrm : sse12_move_rm<FR32, f32mem, loadf32, "movss">, XS;

  let AddedComplexity = 20 in
    def MOVSDrm : sse12_move_rm<FR64, f64mem, loadf64, "movsd">, XD;
}

let AddedComplexity = 15 in {
// Extract the low 32-bit value from one vector and insert it into another.
def : Pat<(v4f32 (movl VR128:$src1, VR128:$src2)),
          (MOVSSrr (v4f32 VR128:$src1),
                   (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_ss))>;
// Extract the low 64-bit value from one vector and insert it into another.
def : Pat<(v2f64 (movl VR128:$src1, VR128:$src2)),
          (MOVSDrr (v2f64 VR128:$src1),
                   (EXTRACT_SUBREG (v2f64 VR128:$src2), sub_sd))>;
}

let AddedComplexity = 20 in {
let Predicates = [HasSSE1] in {
  // MOVSSrm zeros the high parts of the register; represent this
  // with SUBREG_TO_REG.
  def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
            (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
  def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
            (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
  def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
            (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
}
let Predicates = [HasSSE2] in {
  // MOVSDrm zeros the high parts of the register; represent this
  // with SUBREG_TO_REG.
  def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
            (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
  def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
            (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
  def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
            (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
  def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
            (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
  def : Pat<(v2f64 (X86vzload addr:$src)),
            (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
}
}

let AddedComplexity = 20, Predicates = [HasAVX] in {
  // MOVSSrm zeros the high parts of the register; represent this
  // with SUBREG_TO_REG. The AVX versions also write: DST[255:128] <- 0
  def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
            (SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_ss)>;
  def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
            (SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_ss)>;
  def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
            (SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_ss)>;
  // MOVSDrm zeros the high parts of the register; represent this
  // with SUBREG_TO_REG. The AVX versions also write: DST[255:128] <- 0
  def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
            (SUBREG_TO_REG (i64 0), (VMOVSDrm addr:$src), sub_sd)>;
  def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
            (SUBREG_TO_REG (i64 0), (VMOVSDrm addr:$src), sub_sd)>;
  def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
            (SUBREG_TO_REG (i64 0), (VMOVSDrm addr:$src), sub_sd)>;
  def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
            (SUBREG_TO_REG (i64 0), (VMOVSDrm addr:$src), sub_sd)>;
  def : Pat<(v2f64 (X86vzload addr:$src)),
            (SUBREG_TO_REG (i64 0), (VMOVSDrm addr:$src), sub_sd)>;
  // Represent the same patterns above but in the form they appear for
  // 256-bit types
  def : Pat<(v8f32 (X86vzmovl (insert_subvector undef,
                   (v4f32 (scalar_to_vector (loadf32 addr:$src))), (i32 0)))),
            (SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_ss)>;
  def : Pat<(v4f64 (X86vzmovl (insert_subvector undef,
                   (v2f64 (scalar_to_vector (loadf64 addr:$src))), (i32 0)))),
            (SUBREG_TO_REG (i32 0), (VMOVSDrm addr:$src), sub_sd)>;
}

// Store scalar value to memory.
def MOVSSmr : SSI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
                  "movss\t{$src, $dst|$dst, $src}",
                  [(store FR32:$src, addr:$dst)]>;
def MOVSDmr : SDI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
                  "movsd\t{$src, $dst|$dst, $src}",
                  [(store FR64:$src, addr:$dst)]>;

def VMOVSSmr : SI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
                  "movss\t{$src, $dst|$dst, $src}",
                  [(store FR32:$src, addr:$dst)]>, XS, VEX;
def VMOVSDmr : SI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
                  "movsd\t{$src, $dst|$dst, $src}",
                  [(store FR64:$src, addr:$dst)]>, XD, VEX;

// Extract and store.
def : Pat<(store (f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
                 addr:$dst),
          (MOVSSmr addr:$dst,
                   (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
def : Pat<(store (f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
                 addr:$dst),
          (MOVSDmr addr:$dst,
                   (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd))>;

// Move Aligned/Unaligned floating point values
multiclass sse12_mov_packed<bits<8> opc, RegisterClass RC,
                            X86MemOperand x86memop, PatFrag ld_frag,
                            string asm, Domain d,
                            bit IsReMaterializable = 1> {
let neverHasSideEffects = 1 in
  def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
              !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [], d>;
let canFoldAsLoad = 1, isReMaterializable = IsReMaterializable in
  def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
              !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
              [(set RC:$dst, (ld_frag addr:$src))], d>;
}

defm VMOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
                              "movaps", SSEPackedSingle>, TB, VEX;
defm VMOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
                              "movapd", SSEPackedDouble>, TB, OpSize, VEX;
defm VMOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
                              "movups", SSEPackedSingle>, TB, VEX;
defm VMOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
                              "movupd", SSEPackedDouble, 0>, TB, OpSize, VEX;

defm VMOVAPSY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv8f32,
                              "movaps", SSEPackedSingle>, TB, VEX;
defm VMOVAPDY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv4f64,
                              "movapd", SSEPackedDouble>, TB, OpSize, VEX;
defm VMOVUPSY : sse12_mov_packed<0x10, VR256, f256mem, loadv8f32,
                              "movups", SSEPackedSingle>, TB, VEX;
defm VMOVUPDY : sse12_mov_packed<0x10, VR256, f256mem, loadv4f64,
                              "movupd", SSEPackedDouble, 0>, TB, OpSize, VEX;
defm MOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
                              "movaps", SSEPackedSingle>, TB;
defm MOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
                              "movapd", SSEPackedDouble>, TB, OpSize;
defm MOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
                              "movups", SSEPackedSingle>, TB;
defm MOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
                              "movupd", SSEPackedDouble, 0>, TB, OpSize;

def VMOVAPSmr : VPSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movaps\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v4f32 VR128:$src), addr:$dst)]>, VEX;
def VMOVAPDmr : VPDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movapd\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v2f64 VR128:$src), addr:$dst)]>, VEX;
def VMOVUPSmr : VPSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movups\t{$src, $dst|$dst, $src}",
                   [(store (v4f32 VR128:$src), addr:$dst)]>, VEX;
def VMOVUPDmr : VPDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movupd\t{$src, $dst|$dst, $src}",
                   [(store (v2f64 VR128:$src), addr:$dst)]>, VEX;
def VMOVAPSYmr : VPSI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                   "movaps\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v8f32 VR256:$src), addr:$dst)]>, VEX;
def VMOVAPDYmr : VPDI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                   "movapd\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v4f64 VR256:$src), addr:$dst)]>, VEX;
def VMOVUPSYmr : VPSI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                   "movups\t{$src, $dst|$dst, $src}",
                   [(store (v8f32 VR256:$src), addr:$dst)]>, VEX;
def VMOVUPDYmr : VPDI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                   "movupd\t{$src, $dst|$dst, $src}",
                   [(store (v4f64 VR256:$src), addr:$dst)]>, VEX;

def : Pat<(int_x86_avx_loadu_ps_256 addr:$src), (VMOVUPSYrm addr:$src)>;
def : Pat<(int_x86_avx_storeu_ps_256 addr:$dst, VR256:$src),
          (VMOVUPSYmr addr:$dst, VR256:$src)>;

def : Pat<(int_x86_avx_loadu_pd_256 addr:$src), (VMOVUPDYrm addr:$src)>;
def : Pat<(int_x86_avx_storeu_pd_256 addr:$dst, VR256:$src),
          (VMOVUPDYmr addr:$dst, VR256:$src)>;

def MOVAPSmr : PSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movaps\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v4f32 VR128:$src), addr:$dst)]>;
def MOVAPDmr : PDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movapd\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v2f64 VR128:$src), addr:$dst)]>;
def MOVUPSmr : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movups\t{$src, $dst|$dst, $src}",
                   [(store (v4f32 VR128:$src), addr:$dst)]>;
def MOVUPDmr : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movupd\t{$src, $dst|$dst, $src}",
                   [(store (v2f64 VR128:$src), addr:$dst)]>;

// Intrinsic forms of MOVUPS/D load and store
def VMOVUPSmr_Int : VPSI<0x11, MRMDestMem, (outs),
                         (ins f128mem:$dst, VR128:$src),
                         "movups\t{$src, $dst|$dst, $src}",
                         [(int_x86_sse_storeu_ps addr:$dst, VR128:$src)]>, VEX;
def VMOVUPDmr_Int : VPDI<0x11, MRMDestMem, (outs),
                         (ins f128mem:$dst, VR128:$src),
                         "movupd\t{$src, $dst|$dst, $src}",
                         [(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>, VEX;

def MOVUPSmr_Int : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                       "movups\t{$src, $dst|$dst, $src}",
                       [(int_x86_sse_storeu_ps addr:$dst, VR128:$src)]>;
def MOVUPDmr_Int : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                       "movupd\t{$src, $dst|$dst, $src}",
                       [(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>;

// Move Low/High packed floating point values
multiclass sse12_mov_hilo_packed<bits<8>opc, RegisterClass RC,
                                 PatFrag mov_frag, string base_opc,
                                 string asm_opr> {
  def PSrm : PI<opc, MRMSrcMem,
         (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
         !strconcat(base_opc, "s", asm_opr),
     [(set VR128:$dst,
       (mov_frag VR128:$src1,
          (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))))],
      SSEPackedSingle>, TB;

  def PDrm : PI<opc, MRMSrcMem,
         (outs RC:$dst), (ins RC:$src1, f64mem:$src2),
         !strconcat(base_opc, "d", asm_opr),
     [(set RC:$dst, (v2f64 (mov_frag RC:$src1,
                            (scalar_to_vector (loadf64 addr:$src2)))))],
      SSEPackedDouble>, TB, OpSize;
}

let AddedComplexity = 20 in {
  defm VMOVL : sse12_mov_hilo_packed<0x12, VR128, movlp, "movlp",
                     "\t{$src2, $src1, $dst|$dst, $src1, $src2}">, VEX_4V;
  defm VMOVH : sse12_mov_hilo_packed<0x16, VR128, movlhps, "movhp",
                     "\t{$src2, $src1, $dst|$dst, $src1, $src2}">, VEX_4V;
}
let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
  defm MOVL : sse12_mov_hilo_packed<0x12, VR128, movlp, "movlp",
                     "\t{$src2, $dst|$dst, $src2}">;
  defm MOVH : sse12_mov_hilo_packed<0x16, VR128, movlhps, "movhp",
                     "\t{$src2, $dst|$dst, $src2}">;
}

def VMOVLPSmr : VPSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
                                 (iPTR 0))), addr:$dst)]>, VEX;
def VMOVLPDmr : VPDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlpd\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract (v2f64 VR128:$src),
                                 (iPTR 0))), addr:$dst)]>, VEX;
def MOVLPSmr : PSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
                                 (iPTR 0))), addr:$dst)]>;
def MOVLPDmr : PDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlpd\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract (v2f64 VR128:$src),
                                 (iPTR 0))), addr:$dst)]>;

// v2f64 extract element 1 is always custom lowered to unpack high to low
// and extract element 0 so the non-store version isn't too horrible.
def VMOVHPSmr : VPSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract
                                 (unpckh (bc_v2f64 (v4f32 VR128:$src)),
                                         (undef)), (iPTR 0))), addr:$dst)]>,
                   VEX;
def VMOVHPDmr : VPDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhpd\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract
                                 (v2f64 (unpckh VR128:$src, (undef))),
                                 (iPTR 0))), addr:$dst)]>,
                   VEX;
def MOVHPSmr : PSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract
                                 (unpckh (bc_v2f64 (v4f32 VR128:$src)),
                                         (undef)), (iPTR 0))), addr:$dst)]>;
def MOVHPDmr : PDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhpd\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract
                                 (v2f64 (unpckh VR128:$src, (undef))),
                                 (iPTR 0))), addr:$dst)]>;

let AddedComplexity = 20 in {
  def VMOVLHPSrr : VPSI<0x16, MRMSrcReg, (outs VR128:$dst),
                                       (ins VR128:$src1, VR128:$src2),
                      "movlhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                      [(set VR128:$dst,
                        (v4f32 (movlhps VR128:$src1, VR128:$src2)))]>,
                      VEX_4V;
  def VMOVHLPSrr : VPSI<0x12, MRMSrcReg, (outs VR128:$dst),
                                       (ins VR128:$src1, VR128:$src2),
                      "movhlps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                      [(set VR128:$dst,
                        (v4f32 (movhlps VR128:$src1, VR128:$src2)))]>,
                      VEX_4V;
}
let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
  def MOVLHPSrr : PSI<0x16, MRMSrcReg, (outs VR128:$dst),
                                       (ins VR128:$src1, VR128:$src2),
                      "movlhps\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                        (v4f32 (movlhps VR128:$src1, VR128:$src2)))]>;
  def MOVHLPSrr : PSI<0x12, MRMSrcReg, (outs VR128:$dst),
                                       (ins VR128:$src1, VR128:$src2),
                      "movhlps\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                        (v4f32 (movhlps VR128:$src1, VR128:$src2)))]>;
}

let Predicates = [HasAVX] in {
  def : Pat<(movlhps VR128:$src1, (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
            (VMOVHPSrm (v4i32 VR128:$src1), addr:$src2)>;
  def : Pat<(X86Movlhps VR128:$src1,
                 (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
            (VMOVHPSrm VR128:$src1, addr:$src2)>;
  def : Pat<(X86Movlhps VR128:$src1,
                 (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
            (VMOVHPSrm VR128:$src1, addr:$src2)>;

  let AddedComplexity = 20 in {
    def : Pat<(v4f32 (movddup VR128:$src, (undef))),
              (VMOVLHPSrr (v4f32 VR128:$src), (v4f32 VR128:$src))>;
    def : Pat<(v2i64 (movddup VR128:$src, (undef))),
              (VMOVLHPSrr (v2i64 VR128:$src), (v2i64 VR128:$src))>;

    // vector_shuffle v1, v2 <0, 1, 4, 5> using MOVLHPS
    def : Pat<(v4i32 (movlhps VR128:$src1, VR128:$src2)),
              (VMOVLHPSrr VR128:$src1, VR128:$src2)>;

    def : Pat<(v4f32 (X86Movlhps VR128:$src1, VR128:$src2)),
              (VMOVLHPSrr VR128:$src1, VR128:$src2)>;
    def : Pat<(v4i32 (X86Movlhps VR128:$src1, VR128:$src2)),
              (VMOVLHPSrr VR128:$src1, VR128:$src2)>;
    def : Pat<(v2i64 (X86Movlhps VR128:$src1, VR128:$src2)),
              (VMOVLHPSrr (v2i64 VR128:$src1), VR128:$src2)>;
  }

  let AddedComplexity = 20 in {
    // vector_shuffle v1, v2 <6, 7, 2, 3> using MOVHLPS
    def : Pat<(v4i32 (movhlps VR128:$src1, VR128:$src2)),
              (VMOVHLPSrr VR128:$src1, VR128:$src2)>;

    // vector_shuffle v1, undef <2, ?, ?, ?> using MOVHLPS
    def : Pat<(v4f32 (movhlps_undef VR128:$src1, (undef))),
              (VMOVHLPSrr VR128:$src1, VR128:$src1)>;
    def : Pat<(v4i32 (movhlps_undef VR128:$src1, (undef))),
              (VMOVHLPSrr VR128:$src1, VR128:$src1)>;
  }

  def : Pat<(v4f32 (X86Movhlps VR128:$src1, VR128:$src2)),
            (VMOVHLPSrr VR128:$src1, VR128:$src2)>;
  def : Pat<(v4i32 (X86Movhlps VR128:$src1, VR128:$src2)),
            (VMOVHLPSrr VR128:$src1, VR128:$src2)>;
}

let Predicates = [HasSSE1] in {
  def : Pat<(movlhps VR128:$src1, (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
            (MOVHPSrm (v4i32 VR128:$src1), addr:$src2)>;
  def : Pat<(X86Movlhps VR128:$src1,
                 (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
            (MOVHPSrm VR128:$src1, addr:$src2)>;
  def : Pat<(X86Movlhps VR128:$src1,
                 (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
            (MOVHPSrm VR128:$src1, addr:$src2)>;

  let AddedComplexity = 20 in {
    def : Pat<(v4f32 (movddup VR128:$src, (undef))),
              (MOVLHPSrr (v4f32 VR128:$src), (v4f32 VR128:$src))>;
    def : Pat<(v2i64 (movddup VR128:$src, (undef))),
              (MOVLHPSrr (v2i64 VR128:$src), (v2i64 VR128:$src))>;

    // vector_shuffle v1, v2 <0, 1, 4, 5> using MOVLHPS
    def : Pat<(v4i32 (movlhps VR128:$src1, VR128:$src2)),
              (MOVLHPSrr VR128:$src1, VR128:$src2)>;

    def : Pat<(v4f32 (X86Movlhps VR128:$src1, VR128:$src2)),
              (MOVLHPSrr VR128:$src1, VR128:$src2)>;
    def : Pat<(v4i32 (X86Movlhps VR128:$src1, VR128:$src2)),
              (MOVLHPSrr VR128:$src1, VR128:$src2)>;
    def : Pat<(v2i64 (X86Movlhps VR128:$src1, VR128:$src2)),
              (MOVLHPSrr (v2i64 VR128:$src1), VR128:$src2)>;
  }

  let AddedComplexity = 20 in {
    // vector_shuffle v1, v2 <6, 7, 2, 3> using MOVHLPS
    def : Pat<(v4i32 (movhlps VR128:$src1, VR128:$src2)),
              (MOVHLPSrr VR128:$src1, VR128:$src2)>;

    // vector_shuffle v1, undef <2, ?, ?, ?> using MOVHLPS
    def : Pat<(v4f32 (movhlps_undef VR128:$src1, (undef))),
              (MOVHLPSrr VR128:$src1, VR128:$src1)>;
    def : Pat<(v4i32 (movhlps_undef VR128:$src1, (undef))),
              (MOVHLPSrr VR128:$src1, VR128:$src1)>;
  }

  def : Pat<(v4f32 (X86Movhlps VR128:$src1, VR128:$src2)),
            (MOVHLPSrr VR128:$src1, VR128:$src2)>;
  def : Pat<(v4i32 (X86Movhlps VR128:$src1, VR128:$src2)),
            (MOVHLPSrr VR128:$src1, VR128:$src2)>;
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Conversion Instructions
//===----------------------------------------------------------------------===//

multiclass sse12_cvt_s<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                     SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
                     string asm> {
  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
              [(set DstRC:$dst, (OpNode SrcRC:$src))]>;
  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
              [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))]>;
}

multiclass sse12_cvt_s_np<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                          X86MemOperand x86memop, string asm> {
  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm, []>;
  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm, []>;
}

multiclass sse12_cvt_p<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                       SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
                       string asm, Domain d> {
  def rr : PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
              [(set DstRC:$dst, (OpNode SrcRC:$src))], d>;
  def rm : PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
              [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))], d>;
}

multiclass sse12_vcvt_avx<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                          X86MemOperand x86memop, string asm> {
  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src),
              !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>;
  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
              (ins DstRC:$src1, x86memop:$src),
              !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>;
}
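
// Note on sse12_vcvt_avx: the AVX scalar converts carry an extra DstRC:$src1
// operand because, in the three-operand VEX form, the upper elements of the
// destination come from that first source instead of being left unchanged.
// The patterns further below pass an IMPLICIT_DEF there when only the
// converted scalar matters.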

defm VCVTTSS2SI   : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
                                "cvttss2si\t{$src, $dst|$dst, $src}">, XS, VEX;
defm VCVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
                                "cvttss2si\t{$src, $dst|$dst, $src}">, XS, VEX,
                                VEX_W;
defm VCVTTSD2SI   : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
                                "cvttsd2si\t{$src, $dst|$dst, $src}">, XD, VEX;
defm VCVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
                                "cvttsd2si\t{$src, $dst|$dst, $src}">, XD,
                                VEX, VEX_W;

// The assembler can recognize rr 64-bit instructions by seeing an rxx
// register, but the same isn't true when only using memory operands;
// provide other assembly "l" and "q" forms to address this explicitly
// where appropriate to do so.
defm VCVTSI2SS   : sse12_vcvt_avx<0x2A, GR32, FR32, i32mem, "cvtsi2ss">, XS,
                                  VEX_4V;
defm VCVTSI2SS64 : sse12_vcvt_avx<0x2A, GR64, FR32, i64mem, "cvtsi2ss{q}">, XS,
                                  VEX_4V, VEX_W;
defm VCVTSI2SD   : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sd">, XD,
                                  VEX_4V;
defm VCVTSI2SDL  : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sd{l}">, XD,
                                  VEX_4V;
defm VCVTSI2SD64 : sse12_vcvt_avx<0x2A, GR64, FR64, i64mem, "cvtsi2sd{q}">, XD,
                                  VEX_4V, VEX_W;

let Predicates = [HasAVX] in {
  def : Pat<(f32 (sint_to_fp (loadi32 addr:$src))),
            (VCVTSI2SSrm (f32 (IMPLICIT_DEF)), addr:$src)>;
  def : Pat<(f32 (sint_to_fp (loadi64 addr:$src))),
            (VCVTSI2SS64rm (f32 (IMPLICIT_DEF)), addr:$src)>;
  def : Pat<(f64 (sint_to_fp (loadi32 addr:$src))),
            (VCVTSI2SDrm (f64 (IMPLICIT_DEF)), addr:$src)>;
  def : Pat<(f64 (sint_to_fp (loadi64 addr:$src))),
            (VCVTSI2SD64rm (f64 (IMPLICIT_DEF)), addr:$src)>;

  def : Pat<(f32 (sint_to_fp GR32:$src)),
            (VCVTSI2SSrr (f32 (IMPLICIT_DEF)), GR32:$src)>;
  def : Pat<(f32 (sint_to_fp GR64:$src)),
            (VCVTSI2SS64rr (f32 (IMPLICIT_DEF)), GR64:$src)>;
  def : Pat<(f64 (sint_to_fp GR32:$src)),
            (VCVTSI2SDrr (f64 (IMPLICIT_DEF)), GR32:$src)>;
  def : Pat<(f64 (sint_to_fp GR64:$src)),
            (VCVTSI2SD64rr (f64 (IMPLICIT_DEF)), GR64:$src)>;
}
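
// In the patterns above, (f32 (IMPLICIT_DEF)) / (f64 (IMPLICIT_DEF)) only
// supplies the $src1 register that the VEX encoding requires; its contents do
// not matter because just the low converted element is read back as FR32/FR64.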

defm CVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
                      "cvttss2si\t{$src, $dst|$dst, $src}">, XS;
defm CVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
                      "cvttss2si{q}\t{$src, $dst|$dst, $src}">, XS, REX_W;
defm CVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
                      "cvttsd2si\t{$src, $dst|$dst, $src}">, XD;
defm CVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
                      "cvttsd2si{q}\t{$src, $dst|$dst, $src}">, XD, REX_W;
defm CVTSI2SS : sse12_cvt_s<0x2A, GR32, FR32, sint_to_fp, i32mem, loadi32,
                      "cvtsi2ss\t{$src, $dst|$dst, $src}">, XS;
defm CVTSI2SS64 : sse12_cvt_s<0x2A, GR64, FR32, sint_to_fp, i64mem, loadi64,
                      "cvtsi2ss{q}\t{$src, $dst|$dst, $src}">, XS, REX_W;
defm CVTSI2SD : sse12_cvt_s<0x2A, GR32, FR64, sint_to_fp, i32mem, loadi32,
                      "cvtsi2sd\t{$src, $dst|$dst, $src}">, XD;
defm CVTSI2SD64 : sse12_cvt_s<0x2A, GR64, FR64, sint_to_fp, i64mem, loadi64,
                      "cvtsi2sd{q}\t{$src, $dst|$dst, $src}">, XD, REX_W;

// Conversion Instructions Intrinsics - Match intrinsics which expect MM
// and/or XMM operand(s).

multiclass sse12_cvt_sint<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                         Intrinsic Int, X86MemOperand x86memop, PatFrag ld_frag,
                         string asm> {
  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
              !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
              [(set DstRC:$dst, (Int SrcRC:$src))]>;
  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
              !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
              [(set DstRC:$dst, (Int (ld_frag addr:$src)))]>;
}

multiclass sse12_cvt_sint_3addr<bits<8> opc, RegisterClass SrcRC,
                    RegisterClass DstRC, Intrinsic Int, X86MemOperand x86memop,
                    PatFrag ld_frag, string asm, bit Is2Addr = 1> {
  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src2),
              !if(Is2Addr,
                  !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
                  !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
              [(set DstRC:$dst, (Int DstRC:$src1, SrcRC:$src2))]>;
  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
              (ins DstRC:$src1, x86memop:$src2),
              !if(Is2Addr,
                  !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
                  !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
              [(set DstRC:$dst, (Int DstRC:$src1, (ld_frag addr:$src2)))]>;
}

defm Int_VCVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
                      f128mem, load, "cvtsd2si">, XD, VEX;
defm Int_VCVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64,
                      int_x86_sse2_cvtsd2si64, f128mem, load, "cvtsd2si">,
                      XD, VEX, VEX_W;

// FIXME: The asm matcher has a hack to ignore instructions with _Int and Int_
// Get rid of this hack or rename the intrinsics; there are several
// instructions that only match with the intrinsic form, so why create
// duplicates just to let them be recognized by the assembler?
defm VCVTSD2SI_alt : sse12_cvt_s_np<0x2D, FR64, GR32, f64mem,
                      "cvtsd2si\t{$src, $dst|$dst, $src}">, XD, VEX;
defm VCVTSD2SI64 : sse12_cvt_s_np<0x2D, FR64, GR64, f64mem,
                      "cvtsd2si\t{$src, $dst|$dst, $src}">, XD, VEX, VEX_W;
defm CVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
                f128mem, load, "cvtsd2si{l}">, XD;
defm CVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse2_cvtsd2si64,
                  f128mem, load, "cvtsd2si{q}">, XD, REX_W;

defm Int_VCVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
          int_x86_sse_cvtsi2ss, i32mem, loadi32, "cvtsi2ss", 0>, XS, VEX_4V;
defm Int_VCVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
          int_x86_sse_cvtsi642ss, i64mem, loadi64, "cvtsi2ss", 0>, XS, VEX_4V,
          VEX_W;
defm Int_VCVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
          int_x86_sse2_cvtsi2sd, i32mem, loadi32, "cvtsi2sd", 0>, XD, VEX_4V;
defm Int_VCVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
          int_x86_sse2_cvtsi642sd, i64mem, loadi64, "cvtsi2sd", 0>, XD,
          VEX_4V, VEX_W;

let Constraints = "$src1 = $dst" in {
  defm Int_CVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
                        int_x86_sse_cvtsi2ss, i32mem, loadi32,
                        "cvtsi2ss">, XS;
  defm Int_CVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
                        int_x86_sse_cvtsi642ss, i64mem, loadi64,
                        "cvtsi2ss{q}">, XS, REX_W;
  defm Int_CVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
                        int_x86_sse2_cvtsi2sd, i32mem, loadi32,
                        "cvtsi2sd">, XD;
  defm Int_CVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
                        int_x86_sse2_cvtsi642sd, i64mem, loadi64,
                        "cvtsi2sd">, XD, REX_W;
}

// Aliases for intrinsics
defm Int_VCVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
                                    f32mem, load, "cvttss2si">, XS, VEX;
defm Int_VCVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
                                    int_x86_sse_cvttss2si64, f32mem, load,
                                    "cvttss2si">, XS, VEX, VEX_W;
defm Int_VCVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
                                    f128mem, load, "cvttsd2si">, XD, VEX;
defm Int_VCVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
                                    int_x86_sse2_cvttsd2si64, f128mem, load,
                                    "cvttsd2si">, XD, VEX, VEX_W;
defm Int_CVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
                                    f32mem, load, "cvttss2si">, XS;
defm Int_CVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
                                    int_x86_sse_cvttss2si64, f32mem, load,
                                    "cvttss2si{q}">, XS, REX_W;
defm Int_CVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
                                    f128mem, load, "cvttsd2si">, XD;
defm Int_CVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
                                    int_x86_sse2_cvttsd2si64, f128mem, load,
                                    "cvttsd2si{q}">, XD, REX_W;

let Pattern = []<dag> in {
defm VCVTSS2SI   : sse12_cvt_s<0x2D, FR32, GR32, undef, f32mem, load,
                               "cvtss2si{l}\t{$src, $dst|$dst, $src}">, XS, VEX;
defm VCVTSS2SI64 : sse12_cvt_s<0x2D, FR32, GR64, undef, f32mem, load,
                               "cvtss2si\t{$src, $dst|$dst, $src}">, XS, VEX,
                               VEX_W;
defm VCVTDQ2PS   : sse12_cvt_p<0x5B, VR128, VR128, undef, i128mem, load,
                               "cvtdq2ps\t{$src, $dst|$dst, $src}",
                               SSEPackedSingle>, TB, VEX;
defm VCVTDQ2PSY  : sse12_cvt_p<0x5B, VR256, VR256, undef, i256mem, load,
                               "cvtdq2ps\t{$src, $dst|$dst, $src}",
                               SSEPackedSingle>, TB, VEX;
}

let Pattern = []<dag> in {
defm CVTSS2SI : sse12_cvt_s<0x2D, FR32, GR32, undef, f32mem, load /*dummy*/,
                          "cvtss2si{l}\t{$src, $dst|$dst, $src}">, XS;
defm CVTSS2SI64 : sse12_cvt_s<0x2D, FR32, GR64, undef, f32mem, load /*dummy*/,
                          "cvtss2si{q}\t{$src, $dst|$dst, $src}">, XS, REX_W;
defm CVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, undef, i128mem, load /*dummy*/,
                          "cvtdq2ps\t{$src, $dst|$dst, $src}",
                          SSEPackedSingle>, TB; /* PD SSE3 form is available */
}

let Predicates = [HasSSE1] in {
  def : Pat<(int_x86_sse_cvtss2si VR128:$src),
            (CVTSS2SIrr (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
  def : Pat<(int_x86_sse_cvtss2si (load addr:$src)),
            (CVTSS2SIrm addr:$src)>;
  def : Pat<(int_x86_sse_cvtss2si64 VR128:$src),
            (CVTSS2SI64rr (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
  def : Pat<(int_x86_sse_cvtss2si64 (load addr:$src)),
            (CVTSS2SI64rm addr:$src)>;
}

let Predicates = [HasAVX] in {
  def : Pat<(int_x86_sse_cvtss2si VR128:$src),
            (VCVTSS2SIrr (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
  def : Pat<(int_x86_sse_cvtss2si (load addr:$src)),
            (VCVTSS2SIrm addr:$src)>;
  def : Pat<(int_x86_sse_cvtss2si64 VR128:$src),
            (VCVTSS2SI64rr (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
  def : Pat<(int_x86_sse_cvtss2si64 (load addr:$src)),
            (VCVTSS2SI64rm addr:$src)>;
}

// Convert scalar double to scalar single
def VCVTSD2SSrr  : VSDI<0x5A, MRMSrcReg, (outs FR32:$dst),
                       (ins FR64:$src1, FR64:$src2),
                       "cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
                       VEX_4V;
def VCVTSD2SSrm  : I<0x5A, MRMSrcMem, (outs FR32:$dst),
                     (ins FR64:$src1, f64mem:$src2),
                     "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                     []>, XD, Requires<[HasAVX, OptForSize]>, VEX_4V;
def : Pat<(f32 (fround FR64:$src)), (VCVTSD2SSrr FR64:$src, FR64:$src)>,
          Requires<[HasAVX]>;

def CVTSD2SSrr  : SDI<0x5A, MRMSrcReg, (outs FR32:$dst), (ins FR64:$src),
                      "cvtsd2ss\t{$src, $dst|$dst, $src}",
                      [(set FR32:$dst, (fround FR64:$src))]>;
def CVTSD2SSrm  : I<0x5A, MRMSrcMem, (outs FR32:$dst), (ins f64mem:$src),
                      "cvtsd2ss\t{$src, $dst|$dst, $src}",
                      [(set FR32:$dst, (fround (loadf64 addr:$src)))]>, XD,
                      Requires<[HasSSE2, OptForSize]>;

defm Int_VCVTSD2SS: sse12_cvt_sint_3addr<0x5A, VR128, VR128,
                      int_x86_sse2_cvtsd2ss, f64mem, load, "cvtsd2ss", 0>,
                      XS, VEX_4V;
let Constraints = "$src1 = $dst" in
defm Int_CVTSD2SS: sse12_cvt_sint_3addr<0x5A, VR128, VR128,
                      int_x86_sse2_cvtsd2ss, f64mem, load, "cvtsd2ss">, XS;

// Convert scalar single to scalar double
// SSE2 instructions with XS prefix
def VCVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst),
                    (ins FR32:$src1, FR32:$src2),
                    "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    []>, XS, Requires<[HasAVX]>, VEX_4V;
def VCVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst),
                    (ins FR32:$src1, f32mem:$src2),
                    "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    []>, XS, VEX_4V, Requires<[HasAVX, OptForSize]>;

let Predicates = [HasAVX] in {
  def : Pat<(f64 (fextend FR32:$src)),
            (VCVTSS2SDrr FR32:$src, FR32:$src)>;
  def : Pat<(fextend (loadf32 addr:$src)),
            (VCVTSS2SDrm (f32 (IMPLICIT_DEF)), addr:$src)>;
  def : Pat<(extloadf32 addr:$src),
            (VCVTSS2SDrm (f32 (IMPLICIT_DEF)), addr:$src)>;
}

def CVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst), (ins FR32:$src),
                   "cvtss2sd\t{$src, $dst|$dst, $src}",
                   [(set FR64:$dst, (fextend FR32:$src))]>, XS,
                 Requires<[HasSSE2]>;
def CVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst), (ins f32mem:$src),
                   "cvtss2sd\t{$src, $dst|$dst, $src}",
                   [(set FR64:$dst, (extloadf32 addr:$src))]>, XS,
                 Requires<[HasSSE2, OptForSize]>;

def Int_VCVTSS2SDrr: I<0x5A, MRMSrcReg,
                      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                    "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
                                       VR128:$src2))]>, XS, VEX_4V,
                    Requires<[HasAVX]>;
def Int_VCVTSS2SDrm: I<0x5A, MRMSrcMem,
                      (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2),
                    "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
                                       (load addr:$src2)))]>, XS, VEX_4V,
                    Requires<[HasAVX]>;
let Constraints = "$src1 = $dst" in { // SSE2 instructions with XS prefix
def Int_CVTSS2SDrr: I<0x5A, MRMSrcReg,
                      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                    "cvtss2sd\t{$src2, $dst|$dst, $src2}",
                    [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
                                       VR128:$src2))]>, XS,
                    Requires<[HasSSE2]>;
def Int_CVTSS2SDrm: I<0x5A, MRMSrcMem,
                      (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2),
                    "cvtss2sd\t{$src2, $dst|$dst, $src2}",
                    [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
                                       (load addr:$src2)))]>, XS,
                    Requires<[HasSSE2]>;
}

def : Pat<(extloadf32 addr:$src),
          (CVTSS2SDrr (MOVSSrm addr:$src))>,
      Requires<[HasSSE2, OptForSpeed]>;

// Convert doubleword to packed single/double fp
// SSE2 instructions without OpSize prefix
def Int_VCVTDQ2PSrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "vcvtdq2ps\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))]>,
                     TB, VEX, Requires<[HasAVX]>;
def Int_VCVTDQ2PSrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                      "vcvtdq2ps\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (int_x86_sse2_cvtdq2ps
                                        (bitconvert (memopv2i64 addr:$src))))]>,
                     TB, VEX, Requires<[HasAVX]>;
def Int_CVTDQ2PSrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtdq2ps\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))]>,
                     TB, Requires<[HasSSE2]>;
def Int_CVTDQ2PSrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                      "cvtdq2ps\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (int_x86_sse2_cvtdq2ps
                                        (bitconvert (memopv2i64 addr:$src))))]>,
                     TB, Requires<[HasSSE2]>;

// FIXME: why is the non-intrinsic version described as SSE3?
// SSE2 instructions with XS prefix
def Int_VCVTDQ2PDrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "vcvtdq2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))]>,
                     XS, VEX, Requires<[HasAVX]>;
def Int_VCVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                       "vcvtdq2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2pd
                                        (bitconvert (memopv2i64 addr:$src))))]>,
                     XS, VEX, Requires<[HasAVX]>;
def Int_CVTDQ2PDrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtdq2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))]>,
                     XS, Requires<[HasSSE2]>;
def Int_CVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                       "cvtdq2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2pd
                                        (bitconvert (memopv2i64 addr:$src))))]>,
                     XS, Requires<[HasSSE2]>;

// Convert packed single/double fp to doubleword
def VCVTPS2DQrr  : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPS2DQrm  : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                        "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPS2DQYrr : VPDI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
                        "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPS2DQYrm : VPDI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
                        "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                     "cvtps2dq\t{$src, $dst|$dst, $src}", []>;
def CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                     "cvtps2dq\t{$src, $dst|$dst, $src}", []>;

def Int_VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "cvtps2dq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))]>,
                        VEX;
def Int_VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst),
                           (ins f128mem:$src),
                        "cvtps2dq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cvtps2dq
                                            (memop addr:$src)))]>, VEX;
def Int_CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "cvtps2dq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))]>;
def Int_CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                        "cvtps2dq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cvtps2dq
                                            (memop addr:$src)))]>;

// SSE2 packed instructions with XD prefix
def Int_VCVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "vcvtpd2dq\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
                     XD, VEX, Requires<[HasAVX]>;
def Int_VCVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                       "vcvtpd2dq\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtpd2dq
                                          (memop addr:$src)))]>,
                     XD, VEX, Requires<[HasAVX]>;
def Int_CVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtpd2dq\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
                     XD, Requires<[HasSSE2]>;
def Int_CVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                       "cvtpd2dq\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtpd2dq
                                          (memop addr:$src)))]>,
                     XD, Requires<[HasSSE2]>;

// Convert with truncation packed single/double fp to doubleword
// SSE2 packed instructions with XS prefix
def VCVTTPS2DQrr : VSSI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTTPS2DQrm : VSSI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                        "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTTPS2DQYrr : VSSI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
                         "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTTPS2DQYrm : VSSI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
                         "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def CVTTPS2DQrr : SSI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                      "cvttps2dq\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                            (int_x86_sse2_cvttps2dq VR128:$src))]>;
def CVTTPS2DQrm : SSI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                      "cvttps2dq\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                            (int_x86_sse2_cvttps2dq (memop addr:$src)))]>;

def Int_VCVTTPS2DQrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "vcvttps2dq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                              (int_x86_sse2_cvttps2dq VR128:$src))]>,
                      XS, VEX, Requires<[HasAVX]>;
def Int_VCVTTPS2DQrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                        "vcvttps2dq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cvttps2dq
                                           (memop addr:$src)))]>,
                      XS, VEX, Requires<[HasAVX]>;

def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
          (Int_CVTDQ2PSrr VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
          (CVTTPS2DQrr VR128:$src)>, Requires<[HasSSE2]>;

def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
          (Int_VCVTDQ2PSrr VR128:$src)>, Requires<[HasAVX]>;
def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
          (VCVTTPS2DQrr VR128:$src)>, Requires<[HasAVX]>;
def : Pat<(v8f32 (sint_to_fp (v8i32 VR256:$src))),
          (VCVTDQ2PSYrr VR256:$src)>, Requires<[HasAVX]>;
def : Pat<(v8i32 (fp_to_sint (v8f32 VR256:$src))),
          (VCVTTPS2DQYrr VR256:$src)>, Requires<[HasAVX]>;
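
// Generic fp_to_sint maps to the truncating cvttps2dq forms above because
// C-style float-to-int conversion rounds toward zero; the non-truncating
// cvtps2dq variants remain reachable only through their intrinsics.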

def Int_VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst),
                            (ins VR128:$src),
                        "cvttpd2dq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))]>,
                        VEX;
def Int_VCVTTPD2DQrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst),
                            (ins f128mem:$src),
                        "cvttpd2dq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
                                           (memop addr:$src)))]>, VEX;
def CVTTPD2DQrr : PDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                      "cvttpd2dq\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))]>;
def CVTTPD2DQrm : PDI<0xE6, MRMSrcMem, (outs VR128:$dst),(ins f128mem:$src),
                      "cvttpd2dq\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
                                         (memop addr:$src)))]>;

// The assembler can recognize rr 256-bit instructions by seeing a ymm
// register, but the same isn't true when using memory operands instead.
// Provide other assembly rr and rm forms to address this explicitly.
def VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "cvttpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTTPD2DQXrYr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
                          "cvttpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;

// XMM only
def VCVTTPD2DQXrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                         "cvttpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTTPD2DQXrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                         "cvttpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;

// YMM only
def VCVTTPD2DQYrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
                         "cvttpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTTPD2DQYrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
                         "cvttpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;

// Convert packed single to packed double
let Predicates = [HasAVX] in {
                  // SSE2 instructions without OpSize prefix
def VCVTPS2PDrr  : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                     "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, TB, VEX;
def VCVTPS2PDrm  : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
                     "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, TB, VEX;
def VCVTPS2PDYrr : I<0x5A, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
                     "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, TB, VEX;
def VCVTPS2PDYrm : I<0x5A, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
                     "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, TB, VEX;
}
def CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                   "cvtps2pd\t{$src, $dst|$dst, $src}", []>, TB;
def CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
                   "cvtps2pd\t{$src, $dst|$dst, $src}", []>, TB;

def Int_VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "vcvtps2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>,
                     TB, VEX, Requires<[HasAVX]>;
def Int_VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
                       "vcvtps2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtps2pd
                                          (load addr:$src)))]>,
                     TB, VEX, Requires<[HasAVX]>;
def Int_CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtps2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>,
                     TB, Requires<[HasSSE2]>;
def Int_CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
                       "cvtps2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtps2pd
                                          (load addr:$src)))]>,
                     TB, Requires<[HasSSE2]>;
1255 // Convert packed double to packed single
1256 // The assembler can recognize rr 256-bit instructions by seeing a ymm
1257 // register, but the same isn't true when using memory operands instead.
1258 // Provide other assembly rr and rm forms to address this explicitly.
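// Added note, same situation as cvttpd2dq above: the memory form
// "cvtpd2ps (%rax), %xmm0" does not encode the source width, so the
// cvtpd2psx / cvtpd2psy mnemonics below spell it out explicitly.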
1259 def VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1260 "cvtpd2ps\t{$src, $dst|$dst, $src}", []>, VEX;
1261 def VCVTPD2PSXrYr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
1262 "cvtpd2ps\t{$src, $dst|$dst, $src}", []>, VEX;
1265 def VCVTPD2PSXrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1266 "cvtpd2psx\t{$src, $dst|$dst, $src}", []>, VEX;
1267 def VCVTPD2PSXrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1268 "cvtpd2psx\t{$src, $dst|$dst, $src}", []>, VEX;
1271 def VCVTPD2PSYrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
1272 "cvtpd2psy\t{$src, $dst|$dst, $src}", []>, VEX;
1273 def VCVTPD2PSYrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
1274 "cvtpd2psy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
1275 def CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1276 "cvtpd2ps\t{$src, $dst|$dst, $src}", []>;
1277 def CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1278 "cvtpd2ps\t{$src, $dst|$dst, $src}", []>;
1281 def Int_VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1282 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1283 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>;
1284 def Int_VCVTPD2PSrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst),
1285 (ins f128mem:$src),
1286 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1287 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps
1288 (memop addr:$src)))]>;
1289 def Int_CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1290 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1291 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>;
1292 def Int_CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1293 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1294 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps
1295 (memop addr:$src)))]>;
1297 // AVX 256-bit register conversion intrinsics
1298 // FIXME: Migrate SSE conversion intrinsics matching to use patterns as below
1299 // whenever possible to avoid declaring two versions of each one.
1300 def : Pat<(int_x86_avx_cvtdq2_ps_256 VR256:$src),
1301 (VCVTDQ2PSYrr VR256:$src)>;
1302 def : Pat<(int_x86_avx_cvtdq2_ps_256 (memopv8i32 addr:$src)),
1303 (VCVTDQ2PSYrm addr:$src)>;
1305 def : Pat<(int_x86_avx_cvt_pd2_ps_256 VR256:$src),
1306 (VCVTPD2PSYrr VR256:$src)>;
1307 def : Pat<(int_x86_avx_cvt_pd2_ps_256 (memopv4f64 addr:$src)),
1308 (VCVTPD2PSYrm addr:$src)>;
1310 def : Pat<(int_x86_avx_cvt_ps2dq_256 VR256:$src),
1311 (VCVTPS2DQYrr VR256:$src)>;
1312 def : Pat<(int_x86_avx_cvt_ps2dq_256 (memopv8f32 addr:$src)),
1313 (VCVTPS2DQYrm addr:$src)>;
1315 def : Pat<(int_x86_avx_cvt_ps2_pd_256 VR128:$src),
1316 (VCVTPS2PDYrr VR128:$src)>;
1317 def : Pat<(int_x86_avx_cvt_ps2_pd_256 (memopv4f32 addr:$src)),
1318 (VCVTPS2PDYrm addr:$src)>;
1320 def : Pat<(int_x86_avx_cvtt_pd2dq_256 VR256:$src),
1321 (VCVTTPD2DQYrr VR256:$src)>;
1322 def : Pat<(int_x86_avx_cvtt_pd2dq_256 (memopv4f64 addr:$src)),
1323 (VCVTTPD2DQYrm addr:$src)>;
1325 def : Pat<(int_x86_avx_cvtt_ps2dq_256 VR256:$src),
1326 (VCVTTPS2DQYrr VR256:$src)>;
1327 def : Pat<(int_x86_avx_cvtt_ps2dq_256 (memopv8f32 addr:$src)),
1328 (VCVTTPS2DQYrm addr:$src)>;
1330 // Match fround and fextend for 128/256-bit conversions
1331 def : Pat<(v4f32 (fround (v4f64 VR256:$src))),
1332 (VCVTPD2PSYrr VR256:$src)>;
1333 def : Pat<(v4f32 (fround (loadv4f64 addr:$src))),
1334 (VCVTPD2PSYrm addr:$src)>;
1336 def : Pat<(v4f64 (fextend (v4f32 VR128:$src))),
1337 (VCVTPS2PDYrr VR128:$src)>;
1338 def : Pat<(v4f64 (fextend (loadv4f32 addr:$src))),
1339 (VCVTPS2PDYrm addr:$src)>;
1341 //===----------------------------------------------------------------------===//
1342 // SSE 1 & 2 - Compare Instructions
1343 //===----------------------------------------------------------------------===//
1345 // sse12_cmp_scalar - sse 1 & 2 compare scalar instructions
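// Note (added for clarity; names follow from the defs below): each
// instantiation, e.g. "defm VCMPSS : sse12_cmp_scalar<FR32, f32mem, ...>",
// produces VCMPSSrr/VCMPSSrm, which fold the condition into the mnemonic via
// the SSECC operand (cmp${cc}ss -> cmpeqss, cmpltss, ...), plus
// VCMPSSrr_alt/VCMPSSrm_alt, which take the condition code as an explicit
// i8 immediate.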
1346 multiclass sse12_cmp_scalar<RegisterClass RC, X86MemOperand x86memop,
1347 string asm, string asm_alt> {
1348 let isAsmParserOnly = 1 in {
1349 def rr : SIi8<0xC2, MRMSrcReg,
1350 (outs RC:$dst), (ins RC:$src1, RC:$src, SSECC:$cc),
1351 asm, []>;
1352 let mayLoad = 1 in
1353 def rm : SIi8<0xC2, MRMSrcMem,
1354 (outs RC:$dst), (ins RC:$src1, x86memop:$src, SSECC:$cc),
1355 asm, []>;
1356 }
1358 // Accept explicit immediate argument form instead of comparison code.
1359 def rr_alt : SIi8<0xC2, MRMSrcReg,
1360 (outs RC:$dst), (ins RC:$src1, RC:$src, i8imm:$src2),
1361 asm_alt, []>;
1362 let mayLoad = 1 in
1363 def rm_alt : SIi8<0xC2, MRMSrcMem,
1364 (outs RC:$dst), (ins RC:$src1, x86memop:$src, i8imm:$src2),
1365 asm_alt, []>;
1366 }
1368 let neverHasSideEffects = 1 in {
1369 defm VCMPSS : sse12_cmp_scalar<FR32, f32mem,
1370 "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}",
1371 "cmpss\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}">,
1372 XS, VEX_4V;
1373 defm VCMPSD : sse12_cmp_scalar<FR64, f64mem,
1374 "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}",
1375 "cmpsd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}">,
1376 XD, VEX_4V;
1377 }
1379 let Constraints = "$src1 = $dst" in {
1380 def CMPSSrr : SIi8<0xC2, MRMSrcReg,
1381 (outs FR32:$dst), (ins FR32:$src1, FR32:$src2, SSECC:$cc),
1382 "cmp${cc}ss\t{$src2, $dst|$dst, $src2}",
1383 [(set FR32:$dst, (X86cmpss (f32 FR32:$src1), FR32:$src2, imm:$cc))]>, XS;
1384 def CMPSSrm : SIi8<0xC2, MRMSrcMem,
1385 (outs FR32:$dst), (ins FR32:$src1, f32mem:$src2, SSECC:$cc),
1386 "cmp${cc}ss\t{$src2, $dst|$dst, $src2}",
1387 [(set FR32:$dst, (X86cmpss (f32 FR32:$src1), (loadf32 addr:$src2), imm:$cc))]>, XS;
1388 def CMPSDrr : SIi8<0xC2, MRMSrcReg,
1389 (outs FR64:$dst), (ins FR64:$src1, FR64:$src2, SSECC:$cc),
1390 "cmp${cc}sd\t{$src2, $dst|$dst, $src2}",
1391 [(set FR64:$dst, (X86cmpsd (f64 FR64:$src1), FR64:$src2, imm:$cc))]>, XD;
1392 def CMPSDrm : SIi8<0xC2, MRMSrcMem,
1393 (outs FR64:$dst), (ins FR64:$src1, f64mem:$src2, SSECC:$cc),
1394 "cmp${cc}sd\t{$src2, $dst|$dst, $src2}",
1395 [(set FR64:$dst, (X86cmpsd (f64 FR64:$src1), (loadf64 addr:$src2), imm:$cc))]>, XD;
1396 }
1397 let Constraints = "$src1 = $dst", neverHasSideEffects = 1 in {
1398 def CMPSSrr_alt : SIi8<0xC2, MRMSrcReg,
1399 (outs FR32:$dst), (ins FR32:$src1, FR32:$src, i8imm:$src2),
1400 "cmpss\t{$src2, $src, $dst|$dst, $src, $src2}", []>, XS;
1401 def CMPSSrm_alt : SIi8<0xC2, MRMSrcMem,
1402 (outs FR32:$dst), (ins FR32:$src1, f32mem:$src, i8imm:$src2),
1403 "cmpss\t{$src2, $src, $dst|$dst, $src, $src2}", []>, XS;
1404 def CMPSDrr_alt : SIi8<0xC2, MRMSrcReg,
1405 (outs FR64:$dst), (ins FR64:$src1, FR64:$src, i8imm:$src2),
1406 "cmpsd\t{$src2, $src, $dst|$dst, $src, $src2}", []>, XD;
1407 def CMPSDrm_alt : SIi8<0xC2, MRMSrcMem,
1408 (outs FR64:$dst), (ins FR64:$src1, f64mem:$src, i8imm:$src2),
1409 "cmpsd\t{$src2, $src, $dst|$dst, $src, $src2}", []>, XD;
1410 }
1412 multiclass sse12_cmp_scalar_int<RegisterClass RC, X86MemOperand x86memop,
1413 Intrinsic Int, string asm> {
1414 def rr : SIi8<0xC2, MRMSrcReg, (outs VR128:$dst),
1415 (ins VR128:$src1, VR128:$src, SSECC:$cc), asm,
1416 [(set VR128:$dst, (Int VR128:$src1,
1417 VR128:$src, imm:$cc))]>;
1418 def rm : SIi8<0xC2, MRMSrcMem, (outs VR128:$dst),
1419 (ins VR128:$src1, x86memop:$src, SSECC:$cc), asm,
1420 [(set VR128:$dst, (Int VR128:$src1,
1421 (load addr:$src), imm:$cc))]>;
1422 }
1424 // Aliases to match intrinsics which expect XMM operand(s).
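// Clarifying note (added): int_x86_sse_cmp_ss and int_x86_sse2_cmp_sd operate
// on whole <4 x float>/<2 x double> vectors, so these variants take VR128
// operands instead of the FR32/FR64 scalar registers used by the plain
// CMPSS/CMPSD definitions above.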
1425 defm Int_VCMPSS : sse12_cmp_scalar_int<VR128, f32mem, int_x86_sse_cmp_ss,
1426 "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}">,
1427 XS, VEX_4V;
1428 defm Int_VCMPSD : sse12_cmp_scalar_int<VR128, f64mem, int_x86_sse2_cmp_sd,
1429 "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}">,
1430 XD, VEX_4V;
1431 let Constraints = "$src1 = $dst" in {
1432 defm Int_CMPSS : sse12_cmp_scalar_int<VR128, f32mem, int_x86_sse_cmp_ss,
1433 "cmp${cc}ss\t{$src, $dst|$dst, $src}">, XS;
1434 defm Int_CMPSD : sse12_cmp_scalar_int<VR128, f64mem, int_x86_sse2_cmp_sd,
1435 "cmp${cc}sd\t{$src, $dst|$dst, $src}">, XD;
1436 }
1439 // sse12_ord_cmp - Unordered/Ordered scalar fp compare and set EFLAGS
1440 multiclass sse12_ord_cmp<bits<8> opc, RegisterClass RC, SDNode OpNode,
1441 ValueType vt, X86MemOperand x86memop,
1442 PatFrag ld_frag, string OpcodeStr, Domain d> {
1443 def rr: PI<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
1444 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
1445 [(set EFLAGS, (OpNode (vt RC:$src1), RC:$src2))], d>;
1446 def rm: PI<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
1447 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
1448 [(set EFLAGS, (OpNode (vt RC:$src1),
1449 (ld_frag addr:$src2)))], d>;
1450 }
1452 let Defs = [EFLAGS] in {
1453 defm VUCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
1454 "ucomiss", SSEPackedSingle>, TB, VEX;
1455 defm VUCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
1456 "ucomisd", SSEPackedDouble>, TB, OpSize, VEX;
1457 let Pattern = []<dag> in {
1458 defm VCOMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
1459 "comiss", SSEPackedSingle>, TB, VEX;
1460 defm VCOMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
1461 "comisd", SSEPackedDouble>, TB, OpSize, VEX;
1462 }
1464 defm Int_VUCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
1465 load, "ucomiss", SSEPackedSingle>, TB, VEX;
1466 defm Int_VUCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
1467 load, "ucomisd", SSEPackedDouble>, TB, OpSize, VEX;
1469 defm Int_VCOMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem,
1470 load, "comiss", SSEPackedSingle>, TB, VEX;
1471 defm Int_VCOMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem,
1472 load, "comisd", SSEPackedDouble>, TB, OpSize, VEX;
1473 defm UCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
1474 "ucomiss", SSEPackedSingle>, TB;
1475 defm UCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
1476 "ucomisd", SSEPackedDouble>, TB, OpSize;
1478 let Pattern = []<dag> in {
1479 defm COMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
1480 "comiss", SSEPackedSingle>, TB;
1481 defm COMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
1482 "comisd", SSEPackedDouble>, TB, OpSize;
1483 }
1485 defm Int_UCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
1486 load, "ucomiss", SSEPackedSingle>, TB;
1487 defm Int_UCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
1488 load, "ucomisd", SSEPackedDouble>, TB, OpSize;
1490 defm Int_COMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem, load,
1491 "comiss", SSEPackedSingle>, TB;
1492 defm Int_COMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem, load,
1493 "comisd", SSEPackedDouble>, TB, OpSize;
1494 } // Defs = [EFLAGS]
1496 // sse12_cmp_packed - sse 1 & 2 compare packed instructions
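// Note (added; names inferred from the Pat<> definitions further below): e.g.
// "defm CMPPS : sse12_cmp_packed<VR128, f128mem, int_x86_sse_cmp_ps, ...>"
// yields CMPPSrri/CMPPSrmi (condition folded into the mnemonic) together with
// CMPPSrri_alt/CMPPSrmi_alt, which take the condition code as an explicit
// immediate.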
1497 multiclass sse12_cmp_packed<RegisterClass RC, X86MemOperand x86memop,
1498 Intrinsic Int, string asm, string asm_alt,
1499 Domain d> {
1500 let isAsmParserOnly = 1 in {
1501 def rri : PIi8<0xC2, MRMSrcReg,
1502 (outs RC:$dst), (ins RC:$src1, RC:$src, SSECC:$cc), asm,
1503 [(set RC:$dst, (Int RC:$src1, RC:$src, imm:$cc))], d>;
1504 def rmi : PIi8<0xC2, MRMSrcMem,
1505 (outs RC:$dst), (ins RC:$src1, x86memop:$src, SSECC:$cc), asm,
1506 [(set RC:$dst, (Int RC:$src1, (memop addr:$src), imm:$cc))], d>;
1507 }
1509 // Accept explicit immediate argument form instead of comparison code.
1510 def rri_alt : PIi8<0xC2, MRMSrcReg,
1511 (outs RC:$dst), (ins RC:$src1, RC:$src, i8imm:$src2),
1512 asm_alt, [], d>;
1513 def rmi_alt : PIi8<0xC2, MRMSrcMem,
1514 (outs RC:$dst), (ins RC:$src1, x86memop:$src, i8imm:$src2),
1515 asm_alt, [], d>;
1516 }
1518 defm VCMPPS : sse12_cmp_packed<VR128, f128mem, int_x86_sse_cmp_ps,
1519 "cmp${cc}ps\t{$src, $src1, $dst|$dst, $src1, $src}",
1520 "cmpps\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1521 SSEPackedSingle>, TB, VEX_4V;
1522 defm VCMPPD : sse12_cmp_packed<VR128, f128mem, int_x86_sse2_cmp_pd,
1523 "cmp${cc}pd\t{$src, $src1, $dst|$dst, $src1, $src}",
1524 "cmppd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1525 SSEPackedDouble>, TB, OpSize, VEX_4V;
1526 defm VCMPPSY : sse12_cmp_packed<VR256, f256mem, int_x86_avx_cmp_ps_256,
1527 "cmp${cc}ps\t{$src, $src1, $dst|$dst, $src1, $src}",
1528 "cmpps\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1529 SSEPackedSingle>, TB, VEX_4V;
1530 defm VCMPPDY : sse12_cmp_packed<VR256, f256mem, int_x86_avx_cmp_pd_256,
1531 "cmp${cc}pd\t{$src, $src1, $dst|$dst, $src1, $src}",
1532 "cmppd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1533 SSEPackedDouble>, TB, OpSize, VEX_4V;
1534 let Constraints = "$src1 = $dst" in {
1535 defm CMPPS : sse12_cmp_packed<VR128, f128mem, int_x86_sse_cmp_ps,
1536 "cmp${cc}ps\t{$src, $dst|$dst, $src}",
1537 "cmpps\t{$src2, $src, $dst|$dst, $src, $src2}",
1538 SSEPackedSingle>, TB;
1539 defm CMPPD : sse12_cmp_packed<VR128, f128mem, int_x86_sse2_cmp_pd,
1540 "cmp${cc}pd\t{$src, $dst|$dst, $src}",
1541 "cmppd\t{$src2, $src, $dst|$dst, $src, $src2}",
1542 SSEPackedDouble>, TB, OpSize;
1543 }
1545 let Predicates = [HasSSE1] in {
1546 def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
1547 (CMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>;
1548 def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), (memop addr:$src2), imm:$cc)),
1549 (CMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>;
1550 }
1552 let Predicates = [HasSSE2] in {
1553 def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
1554 (CMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
1555 def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), (memop addr:$src2), imm:$cc)),
1556 (CMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;
1557 }
1559 let Predicates = [HasAVX] in {
1560 def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
1561 (VCMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>;
1562 def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), (memop addr:$src2), imm:$cc)),
1563 (VCMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>;
1564 def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
1565 (VCMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
1566 def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), (memop addr:$src2), imm:$cc)),
1567 (VCMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;
1569 def : Pat<(v8i32 (X86cmpps (v8f32 VR256:$src1), VR256:$src2, imm:$cc)),
1570 (VCMPPSYrri (v8f32 VR256:$src1), (v8f32 VR256:$src2), imm:$cc)>;
1571 def : Pat<(v8i32 (X86cmpps (v8f32 VR256:$src1), (memop addr:$src2), imm:$cc)),
1572 (VCMPPSYrmi (v8f32 VR256:$src1), addr:$src2, imm:$cc)>;
1573 def : Pat<(v4i64 (X86cmppd (v4f64 VR256:$src1), VR256:$src2, imm:$cc)),
1574 (VCMPPDYrri VR256:$src1, VR256:$src2, imm:$cc)>;
1575 def : Pat<(v4i64 (X86cmppd (v4f64 VR256:$src1), (memop addr:$src2), imm:$cc)),
1576 (VCMPPDYrmi VR256:$src1, addr:$src2, imm:$cc)>;
1577 }
1579 //===----------------------------------------------------------------------===//
1580 // SSE 1 & 2 - Shuffle Instructions
1581 //===----------------------------------------------------------------------===//
1583 /// sse12_shuffle - sse 1 & 2 shuffle instructions
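// Note (added): each instantiation produces an rri (register) and an rmi
// (memory) form; e.g. "defm SHUFPS" below yields SHUFPSrri/SHUFPSrmi, which
// the HasSSE1/HasSSE2/HasAVX Pat<> definitions later in this section select
// for X86Shufps/X86Shufpd and the related shuffle fragments.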
1584 multiclass sse12_shuffle<RegisterClass RC, X86MemOperand x86memop,
1585 ValueType vt, string asm, PatFrag mem_frag,
1586 Domain d, bit IsConvertibleToThreeAddress = 0> {
1587 def rmi : PIi8<0xC6, MRMSrcMem, (outs RC:$dst),
1588 (ins RC:$src1, x86memop:$src2, i8imm:$src3), asm,
1589 [(set RC:$dst, (vt (shufp:$src3
1590 RC:$src1, (mem_frag addr:$src2))))], d>;
1591 let isConvertibleToThreeAddress = IsConvertibleToThreeAddress in
1592 def rri : PIi8<0xC6, MRMSrcReg, (outs RC:$dst),
1593 (ins RC:$src1, RC:$src2, i8imm:$src3), asm,
1594 [(set RC:$dst,
1595 (vt (shufp:$src3 RC:$src1, RC:$src2)))], d>;
1596 }
1598 defm VSHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
1599 "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
1600 memopv4f32, SSEPackedSingle>, TB, VEX_4V;
1601 defm VSHUFPSY : sse12_shuffle<VR256, f256mem, v8f32,
1602 "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
1603 memopv8f32, SSEPackedSingle>, TB, VEX_4V;
1604 defm VSHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
1605 "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
1606 memopv2f64, SSEPackedDouble>, TB, OpSize, VEX_4V;
1607 defm VSHUFPDY : sse12_shuffle<VR256, f256mem, v4f64,
1608 "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
1609 memopv4f64, SSEPackedDouble>, TB, OpSize, VEX_4V;
1611 let Constraints = "$src1 = $dst" in {
1612 defm SHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
1613 "shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}",
1614 memopv4f32, SSEPackedSingle, 1 /* cvt to pshufd */>,
1615 TB;
1616 defm SHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
1617 "shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}",
1618 memopv2f64, SSEPackedDouble>, TB, OpSize;
1619 }
1621 let Predicates = [HasSSE1] in {
1622 def : Pat<(v4f32 (X86Shufps VR128:$src1,
1623 (memopv4f32 addr:$src2), (i8 imm:$imm))),
1624 (SHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
1625 def : Pat<(v4f32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
1626 (SHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
1627 def : Pat<(v4i32 (X86Shufps VR128:$src1,
1628 (bc_v4i32 (memopv2i64 addr:$src2)), (i8 imm:$imm))),
1629 (SHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
1630 def : Pat<(v4i32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
1631 (SHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
1632 // vector_shuffle v1, v2 <4, 5, 2, 3> using SHUFPSrri (we prefer movsd, but
1633 // fall back to this for SSE1)
1634 def : Pat<(v4f32 (movlp:$src3 VR128:$src1, (v4f32 VR128:$src2))),
1635 (SHUFPSrri VR128:$src2, VR128:$src1,
1636 (SHUFFLE_get_shuf_imm VR128:$src3))>;
1637 // Special unary SHUFPSrri case.
1638 def : Pat<(v4f32 (pshufd:$src3 VR128:$src1, (undef))),
1639 (SHUFPSrri VR128:$src1, VR128:$src1,
1640 (SHUFFLE_get_shuf_imm VR128:$src3))>;
1641 }
1643 let Predicates = [HasSSE2] in {
1644 // Special binary v4i32 shuffle cases with SHUFPS.
1645 def : Pat<(v4i32 (shufp:$src3 VR128:$src1, (v4i32 VR128:$src2))),
1646 (SHUFPSrri VR128:$src1, VR128:$src2,
1647 (SHUFFLE_get_shuf_imm VR128:$src3))>;
1648 def : Pat<(v4i32 (shufp:$src3 VR128:$src1,
1649 (bc_v4i32 (memopv2i64 addr:$src2)))),
1650 (SHUFPSrmi VR128:$src1, addr:$src2,
1651 (SHUFFLE_get_shuf_imm VR128:$src3))>;
1652 // Special unary SHUFPDrri cases.
1653 def : Pat<(v2i64 (pshufd:$src3 VR128:$src1, (undef))),
1654 (SHUFPDrri VR128:$src1, VR128:$src1,
1655 (SHUFFLE_get_shuf_imm VR128:$src3))>;
1656 def : Pat<(v2f64 (pshufd:$src3 VR128:$src1, (undef))),
1657 (SHUFPDrri VR128:$src1, VR128:$src1,
1658 (SHUFFLE_get_shuf_imm VR128:$src3))>;
1659 // Special binary v2i64 shuffle cases using SHUFPDrri.
1660 def : Pat<(v2i64 (shufp:$src3 VR128:$src1, VR128:$src2)),
1661 (SHUFPDrri VR128:$src1, VR128:$src2,
1662 (SHUFFLE_get_shuf_imm VR128:$src3))>;
1663 // Generic SHUFPD patterns
1664 def : Pat<(v2f64 (X86Shufps VR128:$src1,
1665 (memopv2f64 addr:$src2), (i8 imm:$imm))),
1666 (SHUFPDrmi VR128:$src1, addr:$src2, imm:$imm)>;
1667 def : Pat<(v2i64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
1668 (SHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;
1669 def : Pat<(v2f64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
1670 (SHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;
1671 }
1673 let Predicates = [HasAVX] in {
1674 def : Pat<(v4f32 (X86Shufps VR128:$src1,
1675 (memopv4f32 addr:$src2), (i8 imm:$imm))),
1676 (VSHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
1677 def : Pat<(v4f32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
1678 (VSHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
1679 def : Pat<(v4i32 (X86Shufps VR128:$src1,
1680 (bc_v4i32 (memopv2i64 addr:$src2)), (i8 imm:$imm))),
1681 (VSHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
1682 def : Pat<(v4i32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
1683 (VSHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
1684 // vector_shuffle v1, v2 <4, 5, 2, 3> using SHUFPSrri (we prefer movsd, but
1685 // fall back to this for SSE1)
1686 def : Pat<(v4f32 (movlp:$src3 VR128:$src1, (v4f32 VR128:$src2))),
1687 (VSHUFPSrri VR128:$src2, VR128:$src1,
1688 (SHUFFLE_get_shuf_imm VR128:$src3))>;
1689 // Special unary SHUFPSrri case.
1690 def : Pat<(v4f32 (pshufd:$src3 VR128:$src1, (undef))),
1691 (VSHUFPSrri VR128:$src1, VR128:$src1,
1692 (SHUFFLE_get_shuf_imm VR128:$src3))>;
1693 // Special binary v4i32 shuffle cases with SHUFPS.
1694 def : Pat<(v4i32 (shufp:$src3 VR128:$src1, (v4i32 VR128:$src2))),
1695 (VSHUFPSrri VR128:$src1, VR128:$src2,
1696 (SHUFFLE_get_shuf_imm VR128:$src3))>;
1697 def : Pat<(v4i32 (shufp:$src3 VR128:$src1,
1698 (bc_v4i32 (memopv2i64 addr:$src2)))),
1699 (VSHUFPSrmi VR128:$src1, addr:$src2,
1700 (SHUFFLE_get_shuf_imm VR128:$src3))>;
1701 // Special unary SHUFPDrri cases.
1702 def : Pat<(v2i64 (pshufd:$src3 VR128:$src1, (undef))),
1703 (VSHUFPDrri VR128:$src1, VR128:$src1,
1704 (SHUFFLE_get_shuf_imm VR128:$src3))>;
1705 def : Pat<(v2f64 (pshufd:$src3 VR128:$src1, (undef))),
1706 (VSHUFPDrri VR128:$src1, VR128:$src1,
1707 (SHUFFLE_get_shuf_imm VR128:$src3))>;
1708 // Special binary v2i64 shuffle cases using SHUFPDrri.
1709 def : Pat<(v2i64 (shufp:$src3 VR128:$src1, VR128:$src2)),
1710 (VSHUFPDrri VR128:$src1, VR128:$src2,
1711 (SHUFFLE_get_shuf_imm VR128:$src3))>;
1713 def : Pat<(v2f64 (X86Shufps VR128:$src1,
1714 (memopv2f64 addr:$src2), (i8 imm:$imm))),
1715 (VSHUFPDrmi VR128:$src1, addr:$src2, imm:$imm)>;
1716 def : Pat<(v2i64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
1717 (VSHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;
1718 def : Pat<(v2f64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
1719 (VSHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;
1722 def : Pat<(v8i32 (X86Shufps VR256:$src1, VR256:$src2, (i8 imm:$imm))),
1723 (VSHUFPSYrri VR256:$src1, VR256:$src2, imm:$imm)>;
1724 def : Pat<(v8i32 (X86Shufps VR256:$src1,
1725 (bc_v8i32 (memopv4i64 addr:$src2)), (i8 imm:$imm))),
1726 (VSHUFPSYrmi VR256:$src1, addr:$src2, imm:$imm)>;
1728 def : Pat<(v8f32 (X86Shufps VR256:$src1, VR256:$src2, (i8 imm:$imm))),
1729 (VSHUFPSYrri VR256:$src1, VR256:$src2, imm:$imm)>;
1730 def : Pat<(v8f32 (X86Shufps VR256:$src1,
1731 (memopv8f32 addr:$src2), (i8 imm:$imm))),
1732 (VSHUFPSYrmi VR256:$src1, addr:$src2, imm:$imm)>;
1734 def : Pat<(v4i64 (X86Shufpd VR256:$src1, VR256:$src2, (i8 imm:$imm))),
1735 (VSHUFPDYrri VR256:$src1, VR256:$src2, imm:$imm)>;
1736 def : Pat<(v4i64 (X86Shufpd VR256:$src1,
1737 (memopv4i64 addr:$src2), (i8 imm:$imm))),
1738 (VSHUFPDYrmi VR256:$src1, addr:$src2, imm:$imm)>;
1740 def : Pat<(v4f64 (X86Shufpd VR256:$src1, VR256:$src2, (i8 imm:$imm))),
1741 (VSHUFPDYrri VR256:$src1, VR256:$src2, imm:$imm)>;
1742 def : Pat<(v4f64 (X86Shufpd VR256:$src1,
1743 (memopv4f64 addr:$src2), (i8 imm:$imm))),
1744 (VSHUFPDYrmi VR256:$src1, addr:$src2, imm:$imm)>;
1745 }
1747 //===----------------------------------------------------------------------===//
1748 // SSE 1 & 2 - Unpack Instructions
1749 //===----------------------------------------------------------------------===//
1751 /// sse12_unpack_interleave - sse 1 & 2 unpack and interleave
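// Note (added): instantiations produce rr/rm pairs such as UNPCKLPSrr and
// UNPCKLPSrm, which the predicate-guarded patterns below match against the
// X86Unpcklps/X86Unpckhps (and pd/256-bit) nodes; the AVX variants
// additionally get VEX_4V three-operand encodings.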
1752 multiclass sse12_unpack_interleave<bits<8> opc, PatFrag OpNode, ValueType vt,
1753 PatFrag mem_frag, RegisterClass RC,
1754 X86MemOperand x86memop, string asm,
1755 Domain d> {
1756 def rr : PI<opc, MRMSrcReg,
1757 (outs RC:$dst), (ins RC:$src1, RC:$src2),
1758 asm, [(set RC:$dst,
1759 (vt (OpNode RC:$src1, RC:$src2)))], d>;
1760 def rm : PI<opc, MRMSrcMem,
1761 (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
1762 asm, [(set RC:$dst,
1763 (vt (OpNode RC:$src1,
1764 (mem_frag addr:$src2))))], d>;
1765 }
1767 let AddedComplexity = 10 in {
1768 defm VUNPCKHPS: sse12_unpack_interleave<0x15, unpckh, v4f32, memopv4f32,
1769 VR128, f128mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1770 SSEPackedSingle>, TB, VEX_4V;
1771 defm VUNPCKHPD: sse12_unpack_interleave<0x15, unpckh, v2f64, memopv2f64,
1772 VR128, f128mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1773 SSEPackedDouble>, TB, OpSize, VEX_4V;
1774 defm VUNPCKLPS: sse12_unpack_interleave<0x14, unpckl, v4f32, memopv4f32,
1775 VR128, f128mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1776 SSEPackedSingle>, TB, VEX_4V;
1777 defm VUNPCKLPD: sse12_unpack_interleave<0x14, unpckl, v2f64, memopv2f64,
1778 VR128, f128mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1779 SSEPackedDouble>, TB, OpSize, VEX_4V;
1781 defm VUNPCKHPSY: sse12_unpack_interleave<0x15, unpckh, v8f32, memopv8f32,
1782 VR256, f256mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1783 SSEPackedSingle>, TB, VEX_4V;
1784 defm VUNPCKHPDY: sse12_unpack_interleave<0x15, unpckh, v4f64, memopv4f64,
1785 VR256, f256mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1786 SSEPackedDouble>, TB, OpSize, VEX_4V;
1787 defm VUNPCKLPSY: sse12_unpack_interleave<0x14, unpckl, v8f32, memopv8f32,
1788 VR256, f256mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1789 SSEPackedSingle>, TB, VEX_4V;
1790 defm VUNPCKLPDY: sse12_unpack_interleave<0x14, unpckl, v4f64, memopv4f64,
1791 VR256, f256mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1792 SSEPackedDouble>, TB, OpSize, VEX_4V;
1794 let Constraints = "$src1 = $dst" in {
1795 defm UNPCKHPS: sse12_unpack_interleave<0x15, unpckh, v4f32, memopv4f32,
1796 VR128, f128mem, "unpckhps\t{$src2, $dst|$dst, $src2}",
1797 SSEPackedSingle>, TB;
1798 defm UNPCKHPD: sse12_unpack_interleave<0x15, unpckh, v2f64, memopv2f64,
1799 VR128, f128mem, "unpckhpd\t{$src2, $dst|$dst, $src2}",
1800 SSEPackedDouble>, TB, OpSize;
1801 defm UNPCKLPS: sse12_unpack_interleave<0x14, unpckl, v4f32, memopv4f32,
1802 VR128, f128mem, "unpcklps\t{$src2, $dst|$dst, $src2}",
1803 SSEPackedSingle>, TB;
1804 defm UNPCKLPD: sse12_unpack_interleave<0x14, unpckl, v2f64, memopv2f64,
1805 VR128, f128mem, "unpcklpd\t{$src2, $dst|$dst, $src2}",
1806 SSEPackedDouble>, TB, OpSize;
1807 } // Constraints = "$src1 = $dst"
1808 } // AddedComplexity
1810 let Predicates = [HasSSE1] in {
1811 def : Pat<(v4f32 (X86Unpcklps VR128:$src1, (memopv4f32 addr:$src2))),
1812 (UNPCKLPSrm VR128:$src1, addr:$src2)>;
1813 def : Pat<(v4f32 (X86Unpcklps VR128:$src1, VR128:$src2)),
1814 (UNPCKLPSrr VR128:$src1, VR128:$src2)>;
1815 def : Pat<(v4f32 (X86Unpckhps VR128:$src1, (memopv4f32 addr:$src2))),
1816 (UNPCKHPSrm VR128:$src1, addr:$src2)>;
1817 def : Pat<(v4f32 (X86Unpckhps VR128:$src1, VR128:$src2)),
1818 (UNPCKHPSrr VR128:$src1, VR128:$src2)>;
1819 }
1821 let Predicates = [HasSSE2] in {
1822 def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, (memopv2f64 addr:$src2))),
1823 (UNPCKLPDrm VR128:$src1, addr:$src2)>;
1824 def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, VR128:$src2)),
1825 (UNPCKLPDrr VR128:$src1, VR128:$src2)>;
1826 def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, (memopv2f64 addr:$src2))),
1827 (UNPCKHPDrm VR128:$src1, addr:$src2)>;
1828 def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, VR128:$src2)),
1829 (UNPCKHPDrr VR128:$src1, VR128:$src2)>;
1831 // FIXME: Instead of X86Movddup, there should be an X86Unpcklpd here, but
1832 // during lowering the load fold cannot be recognized because the load has
1833 // two uses through a bitcast. One use disappears at isel time and the fold
1834 // opportunity reappears.
1835 def : Pat<(v2f64 (X86Movddup VR128:$src)),
1836 (UNPCKLPDrr VR128:$src, VR128:$src)>;
1838 let AddedComplexity = 10 in
1839 def : Pat<(splat_lo (v2f64 VR128:$src), (undef)),
1840 (UNPCKLPDrr VR128:$src, VR128:$src)>;
1841 }
1843 let Predicates = [HasAVX] in {
1844 def : Pat<(v4f32 (X86Unpcklps VR128:$src1, (memopv4f32 addr:$src2))),
1845 (VUNPCKLPSrm VR128:$src1, addr:$src2)>;
1846 def : Pat<(v4f32 (X86Unpcklps VR128:$src1, VR128:$src2)),
1847 (VUNPCKLPSrr VR128:$src1, VR128:$src2)>;
1848 def : Pat<(v4f32 (X86Unpckhps VR128:$src1, (memopv4f32 addr:$src2))),
1849 (VUNPCKHPSrm VR128:$src1, addr:$src2)>;
1850 def : Pat<(v4f32 (X86Unpckhps VR128:$src1, VR128:$src2)),
1851 (VUNPCKHPSrr VR128:$src1, VR128:$src2)>;
1853 def : Pat<(v8f32 (X86Unpcklpsy VR256:$src1, (memopv8f32 addr:$src2))),
1854 (VUNPCKLPSYrm VR256:$src1, addr:$src2)>;
1855 def : Pat<(v8f32 (X86Unpcklpsy VR256:$src1, VR256:$src2)),
1856 (VUNPCKLPSYrr VR256:$src1, VR256:$src2)>;
1857 def : Pat<(v8i32 (X86Unpcklpsy VR256:$src1, VR256:$src2)),
1858 (VUNPCKLPSYrr VR256:$src1, VR256:$src2)>;
1859 def : Pat<(v8i32 (X86Unpcklpsy VR256:$src1, (memopv8i32 addr:$src2))),
1860 (VUNPCKLPSYrm VR256:$src1, addr:$src2)>;
1861 def : Pat<(v8f32 (X86Unpckhpsy VR256:$src1, (memopv8f32 addr:$src2))),
1862 (VUNPCKHPSYrm VR256:$src1, addr:$src2)>;
1863 def : Pat<(v8f32 (X86Unpckhpsy VR256:$src1, VR256:$src2)),
1864 (VUNPCKHPSYrr VR256:$src1, VR256:$src2)>;
1865 def : Pat<(v8i32 (X86Unpckhpsy VR256:$src1, (memopv8i32 addr:$src2))),
1866 (VUNPCKHPSYrm VR256:$src1, addr:$src2)>;
1867 def : Pat<(v8i32 (X86Unpckhpsy VR256:$src1, VR256:$src2)),
1868 (VUNPCKHPSYrr VR256:$src1, VR256:$src2)>;
1870 def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, (memopv2f64 addr:$src2))),
1871 (VUNPCKLPDrm VR128:$src1, addr:$src2)>;
1872 def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, VR128:$src2)),
1873 (VUNPCKLPDrr VR128:$src1, VR128:$src2)>;
1874 def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, (memopv2f64 addr:$src2))),
1875 (VUNPCKHPDrm VR128:$src1, addr:$src2)>;
1876 def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, VR128:$src2)),
1877 (VUNPCKHPDrr VR128:$src1, VR128:$src2)>;
1879 def : Pat<(v4f64 (X86Unpcklpdy VR256:$src1, (memopv4f64 addr:$src2))),
1880 (VUNPCKLPDYrm VR256:$src1, addr:$src2)>;
1881 def : Pat<(v4f64 (X86Unpcklpdy VR256:$src1, VR256:$src2)),
1882 (VUNPCKLPDYrr VR256:$src1, VR256:$src2)>;
1883 def : Pat<(v4i64 (X86Unpcklpdy VR256:$src1, (memopv4i64 addr:$src2))),
1884 (VUNPCKLPDYrm VR256:$src1, addr:$src2)>;
1885 def : Pat<(v4i64 (X86Unpcklpdy VR256:$src1, VR256:$src2)),
1886 (VUNPCKLPDYrr VR256:$src1, VR256:$src2)>;
1887 def : Pat<(v4f64 (X86Unpckhpdy VR256:$src1, (memopv4f64 addr:$src2))),
1888 (VUNPCKHPDYrm VR256:$src1, addr:$src2)>;
1889 def : Pat<(v4f64 (X86Unpckhpdy VR256:$src1, VR256:$src2)),
1890 (VUNPCKHPDYrr VR256:$src1, VR256:$src2)>;
1891 def : Pat<(v4i64 (X86Unpckhpdy VR256:$src1, (memopv4i64 addr:$src2))),
1892 (VUNPCKHPDYrm VR256:$src1, addr:$src2)>;
1893 def : Pat<(v4i64 (X86Unpckhpdy VR256:$src1, VR256:$src2)),
1894 (VUNPCKHPDYrr VR256:$src1, VR256:$src2)>;
1896 // FIXME: Instead of X86Movddup, there should be an X86Unpcklpd here, but
1897 // during lowering the load fold cannot be recognized because the load has
1898 // two uses through a bitcast. One use disappears at isel time and the fold
1899 // opportunity reappears.
1900 def : Pat<(v2f64 (X86Movddup VR128:$src)),
1901 (VUNPCKLPDrr VR128:$src, VR128:$src)>;
1902 let AddedComplexity = 10 in
1903 def : Pat<(splat_lo (v2f64 VR128:$src), (undef)),
1904 (VUNPCKLPDrr VR128:$src, VR128:$src)>;
1905 }
1907 //===----------------------------------------------------------------------===//
1908 // SSE 1 & 2 - Extract Floating-Point Sign mask
1909 //===----------------------------------------------------------------------===//
1911 /// sse12_extr_sign_mask - sse 1 & 2 extract floating-point sign mask
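// Note (added): e.g. MOVMSKPSrr32 copies the sign bit of each of the four
// single-precision elements of the source into the low four bits of a GR32
// destination (int_x86_sse_movmsk_ps); the rr64 form is the same operation
// with a GR64 destination.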
1912 multiclass sse12_extr_sign_mask<RegisterClass RC, Intrinsic Int, string asm,
1913 Domain d> {
1914 def rr32 : PI<0x50, MRMSrcReg, (outs GR32:$dst), (ins RC:$src),
1915 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
1916 [(set GR32:$dst, (Int RC:$src))], d>;
1917 def rr64 : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins RC:$src),
1918 !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [], d>, REX_W;
1919 }
1921 defm MOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps, "movmskps",
1922 SSEPackedSingle>, TB;
1923 defm MOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd, "movmskpd",
1924 SSEPackedDouble>, TB, OpSize;
1926 def : Pat<(i32 (X86fgetsign FR32:$src)),
1927 (MOVMSKPSrr32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src,
1928 sub_ss))>, Requires<[HasSSE1]>;
1929 def : Pat<(i64 (X86fgetsign FR32:$src)),
1930 (MOVMSKPSrr64 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src,
1931 sub_ss))>, Requires<[HasSSE1]>;
1932 def : Pat<(i32 (X86fgetsign FR64:$src)),
1933 (MOVMSKPDrr32 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src,
1934 sub_sd))>, Requires<[HasSSE2]>;
1935 def : Pat<(i64 (X86fgetsign FR64:$src)),
1936 (MOVMSKPDrr64 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src,
1937 sub_sd))>, Requires<[HasSSE2]>;
1939 let Predicates = [HasAVX] in {
1940 defm VMOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps,
1941 "movmskps", SSEPackedSingle>, TB, VEX;
1942 defm VMOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd,
1943 "movmskpd", SSEPackedDouble>, TB, OpSize,
1944 VEX;
1945 defm VMOVMSKPSY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_ps_256,
1946 "movmskps", SSEPackedSingle>, TB, VEX;
1947 defm VMOVMSKPDY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_pd_256,
1948 "movmskpd", SSEPackedDouble>, TB, OpSize,
1949 VEX;
1951 def : Pat<(i32 (X86fgetsign FR32:$src)),
1952 (VMOVMSKPSrr32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src,
1953 sub_ss))>;
1954 def : Pat<(i64 (X86fgetsign FR32:$src)),
1955 (VMOVMSKPSrr64 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src,
1956 sub_ss))>;
1957 def : Pat<(i32 (X86fgetsign FR64:$src)),
1958 (VMOVMSKPDrr32 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src,
1959 sub_sd))>;
1960 def : Pat<(i64 (X86fgetsign FR64:$src)),
1961 (VMOVMSKPDrr64 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src,
1962 sub_sd))>;
1965 def VMOVMSKPSr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
1966 "movmskps\t{$src, $dst|$dst, $src}", [], SSEPackedSingle>, TB, VEX;
1967 def VMOVMSKPDr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
1968 "movmskpd\t{$src, $dst|$dst, $src}", [], SSEPackedDouble>, TB, OpSize,
1969 VEX;
1970 def VMOVMSKPSYr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
1971 "movmskps\t{$src, $dst|$dst, $src}", [], SSEPackedSingle>, TB, VEX;
1972 def VMOVMSKPDYr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
1973 "movmskpd\t{$src, $dst|$dst, $src}", [], SSEPackedDouble>, TB, OpSize,
1974 VEX;
1975 }
1977 //===----------------------------------------------------------------------===//
1978 // SSE 1 & 2 - Misc aliasing of packed SSE 1 & 2 instructions
1979 //===----------------------------------------------------------------------===//
1981 // Aliases of packed SSE1 & SSE2 instructions for scalar use. These all have
1982 // names that start with 'Fs'.
1984 // Alias instructions that map fld0 to pxor for sse.
1985 let isReMaterializable = 1, isAsCheapAsAMove = 1, isCodeGenOnly = 1,
1986 canFoldAsLoad = 1 in {
1987 // FIXME: Set encoding to pseudo!
1988 def FsFLD0SS : I<0xEF, MRMInitReg, (outs FR32:$dst), (ins), "",
1989 [(set FR32:$dst, fp32imm0)]>,
1990 Requires<[HasSSE1]>, TB, OpSize;
1991 def FsFLD0SD : I<0xEF, MRMInitReg, (outs FR64:$dst), (ins), "",
1992 [(set FR64:$dst, fpimm0)]>,
1993 Requires<[HasSSE2]>, TB, OpSize;
1994 def VFsFLD0SS : I<0xEF, MRMInitReg, (outs FR32:$dst), (ins), "",
1995 [(set FR32:$dst, fp32imm0)]>,
1996 Requires<[HasAVX]>, TB, OpSize, VEX_4V;
1997 def VFsFLD0SD : I<0xEF, MRMInitReg, (outs FR64:$dst), (ins), "",
1998 [(set FR64:$dst, fpimm0)]>,
1999 Requires<[HasAVX]>, TB, OpSize, VEX_4V;
2000 }
2002 // Alias instruction to do FR32 or FR64 reg-to-reg copy using movaps. Upper
2003 // bits are disregarded.
2004 let neverHasSideEffects = 1 in {
2005 def FsMOVAPSrr : PSI<0x28, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
2006 "movaps\t{$src, $dst|$dst, $src}", []>;
2007 def FsMOVAPDrr : PDI<0x28, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
2008 "movapd\t{$src, $dst|$dst, $src}", []>;
2009 }
2011 // Alias instruction to load FR32 or FR64 from f128mem using movaps. Upper
2012 // bits are disregarded.
2013 let canFoldAsLoad = 1, isReMaterializable = 1 in {
2014 def FsMOVAPSrm : PSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
2015 "movaps\t{$src, $dst|$dst, $src}",
2016 [(set FR32:$dst, (alignedloadfsf32 addr:$src))]>;
2017 def FsMOVAPDrm : PDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
2018 "movapd\t{$src, $dst|$dst, $src}",
2019 [(set FR64:$dst, (alignedloadfsf64 addr:$src))]>;
2020 }
2022 //===----------------------------------------------------------------------===//
2023 // SSE 1 & 2 - Logical Instructions
2024 //===----------------------------------------------------------------------===//
2026 /// sse12_fp_alias_pack_logical - SSE 1 & 2 aliased packed FP logical ops
2028 multiclass sse12_fp_alias_pack_logical<bits<8> opc, string OpcodeStr,
2029 SDNode OpNode> {
2030 defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
2031 FR32, f32, f128mem, memopfsf32, SSEPackedSingle, 0>, TB, VEX_4V;
2033 defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
2034 FR64, f64, f128mem, memopfsf64, SSEPackedDouble, 0>, TB, OpSize, VEX_4V;
2036 let Constraints = "$src1 = $dst" in {
2037 defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, FR32,
2038 f32, f128mem, memopfsf32, SSEPackedSingle>, TB;
2040 defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, FR64,
2041 f64, f128mem, memopfsf64, SSEPackedDouble>, TB, OpSize;
2042 }
2043 }
2045 // Alias bitwise logical operations using SSE logical ops on packed FP values.
2046 let mayLoad = 0 in {
2047 defm FsAND : sse12_fp_alias_pack_logical<0x54, "and", X86fand>;
2048 defm FsOR : sse12_fp_alias_pack_logical<0x56, "or", X86for>;
2049 defm FsXOR : sse12_fp_alias_pack_logical<0x57, "xor", X86fxor>;
2050 }
2052 let neverHasSideEffects = 1, Pattern = []<dag>, isCommutable = 0 in
2053 defm FsANDN : sse12_fp_alias_pack_logical<0x55, "andn", undef>;
2055 /// sse12_fp_packed_logical - SSE 1 & 2 packed FP logical ops
2057 multiclass sse12_fp_packed_logical<bits<8> opc, string OpcodeStr,
2058 SDNode OpNode> {
2059 // In AVX no need to add a pattern for 128-bit logical rr ps, because they
2060 // are all promoted to v2i64, and the patterns are covered by the int
2061 // version. This is needed in SSE only, because v2i64 isn't supported on
2062 // SSE1, but only on SSE2.
2063 defm V#NAME#PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
2064 !strconcat(OpcodeStr, "ps"), f128mem, [],
2065 [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
2066 (memopv2i64 addr:$src2)))], 0>, TB, VEX_4V;
2068 defm V#NAME#PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
2069 !strconcat(OpcodeStr, "pd"), f128mem,
2070 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
2071 (bc_v2i64 (v2f64 VR128:$src2))))],
2072 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
2073 (memopv2i64 addr:$src2)))], 0>,
2074 TB, OpSize, VEX_4V;
2075 let Constraints = "$src1 = $dst" in {
2076 defm PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
2077 !strconcat(OpcodeStr, "ps"), f128mem,
2078 [(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))],
2079 [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
2080 (memopv2i64 addr:$src2)))]>, TB;
2082 defm PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
2083 !strconcat(OpcodeStr, "pd"), f128mem,
2084 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
2085 (bc_v2i64 (v2f64 VR128:$src2))))],
2086 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
2087 (memopv2i64 addr:$src2)))]>, TB, OpSize;
2088 }
2089 }
2091 /// sse12_fp_packed_logical_y - AVX 256-bit SSE 1 & 2 logical ops forms
2093 multiclass sse12_fp_packed_logical_y<bits<8> opc, string OpcodeStr,
2094 SDNode OpNode> {
2095 defm PSY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedSingle,
2096 !strconcat(OpcodeStr, "ps"), f256mem,
2097 [(set VR256:$dst, (v4i64 (OpNode VR256:$src1, VR256:$src2)))],
2098 [(set VR256:$dst, (OpNode (bc_v4i64 (v8f32 VR256:$src1)),
2099 (memopv4i64 addr:$src2)))], 0>, TB, VEX_4V;
2101 defm PDY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedDouble,
2102 !strconcat(OpcodeStr, "pd"), f256mem,
2103 [(set VR256:$dst, (OpNode (bc_v4i64 (v4f64 VR256:$src1)),
2104 (bc_v4i64 (v4f64 VR256:$src2))))],
2105 [(set VR256:$dst, (OpNode (bc_v4i64 (v4f64 VR256:$src1)),
2106 (memopv4i64 addr:$src2)))], 0>,
2107 TB, OpSize, VEX_4V;
2108 }
2110 // AVX 256-bit packed logical ops forms
2111 defm VAND : sse12_fp_packed_logical_y<0x54, "and", and>;
2112 defm VOR : sse12_fp_packed_logical_y<0x56, "or", or>;
2113 defm VXOR : sse12_fp_packed_logical_y<0x57, "xor", xor>;
2114 defm VANDN : sse12_fp_packed_logical_y<0x55, "andn", X86andnp>;
2116 defm AND : sse12_fp_packed_logical<0x54, "and", and>;
2117 defm OR : sse12_fp_packed_logical<0x56, "or", or>;
2118 defm XOR : sse12_fp_packed_logical<0x57, "xor", xor>;
2119 let isCommutable = 0 in
2120 defm ANDN : sse12_fp_packed_logical<0x55, "andn", X86andnp>;
2122 //===----------------------------------------------------------------------===//
2123 // SSE 1 & 2 - Arithmetic Instructions
2124 //===----------------------------------------------------------------------===//
2126 /// basic_sse12_fp_binop_xxx - SSE 1 & 2 binops come in both scalar and
2127 /// vector forms.
2128 ///
2129 /// In addition, we also have a special variant of the scalar form here to
2130 /// represent the associated intrinsic operation. This form is unlike the
2131 /// plain scalar form, in that it takes an entire vector (instead of a scalar)
2132 /// and leaves the top elements unmodified (therefore these cannot be commuted).
2134 /// These three forms can each be reg+reg or reg+mem.
2137 /// FIXME: once all 256-bit intrinsics are matched, cleanup and refactor those
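// Note (added; names follow from the defm/def suffix concatenation): for
// example, "defm ADD" at the bottom of this section expands to ADDSSrr/ADDSSrm
// and ADDSDrr/ADDSDrm (scalar), ADDPSrr/ADDPSrm and ADDPDrr/ADDPDrm (packed),
// plus ADDSSrr_Int-style variants that apply the operation to full XMM
// registers for the corresponding intrinsics.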
2139 multiclass basic_sse12_fp_binop_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
2140 bit Is2Addr = 1> {
2141 defm SS : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "ss"),
2142 OpNode, FR32, f32mem, Is2Addr>, XS;
2143 defm SD : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "sd"),
2144 OpNode, FR64, f64mem, Is2Addr>, XD;
2145 }
2147 multiclass basic_sse12_fp_binop_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
2148 bit Is2Addr = 1> {
2149 let mayLoad = 0 in {
2150 defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR128,
2151 v4f32, f128mem, memopv4f32, SSEPackedSingle, Is2Addr>, TB;
2152 defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR128,
2153 v2f64, f128mem, memopv2f64, SSEPackedDouble, Is2Addr>, TB, OpSize;
2154 }
2155 }
2157 multiclass basic_sse12_fp_binop_p_y<bits<8> opc, string OpcodeStr,
2158 SDNode OpNode> {
2159 let mayLoad = 0 in {
2160 defm PSY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR256,
2161 v8f32, f256mem, memopv8f32, SSEPackedSingle, 0>, TB;
2162 defm PDY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR256,
2163 v4f64, f256mem, memopv4f64, SSEPackedDouble, 0>, TB, OpSize;
2164 }
2165 }
2167 multiclass basic_sse12_fp_binop_s_int<bits<8> opc, string OpcodeStr,
2168 bit Is2Addr = 1> {
2169 defm SS : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
2170 !strconcat(OpcodeStr, "ss"), "", "_ss", ssmem, sse_load_f32, Is2Addr>, XS;
2171 defm SD : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
2172 !strconcat(OpcodeStr, "sd"), "2", "_sd", sdmem, sse_load_f64, Is2Addr>, XD;
2173 }
2175 multiclass basic_sse12_fp_binop_p_int<bits<8> opc, string OpcodeStr,
2176 bit Is2Addr = 1> {
2177 defm PS : sse12_fp_packed_int<opc, OpcodeStr, VR128,
2178 !strconcat(OpcodeStr, "ps"), "sse", "_ps", f128mem, memopv4f32,
2179 SSEPackedSingle, Is2Addr>, TB;
2181 defm PD : sse12_fp_packed_int<opc, OpcodeStr, VR128,
2182 !strconcat(OpcodeStr, "pd"), "sse2", "_pd", f128mem, memopv2f64,
2183 SSEPackedDouble, Is2Addr>, TB, OpSize;
2184 }
2186 multiclass basic_sse12_fp_binop_p_y_int<bits<8> opc, string OpcodeStr> {
2187 defm PSY : sse12_fp_packed_int<opc, OpcodeStr, VR256,
2188 !strconcat(OpcodeStr, "ps"), "avx", "_ps_256", f256mem, memopv8f32,
2189 SSEPackedSingle, 0>, TB;
2191 defm PDY : sse12_fp_packed_int<opc, OpcodeStr, VR256,
2192 !strconcat(OpcodeStr, "pd"), "avx", "_pd_256", f256mem, memopv4f64,
2193 SSEPackedDouble, 0>, TB, OpSize;
2194 }
2196 // Binary Arithmetic instructions
2197 defm VADD : basic_sse12_fp_binop_s<0x58, "add", fadd, 0>,
2198 basic_sse12_fp_binop_s_int<0x58, "add", 0>,
2199 basic_sse12_fp_binop_p<0x58, "add", fadd, 0>,
2200 basic_sse12_fp_binop_p_y<0x58, "add", fadd>, VEX_4V;
2201 defm VMUL : basic_sse12_fp_binop_s<0x59, "mul", fmul, 0>,
2202 basic_sse12_fp_binop_s_int<0x59, "mul", 0>,
2203 basic_sse12_fp_binop_p<0x59, "mul", fmul, 0>,
2204 basic_sse12_fp_binop_p_y<0x59, "mul", fmul>, VEX_4V;
2206 let isCommutable = 0 in {
2207 defm VSUB : basic_sse12_fp_binop_s<0x5C, "sub", fsub, 0>,
2208 basic_sse12_fp_binop_s_int<0x5C, "sub", 0>,
2209 basic_sse12_fp_binop_p<0x5C, "sub", fsub, 0>,
2210 basic_sse12_fp_binop_p_y<0x5C, "sub", fsub>, VEX_4V;
2211 defm VDIV : basic_sse12_fp_binop_s<0x5E, "div", fdiv, 0>,
2212 basic_sse12_fp_binop_s_int<0x5E, "div", 0>,
2213 basic_sse12_fp_binop_p<0x5E, "div", fdiv, 0>,
2214 basic_sse12_fp_binop_p_y<0x5E, "div", fdiv>, VEX_4V;
2215 defm VMAX : basic_sse12_fp_binop_s<0x5F, "max", X86fmax, 0>,
2216 basic_sse12_fp_binop_s_int<0x5F, "max", 0>,
2217 basic_sse12_fp_binop_p<0x5F, "max", X86fmax, 0>,
2218 basic_sse12_fp_binop_p_int<0x5F, "max", 0>,
2219 basic_sse12_fp_binop_p_y<0x5F, "max", X86fmax>,
2220 basic_sse12_fp_binop_p_y_int<0x5F, "max">, VEX_4V;
2221 defm VMIN : basic_sse12_fp_binop_s<0x5D, "min", X86fmin, 0>,
2222 basic_sse12_fp_binop_s_int<0x5D, "min", 0>,
2223 basic_sse12_fp_binop_p<0x5D, "min", X86fmin, 0>,
2224 basic_sse12_fp_binop_p_int<0x5D, "min", 0>,
2225 basic_sse12_fp_binop_p_y_int<0x5D, "min">,
2226 basic_sse12_fp_binop_p_y<0x5D, "min", X86fmin>, VEX_4V;
2227 }
2229 let Constraints = "$src1 = $dst" in {
2230 defm ADD : basic_sse12_fp_binop_s<0x58, "add", fadd>,
2231 basic_sse12_fp_binop_p<0x58, "add", fadd>,
2232 basic_sse12_fp_binop_s_int<0x58, "add">;
2233 defm MUL : basic_sse12_fp_binop_s<0x59, "mul", fmul>,
2234 basic_sse12_fp_binop_p<0x59, "mul", fmul>,
2235 basic_sse12_fp_binop_s_int<0x59, "mul">;
2237 let isCommutable = 0 in {
2238 defm SUB : basic_sse12_fp_binop_s<0x5C, "sub", fsub>,
2239 basic_sse12_fp_binop_p<0x5C, "sub", fsub>,
2240 basic_sse12_fp_binop_s_int<0x5C, "sub">;
2241 defm DIV : basic_sse12_fp_binop_s<0x5E, "div", fdiv>,
2242 basic_sse12_fp_binop_p<0x5E, "div", fdiv>,
2243 basic_sse12_fp_binop_s_int<0x5E, "div">;
2244 defm MAX : basic_sse12_fp_binop_s<0x5F, "max", X86fmax>,
2245 basic_sse12_fp_binop_p<0x5F, "max", X86fmax>,
2246 basic_sse12_fp_binop_s_int<0x5F, "max">,
2247 basic_sse12_fp_binop_p_int<0x5F, "max">;
2248 defm MIN : basic_sse12_fp_binop_s<0x5D, "min", X86fmin>,
2249 basic_sse12_fp_binop_p<0x5D, "min", X86fmin>,
2250 basic_sse12_fp_binop_s_int<0x5D, "min">,
2251 basic_sse12_fp_binop_p_int<0x5D, "min">;
2252 }
2253 }
2256 /// In addition, we also have a special variant of the scalar form here to
2257 /// represent the associated intrinsic operation. This form is unlike the
2258 /// plain scalar form, in that it takes an entire vector (instead of a
2259 /// scalar) and leaves the top elements undefined.
2261 /// And, we have a special variant form for a full-vector intrinsic form.
2263 /// sse1_fp_unop_s - SSE1 unops in scalar form.
2264 multiclass sse1_fp_unop_s<bits<8> opc, string OpcodeStr,
2265 SDNode OpNode, Intrinsic F32Int> {
2266 def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
2267 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
2268 [(set FR32:$dst, (OpNode FR32:$src))]>;
2269 // For scalar unary operations, fold a load into the operation
2270 // only in OptForSize mode. It eliminates an instruction, but it also
2271 // eliminates a whole-register clobber (the load), so it introduces a
2272 // partial register update condition.
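// Added illustrative note: folding the load, e.g. "sqrtss (%rax), %xmm0",
// writes only the low 32 bits of %xmm0 and merges the upper bits, so the
// instruction now depends on the previous value of %xmm0; the unfolded
// "movss (%rax), %xmm0" would have defined the whole register and broken
// that dependence. Hence the fold is restricted to OptForSize.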
2273 def SSm : I<opc, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
2274 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
2275 [(set FR32:$dst, (OpNode (load addr:$src)))]>, XS,
2276 Requires<[HasSSE1, OptForSize]>;
2277 def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2278 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
2279 [(set VR128:$dst, (F32Int VR128:$src))]>;
2280 def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins ssmem:$src),
2281 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
2282 [(set VR128:$dst, (F32Int sse_load_f32:$src))]>;
2283 }
2285 /// sse1_fp_unop_s_avx - AVX SSE1 unops in scalar form.
2286 multiclass sse1_fp_unop_s_avx<bits<8> opc, string OpcodeStr> {
2287 def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
2288 !strconcat(OpcodeStr,
2289 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
2290 def SSm : SSI<opc, MRMSrcMem, (outs FR32:$dst), (ins FR32:$src1,f32mem:$src2),
2291 !strconcat(OpcodeStr,
2292 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
2293 def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst),
2294 (ins ssmem:$src1, VR128:$src2),
2295 !strconcat(OpcodeStr,
2296 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
2297 }
2299 /// sse1_fp_unop_p - SSE1 unops in packed form.
2300 multiclass sse1_fp_unop_p<bits<8> opc, string OpcodeStr, SDNode OpNode> {
2301 def PSr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2302 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
2303 [(set VR128:$dst, (v4f32 (OpNode VR128:$src)))]>;
2304 def PSm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2305 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
2306 [(set VR128:$dst, (OpNode (memopv4f32 addr:$src)))]>;
2307 }
2309 /// sse1_fp_unop_p_y - AVX 256-bit SSE1 unops in packed form.
2310 multiclass sse1_fp_unop_p_y<bits<8> opc, string OpcodeStr, SDNode OpNode> {
2311 def PSYr : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
2312 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
2313 [(set VR256:$dst, (v8f32 (OpNode VR256:$src)))]>;
2314 def PSYm : PSI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
2315 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
2316 [(set VR256:$dst, (OpNode (memopv8f32 addr:$src)))]>;
2317 }
2319 /// sse1_fp_unop_p_int - SSE1 intrinsics unops in packed forms.
2320 multiclass sse1_fp_unop_p_int<bits<8> opc, string OpcodeStr,
2321 Intrinsic V4F32Int> {
2322 def PSr_Int : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2323 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
2324 [(set VR128:$dst, (V4F32Int VR128:$src))]>;
2325 def PSm_Int : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2326 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
2327 [(set VR128:$dst, (V4F32Int (memopv4f32 addr:$src)))]>;
2328 }
2330 /// sse1_fp_unop_p_y_int - AVX 256-bit intrinsics unops in packed forms.
2331 multiclass sse1_fp_unop_p_y_int<bits<8> opc, string OpcodeStr,
2332 Intrinsic V4F32Int> {
2333 def PSYr_Int : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
2334 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
2335 [(set VR256:$dst, (V4F32Int VR256:$src))]>;
2336 def PSYm_Int : PSI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
2337 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
2338 [(set VR256:$dst, (V4F32Int (memopv8f32 addr:$src)))]>;
2339 }
2341 /// sse2_fp_unop_s - SSE2 unops in scalar form.
2342 multiclass sse2_fp_unop_s<bits<8> opc, string OpcodeStr,
2343 SDNode OpNode, Intrinsic F64Int> {
2344 def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
2345 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
2346 [(set FR64:$dst, (OpNode FR64:$src))]>;
2347 // See the comments in sse1_fp_unop_s for why this is OptForSize.
2348 def SDm : I<opc, MRMSrcMem, (outs FR64:$dst), (ins f64mem:$src),
2349 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
2350 [(set FR64:$dst, (OpNode (load addr:$src)))]>, XD,
2351 Requires<[HasSSE2, OptForSize]>;
2352 def SDr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2353 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
2354 [(set VR128:$dst, (F64Int VR128:$src))]>;
2355 def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins sdmem:$src),
2356 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
2357 [(set VR128:$dst, (F64Int sse_load_f64:$src))]>;
2358 }
2360 /// sse2_fp_unop_s_avx - AVX SSE2 unops in scalar form.
2361 multiclass sse2_fp_unop_s_avx<bits<8> opc, string OpcodeStr> {
2362 def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
2363 !strconcat(OpcodeStr,
2364 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
2365 def SDm : SDI<opc, MRMSrcMem, (outs FR64:$dst), (ins FR64:$src1,f64mem:$src2),
2366 !strconcat(OpcodeStr,
2367 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
2368 def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst),
2369 (ins VR128:$src1, sdmem:$src2),
2370 !strconcat(OpcodeStr,
2371 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
2372 }
2374 /// sse2_fp_unop_p - SSE2 unops in vector forms.
2375 multiclass sse2_fp_unop_p<bits<8> opc, string OpcodeStr,
2376 SDNode OpNode> {
2377 def PDr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2378 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
2379 [(set VR128:$dst, (v2f64 (OpNode VR128:$src)))]>;
2380 def PDm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2381 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
2382 [(set VR128:$dst, (OpNode (memopv2f64 addr:$src)))]>;
2383 }
2385 /// sse2_fp_unop_p_y - AVX SSE2 256-bit unops in vector forms.
2386 multiclass sse2_fp_unop_p_y<bits<8> opc, string OpcodeStr, SDNode OpNode> {
2387 def PDYr : PDI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
2388 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
2389 [(set VR256:$dst, (v4f64 (OpNode VR256:$src)))]>;
2390 def PDYm : PDI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
2391 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
2392 [(set VR256:$dst, (OpNode (memopv4f64 addr:$src)))]>;
2393 }
2395 /// sse2_fp_unop_p_int - SSE2 intrinsic unops in vector forms.
2396 multiclass sse2_fp_unop_p_int<bits<8> opc, string OpcodeStr,
2397 Intrinsic V2F64Int> {
2398 def PDr_Int : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2399 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
2400 [(set VR128:$dst, (V2F64Int VR128:$src))]>;
2401 def PDm_Int : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2402 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
2403 [(set VR128:$dst, (V2F64Int (memopv2f64 addr:$src)))]>;
2404 }
2406 /// sse2_fp_unop_p_y_int - AVX 256-bit intrinsic unops in vector forms.
2407 multiclass sse2_fp_unop_p_y_int<bits<8> opc, string OpcodeStr,
2408 Intrinsic V2F64Int> {
2409 def PDYr_Int : PDI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
2410 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
2411 [(set VR256:$dst, (V2F64Int VR256:$src))]>;
2412 def PDYm_Int : PDI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
2413 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
2414 [(set VR256:$dst, (V2F64Int (memopv4f64 addr:$src)))]>;
2415 }
2417 let Predicates = [HasAVX] in {
2419 defm VSQRT : sse1_fp_unop_s_avx<0x51, "vsqrt">,
2420 sse2_fp_unop_s_avx<0x51, "vsqrt">, VEX_4V;
2422 defm VSQRT : sse1_fp_unop_p<0x51, "vsqrt", fsqrt>,
2423 sse2_fp_unop_p<0x51, "vsqrt", fsqrt>,
2424 sse1_fp_unop_p_y<0x51, "vsqrt", fsqrt>,
2425 sse2_fp_unop_p_y<0x51, "vsqrt", fsqrt>,
2426 sse1_fp_unop_p_int<0x51, "vsqrt", int_x86_sse_sqrt_ps>,
2427 sse2_fp_unop_p_int<0x51, "vsqrt", int_x86_sse2_sqrt_pd>,
2428 sse1_fp_unop_p_y_int<0x51, "vsqrt", int_x86_avx_sqrt_ps_256>,
2429 sse2_fp_unop_p_y_int<0x51, "vsqrt", int_x86_avx_sqrt_pd_256>,
2430 VEX;
2432 // Reciprocal approximations. Note that these typically require refinement
2433 // in order to obtain suitable precision.
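// Added note: rsqrtss/rcpss (and their packed forms) return roughly 12-bit
// accurate estimates. A single Newton-Raphson step, e.g.
//   x1 = x0 * (1.5 - 0.5 * a * x0 * x0)   ; refine rsqrt(a)
//   x1 = x0 * (2.0 - a * x0)              ; refine rcp(a)
// roughly doubles the number of accurate bits when more precision is needed.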
2434 defm VRSQRT : sse1_fp_unop_s_avx<0x52, "vrsqrt">, VEX_4V;
2435 defm VRSQRT : sse1_fp_unop_p<0x52, "vrsqrt", X86frsqrt>,
2436 sse1_fp_unop_p_y<0x52, "vrsqrt", X86frsqrt>,
2437 sse1_fp_unop_p_y_int<0x52, "vrsqrt", int_x86_avx_rsqrt_ps_256>,
2438 sse1_fp_unop_p_int<0x52, "vrsqrt", int_x86_sse_rsqrt_ps>, VEX;
2440 defm VRCP : sse1_fp_unop_s_avx<0x53, "vrcp">, VEX_4V;
2441 defm VRCP : sse1_fp_unop_p<0x53, "vrcp", X86frcp>,
2442 sse1_fp_unop_p_y<0x53, "vrcp", X86frcp>,
2443 sse1_fp_unop_p_y_int<0x53, "vrcp", int_x86_avx_rcp_ps_256>,
2444 sse1_fp_unop_p_int<0x53, "vrcp", int_x86_sse_rcp_ps>, VEX;
2445 }
2447 def : Pat<(f32 (fsqrt FR32:$src)),
2448 (VSQRTSSr (f32 (IMPLICIT_DEF)), FR32:$src)>, Requires<[HasAVX]>;
2449 def : Pat<(f32 (fsqrt (load addr:$src))),
2450 (VSQRTSSm (f32 (IMPLICIT_DEF)), addr:$src)>,
2451 Requires<[HasAVX, OptForSize]>;
2452 def : Pat<(f64 (fsqrt FR64:$src)),
2453 (VSQRTSDr (f64 (IMPLICIT_DEF)), FR64:$src)>, Requires<[HasAVX]>;
2454 def : Pat<(f64 (fsqrt (load addr:$src))),
2455 (VSQRTSDm (f64 (IMPLICIT_DEF)), addr:$src)>,
2456 Requires<[HasAVX, OptForSize]>;
2458 def : Pat<(f32 (X86frsqrt FR32:$src)),
2459 (VRSQRTSSr (f32 (IMPLICIT_DEF)), FR32:$src)>, Requires<[HasAVX]>;
2460 def : Pat<(f32 (X86frsqrt (load addr:$src))),
2461 (VRSQRTSSm (f32 (IMPLICIT_DEF)), addr:$src)>,
2462 Requires<[HasAVX, OptForSize]>;
2464 def : Pat<(f32 (X86frcp FR32:$src)),
2465 (VRCPSSr (f32 (IMPLICIT_DEF)), FR32:$src)>, Requires<[HasAVX]>;
2466 def : Pat<(f32 (X86frcp (load addr:$src))),
2467 (VRCPSSm (f32 (IMPLICIT_DEF)), addr:$src)>,
2468 Requires<[HasAVX, OptForSize]>;
2470 let Predicates = [HasAVX] in {
2471 def : Pat<(int_x86_sse_sqrt_ss VR128:$src),
2472 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)),
2473 (VSQRTSSr (f32 (IMPLICIT_DEF)),
2474 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss)),
2475 sub_ss)>;
2476 def : Pat<(int_x86_sse_sqrt_ss sse_load_f32:$src),
2477 (VSQRTSSm_Int (v4f32 (IMPLICIT_DEF)), sse_load_f32:$src)>;
2479 def : Pat<(int_x86_sse2_sqrt_sd VR128:$src),
2480 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)),
2481 (VSQRTSDr (f64 (IMPLICIT_DEF)),
2482 (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd)),
2483 sub_sd)>;
2484 def : Pat<(int_x86_sse2_sqrt_sd sse_load_f64:$src),
2485 (VSQRTSDm_Int (v2f64 (IMPLICIT_DEF)), sse_load_f64:$src)>;
2487 def : Pat<(int_x86_sse_rsqrt_ss VR128:$src),
2488 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)),
2489 (VRSQRTSSr (f32 (IMPLICIT_DEF)),
2490 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss)),
2491 sub_ss)>;
2492 def : Pat<(int_x86_sse_rsqrt_ss sse_load_f32:$src),
2493 (VRSQRTSSm_Int (v4f32 (IMPLICIT_DEF)), sse_load_f32:$src)>;
2495 def : Pat<(int_x86_sse_rcp_ss VR128:$src),
2496 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)),
2497 (VRCPSSr (f32 (IMPLICIT_DEF)),
2498 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss)),
2499 sub_ss)>;
2500 def : Pat<(int_x86_sse_rcp_ss sse_load_f32:$src),
2501 (VRCPSSm_Int (v4f32 (IMPLICIT_DEF)), sse_load_f32:$src)>;
2505 defm SQRT : sse1_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse_sqrt_ss>,
2506 sse1_fp_unop_p<0x51, "sqrt", fsqrt>,
2507 sse1_fp_unop_p_int<0x51, "sqrt", int_x86_sse_sqrt_ps>,
2508 sse2_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse2_sqrt_sd>,
2509 sse2_fp_unop_p<0x51, "sqrt", fsqrt>,
2510 sse2_fp_unop_p_int<0x51, "sqrt", int_x86_sse2_sqrt_pd>;
2512 // Reciprocal approximations. Note that these typically require refinement
2513 // in order to obtain suitable precision.
2514 defm RSQRT : sse1_fp_unop_s<0x52, "rsqrt", X86frsqrt, int_x86_sse_rsqrt_ss>,
2515 sse1_fp_unop_p<0x52, "rsqrt", X86frsqrt>,
2516 sse1_fp_unop_p_int<0x52, "rsqrt", int_x86_sse_rsqrt_ps>;
2517 defm RCP : sse1_fp_unop_s<0x53, "rcp", X86frcp, int_x86_sse_rcp_ss>,
2518 sse1_fp_unop_p<0x53, "rcp", X86frcp>,
2519 sse1_fp_unop_p_int<0x53, "rcp", int_x86_sse_rcp_ps>;
2521 // There is no f64 version of the reciprocal approximation instructions.
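// (RCPPS/RSQRTPS and their scalar forms exist only for single precision;
// double-precision code must use the full-precision DIVPD/SQRTPD or refine a
// single-precision estimate.)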
2523 //===----------------------------------------------------------------------===//
2524 // SSE 1 & 2 - Non-temporal stores
2525 //===----------------------------------------------------------------------===//
2527 let AddedComplexity = 400 in { // Prefer non-temporal versions
2528 def VMOVNTPSmr : VPSI<0x2B, MRMDestMem, (outs),
2529 (ins f128mem:$dst, VR128:$src),
2530 "movntps\t{$src, $dst|$dst, $src}",
2531 [(alignednontemporalstore (v4f32 VR128:$src),
2532 addr:$dst)]>, VEX;
2533 def VMOVNTPDmr : VPDI<0x2B, MRMDestMem, (outs),
2534 (ins f128mem:$dst, VR128:$src),
2535 "movntpd\t{$src, $dst|$dst, $src}",
2536 [(alignednontemporalstore (v2f64 VR128:$src),
2537 addr:$dst)]>, VEX;
2538 def VMOVNTDQ_64mr : VPDI<0xE7, MRMDestMem, (outs),
2539 (ins f128mem:$dst, VR128:$src),
2540 "movntdq\t{$src, $dst|$dst, $src}",
2541 [(alignednontemporalstore (v2f64 VR128:$src),
2542 addr:$dst)]>, VEX;
2544 let ExeDomain = SSEPackedInt in
2545 def VMOVNTDQmr : VPDI<0xE7, MRMDestMem, (outs),
2546 (ins f128mem:$dst, VR128:$src),
2547 "movntdq\t{$src, $dst|$dst, $src}",
2548 [(alignednontemporalstore (v4f32 VR128:$src),
2549 addr:$dst)]>, VEX;
2551 def : Pat<(alignednontemporalstore (v2i64 VR128:$src), addr:$dst),
2552 (VMOVNTDQmr addr:$dst, VR128:$src)>, Requires<[HasAVX]>;
2554 def VMOVNTPSYmr : VPSI<0x2B, MRMDestMem, (outs),
2555 (ins f256mem:$dst, VR256:$src),
2556 "movntps\t{$src, $dst|$dst, $src}",
2557 [(alignednontemporalstore (v8f32 VR256:$src),
2558 addr:$dst)]>, VEX;
2559 def VMOVNTPDYmr : VPDI<0x2B, MRMDestMem, (outs),
2560 (ins f256mem:$dst, VR256:$src),
2561 "movntpd\t{$src, $dst|$dst, $src}",
2562 [(alignednontemporalstore (v4f64 VR256:$src),
2563 addr:$dst)]>, VEX;
2564 def VMOVNTDQY_64mr : VPDI<0xE7, MRMDestMem, (outs),
2565 (ins f256mem:$dst, VR256:$src),
2566 "movntdq\t{$src, $dst|$dst, $src}",
2567 [(alignednontemporalstore (v4f64 VR256:$src),
2568 addr:$dst)]>, VEX;
2569 let ExeDomain = SSEPackedInt in
2570 def VMOVNTDQYmr : VPDI<0xE7, MRMDestMem, (outs),
2571 (ins f256mem:$dst, VR256:$src),
2572 "movntdq\t{$src, $dst|$dst, $src}",
2573 [(alignednontemporalstore (v8f32 VR256:$src),
2574 addr:$dst)]>, VEX;
2575 }
2577 def : Pat<(int_x86_avx_movnt_dq_256 addr:$dst, VR256:$src),
2578 (VMOVNTDQYmr addr:$dst, VR256:$src)>;
2579 def : Pat<(int_x86_avx_movnt_pd_256 addr:$dst, VR256:$src),
2580 (VMOVNTPDYmr addr:$dst, VR256:$src)>;
2581 def : Pat<(int_x86_avx_movnt_ps_256 addr:$dst, VR256:$src),
2582 (VMOVNTPSYmr addr:$dst, VR256:$src)>;
2584 let AddedComplexity = 400 in { // Prefer non-temporal versions
2585 def MOVNTPSmr : PSI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2586 "movntps\t{$src, $dst|$dst, $src}",
2587 [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)]>;
2588 def MOVNTPDmr : PDI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2589 "movntpd\t{$src, $dst|$dst, $src}",
2590 [(alignednontemporalstore (v2f64 VR128:$src), addr:$dst)]>;
2592 def MOVNTDQ_64mr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2593 "movntdq\t{$src, $dst|$dst, $src}",
2594 [(alignednontemporalstore (v2f64 VR128:$src), addr:$dst)]>;
2596 let ExeDomain = SSEPackedInt in
2597 def MOVNTDQmr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2598 "movntdq\t{$src, $dst|$dst, $src}",
2599 [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)]>;
2601 def : Pat<(alignednontemporalstore (v2i64 VR128:$src), addr:$dst),
2602 (MOVNTDQmr addr:$dst, VR128:$src)>;
2604 // There is no AVX form for instructions below this point
2605 def MOVNTImr : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
2606 "movnti{l}\t{$src, $dst|$dst, $src}",
2607 [(nontemporalstore (i32 GR32:$src), addr:$dst)]>,
2608 TB, Requires<[HasSSE2]>;
2609 def MOVNTI_64mr : RI<0xC3, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
2610 "movnti{q}\t{$src, $dst|$dst, $src}",
2611 [(nontemporalstore (i64 GR64:$src), addr:$dst)]>,
2612 TB, Requires<[HasSSE2]>;
2615 //===----------------------------------------------------------------------===//
2616 // SSE 1 & 2 - Prefetch and memory fence
2617 //===----------------------------------------------------------------------===//
2619 // Prefetch intrinsic.
2620 def PREFETCHT0 : PSI<0x18, MRM1m, (outs), (ins i8mem:$src),
2621 "prefetcht0\t$src", [(prefetch addr:$src, imm, (i32 3), (i32 1))]>;
2622 def PREFETCHT1 : PSI<0x18, MRM2m, (outs), (ins i8mem:$src),
2623 "prefetcht1\t$src", [(prefetch addr:$src, imm, (i32 2), (i32 1))]>;
2624 def PREFETCHT2 : PSI<0x18, MRM3m, (outs), (ins i8mem:$src),
2625 "prefetcht2\t$src", [(prefetch addr:$src, imm, (i32 1), (i32 1))]>;
2626 def PREFETCHNTA : PSI<0x18, MRM0m, (outs), (ins i8mem:$src),
2627 "prefetchnta\t$src", [(prefetch addr:$src, imm, (i32 0), (i32 1))]>;
2629 // Load, store, and memory fence
2630 def SFENCE : I<0xAE, MRM_F8, (outs), (ins), "sfence", [(int_x86_sse_sfence)]>,
2631 TB, Requires<[HasSSE1]>;
2632 def : Pat<(X86SFence), (SFENCE)>;
2634 //===----------------------------------------------------------------------===//
2635 // SSE 1 & 2 - Load/Store MXCSR register
2636 //===----------------------------------------------------------------------===//
2638 def VLDMXCSR : VPSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
2639 "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>, VEX;
2640 def VSTMXCSR : VPSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
2641 "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>, VEX;
2643 def LDMXCSR : PSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
2644 "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>;
2645 def STMXCSR : PSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
2646 "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>;
2648 //===---------------------------------------------------------------------===//
2649 // SSE2 - Move Aligned/Unaligned Packed Integer Instructions
2650 //===---------------------------------------------------------------------===//
2652 let ExeDomain = SSEPackedInt in { // SSE integer instructions
2654 let neverHasSideEffects = 1 in {
2655 def VMOVDQArr : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2656 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2657 def VMOVDQAYrr : VPDI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
2658 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2660 def VMOVDQUrr : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2661 "movdqu\t{$src, $dst|$dst, $src}", []>, XS, VEX;
2662 def VMOVDQUYrr : VPDI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
2663 "movdqu\t{$src, $dst|$dst, $src}", []>, XS, VEX;
2665 let canFoldAsLoad = 1, mayLoad = 1 in {
2666 def VMOVDQArm : VPDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2667 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2668 def VMOVDQAYrm : VPDI<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
2669 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2670 let Predicates = [HasAVX] in {
2671 def VMOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2672 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
2673 def VMOVDQUYrm : I<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
2674 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
2678 let mayStore = 1 in {
2679 def VMOVDQAmr : VPDI<0x7F, MRMDestMem, (outs),
2680 (ins i128mem:$dst, VR128:$src),
2681 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2682 def VMOVDQAYmr : VPDI<0x7F, MRMDestMem, (outs),
2683 (ins i256mem:$dst, VR256:$src),
2684 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2685 let Predicates = [HasAVX] in {
2686 def VMOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2687 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
2688 def VMOVDQUYmr : I<0x7F, MRMDestMem, (outs), (ins i256mem:$dst, VR256:$src),
2689 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
2693 let neverHasSideEffects = 1 in
2694 def MOVDQArr : PDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2695 "movdqa\t{$src, $dst|$dst, $src}", []>;
2697 def MOVDQUrr : I<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2698 "movdqu\t{$src, $dst|$dst, $src}",
2699 []>, XS, Requires<[HasSSE2]>;
2701 let canFoldAsLoad = 1, mayLoad = 1 in {
2702 def MOVDQArm : PDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2703 "movdqa\t{$src, $dst|$dst, $src}",
2704 [/*(set VR128:$dst, (alignedloadv2i64 addr:$src))*/]>;
2705 def MOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2706 "movdqu\t{$src, $dst|$dst, $src}",
2707 [/*(set VR128:$dst, (loadv2i64 addr:$src))*/]>,
2708 XS, Requires<[HasSSE2]>;
2711 let mayStore = 1 in {
2712 def MOVDQAmr : PDI<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2713 "movdqa\t{$src, $dst|$dst, $src}",
2714 [/*(alignedstore (v2i64 VR128:$src), addr:$dst)*/]>;
2715 def MOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2716 "movdqu\t{$src, $dst|$dst, $src}",
2717 [/*(store (v2i64 VR128:$src), addr:$dst)*/]>,
2718 XS, Requires<[HasSSE2]>;
2721 // Intrinsic forms of MOVDQU load and store
2722 def VMOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2723 "vmovdqu\t{$src, $dst|$dst, $src}",
2724 [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
2725 XS, VEX, Requires<[HasAVX]>;
2727 def MOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2728 "movdqu\t{$src, $dst|$dst, $src}",
2729 [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
2730 XS, Requires<[HasSSE2]>;
2732 } // ExeDomain = SSEPackedInt
2734 def : Pat<(int_x86_avx_loadu_dq_256 addr:$src), (VMOVDQUYrm addr:$src)>;
2735 def : Pat<(int_x86_avx_storeu_dq_256 addr:$dst, VR256:$src),
2736 (VMOVDQUYmr addr:$dst, VR256:$src)>;
2738 //===---------------------------------------------------------------------===//
2739 // SSE2 - Packed Integer Arithmetic Instructions
2740 //===---------------------------------------------------------------------===//
2742 let ExeDomain = SSEPackedInt in { // SSE integer instructions
2744 multiclass PDI_binop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId,
2745 bit IsCommutable = 0, bit Is2Addr = 1> {
2746 let isCommutable = IsCommutable in
2747 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2748 (ins VR128:$src1, VR128:$src2),
2749 !if(Is2Addr,
2750 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2751 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2752 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
2753 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2754 (ins VR128:$src1, i128mem:$src2),
2755 !if(Is2Addr,
2756 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2757 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2758 [(set VR128:$dst, (IntId VR128:$src1,
2759 (bitconvert (memopv2i64 addr:$src2))))]>;
2762 multiclass PDI_binop_rmi_int<bits<8> opc, bits<8> opc2, Format ImmForm,
2763 string OpcodeStr, Intrinsic IntId,
2764 Intrinsic IntId2, bit Is2Addr = 1> {
2765 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2766 (ins VR128:$src1, VR128:$src2),
2767 !if(Is2Addr,
2768 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2769 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2770 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
2771 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2772 (ins VR128:$src1, i128mem:$src2),
2773 !if(Is2Addr,
2774 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2775 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2776 [(set VR128:$dst, (IntId VR128:$src1,
2777 (bitconvert (memopv2i64 addr:$src2))))]>;
2778 def ri : PDIi8<opc2, ImmForm, (outs VR128:$dst),
2779 (ins VR128:$src1, i32i8imm:$src2),
2780 !if(Is2Addr,
2781 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2782 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2783 [(set VR128:$dst, (IntId2 VR128:$src1, (i32 imm:$src2)))]>;
2786 /// PDI_binop_rm - Simple SSE2 binary operator.
2787 multiclass PDI_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
2788 ValueType OpVT, bit IsCommutable = 0, bit Is2Addr = 1> {
2789 let isCommutable = IsCommutable in
2790 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2791 (ins VR128:$src1, VR128:$src2),
2792 !if(Is2Addr,
2793 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2794 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2795 [(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]>;
2796 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2797 (ins VR128:$src1, i128mem:$src2),
2798 !if(Is2Addr,
2799 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2800 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2801 [(set VR128:$dst, (OpVT (OpNode VR128:$src1,
2802 (bitconvert (memopv2i64 addr:$src2)))))]>;
2805 /// PDI_binop_rm_v2i64 - Simple SSE2 binary operator whose type is v2i64.
2807 /// FIXME: we could eliminate this and use PDI_binop_rm instead if tblgen knew
2808 /// to collapse (bitconvert VT to VT) into its operand.
2810 multiclass PDI_binop_rm_v2i64<bits<8> opc, string OpcodeStr, SDNode OpNode,
2811 bit IsCommutable = 0, bit Is2Addr = 1> {
2812 let isCommutable = IsCommutable in
2813 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2814 (ins VR128:$src1, VR128:$src2),
2815 !if(Is2Addr,
2816 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2817 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2818 [(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))]>;
2819 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2820 (ins VR128:$src1, i128mem:$src2),
2821 !if(Is2Addr,
2822 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2823 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2824 [(set VR128:$dst, (OpNode VR128:$src1, (memopv2i64 addr:$src2)))]>;
2827 } // ExeDomain = SSEPackedInt
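// As a rough illustration of how these multiclasses are used, a defm such as
//   defm PADDB : PDI_binop_rm<0xFC, "paddb", add, v16i8, 1>;
// expands to a register-register form (PADDBrr) and a register-memory form
// (PADDBrm) whose memory operand is loaded with memopv2i64 and bitconverted to
// the element type.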
2829 // 128-bit Integer Arithmetic
2831 let Predicates = [HasAVX] in {
2832 defm VPADDB : PDI_binop_rm<0xFC, "vpaddb", add, v16i8, 1, 0 /*3addr*/>, VEX_4V;
2833 defm VPADDW : PDI_binop_rm<0xFD, "vpaddw", add, v8i16, 1, 0>, VEX_4V;
2834 defm VPADDD : PDI_binop_rm<0xFE, "vpaddd", add, v4i32, 1, 0>, VEX_4V;
2835 defm VPADDQ : PDI_binop_rm_v2i64<0xD4, "vpaddq", add, 1, 0>, VEX_4V;
2836 defm VPMULLW : PDI_binop_rm<0xD5, "vpmullw", mul, v8i16, 1, 0>, VEX_4V;
2837 defm VPSUBB : PDI_binop_rm<0xF8, "vpsubb", sub, v16i8, 0, 0>, VEX_4V;
2838 defm VPSUBW : PDI_binop_rm<0xF9, "vpsubw", sub, v8i16, 0, 0>, VEX_4V;
2839 defm VPSUBD : PDI_binop_rm<0xFA, "vpsubd", sub, v4i32, 0, 0>, VEX_4V;
2840 defm VPSUBQ : PDI_binop_rm_v2i64<0xFB, "vpsubq", sub, 0, 0>, VEX_4V;
2843 defm VPSUBSB : PDI_binop_rm_int<0xE8, "vpsubsb" , int_x86_sse2_psubs_b, 0, 0>, VEX_4V;
2845 defm VPSUBSW : PDI_binop_rm_int<0xE9, "vpsubsw" , int_x86_sse2_psubs_w, 0, 0>, VEX_4V;
2847 defm VPSUBUSB : PDI_binop_rm_int<0xD8, "vpsubusb", int_x86_sse2_psubus_b, 0, 0>, VEX_4V;
2849 defm VPSUBUSW : PDI_binop_rm_int<0xD9, "vpsubusw", int_x86_sse2_psubus_w, 0, 0>, VEX_4V;
2851 defm VPADDSB : PDI_binop_rm_int<0xEC, "vpaddsb" , int_x86_sse2_padds_b, 1, 0>, VEX_4V;
2853 defm VPADDSW : PDI_binop_rm_int<0xED, "vpaddsw" , int_x86_sse2_padds_w, 1, 0>, VEX_4V;
2855 defm VPADDUSB : PDI_binop_rm_int<0xDC, "vpaddusb", int_x86_sse2_paddus_b, 1, 0>, VEX_4V;
2857 defm VPADDUSW : PDI_binop_rm_int<0xDD, "vpaddusw", int_x86_sse2_paddus_w, 1, 0>, VEX_4V;
2859 defm VPMULHUW : PDI_binop_rm_int<0xE4, "vpmulhuw", int_x86_sse2_pmulhu_w, 1, 0>, VEX_4V;
2861 defm VPMULHW : PDI_binop_rm_int<0xE5, "vpmulhw" , int_x86_sse2_pmulh_w, 1, 0>, VEX_4V;
2863 defm VPMULUDQ : PDI_binop_rm_int<0xF4, "vpmuludq", int_x86_sse2_pmulu_dq, 1, 0>, VEX_4V;
2865 defm VPMADDWD : PDI_binop_rm_int<0xF5, "vpmaddwd", int_x86_sse2_pmadd_wd, 1, 0>, VEX_4V;
2867 defm VPAVGB : PDI_binop_rm_int<0xE0, "vpavgb", int_x86_sse2_pavg_b, 1, 0>, VEX_4V;
2869 defm VPAVGW : PDI_binop_rm_int<0xE3, "vpavgw", int_x86_sse2_pavg_w, 1, 0>, VEX_4V;
2871 defm VPMINUB : PDI_binop_rm_int<0xDA, "vpminub", int_x86_sse2_pminu_b, 1, 0>, VEX_4V;
2873 defm VPMINSW : PDI_binop_rm_int<0xEA, "vpminsw", int_x86_sse2_pmins_w, 1, 0>, VEX_4V;
2875 defm VPMAXUB : PDI_binop_rm_int<0xDE, "vpmaxub", int_x86_sse2_pmaxu_b, 1, 0>, VEX_4V;
2877 defm VPMAXSW : PDI_binop_rm_int<0xEE, "vpmaxsw", int_x86_sse2_pmaxs_w, 1, 0>, VEX_4V;
2879 defm VPSADBW : PDI_binop_rm_int<0xF6, "vpsadbw", int_x86_sse2_psad_bw, 1, 0>, VEX_4V;
2882 }
2883 let Constraints = "$src1 = $dst" in {
2884 defm PADDB : PDI_binop_rm<0xFC, "paddb", add, v16i8, 1>;
2885 defm PADDW : PDI_binop_rm<0xFD, "paddw", add, v8i16, 1>;
2886 defm PADDD : PDI_binop_rm<0xFE, "paddd", add, v4i32, 1>;
2887 defm PADDQ : PDI_binop_rm_v2i64<0xD4, "paddq", add, 1>;
2888 defm PMULLW : PDI_binop_rm<0xD5, "pmullw", mul, v8i16, 1>;
2889 defm PSUBB : PDI_binop_rm<0xF8, "psubb", sub, v16i8>;
2890 defm PSUBW : PDI_binop_rm<0xF9, "psubw", sub, v8i16>;
2891 defm PSUBD : PDI_binop_rm<0xFA, "psubd", sub, v4i32>;
2892 defm PSUBQ : PDI_binop_rm_v2i64<0xFB, "psubq", sub>;
2895 defm PSUBSB : PDI_binop_rm_int<0xE8, "psubsb" , int_x86_sse2_psubs_b>;
2896 defm PSUBSW : PDI_binop_rm_int<0xE9, "psubsw" , int_x86_sse2_psubs_w>;
2897 defm PSUBUSB : PDI_binop_rm_int<0xD8, "psubusb", int_x86_sse2_psubus_b>;
2898 defm PSUBUSW : PDI_binop_rm_int<0xD9, "psubusw", int_x86_sse2_psubus_w>;
2899 defm PADDSB : PDI_binop_rm_int<0xEC, "paddsb" , int_x86_sse2_padds_b, 1>;
2900 defm PADDSW : PDI_binop_rm_int<0xED, "paddsw" , int_x86_sse2_padds_w, 1>;
2901 defm PADDUSB : PDI_binop_rm_int<0xDC, "paddusb", int_x86_sse2_paddus_b, 1>;
2902 defm PADDUSW : PDI_binop_rm_int<0xDD, "paddusw", int_x86_sse2_paddus_w, 1>;
2903 defm PMULHUW : PDI_binop_rm_int<0xE4, "pmulhuw", int_x86_sse2_pmulhu_w, 1>;
2904 defm PMULHW : PDI_binop_rm_int<0xE5, "pmulhw" , int_x86_sse2_pmulh_w, 1>;
2905 defm PMULUDQ : PDI_binop_rm_int<0xF4, "pmuludq", int_x86_sse2_pmulu_dq, 1>;
2906 defm PMADDWD : PDI_binop_rm_int<0xF5, "pmaddwd", int_x86_sse2_pmadd_wd, 1>;
2907 defm PAVGB : PDI_binop_rm_int<0xE0, "pavgb", int_x86_sse2_pavg_b, 1>;
2908 defm PAVGW : PDI_binop_rm_int<0xE3, "pavgw", int_x86_sse2_pavg_w, 1>;
2909 defm PMINUB : PDI_binop_rm_int<0xDA, "pminub", int_x86_sse2_pminu_b, 1>;
2910 defm PMINSW : PDI_binop_rm_int<0xEA, "pminsw", int_x86_sse2_pmins_w, 1>;
2911 defm PMAXUB : PDI_binop_rm_int<0xDE, "pmaxub", int_x86_sse2_pmaxu_b, 1>;
2912 defm PMAXSW : PDI_binop_rm_int<0xEE, "pmaxsw", int_x86_sse2_pmaxs_w, 1>;
2913 defm PSADBW : PDI_binop_rm_int<0xF6, "psadbw", int_x86_sse2_psad_bw, 1>;
2915 } // Constraints = "$src1 = $dst"
2917 //===---------------------------------------------------------------------===//
2918 // SSE2 - Packed Integer Logical Instructions
2919 //===---------------------------------------------------------------------===//
2921 let Predicates = [HasAVX] in {
2922 defm VPSLLW : PDI_binop_rmi_int<0xF1, 0x71, MRM6r, "vpsllw",
2923 int_x86_sse2_psll_w, int_x86_sse2_pslli_w, 0>, VEX_4V;
2925 defm VPSLLD : PDI_binop_rmi_int<0xF2, 0x72, MRM6r, "vpslld",
2926 int_x86_sse2_psll_d, int_x86_sse2_pslli_d, 0>, VEX_4V;
2928 defm VPSLLQ : PDI_binop_rmi_int<0xF3, 0x73, MRM6r, "vpsllq",
2929 int_x86_sse2_psll_q, int_x86_sse2_pslli_q, 0>, VEX_4V;
2932 defm VPSRLW : PDI_binop_rmi_int<0xD1, 0x71, MRM2r, "vpsrlw",
2933 int_x86_sse2_psrl_w, int_x86_sse2_psrli_w, 0>, VEX_4V;
2935 defm VPSRLD : PDI_binop_rmi_int<0xD2, 0x72, MRM2r, "vpsrld",
2936 int_x86_sse2_psrl_d, int_x86_sse2_psrli_d, 0>, VEX_4V;
2938 defm VPSRLQ : PDI_binop_rmi_int<0xD3, 0x73, MRM2r, "vpsrlq",
2939 int_x86_sse2_psrl_q, int_x86_sse2_psrli_q, 0>, VEX_4V;
2942 defm VPSRAW : PDI_binop_rmi_int<0xE1, 0x71, MRM4r, "vpsraw",
2943 int_x86_sse2_psra_w, int_x86_sse2_psrai_w, 0>, VEX_4V;
2945 defm VPSRAD : PDI_binop_rmi_int<0xE2, 0x72, MRM4r, "vpsrad",
2946 int_x86_sse2_psra_d, int_x86_sse2_psrai_d, 0>, VEX_4V;
2949 defm VPAND : PDI_binop_rm_v2i64<0xDB, "vpand", and, 1, 0>, VEX_4V;
2950 defm VPOR : PDI_binop_rm_v2i64<0xEB, "vpor" , or, 1, 0>, VEX_4V;
2951 defm VPXOR : PDI_binop_rm_v2i64<0xEF, "vpxor", xor, 1, 0>, VEX_4V;
2953 let ExeDomain = SSEPackedInt in {
2954 let neverHasSideEffects = 1 in {
2955 // 128-bit logical shifts.
2956 def VPSLLDQri : PDIi8<0x73, MRM7r,
2957 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2958 "vpslldq\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
2960 def VPSRLDQri : PDIi8<0x73, MRM3r,
2961 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2962 "vpsrldq\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
2964 // PSRADQri doesn't exist in SSE[1-3].
2966 def VPANDNrr : PDI<0xDF, MRMSrcReg,
2967 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2968 "vpandn\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2970 (v2i64 (X86andnp VR128:$src1, VR128:$src2)))]>,VEX_4V;
2972 def VPANDNrm : PDI<0xDF, MRMSrcMem,
2973 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2974 "vpandn\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2975 [(set VR128:$dst, (X86andnp VR128:$src1,
2976 (memopv2i64 addr:$src2)))]>, VEX_4V;
2980 let Constraints = "$src1 = $dst" in {
2981 defm PSLLW : PDI_binop_rmi_int<0xF1, 0x71, MRM6r, "psllw",
2982 int_x86_sse2_psll_w, int_x86_sse2_pslli_w>;
2983 defm PSLLD : PDI_binop_rmi_int<0xF2, 0x72, MRM6r, "pslld",
2984 int_x86_sse2_psll_d, int_x86_sse2_pslli_d>;
2985 defm PSLLQ : PDI_binop_rmi_int<0xF3, 0x73, MRM6r, "psllq",
2986 int_x86_sse2_psll_q, int_x86_sse2_pslli_q>;
2988 defm PSRLW : PDI_binop_rmi_int<0xD1, 0x71, MRM2r, "psrlw",
2989 int_x86_sse2_psrl_w, int_x86_sse2_psrli_w>;
2990 defm PSRLD : PDI_binop_rmi_int<0xD2, 0x72, MRM2r, "psrld",
2991 int_x86_sse2_psrl_d, int_x86_sse2_psrli_d>;
2992 defm PSRLQ : PDI_binop_rmi_int<0xD3, 0x73, MRM2r, "psrlq",
2993 int_x86_sse2_psrl_q, int_x86_sse2_psrli_q>;
2995 defm PSRAW : PDI_binop_rmi_int<0xE1, 0x71, MRM4r, "psraw",
2996 int_x86_sse2_psra_w, int_x86_sse2_psrai_w>;
2997 defm PSRAD : PDI_binop_rmi_int<0xE2, 0x72, MRM4r, "psrad",
2998 int_x86_sse2_psra_d, int_x86_sse2_psrai_d>;
3000 defm PAND : PDI_binop_rm_v2i64<0xDB, "pand", and, 1>;
3001 defm POR : PDI_binop_rm_v2i64<0xEB, "por" , or, 1>;
3002 defm PXOR : PDI_binop_rm_v2i64<0xEF, "pxor", xor, 1>;
3004 let ExeDomain = SSEPackedInt in {
3005 let neverHasSideEffects = 1 in {
3006 // 128-bit logical shifts.
3007 def PSLLDQri : PDIi8<0x73, MRM7r,
3008 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
3009 "pslldq\t{$src2, $dst|$dst, $src2}", []>;
3010 def PSRLDQri : PDIi8<0x73, MRM3r,
3011 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
3012 "psrldq\t{$src2, $dst|$dst, $src2}", []>;
3013 // PSRADQri doesn't exist in SSE[1-3].
3015 def PANDNrr : PDI<0xDF, MRMSrcReg,
3016 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
3017 "pandn\t{$src2, $dst|$dst, $src2}", []>;
3019 def PANDNrm : PDI<0xDF, MRMSrcMem,
3020 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
3021 "pandn\t{$src2, $dst|$dst, $src2}", []>;
3023 } // Constraints = "$src1 = $dst"
3025 let Predicates = [HasAVX] in {
3026 def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
3027 (v2i64 (VPSLLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
3028 def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
3029 (v2i64 (VPSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
3030 def : Pat<(int_x86_sse2_psll_dq_bs VR128:$src1, imm:$src2),
3031 (v2i64 (VPSLLDQri VR128:$src1, imm:$src2))>;
3032 def : Pat<(int_x86_sse2_psrl_dq_bs VR128:$src1, imm:$src2),
3033 (v2i64 (VPSRLDQri VR128:$src1, imm:$src2))>;
3034 def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
3035 (v2f64 (VPSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
3037 // Shift up / down and insert zero's.
3038 def : Pat<(v2i64 (X86vshl VR128:$src, (i8 imm:$amt))),
3039 (v2i64 (VPSLLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
3040 def : Pat<(v2i64 (X86vshr VR128:$src, (i8 imm:$amt))),
3041 (v2i64 (VPSRLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
3044 let Predicates = [HasSSE2] in {
3045 def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
3046 (v2i64 (PSLLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
3047 def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
3048 (v2i64 (PSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
3049 def : Pat<(int_x86_sse2_psll_dq_bs VR128:$src1, imm:$src2),
3050 (v2i64 (PSLLDQri VR128:$src1, imm:$src2))>;
3051 def : Pat<(int_x86_sse2_psrl_dq_bs VR128:$src1, imm:$src2),
3052 (v2i64 (PSRLDQri VR128:$src1, imm:$src2))>;
3053 def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
3054 (v2f64 (PSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
3056 // Shift up / down and insert zero's.
3057 def : Pat<(v2i64 (X86vshl VR128:$src, (i8 imm:$amt))),
3058 (v2i64 (PSLLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
3059 def : Pat<(v2i64 (X86vshr VR128:$src, (i8 imm:$amt))),
3060 (v2i64 (PSRLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
3063 //===---------------------------------------------------------------------===//
3064 // SSE2 - Packed Integer Comparison Instructions
3065 //===---------------------------------------------------------------------===//
3067 let Predicates = [HasAVX] in {
3068 defm VPCMPEQB : PDI_binop_rm_int<0x74, "vpcmpeqb", int_x86_sse2_pcmpeq_b, 1, 0>, VEX_4V;
3070 defm VPCMPEQW : PDI_binop_rm_int<0x75, "vpcmpeqw", int_x86_sse2_pcmpeq_w, 1, 0>, VEX_4V;
3072 defm VPCMPEQD : PDI_binop_rm_int<0x76, "vpcmpeqd", int_x86_sse2_pcmpeq_d, 1, 0>, VEX_4V;
3074 defm VPCMPGTB : PDI_binop_rm_int<0x64, "vpcmpgtb", int_x86_sse2_pcmpgt_b, 0, 0>, VEX_4V;
3076 defm VPCMPGTW : PDI_binop_rm_int<0x65, "vpcmpgtw", int_x86_sse2_pcmpgt_w, 0, 0>, VEX_4V;
3078 defm VPCMPGTD : PDI_binop_rm_int<0x66, "vpcmpgtd", int_x86_sse2_pcmpgt_d, 0, 0>, VEX_4V;
3081 def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, VR128:$src2)),
3082 (VPCMPEQBrr VR128:$src1, VR128:$src2)>;
3083 def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, (memop addr:$src2))),
3084 (VPCMPEQBrm VR128:$src1, addr:$src2)>;
3085 def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, VR128:$src2)),
3086 (VPCMPEQWrr VR128:$src1, VR128:$src2)>;
3087 def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, (memop addr:$src2))),
3088 (VPCMPEQWrm VR128:$src1, addr:$src2)>;
3089 def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, VR128:$src2)),
3090 (VPCMPEQDrr VR128:$src1, VR128:$src2)>;
3091 def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, (memop addr:$src2))),
3092 (VPCMPEQDrm VR128:$src1, addr:$src2)>;
3094 def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, VR128:$src2)),
3095 (VPCMPGTBrr VR128:$src1, VR128:$src2)>;
3096 def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, (memop addr:$src2))),
3097 (VPCMPGTBrm VR128:$src1, addr:$src2)>;
3098 def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, VR128:$src2)),
3099 (VPCMPGTWrr VR128:$src1, VR128:$src2)>;
3100 def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, (memop addr:$src2))),
3101 (VPCMPGTWrm VR128:$src1, addr:$src2)>;
3102 def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, VR128:$src2)),
3103 (VPCMPGTDrr VR128:$src1, VR128:$src2)>;
3104 def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, (memop addr:$src2))),
3105 (VPCMPGTDrm VR128:$src1, addr:$src2)>;
3108 let Constraints = "$src1 = $dst" in {
3109 defm PCMPEQB : PDI_binop_rm_int<0x74, "pcmpeqb", int_x86_sse2_pcmpeq_b, 1>;
3110 defm PCMPEQW : PDI_binop_rm_int<0x75, "pcmpeqw", int_x86_sse2_pcmpeq_w, 1>;
3111 defm PCMPEQD : PDI_binop_rm_int<0x76, "pcmpeqd", int_x86_sse2_pcmpeq_d, 1>;
3112 defm PCMPGTB : PDI_binop_rm_int<0x64, "pcmpgtb", int_x86_sse2_pcmpgt_b>;
3113 defm PCMPGTW : PDI_binop_rm_int<0x65, "pcmpgtw", int_x86_sse2_pcmpgt_w>;
3114 defm PCMPGTD : PDI_binop_rm_int<0x66, "pcmpgtd", int_x86_sse2_pcmpgt_d>;
3115 } // Constraints = "$src1 = $dst"
3117 def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, VR128:$src2)),
3118 (PCMPEQBrr VR128:$src1, VR128:$src2)>;
3119 def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, (memop addr:$src2))),
3120 (PCMPEQBrm VR128:$src1, addr:$src2)>;
3121 def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, VR128:$src2)),
3122 (PCMPEQWrr VR128:$src1, VR128:$src2)>;
3123 def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, (memop addr:$src2))),
3124 (PCMPEQWrm VR128:$src1, addr:$src2)>;
3125 def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, VR128:$src2)),
3126 (PCMPEQDrr VR128:$src1, VR128:$src2)>;
3127 def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, (memop addr:$src2))),
3128 (PCMPEQDrm VR128:$src1, addr:$src2)>;
3130 def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, VR128:$src2)),
3131 (PCMPGTBrr VR128:$src1, VR128:$src2)>;
3132 def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, (memop addr:$src2))),
3133 (PCMPGTBrm VR128:$src1, addr:$src2)>;
3134 def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, VR128:$src2)),
3135 (PCMPGTWrr VR128:$src1, VR128:$src2)>;
3136 def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, (memop addr:$src2))),
3137 (PCMPGTWrm VR128:$src1, addr:$src2)>;
3138 def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, VR128:$src2)),
3139 (PCMPGTDrr VR128:$src1, VR128:$src2)>;
3140 def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, (memop addr:$src2))),
3141 (PCMPGTDrm VR128:$src1, addr:$src2)>;
3143 //===---------------------------------------------------------------------===//
3144 // SSE2 - Packed Integer Pack Instructions
3145 //===---------------------------------------------------------------------===//
3147 let Predicates = [HasAVX] in {
3148 defm VPACKSSWB : PDI_binop_rm_int<0x63, "vpacksswb", int_x86_sse2_packsswb_128, 0, 0>, VEX_4V;
3150 defm VPACKSSDW : PDI_binop_rm_int<0x6B, "vpackssdw", int_x86_sse2_packssdw_128, 0, 0>, VEX_4V;
3152 defm VPACKUSWB : PDI_binop_rm_int<0x67, "vpackuswb", int_x86_sse2_packuswb_128, 0, 0>, VEX_4V;
3154 }
3156 let Constraints = "$src1 = $dst" in {
3157 defm PACKSSWB : PDI_binop_rm_int<0x63, "packsswb", int_x86_sse2_packsswb_128>;
3158 defm PACKSSDW : PDI_binop_rm_int<0x6B, "packssdw", int_x86_sse2_packssdw_128>;
3159 defm PACKUSWB : PDI_binop_rm_int<0x67, "packuswb", int_x86_sse2_packuswb_128>;
3160 } // Constraints = "$src1 = $dst"
3162 //===---------------------------------------------------------------------===//
3163 // SSE2 - Packed Integer Shuffle Instructions
3164 //===---------------------------------------------------------------------===//
3166 let ExeDomain = SSEPackedInt in {
3167 multiclass sse2_pshuffle<string OpcodeStr, ValueType vt, PatFrag pshuf_frag,
3168                          PatFrag bc_frag> {
3169 def ri : Ii8<0x70, MRMSrcReg,
3170 (outs VR128:$dst), (ins VR128:$src1, i8imm:$src2),
3171 !strconcat(OpcodeStr,
3172 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3173 [(set VR128:$dst, (vt (pshuf_frag:$src2 VR128:$src1,
3174 (undef))))]>;
3175 def mi : Ii8<0x70, MRMSrcMem,
3176 (outs VR128:$dst), (ins i128mem:$src1, i8imm:$src2),
3177 !strconcat(OpcodeStr,
3178 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3179 [(set VR128:$dst, (vt (pshuf_frag:$src2
3180 (bc_frag (memopv2i64 addr:$src1)),
3181 (undef))))]>;
3182 }
3183 } // ExeDomain = SSEPackedInt
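// The 8-bit shuffle immediate selects one source element per 2-bit field; for
// example, a pshufd immediate of 0x1B (0b00011011) reverses the four dwords.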
3185 let Predicates = [HasAVX] in {
3186 let AddedComplexity = 5 in
3187 defm VPSHUFD : sse2_pshuffle<"vpshufd", v4i32, pshufd, bc_v4i32>, TB, OpSize,
3188 VEX;
3190 // SSE2 with ImmT == Imm8 and XS prefix.
3191 defm VPSHUFHW : sse2_pshuffle<"vpshufhw", v8i16, pshufhw, bc_v8i16>, XS,
3192 VEX;
3194 // SSE2 with ImmT == Imm8 and XD prefix.
3195 defm VPSHUFLW : sse2_pshuffle<"vpshuflw", v8i16, pshuflw, bc_v8i16>, XD,
3196 VEX;
3198 let AddedComplexity = 5 in
3199 def : Pat<(v4f32 (pshufd:$src2 VR128:$src1, (undef))),
3200 (VPSHUFDri VR128:$src1, (SHUFFLE_get_shuf_imm VR128:$src2))>;
3201 // Unary v4f32 shuffle with VPSHUF* in order to fold a load.
3202 def : Pat<(pshufd:$src2 (bc_v4i32 (memopv4f32 addr:$src1)), (undef)),
3203 (VPSHUFDmi addr:$src1, (SHUFFLE_get_shuf_imm VR128:$src2))>;
3205 def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv2i64 addr:$src1)),
3206 (i8 imm:$imm))),
3207 (VPSHUFDmi addr:$src1, imm:$imm)>, Requires<[HasAVX]>;
3208 def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv4f32 addr:$src1)),
3209 (i8 imm:$imm))),
3210 (VPSHUFDmi addr:$src1, imm:$imm)>;
3211 def : Pat<(v4f32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
3212 (VPSHUFDri VR128:$src1, imm:$imm)>, Requires<[HasAVX]>;
3213 def : Pat<(v4i32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
3214 (VPSHUFDri VR128:$src1, imm:$imm)>, Requires<[HasAVX]>;
3215 def : Pat<(v8i16 (X86PShufhw VR128:$src, (i8 imm:$imm))),
3216 (VPSHUFHWri VR128:$src, imm:$imm)>;
3217 def : Pat<(v8i16 (X86PShufhw (bc_v8i16 (memopv2i64 addr:$src)),
3218 (i8 imm:$imm))),
3219 (VPSHUFHWmi addr:$src, imm:$imm)>;
3220 def : Pat<(v8i16 (X86PShuflw VR128:$src, (i8 imm:$imm))),
3221 (VPSHUFLWri VR128:$src, imm:$imm)>;
3222 def : Pat<(v8i16 (X86PShuflw (bc_v8i16 (memopv2i64 addr:$src)),
3223 (i8 imm:$imm))),
3224 (VPSHUFLWmi addr:$src, imm:$imm)>;
3225 }
3227 let Predicates = [HasSSE2] in {
3228 let AddedComplexity = 5 in
3229 defm PSHUFD : sse2_pshuffle<"pshufd", v4i32, pshufd, bc_v4i32>, TB, OpSize;
3231 // SSE2 with ImmT == Imm8 and XS prefix.
3232 defm PSHUFHW : sse2_pshuffle<"pshufhw", v8i16, pshufhw, bc_v8i16>, XS;
3234 // SSE2 with ImmT == Imm8 and XD prefix.
3235 defm PSHUFLW : sse2_pshuffle<"pshuflw", v8i16, pshuflw, bc_v8i16>, XD;
3237 let AddedComplexity = 5 in
3238 def : Pat<(v4f32 (pshufd:$src2 VR128:$src1, (undef))),
3239 (PSHUFDri VR128:$src1, (SHUFFLE_get_shuf_imm VR128:$src2))>;
3240 // Unary v4f32 shuffle with PSHUF* in order to fold a load.
3241 def : Pat<(pshufd:$src2 (bc_v4i32 (memopv4f32 addr:$src1)), (undef)),
3242 (PSHUFDmi addr:$src1, (SHUFFLE_get_shuf_imm VR128:$src2))>;
3244 def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv2i64 addr:$src1)),
3245 (i8 imm:$imm))),
3246 (PSHUFDmi addr:$src1, imm:$imm)>;
3247 def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv4f32 addr:$src1)),
3248 (i8 imm:$imm))),
3249 (PSHUFDmi addr:$src1, imm:$imm)>;
3250 def : Pat<(v4f32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
3251 (PSHUFDri VR128:$src1, imm:$imm)>;
3252 def : Pat<(v4i32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
3253 (PSHUFDri VR128:$src1, imm:$imm)>;
3254 def : Pat<(v8i16 (X86PShufhw VR128:$src, (i8 imm:$imm))),
3255 (PSHUFHWri VR128:$src, imm:$imm)>;
3256 def : Pat<(v8i16 (X86PShufhw (bc_v8i16 (memopv2i64 addr:$src)),
3257 (i8 imm:$imm))),
3258 (PSHUFHWmi addr:$src, imm:$imm)>;
3259 def : Pat<(v8i16 (X86PShuflw VR128:$src, (i8 imm:$imm))),
3260 (PSHUFLWri VR128:$src, imm:$imm)>;
3261 def : Pat<(v8i16 (X86PShuflw (bc_v8i16 (memopv2i64 addr:$src)),
3262 (i8 imm:$imm))),
3263 (PSHUFLWmi addr:$src, imm:$imm)>;
3264 }
3266 //===---------------------------------------------------------------------===//
3267 // SSE2 - Packed Integer Unpack Instructions
3268 //===---------------------------------------------------------------------===//
3270 let ExeDomain = SSEPackedInt in {
3271 multiclass sse2_unpack<bits<8> opc, string OpcodeStr, ValueType vt,
3272 SDNode OpNode, PatFrag bc_frag, bit Is2Addr = 1> {
3273 def rr : PDI<opc, MRMSrcReg,
3274 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
3275 !if(Is2Addr,
3276 !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
3277 !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3278 [(set VR128:$dst, (vt (OpNode VR128:$src1, VR128:$src2)))]>;
3279 def rm : PDI<opc, MRMSrcMem,
3280 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
3281 !if(Is2Addr,
3282 !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
3283 !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3284 [(set VR128:$dst, (OpNode VR128:$src1,
3285 (bc_frag (memopv2i64
3286 addr:$src2))))]>;
3287 }
3289 let Predicates = [HasAVX] in {
3290 defm VPUNPCKLBW : sse2_unpack<0x60, "vpunpcklbw", v16i8, X86Punpcklbw,
3291 bc_v16i8, 0>, VEX_4V;
3292 defm VPUNPCKLWD : sse2_unpack<0x61, "vpunpcklwd", v8i16, X86Punpcklwd,
3293 bc_v8i16, 0>, VEX_4V;
3294 defm VPUNPCKLDQ : sse2_unpack<0x62, "vpunpckldq", v4i32, X86Punpckldq,
3295 bc_v4i32, 0>, VEX_4V;
3297 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
3298 /// knew to collapse (bitconvert VT to VT) into its operand.
3299 def VPUNPCKLQDQrr : PDI<0x6C, MRMSrcReg,
3300 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
3301 "vpunpcklqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
3302 [(set VR128:$dst, (v2i64 (X86Punpcklqdq VR128:$src1,
3303 VR128:$src2)))]>, VEX_4V;
3304 def VPUNPCKLQDQrm : PDI<0x6C, MRMSrcMem,
3305 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
3306 "vpunpcklqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
3307 [(set VR128:$dst, (v2i64 (X86Punpcklqdq VR128:$src1,
3308 (memopv2i64 addr:$src2))))]>, VEX_4V;
3310 defm VPUNPCKHBW : sse2_unpack<0x68, "vpunpckhbw", v16i8, X86Punpckhbw,
3311 bc_v16i8, 0>, VEX_4V;
3312 defm VPUNPCKHWD : sse2_unpack<0x69, "vpunpckhwd", v8i16, X86Punpckhwd,
3313 bc_v8i16, 0>, VEX_4V;
3314 defm VPUNPCKHDQ : sse2_unpack<0x6A, "vpunpckhdq", v4i32, X86Punpckhdq,
3315 bc_v4i32, 0>, VEX_4V;
3317 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
3318 /// knew to collapse (bitconvert VT to VT) into its operand.
3319 def VPUNPCKHQDQrr : PDI<0x6D, MRMSrcReg,
3320 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
3321 "vpunpckhqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
3322 [(set VR128:$dst, (v2i64 (X86Punpckhqdq VR128:$src1,
3323 VR128:$src2)))]>, VEX_4V;
3324 def VPUNPCKHQDQrm : PDI<0x6D, MRMSrcMem,
3325 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
3326 "vpunpckhqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
3327 [(set VR128:$dst, (v2i64 (X86Punpckhqdq VR128:$src1,
3328 (memopv2i64 addr:$src2))))]>, VEX_4V;
3331 let Constraints = "$src1 = $dst" in {
3332 defm PUNPCKLBW : sse2_unpack<0x60, "punpcklbw", v16i8, X86Punpcklbw, bc_v16i8>;
3333 defm PUNPCKLWD : sse2_unpack<0x61, "punpcklwd", v8i16, X86Punpcklwd, bc_v8i16>;
3334 defm PUNPCKLDQ : sse2_unpack<0x62, "punpckldq", v4i32, X86Punpckldq, bc_v4i32>;
3336 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
3337 /// knew to collapse (bitconvert VT to VT) into its operand.
3338 def PUNPCKLQDQrr : PDI<0x6C, MRMSrcReg,
3339 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
3340 "punpcklqdq\t{$src2, $dst|$dst, $src2}",
3341 [(set VR128:$dst,
3342 (v2i64 (X86Punpcklqdq VR128:$src1, VR128:$src2)))]>;
3343 def PUNPCKLQDQrm : PDI<0x6C, MRMSrcMem,
3344 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
3345 "punpcklqdq\t{$src2, $dst|$dst, $src2}",
3346 [(set VR128:$dst,
3347 (v2i64 (X86Punpcklqdq VR128:$src1,
3348 (memopv2i64 addr:$src2))))]>;
3350 defm PUNPCKHBW : sse2_unpack<0x68, "punpckhbw", v16i8, X86Punpckhbw, bc_v16i8>;
3351 defm PUNPCKHWD : sse2_unpack<0x69, "punpckhwd", v8i16, X86Punpckhwd, bc_v8i16>;
3352 defm PUNPCKHDQ : sse2_unpack<0x6A, "punpckhdq", v4i32, X86Punpckhdq, bc_v4i32>;
3354 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
3355 /// knew to collapse (bitconvert VT to VT) into its operand.
3356 def PUNPCKHQDQrr : PDI<0x6D, MRMSrcReg,
3357 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
3358 "punpckhqdq\t{$src2, $dst|$dst, $src2}",
3359 [(set VR128:$dst,
3360 (v2i64 (X86Punpckhqdq VR128:$src1, VR128:$src2)))]>;
3361 def PUNPCKHQDQrm : PDI<0x6D, MRMSrcMem,
3362 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
3363 "punpckhqdq\t{$src2, $dst|$dst, $src2}",
3364 [(set VR128:$dst,
3365 (v2i64 (X86Punpckhqdq VR128:$src1,
3366 (memopv2i64 addr:$src2))))]>;
3367 }
3369 } // ExeDomain = SSEPackedInt
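// For reference: punpcklbw interleaves the low eight bytes of the two sources
// (dst[0] = src1[0], dst[1] = src2[0], ...), while the punpckh* forms
// interleave the high halves.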
3371 //===---------------------------------------------------------------------===//
3372 // SSE2 - Packed Integer Extract and Insert
3373 //===---------------------------------------------------------------------===//
3375 let ExeDomain = SSEPackedInt in {
3376 multiclass sse2_pinsrw<bit Is2Addr = 1> {
3377 def rri : Ii8<0xC4, MRMSrcReg,
3378 (outs VR128:$dst), (ins VR128:$src1,
3379 GR32:$src2, i32i8imm:$src3),
3381 "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
3382 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
3383 [(set VR128:$dst,
3384 (X86pinsrw VR128:$src1, GR32:$src2, imm:$src3))]>;
3385 def rmi : Ii8<0xC4, MRMSrcMem,
3386 (outs VR128:$dst), (ins VR128:$src1,
3387 i16mem:$src2, i32i8imm:$src3),
3389 "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
3390 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
3391 [(set VR128:$dst,
3392 (X86pinsrw VR128:$src1, (extloadi16 addr:$src2),
3393 imm:$src3))]>;
3394 }
3397 let Predicates = [HasAVX] in
3398 def VPEXTRWri : Ii8<0xC5, MRMSrcReg,
3399 (outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
3400 "vpextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
3401 [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
3402 imm:$src2))]>, TB, OpSize, VEX;
3403 def PEXTRWri : PDIi8<0xC5, MRMSrcReg,
3404 (outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
3405 "pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
3406 [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
3407 imm:$src2))]>;
3410 let Predicates = [HasAVX] in {
3411 defm VPINSRW : sse2_pinsrw<0>, TB, OpSize, VEX_4V;
3412 def VPINSRWrr64i : Ii8<0xC4, MRMSrcReg, (outs VR128:$dst),
3413 (ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
3414 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
3415 []>, TB, OpSize, VEX_4V;
3418 let Constraints = "$src1 = $dst" in
3419 defm PINSRW : sse2_pinsrw, TB, OpSize, Requires<[HasSSE2]>;
3421 } // ExeDomain = SSEPackedInt
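// The immediate selects which 16-bit lane is written (pinsrw) or read
// (pextrw); e.g. an immediate of 7 touches the highest word of the XMM
// register.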
3423 //===---------------------------------------------------------------------===//
3424 // SSE2 - Packed Mask Creation
3425 //===---------------------------------------------------------------------===//
3427 let ExeDomain = SSEPackedInt in {
3429 def VPMOVMSKBrr : VPDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
3430 "pmovmskb\t{$src, $dst|$dst, $src}",
3431 [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>, VEX;
3432 def VPMOVMSKBr64r : VPDI<0xD7, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
3433 "pmovmskb\t{$src, $dst|$dst, $src}", []>, VEX;
3434 def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
3435 "pmovmskb\t{$src, $dst|$dst, $src}",
3436 [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>;
3438 } // ExeDomain = SSEPackedInt
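// pmovmskb gathers the most significant bit of each of the 16 source bytes
// into the low 16 bits of the GR32 destination; the upper bits are zeroed.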
3440 //===---------------------------------------------------------------------===//
3441 // SSE2 - Conditional Store
3442 //===---------------------------------------------------------------------===//
3444 let ExeDomain = SSEPackedInt in {
3447 def VMASKMOVDQU : VPDI<0xF7, MRMSrcReg, (outs),
3448 (ins VR128:$src, VR128:$mask),
3449 "maskmovdqu\t{$mask, $src|$src, $mask}",
3450 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>, VEX;
3452 def VMASKMOVDQU64 : VPDI<0xF7, MRMSrcReg, (outs),
3453 (ins VR128:$src, VR128:$mask),
3454 "maskmovdqu\t{$mask, $src|$src, $mask}",
3455 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)]>, VEX;
3458 def MASKMOVDQU : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
3459 "maskmovdqu\t{$mask, $src|$src, $mask}",
3460 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>;
3462 def MASKMOVDQU64 : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
3463 "maskmovdqu\t{$mask, $src|$src, $mask}",
3464 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)]>;
3466 } // ExeDomain = SSEPackedInt
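// maskmovdqu stores, with a non-temporal hint, only those bytes of $src whose
// corresponding mask byte has its most significant bit set; the destination
// address is taken implicitly from EDI/RDI.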
3468 //===---------------------------------------------------------------------===//
3469 // SSE2 - Move Doubleword
3470 //===---------------------------------------------------------------------===//
3472 //===---------------------------------------------------------------------===//
3473 // Move Int Doubleword to Packed Double Int
3475 def VMOVDI2PDIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
3476 "movd\t{$src, $dst|$dst, $src}",
3477 [(set VR128:$dst,
3478 (v4i32 (scalar_to_vector GR32:$src)))]>, VEX;
3479 def VMOVDI2PDIrm : VPDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
3480 "movd\t{$src, $dst|$dst, $src}",
3481 [(set VR128:$dst,
3482 (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>,
3483 VEX;
3484 def VMOV64toPQIrr : VRPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
3485 "mov{d|q}\t{$src, $dst|$dst, $src}",
3486 [(set VR128:$dst,
3487 (v2i64 (scalar_to_vector GR64:$src)))]>, VEX;
3488 def VMOV64toSDrr : VRPDI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
3489 "mov{d|q}\t{$src, $dst|$dst, $src}",
3490 [(set FR64:$dst, (bitconvert GR64:$src))]>, VEX;
3492 def MOVDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
3493 "movd\t{$src, $dst|$dst, $src}",
3494 [(set VR128:$dst,
3495 (v4i32 (scalar_to_vector GR32:$src)))]>;
3496 def MOVDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
3497 "movd\t{$src, $dst|$dst, $src}",
3498 [(set VR128:$dst,
3499 (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>;
3500 def MOV64toPQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
3501 "mov{d|q}\t{$src, $dst|$dst, $src}",
3502 [(set VR128:$dst,
3503 (v2i64 (scalar_to_vector GR64:$src)))]>;
3504 def MOV64toSDrr : RPDI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
3505 "mov{d|q}\t{$src, $dst|$dst, $src}",
3506 [(set FR64:$dst, (bitconvert GR64:$src))]>;
3508 //===---------------------------------------------------------------------===//
3509 // Move Int Doubleword to Single Scalar
3511 def VMOVDI2SSrr : VPDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
3512 "movd\t{$src, $dst|$dst, $src}",
3513 [(set FR32:$dst, (bitconvert GR32:$src))]>, VEX;
3515 def VMOVDI2SSrm : VPDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
3516 "movd\t{$src, $dst|$dst, $src}",
3517 [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>,
3518 VEX;
3519 def MOVDI2SSrr : PDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
3520 "movd\t{$src, $dst|$dst, $src}",
3521 [(set FR32:$dst, (bitconvert GR32:$src))]>;
3523 def MOVDI2SSrm : PDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
3524 "movd\t{$src, $dst|$dst, $src}",
3525 [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>;
3527 //===---------------------------------------------------------------------===//
3528 // Move Packed Doubleword Int to Packed Double Int
3530 def VMOVPDI2DIrr : VPDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
3531 "movd\t{$src, $dst|$dst, $src}",
3532 [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
3533 (iPTR 0)))]>, VEX;
3534 def VMOVPDI2DImr : VPDI<0x7E, MRMDestMem, (outs),
3535 (ins i32mem:$dst, VR128:$src),
3536 "movd\t{$src, $dst|$dst, $src}",
3537 [(store (i32 (vector_extract (v4i32 VR128:$src),
3538 (iPTR 0))), addr:$dst)]>, VEX;
3539 def MOVPDI2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
3540 "movd\t{$src, $dst|$dst, $src}",
3541 [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
3542 (iPTR 0)))]>;
3543 def MOVPDI2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR128:$src),
3544 "movd\t{$src, $dst|$dst, $src}",
3545 [(store (i32 (vector_extract (v4i32 VR128:$src),
3546 (iPTR 0))), addr:$dst)]>;
3548 def MOVPQIto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
3549 "mov{d|q}\t{$src, $dst|$dst, $src}",
3550 [(set GR64:$dst, (vector_extract (v2i64 VR128:$src),
3551 (iPTR 0)))]>;
3552 def MOV64toSDrm : S3SI<0x7E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
3553 "movq\t{$src, $dst|$dst, $src}",
3554 [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))]>;
3556 def MOVSDto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
3557 "mov{d|q}\t{$src, $dst|$dst, $src}",
3558 [(set GR64:$dst, (bitconvert FR64:$src))]>;
3559 def MOVSDto64mr : RPDI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
3560 "movq\t{$src, $dst|$dst, $src}",
3561 [(store (i64 (bitconvert FR64:$src)), addr:$dst)]>;
3563 //===---------------------------------------------------------------------===//
3564 // Move Scalar Single to Double Int
3566 def VMOVSS2DIrr : VPDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
3567 "movd\t{$src, $dst|$dst, $src}",
3568 [(set GR32:$dst, (bitconvert FR32:$src))]>, VEX;
3569 def VMOVSS2DImr : VPDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
3570 "movd\t{$src, $dst|$dst, $src}",
3571 [(store (i32 (bitconvert FR32:$src)), addr:$dst)]>, VEX;
3572 def MOVSS2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
3573 "movd\t{$src, $dst|$dst, $src}",
3574 [(set GR32:$dst, (bitconvert FR32:$src))]>;
3575 def MOVSS2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
3576 "movd\t{$src, $dst|$dst, $src}",
3577 [(store (i32 (bitconvert FR32:$src)), addr:$dst)]>;
3579 //===---------------------------------------------------------------------===//
3580 // Patterns and instructions to describe movd/movq to XMM register zero-extends
3582 let AddedComplexity = 15 in {
3583 def VMOVZDI2PDIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
3584 "movd\t{$src, $dst|$dst, $src}",
3585 [(set VR128:$dst, (v4i32 (X86vzmovl
3586 (v4i32 (scalar_to_vector GR32:$src)))))]>,
3587 VEX;
3588 def VMOVZQI2PQIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
3589 "mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only
3590 [(set VR128:$dst, (v2i64 (X86vzmovl
3591 (v2i64 (scalar_to_vector GR64:$src)))))]>,
3592 VEX, VEX_W;
3593 }
3594 let AddedComplexity = 15 in {
3595 def MOVZDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
3596 "movd\t{$src, $dst|$dst, $src}",
3597 [(set VR128:$dst, (v4i32 (X86vzmovl
3598 (v4i32 (scalar_to_vector GR32:$src)))))]>;
3599 def MOVZQI2PQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
3600 "mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only
3601 [(set VR128:$dst, (v2i64 (X86vzmovl
3602 (v2i64 (scalar_to_vector GR64:$src)))))]>;
3605 let AddedComplexity = 20 in {
3606 def VMOVZDI2PDIrm : VPDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
3607 "movd\t{$src, $dst|$dst, $src}",
3608 [(set VR128:$dst,
3609 (v4i32 (X86vzmovl (v4i32 (scalar_to_vector
3610 (loadi32 addr:$src))))))]>,
3611 VEX;
3612 def MOVZDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
3613 "movd\t{$src, $dst|$dst, $src}",
3614 [(set VR128:$dst,
3615 (v4i32 (X86vzmovl (v4i32 (scalar_to_vector
3616 (loadi32 addr:$src))))))]>;
3618 def : Pat<(v4i32 (X86vzmovl (loadv4i32 addr:$src))),
3619 (MOVZDI2PDIrm addr:$src)>;
3620 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
3621 (MOVZDI2PDIrm addr:$src)>;
3622 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
3623 (MOVZDI2PDIrm addr:$src)>;
3626 // AVX 128-bit movd/movq instruction write zeros in the high 128-bit part.
3627 // Use regular 128-bit instructions to match 256-bit scalar_to_vec+zext.
3628 def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
3629 (v4i32 (scalar_to_vector GR32:$src)), (i32 0)))),
3630 (SUBREG_TO_REG (i32 0), (VMOVZDI2PDIrr GR32:$src), sub_xmm)>;
3631 def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
3632 (v2i64 (scalar_to_vector GR64:$src)), (i32 0)))),
3633 (SUBREG_TO_REG (i64 0), (VMOVZQI2PQIrr GR64:$src), sub_xmm)>;
3635 // These are the correct encodings of the instructions so that we know how to
3636 // read correct assembly, even though we continue to emit the wrong ones for
3637 // compatibility with Darwin's buggy assembler.
3638 def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
3639 (MOV64toPQIrr VR128:$dst, GR64:$src), 0>;
3640 def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
3641 (MOV64toSDrr FR64:$dst, GR64:$src), 0>;
3642 def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
3643 (MOVPQIto64rr GR64:$dst, VR128:$src), 0>;
3644 def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
3645 (MOVSDto64rr GR64:$dst, FR64:$src), 0>;
3646 def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
3647 (VMOVZQI2PQIrr VR128:$dst, GR64:$src), 0>;
3648 def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
3649 (MOVZQI2PQIrr VR128:$dst, GR64:$src), 0>;
3651 //===---------------------------------------------------------------------===//
3652 // SSE2 - Move Quadword
3653 //===---------------------------------------------------------------------===//
3655 //===---------------------------------------------------------------------===//
3656 // Move Quadword Int to Packed Quadword Int
3658 def VMOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3659 "vmovq\t{$src, $dst|$dst, $src}",
3660 [(set VR128:$dst,
3661 (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
3662 VEX, Requires<[HasAVX]>;
3663 def MOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3664 "movq\t{$src, $dst|$dst, $src}",
3665 [(set VR128:$dst,
3666 (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
3667 Requires<[HasSSE2]>; // SSE2 instruction with XS Prefix
3669 //===---------------------------------------------------------------------===//
3670 // Move Packed Quadword Int to Quadword Int
3672 def VMOVPQI2QImr : VPDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3673 "movq\t{$src, $dst|$dst, $src}",
3674 [(store (i64 (vector_extract (v2i64 VR128:$src),
3675 (iPTR 0))), addr:$dst)]>, VEX;
3676 def MOVPQI2QImr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3677 "movq\t{$src, $dst|$dst, $src}",
3678 [(store (i64 (vector_extract (v2i64 VR128:$src),
3679 (iPTR 0))), addr:$dst)]>;
3681 def : Pat<(f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
3682 (f64 (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd))>;
3684 //===---------------------------------------------------------------------===//
3685 // Store / copy lower 64-bits of a XMM register.
3687 def VMOVLQ128mr : VPDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3688 "movq\t{$src, $dst|$dst, $src}",
3689 [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>, VEX;
3690 def MOVLQ128mr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3691 "movq\t{$src, $dst|$dst, $src}",
3692 [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>;
3694 let AddedComplexity = 20 in
3695 def VMOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3696 "vmovq\t{$src, $dst|$dst, $src}",
3697 [(set VR128:$dst,
3698 (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
3699 (loadi64 addr:$src))))))]>,
3700 XS, VEX, Requires<[HasAVX]>;
3702 let AddedComplexity = 20 in {
3703 def MOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3704 "movq\t{$src, $dst|$dst, $src}",
3705 [(set VR128:$dst,
3706 (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
3707 (loadi64 addr:$src))))))]>,
3708 XS, Requires<[HasSSE2]>;
3710 def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
3711 (MOVZQI2PQIrm addr:$src)>;
3712 def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4f32 addr:$src)))),
3713 (MOVZQI2PQIrm addr:$src)>;
3714 def : Pat<(v2i64 (X86vzload addr:$src)), (MOVZQI2PQIrm addr:$src)>;
3717 //===---------------------------------------------------------------------===//
3718 // Moving from XMM to XMM and clear upper 64 bits. Note, there is a bug in
3719 // IA32 document. movq xmm1, xmm2 does clear the high bits.
3721 let AddedComplexity = 15 in
3722 def VMOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3723 "vmovq\t{$src, $dst|$dst, $src}",
3724 [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
3725 XS, VEX, Requires<[HasAVX]>;
3726 let AddedComplexity = 15 in
3727 def MOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3728 "movq\t{$src, $dst|$dst, $src}",
3729 [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
3730 XS, Requires<[HasSSE2]>;
3732 let AddedComplexity = 20 in
3733 def VMOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3734 "vmovq\t{$src, $dst|$dst, $src}",
3735 [(set VR128:$dst, (v2i64 (X86vzmovl
3736 (loadv2i64 addr:$src))))]>,
3737 XS, VEX, Requires<[HasAVX]>;
3738 let AddedComplexity = 20 in {
3739 def MOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3740 "movq\t{$src, $dst|$dst, $src}",
3741 [(set VR128:$dst, (v2i64 (X86vzmovl
3742 (loadv2i64 addr:$src))))]>,
3743 XS, Requires<[HasSSE2]>;
3745 def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4i32 addr:$src)))),
3746 (MOVZPQILo2PQIrm addr:$src)>;
3749 // Instructions to match in the assembler
3750 def VMOVQs64rr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
3751 "movq\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
3752 def VMOVQd64rr : VPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
3753 "movq\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
3754 // Recognize "movd" with GR64 destination, but encode as a "movq"
3755 def VMOVQd64rr_alt : VPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
3756 "movd\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
3758 // Instructions for the disassembler
3759 // xr = XMM register
3762 let Predicates = [HasAVX] in
3763 def VMOVQxrxr: I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3764 "vmovq\t{$src, $dst|$dst, $src}", []>, VEX, XS;
3765 def MOVQxrxr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3766 "movq\t{$src, $dst|$dst, $src}", []>, XS;
3768 //===---------------------------------------------------------------------===//
3769 // SSE2 - Misc Instructions
3770 //===---------------------------------------------------------------------===//
3773 def CLFLUSH : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
3774 "clflush\t$src", [(int_x86_sse2_clflush addr:$src)]>,
3775 TB, Requires<[HasSSE2]>;
3777 // Load, store, and memory fence
3778 def LFENCE : I<0xAE, MRM_E8, (outs), (ins),
3779 "lfence", [(int_x86_sse2_lfence)]>, TB, Requires<[HasSSE2]>;
3780 def MFENCE : I<0xAE, MRM_F0, (outs), (ins),
3781 "mfence", [(int_x86_sse2_mfence)]>, TB, Requires<[HasSSE2]>;
3782 def : Pat<(X86LFence), (LFENCE)>;
3783 def : Pat<(X86MFence), (MFENCE)>;
3786 // Pause. This "instruction" is encoded as "rep; nop", so even though it
3787 // was introduced with SSE2, it's backward compatible.
3788 def PAUSE : I<0x90, RawFrm, (outs), (ins), "pause", []>, REP;
// Alias instructions that map an all-ones vector to pcmpeqd.
3791 // We set canFoldAsLoad because this can be converted to a constant-pool
3792 // load of an all-ones value if folding it would be beneficial.
3793 // FIXME: Change encoding to pseudo! This is blocked right now by the x86
3794 // JIT implementation, it does not expand the instructions below like
3795 // X86MCInstLower does.
3796 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
3797 isCodeGenOnly = 1, ExeDomain = SSEPackedInt in
3798 def V_SETALLONES : PDI<0x76, MRMInitReg, (outs VR128:$dst), (ins), "",
3799 [(set VR128:$dst, (v4i32 immAllOnesV))]>;
3800 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
3801 isCodeGenOnly = 1, ExeDomain = SSEPackedInt, Predicates = [HasAVX] in
3802 def AVX_SETALLONES : PDI<0x76, MRMInitReg, (outs VR128:$dst), (ins), "",
3803 [(set VR128:$dst, (v4i32 immAllOnesV))]>, VEX_4V;
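// Note: opcode 0x76 is pcmpeqd; comparing a register with itself makes every
// lane compare equal, so the destination becomes all ones without needing a
// constant-pool load.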
3805 //===---------------------------------------------------------------------===//
3806 // SSE3 - Conversion Instructions
3807 //===---------------------------------------------------------------------===//
3809 // Convert Packed Double FP to Packed DW Integers
3810 let Predicates = [HasAVX] in {
3811 // The assembler can recognize rr 256-bit instructions by seeing a ymm
3812 // register, but the same isn't true when using memory operands instead.
3813 // Provide other assembly rr and rm forms to address this explicitly.
3814 def VCVTPD2DQrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3815 "vcvtpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
3816 def VCVTPD2DQXrYr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
3817 "vcvtpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
// XMM only
def VCVTPD2DQXrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3821 "vcvtpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
3822 def VCVTPD2DQXrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3823 "vcvtpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
// YMM only
def VCVTPD2DQYrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
3827 "vcvtpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX;
3828 def VCVTPD2DQYrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
3829 "vcvtpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
3832 def CVTPD2DQrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3833 "cvtpd2dq\t{$src, $dst|$dst, $src}", []>;
3834 def CVTPD2DQrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3835 "cvtpd2dq\t{$src, $dst|$dst, $src}", []>;
3837 def : Pat<(v4i32 (fp_to_sint (v4f64 VR256:$src))),
3838 (VCVTPD2DQYrr VR256:$src)>;
3839 def : Pat<(v4i32 (fp_to_sint (memopv4f64 addr:$src))),
3840 (VCVTPD2DQYrm addr:$src)>;
3842 // Convert Packed DW Integers to Packed Double FP
3843 let Predicates = [HasAVX] in {
3844 def VCVTDQ2PDrm : S3SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3845 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
3846 def VCVTDQ2PDrr : S3SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3847 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
3848 def VCVTDQ2PDYrm : S3SI<0xE6, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
3849 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
3850 def VCVTDQ2PDYrr : S3SI<0xE6, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
3851 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
3854 def CVTDQ2PDrm : S3SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3855 "cvtdq2pd\t{$src, $dst|$dst, $src}", []>;
3856 def CVTDQ2PDrr : S3SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3857 "cvtdq2pd\t{$src, $dst|$dst, $src}", []>;
3859 // AVX 256-bit register conversion intrinsics
3860 def : Pat<(int_x86_avx_cvtdq2_pd_256 VR128:$src),
3861 (VCVTDQ2PDYrr VR128:$src)>;
3862 def : Pat<(int_x86_avx_cvtdq2_pd_256 (memopv4i32 addr:$src)),
3863 (VCVTDQ2PDYrm addr:$src)>;
3865 def : Pat<(int_x86_avx_cvt_pd2dq_256 VR256:$src),
3866 (VCVTPD2DQYrr VR256:$src)>;
3867 def : Pat<(int_x86_avx_cvt_pd2dq_256 (memopv4f64 addr:$src)),
3868 (VCVTPD2DQYrm addr:$src)>;
3870 def : Pat<(v4f64 (sint_to_fp (v4i32 VR128:$src))),
3871 (VCVTDQ2PDYrr VR128:$src)>;
3872 def : Pat<(v4f64 (sint_to_fp (memopv4i32 addr:$src))),
3873 (VCVTDQ2PDYrm addr:$src)>;
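// cvtdq2pd widens i32 elements to f64, so the 256-bit form reads only a
// 128-bit source: an XMM register for the rr form and a 128-bit memory
// operand for the rm form.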
3875 //===---------------------------------------------------------------------===//
3876 // SSE3 - Replicate Single FP - MOVSHDUP and MOVSLDUP
3877 //===---------------------------------------------------------------------===//
3878 multiclass sse3_replicate_sfp<bits<8> op, SDNode OpNode, string OpcodeStr,
3879 ValueType vt, RegisterClass RC, PatFrag mem_frag,
3880 X86MemOperand x86memop> {
3881 def rr : S3SI<op, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
3882 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3883 [(set RC:$dst, (vt (OpNode RC:$src)))]>;
3884 def rm : S3SI<op, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
3885 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                      [(set RC:$dst, (OpNode (mem_frag addr:$src)))]>;
}
3889 let Predicates = [HasAVX] in {
3890 defm VMOVSHDUP : sse3_replicate_sfp<0x16, X86Movshdup, "vmovshdup",
3891 v4f32, VR128, memopv4f32, f128mem>, VEX;
3892 defm VMOVSLDUP : sse3_replicate_sfp<0x12, X86Movsldup, "vmovsldup",
3893 v4f32, VR128, memopv4f32, f128mem>, VEX;
3894 defm VMOVSHDUPY : sse3_replicate_sfp<0x16, X86Movshdup, "vmovshdup",
3895 v8f32, VR256, memopv8f32, f256mem>, VEX;
3896 defm VMOVSLDUPY : sse3_replicate_sfp<0x12, X86Movsldup, "vmovsldup",
3897 v8f32, VR256, memopv8f32, f256mem>, VEX;
3899 defm MOVSHDUP : sse3_replicate_sfp<0x16, X86Movshdup, "movshdup", v4f32, VR128,
3900 memopv4f32, f128mem>;
3901 defm MOVSLDUP : sse3_replicate_sfp<0x12, X86Movsldup, "movsldup", v4f32, VR128,
3902 memopv4f32, f128mem>;
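// movshdup replicates the odd-index single-precision elements
// (dst = src[1,1,3,3]); movsldup replicates the even-index ones
// (dst = src[0,0,2,2]).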
3904 let Predicates = [HasSSE3] in {
3905 def : Pat<(v4i32 (X86Movshdup VR128:$src)),
3906 (MOVSHDUPrr VR128:$src)>;
3907 def : Pat<(v4i32 (X86Movshdup (bc_v4i32 (memopv2i64 addr:$src)))),
3908 (MOVSHDUPrm addr:$src)>;
3909 def : Pat<(v4i32 (X86Movsldup VR128:$src)),
3910 (MOVSLDUPrr VR128:$src)>;
3911 def : Pat<(v4i32 (X86Movsldup (bc_v4i32 (memopv2i64 addr:$src)))),
3912 (MOVSLDUPrm addr:$src)>;
3915 let Predicates = [HasAVX] in {
3916 def : Pat<(v4i32 (X86Movshdup VR128:$src)),
3917 (VMOVSHDUPrr VR128:$src)>;
3918 def : Pat<(v4i32 (X86Movshdup (bc_v4i32 (memopv2i64 addr:$src)))),
3919 (VMOVSHDUPrm addr:$src)>;
3920 def : Pat<(v4i32 (X86Movsldup VR128:$src)),
3921 (VMOVSLDUPrr VR128:$src)>;
3922 def : Pat<(v4i32 (X86Movsldup (bc_v4i32 (memopv2i64 addr:$src)))),
3923 (VMOVSLDUPrm addr:$src)>;
3924 def : Pat<(v8i32 (X86Movshdup VR256:$src)),
3925 (VMOVSHDUPYrr VR256:$src)>;
3926 def : Pat<(v8i32 (X86Movshdup (bc_v8i32 (memopv4i64 addr:$src)))),
3927 (VMOVSHDUPYrm addr:$src)>;
3928 def : Pat<(v8i32 (X86Movsldup VR256:$src)),
3929 (VMOVSLDUPYrr VR256:$src)>;
3930 def : Pat<(v8i32 (X86Movsldup (bc_v8i32 (memopv4i64 addr:$src)))),
3931 (VMOVSLDUPYrm addr:$src)>;
3934 //===---------------------------------------------------------------------===//
3935 // SSE3 - Replicate Double FP - MOVDDUP
3936 //===---------------------------------------------------------------------===//
3938 multiclass sse3_replicate_dfp<string OpcodeStr> {
3939 def rr : S3DI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3940 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3941 [(set VR128:$dst,(v2f64 (movddup VR128:$src, (undef))))]>;
3942 def rm : S3DI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
3943 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst,
                      (v2f64 (movddup (scalar_to_vector (loadf64 addr:$src)),
                                      (undef))))]>;
}
// FIXME: Merge with the class above once there are patterns for the ymm version
3950 multiclass sse3_replicate_dfp_y<string OpcodeStr> {
3951 let Predicates = [HasAVX] in {
3952 def rr : S3DI<0x12, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    []>;
  def rm  : S3DI<0x12, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    []>;
}
}
3961 defm MOVDDUP : sse3_replicate_dfp<"movddup">;
3962 defm VMOVDDUP : sse3_replicate_dfp<"vmovddup">, VEX;
3963 defm VMOVDDUPY : sse3_replicate_dfp_y<"vmovddup">, VEX;
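// movddup replicates the low double-precision element (dst = src[0,0]), which
// is why the 128-bit memory form only loads 64 bits (f64mem).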
3965 let Predicates = [HasSSE3] in {
3966 def : Pat<(movddup (bc_v2f64 (v2i64 (scalar_to_vector (loadi64 addr:$src)))),
                   (undef)),
          (MOVDDUPrm addr:$src)>;
3969 let AddedComplexity = 5 in {
3970 def : Pat<(movddup (memopv2f64 addr:$src), (undef)), (MOVDDUPrm addr:$src)>;
3971 def : Pat<(movddup (bc_v4f32 (memopv2f64 addr:$src)), (undef)),
3972 (MOVDDUPrm addr:$src)>;
3973 def : Pat<(movddup (memopv2i64 addr:$src), (undef)), (MOVDDUPrm addr:$src)>;
3974 def : Pat<(movddup (bc_v4i32 (memopv2i64 addr:$src)), (undef)),
              (MOVDDUPrm addr:$src)>;
  }
3977 def : Pat<(X86Movddup (memopv2f64 addr:$src)),
3978 (MOVDDUPrm addr:$src)>;
3979 def : Pat<(X86Movddup (bc_v2f64 (memopv4f32 addr:$src))),
3980 (MOVDDUPrm addr:$src)>;
3981 def : Pat<(X86Movddup (bc_v2f64 (memopv2i64 addr:$src))),
3982 (MOVDDUPrm addr:$src)>;
3983 def : Pat<(X86Movddup (v2f64 (scalar_to_vector (loadf64 addr:$src)))),
3984 (MOVDDUPrm addr:$src)>;
3985 def : Pat<(X86Movddup (bc_v2f64
3986 (v2i64 (scalar_to_vector (loadi64 addr:$src))))),
            (MOVDDUPrm addr:$src)>;
}
3990 let Predicates = [HasAVX] in {
3991 def : Pat<(movddup (bc_v2f64 (v2i64 (scalar_to_vector (loadi64 addr:$src)))),
                   (undef)),
          (VMOVDDUPrm addr:$src)>;
3994 let AddedComplexity = 5 in {
3995 def : Pat<(movddup (memopv2f64 addr:$src), (undef)), (VMOVDDUPrm addr:$src)>;
3996 def : Pat<(movddup (bc_v4f32 (memopv2f64 addr:$src)), (undef)),
3997 (VMOVDDUPrm addr:$src)>;
3998 def : Pat<(movddup (memopv2i64 addr:$src), (undef)), (VMOVDDUPrm addr:$src)>;
3999 def : Pat<(movddup (bc_v4i32 (memopv2i64 addr:$src)), (undef)),
              (VMOVDDUPrm addr:$src)>;
  }
4002 def : Pat<(X86Movddup (memopv2f64 addr:$src)),
4003 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
4004 def : Pat<(X86Movddup (bc_v2f64 (memopv4f32 addr:$src))),
4005 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
4006 def : Pat<(X86Movddup (bc_v2f64 (memopv2i64 addr:$src))),
4007 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
4008 def : Pat<(X86Movddup (v2f64 (scalar_to_vector (loadf64 addr:$src)))),
4009 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
4010 def : Pat<(X86Movddup (bc_v2f64
4011 (v2i64 (scalar_to_vector (loadi64 addr:$src))))),
            (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
}
4015 //===---------------------------------------------------------------------===//
4016 // SSE3 - Move Unaligned Integer
4017 //===---------------------------------------------------------------------===//
4019 let Predicates = [HasAVX] in {
4020 def VLDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
4021 "vlddqu\t{$src, $dst|$dst, $src}",
4022 [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>, VEX;
4023 def VLDDQUYrm : S3DI<0xF0, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
4024 "vlddqu\t{$src, $dst|$dst, $src}",
4025 [(set VR256:$dst, (int_x86_avx_ldu_dq_256 addr:$src))]>, VEX;
4027 def LDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
4028 "lddqu\t{$src, $dst|$dst, $src}",
4029 [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>;
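// lddqu loads 128 unaligned bits like movdqu; on some microarchitectures it is
// faster for loads that straddle a cache line, so here it is only selected
// through its intrinsic rather than for ordinary unaligned loads.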
4031 //===---------------------------------------------------------------------===//
4032 // SSE3 - Arithmetic
4033 //===---------------------------------------------------------------------===//
4035 multiclass sse3_addsub<Intrinsic Int, string OpcodeStr, RegisterClass RC,
4036 X86MemOperand x86memop, bit Is2Addr = 1> {
4037 def rr : I<0xD0, MRMSrcReg,
4038 (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
         !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4041 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4042 [(set RC:$dst, (Int RC:$src1, RC:$src2))]>;
4043 def rm : I<0xD0, MRMSrcMem,
4044 (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
         !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4047 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (Int RC:$src1, (memop addr:$src2)))]>;
}
4051 let Predicates = [HasAVX],
4052 ExeDomain = SSEPackedDouble in {
4053 defm VADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "vaddsubps", VR128,
4054 f128mem, 0>, TB, XD, VEX_4V;
4055 defm VADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "vaddsubpd", VR128,
4056 f128mem, 0>, TB, OpSize, VEX_4V;
4057 defm VADDSUBPSY : sse3_addsub<int_x86_avx_addsub_ps_256, "vaddsubps", VR256,
4058 f256mem, 0>, TB, XD, VEX_4V;
4059 defm VADDSUBPDY : sse3_addsub<int_x86_avx_addsub_pd_256, "vaddsubpd", VR256,
4060 f256mem, 0>, TB, OpSize, VEX_4V;
4062 let Constraints = "$src1 = $dst", Predicates = [HasSSE3],
4063 ExeDomain = SSEPackedDouble in {
  defm ADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "addsubps", VR128,
                              f128mem>, TB, XD;
  defm ADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "addsubpd", VR128,
                              f128mem>, TB, OpSize;
}
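// addsub alternates subtract and add across the lanes, e.g. for addsubps:
//   dst[0] = a[0] - b[0], dst[1] = a[1] + b[1],
//   dst[2] = a[2] - b[2], dst[3] = a[3] + b[3]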
4070 //===---------------------------------------------------------------------===//
4071 // SSE3 Instructions
4072 //===---------------------------------------------------------------------===//
4075 multiclass S3D_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
4076 X86MemOperand x86memop, Intrinsic IntId, bit Is2Addr = 1> {
4077 def rr : S3DI<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
4079 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4080 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4081 [(set RC:$dst, (vt (IntId RC:$src1, RC:$src2)))]>;
4083 def rm : S3DI<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
4085 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4086 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
        [(set RC:$dst, (vt (IntId RC:$src1, (memop addr:$src2))))]>;
}
4089 multiclass S3_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
4090 X86MemOperand x86memop, Intrinsic IntId, bit Is2Addr = 1> {
4091 def rr : S3I<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
4093 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4094 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4095 [(set RC:$dst, (vt (IntId RC:$src1, RC:$src2)))]>;
4097 def rm : S3I<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
4099 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4100 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
        [(set RC:$dst, (vt (IntId RC:$src1, (memop addr:$src2))))]>;
}
4104 let Predicates = [HasAVX] in {
4105 defm VHADDPS : S3D_Int<0x7C, "vhaddps", v4f32, VR128, f128mem,
4106 int_x86_sse3_hadd_ps, 0>, VEX_4V;
4107 defm VHADDPD : S3_Int <0x7C, "vhaddpd", v2f64, VR128, f128mem,
4108 int_x86_sse3_hadd_pd, 0>, VEX_4V;
4109 defm VHSUBPS : S3D_Int<0x7D, "vhsubps", v4f32, VR128, f128mem,
4110 int_x86_sse3_hsub_ps, 0>, VEX_4V;
4111 defm VHSUBPD : S3_Int <0x7D, "vhsubpd", v2f64, VR128, f128mem,
4112 int_x86_sse3_hsub_pd, 0>, VEX_4V;
4113 defm VHADDPSY : S3D_Int<0x7C, "vhaddps", v8f32, VR256, f256mem,
4114 int_x86_avx_hadd_ps_256, 0>, VEX_4V;
4115 defm VHADDPDY : S3_Int <0x7C, "vhaddpd", v4f64, VR256, f256mem,
4116 int_x86_avx_hadd_pd_256, 0>, VEX_4V;
4117 defm VHSUBPSY : S3D_Int<0x7D, "vhsubps", v8f32, VR256, f256mem,
4118 int_x86_avx_hsub_ps_256, 0>, VEX_4V;
4119 defm VHSUBPDY : S3_Int <0x7D, "vhsubpd", v4f64, VR256, f256mem,
4120 int_x86_avx_hsub_pd_256, 0>, VEX_4V;
4123 let Constraints = "$src1 = $dst" in {
4124 defm HADDPS : S3D_Int<0x7C, "haddps", v4f32, VR128, f128mem,
4125 int_x86_sse3_hadd_ps>;
4126 defm HADDPD : S3_Int<0x7C, "haddpd", v2f64, VR128, f128mem,
4127 int_x86_sse3_hadd_pd>;
4128 defm HSUBPS : S3D_Int<0x7D, "hsubps", v4f32, VR128, f128mem,
4129 int_x86_sse3_hsub_ps>;
4130 defm HSUBPD : S3_Int<0x7D, "hsubpd", v2f64, VR128, f128mem,
                       int_x86_sse3_hsub_pd>;
}
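// The horizontal ops combine adjacent elements within each source, e.g. haddps:
//   dst = [ a[0]+a[1], a[2]+a[3], b[0]+b[1], b[2]+b[3] ]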
4134 //===---------------------------------------------------------------------===//
4135 // SSSE3 - Packed Absolute Instructions
4136 //===---------------------------------------------------------------------===//
4139 /// SS3I_unop_rm_int - Simple SSSE3 unary op whose type can be v*{i8,i16,i32}.
4140 multiclass SS3I_unop_rm_int<bits<8> opc, string OpcodeStr,
4141 PatFrag mem_frag128, Intrinsic IntId128> {
  def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
                    (ins VR128:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (IntId128 VR128:$src))]>,
                    OpSize;
  def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
                    (ins i128mem:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst,
                      (IntId128
                       (bitconvert (mem_frag128 addr:$src))))]>, OpSize;
}
4156 let Predicates = [HasAVX] in {
4157 defm VPABSB : SS3I_unop_rm_int<0x1C, "vpabsb", memopv16i8,
4158 int_x86_ssse3_pabs_b_128>, VEX;
4159 defm VPABSW : SS3I_unop_rm_int<0x1D, "vpabsw", memopv8i16,
4160 int_x86_ssse3_pabs_w_128>, VEX;
4161 defm VPABSD : SS3I_unop_rm_int<0x1E, "vpabsd", memopv4i32,
4162 int_x86_ssse3_pabs_d_128>, VEX;
4165 defm PABSB : SS3I_unop_rm_int<0x1C, "pabsb", memopv16i8,
4166 int_x86_ssse3_pabs_b_128>;
4167 defm PABSW : SS3I_unop_rm_int<0x1D, "pabsw", memopv8i16,
4168 int_x86_ssse3_pabs_w_128>;
4169 defm PABSD : SS3I_unop_rm_int<0x1E, "pabsd", memopv4i32,
4170 int_x86_ssse3_pabs_d_128>;
4172 //===---------------------------------------------------------------------===//
4173 // SSSE3 - Packed Binary Operator Instructions
4174 //===---------------------------------------------------------------------===//
4176 /// SS3I_binop_rm_int - Simple SSSE3 bin op whose type can be v*{i8,i16,i32}.
4177 multiclass SS3I_binop_rm_int<bits<8> opc, string OpcodeStr,
                             PatFrag mem_frag128, Intrinsic IntId128,
                             bit Is2Addr = 1> {
  let isCommutable = 1 in
  def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
       (ins VR128:$src1, VR128:$src2),
       !if(Is2Addr,
         !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
         !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
       OpSize;
  def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
       (ins VR128:$src1, i128mem:$src2),
       !if(Is2Addr,
         !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
         !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst,
         (IntId128 VR128:$src1,
          (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
}
4198 let Predicates = [HasAVX] in {
4199 let isCommutable = 0 in {
4200 defm VPHADDW : SS3I_binop_rm_int<0x01, "vphaddw", memopv8i16,
4201 int_x86_ssse3_phadd_w_128, 0>, VEX_4V;
4202 defm VPHADDD : SS3I_binop_rm_int<0x02, "vphaddd", memopv4i32,
4203 int_x86_ssse3_phadd_d_128, 0>, VEX_4V;
4204 defm VPHADDSW : SS3I_binop_rm_int<0x03, "vphaddsw", memopv8i16,
4205 int_x86_ssse3_phadd_sw_128, 0>, VEX_4V;
4206 defm VPHSUBW : SS3I_binop_rm_int<0x05, "vphsubw", memopv8i16,
4207 int_x86_ssse3_phsub_w_128, 0>, VEX_4V;
4208 defm VPHSUBD : SS3I_binop_rm_int<0x06, "vphsubd", memopv4i32,
4209 int_x86_ssse3_phsub_d_128, 0>, VEX_4V;
4210 defm VPHSUBSW : SS3I_binop_rm_int<0x07, "vphsubsw", memopv8i16,
4211 int_x86_ssse3_phsub_sw_128, 0>, VEX_4V;
4212 defm VPMADDUBSW : SS3I_binop_rm_int<0x04, "vpmaddubsw", memopv16i8,
4213 int_x86_ssse3_pmadd_ub_sw_128, 0>, VEX_4V;
4214 defm VPSHUFB : SS3I_binop_rm_int<0x00, "vpshufb", memopv16i8,
4215 int_x86_ssse3_pshuf_b_128, 0>, VEX_4V;
4216 defm VPSIGNB : SS3I_binop_rm_int<0x08, "vpsignb", memopv16i8,
4217 int_x86_ssse3_psign_b_128, 0>, VEX_4V;
4218 defm VPSIGNW : SS3I_binop_rm_int<0x09, "vpsignw", memopv8i16,
4219 int_x86_ssse3_psign_w_128, 0>, VEX_4V;
4220 defm VPSIGND : SS3I_binop_rm_int<0x0A, "vpsignd", memopv4i32,
4221 int_x86_ssse3_psign_d_128, 0>, VEX_4V;
4223 defm VPMULHRSW : SS3I_binop_rm_int<0x0B, "vpmulhrsw", memopv8i16,
4224 int_x86_ssse3_pmul_hr_sw_128, 0>, VEX_4V;
4227 // None of these have i8 immediate fields.
4228 let ImmT = NoImm, Constraints = "$src1 = $dst" in {
4229 let isCommutable = 0 in {
4230 defm PHADDW : SS3I_binop_rm_int<0x01, "phaddw", memopv8i16,
4231 int_x86_ssse3_phadd_w_128>;
4232 defm PHADDD : SS3I_binop_rm_int<0x02, "phaddd", memopv4i32,
4233 int_x86_ssse3_phadd_d_128>;
4234 defm PHADDSW : SS3I_binop_rm_int<0x03, "phaddsw", memopv8i16,
4235 int_x86_ssse3_phadd_sw_128>;
4236 defm PHSUBW : SS3I_binop_rm_int<0x05, "phsubw", memopv8i16,
4237 int_x86_ssse3_phsub_w_128>;
4238 defm PHSUBD : SS3I_binop_rm_int<0x06, "phsubd", memopv4i32,
4239 int_x86_ssse3_phsub_d_128>;
4240 defm PHSUBSW : SS3I_binop_rm_int<0x07, "phsubsw", memopv8i16,
4241 int_x86_ssse3_phsub_sw_128>;
4242 defm PMADDUBSW : SS3I_binop_rm_int<0x04, "pmaddubsw", memopv16i8,
4243 int_x86_ssse3_pmadd_ub_sw_128>;
4244 defm PSHUFB : SS3I_binop_rm_int<0x00, "pshufb", memopv16i8,
4245 int_x86_ssse3_pshuf_b_128>;
4246 defm PSIGNB : SS3I_binop_rm_int<0x08, "psignb", memopv16i8,
4247 int_x86_ssse3_psign_b_128>;
4248 defm PSIGNW : SS3I_binop_rm_int<0x09, "psignw", memopv8i16,
4249 int_x86_ssse3_psign_w_128>;
4250 defm PSIGND : SS3I_binop_rm_int<0x0A, "psignd", memopv4i32,
4251 int_x86_ssse3_psign_d_128>;
4253 defm PMULHRSW : SS3I_binop_rm_int<0x0B, "pmulhrsw", memopv8i16,
4254 int_x86_ssse3_pmul_hr_sw_128>;
4257 def : Pat<(X86pshufb VR128:$src, VR128:$mask),
4258 (PSHUFBrr128 VR128:$src, VR128:$mask)>, Requires<[HasSSSE3]>;
4259 def : Pat<(X86pshufb VR128:$src, (bc_v16i8 (memopv2i64 addr:$mask))),
4260 (PSHUFBrm128 VR128:$src, addr:$mask)>, Requires<[HasSSSE3]>;
4262 def : Pat<(X86psignb VR128:$src1, VR128:$src2),
4263 (PSIGNBrr128 VR128:$src1, VR128:$src2)>, Requires<[HasSSSE3]>;
4264 def : Pat<(X86psignw VR128:$src1, VR128:$src2),
4265 (PSIGNWrr128 VR128:$src1, VR128:$src2)>, Requires<[HasSSSE3]>;
4266 def : Pat<(X86psignd VR128:$src1, VR128:$src2),
4267 (PSIGNDrr128 VR128:$src1, VR128:$src2)>, Requires<[HasSSSE3]>;
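// psign{b,w,d} negate, keep, or zero each element of the first operand
// according to whether the corresponding element of the second operand is
// negative, positive, or zero.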
4269 //===---------------------------------------------------------------------===//
4270 // SSSE3 - Packed Align Instruction Patterns
4271 //===---------------------------------------------------------------------===//
4273 multiclass ssse3_palign<string asm, bit Is2Addr = 1> {
  def R128rr : SS3AI<0x0F, MRMSrcReg, (outs VR128:$dst),
      (ins VR128:$src1, VR128:$src2, i8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                  "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      []>, OpSize;
  def R128rm : SS3AI<0x0F, MRMSrcMem, (outs VR128:$dst),
      (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                  "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      []>, OpSize;
}
4290 let Predicates = [HasAVX] in
4291 defm VPALIGN : ssse3_palign<"vpalignr", 0>, VEX_4V;
4292 let Constraints = "$src1 = $dst", Predicates = [HasSSSE3] in
4293 defm PALIGN : ssse3_palign<"palignr">;
4295 let Predicates = [HasSSSE3] in {
4296 def : Pat<(v4i32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
4297 (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
4298 def : Pat<(v4f32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
4299 (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
4300 def : Pat<(v8i16 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
4301 (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
4302 def : Pat<(v16i8 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
        (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
}
4306 let Predicates = [HasAVX] in {
4307 def : Pat<(v4i32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
4308 (VPALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
4309 def : Pat<(v4f32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
4310 (VPALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
4311 def : Pat<(v8i16 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
4312 (VPALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
4313 def : Pat<(v16i8 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
        (VPALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
}
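// palignr concatenates the two sources and extracts a byte-shifted 128-bit
// window selected by the immediate; note that the X86PAlign patterns above
// deliberately pass $src2 as the instruction's first operand.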
4317 //===---------------------------------------------------------------------===//
4318 // SSSE3 Misc Instructions
4319 //===---------------------------------------------------------------------===//
4321 // Thread synchronization
4322 let usesCustomInserter = 1 in {
4323 def MONITOR : PseudoI<(outs), (ins i32mem:$src1, GR32:$src2, GR32:$src3),
4324 [(int_x86_sse3_monitor addr:$src1, GR32:$src2, GR32:$src3)]>;
4325 def MWAIT : PseudoI<(outs), (ins GR32:$src1, GR32:$src2),
               [(int_x86_sse3_mwait GR32:$src1, GR32:$src2)]>;
}
4329 let Uses = [EAX, ECX, EDX] in
4330 def MONITORrrr : I<0x01, MRM_C8, (outs), (ins), "monitor", []>, TB,
4331 Requires<[HasSSE3]>;
4332 let Uses = [ECX, EAX] in
4333 def MWAITrr : I<0x01, MRM_C9, (outs), (ins), "mwait", []>, TB,
4334 Requires<[HasSSE3]>;
4336 def : InstAlias<"mwait %eax, %ecx", (MWAITrr)>, Requires<[In32BitMode]>;
4337 def : InstAlias<"mwait %rax, %rcx", (MWAITrr)>, Requires<[In64BitMode]>;
4339 def : InstAlias<"monitor %eax, %ecx, %edx", (MONITORrrr)>,
4340 Requires<[In32BitMode]>;
4341 def : InstAlias<"monitor %rax, %rcx, %rdx", (MONITORrrr)>,
4342 Requires<[In64BitMode]>;
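// monitor uses EAX/ECX/EDX and mwait uses EAX/ECX implicitly (RAX/RCX/RDX in
// 64-bit mode); the aliases above only exist so the assembler also accepts the
// explicit-register spelling.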
4344 // extload f32 -> f64. This matches load+fextend because we have a hack in
// the isel (PreprocessForFPConvert) that can introduce loads after dag
// combine.
// Since these loads aren't folded into the fextend, we have to match it
// explicitly here.
4349 let Predicates = [HasSSE2] in
4350 def : Pat<(fextend (loadf32 addr:$src)),
4351 (CVTSS2SDrm addr:$src)>;
// Move scalar to XMM zero-extended: a movd/movss into an XMM register
// zero-extends into the upper elements.
4355 let AddedComplexity = 15 in {
4356 // Zeroing a VR128 then do a MOVS{S|D} to the lower bits.
4357 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64:$src)))),
4358 (MOVSDrr (v2f64 (V_SET0PS)), FR64:$src)>;
4359 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32:$src)))),
4360 (MOVSSrr (v4f32 (V_SET0PS)), FR32:$src)>;
4361 def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
4362 (MOVSSrr (v4f32 (V_SET0PS)),
4363 (f32 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss)))>;
4364 def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
4365 (MOVSSrr (v4i32 (V_SET0PI)),
4366 (EXTRACT_SUBREG (v4i32 VR128:$src), sub_ss))>;
4369 // Splat v2f64 / v2i64
4370 let AddedComplexity = 10 in {
4371 def : Pat<(splat_lo (v2i64 VR128:$src), (undef)),
4372 (PUNPCKLQDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
4375 let AddedComplexity = 20 in {
4376 // vector_shuffle v1, (load v2) <4, 5, 2, 3> using MOVLPS
4377 def : Pat<(v4f32 (movlp VR128:$src1, (load addr:$src2))),
4378 (MOVLPSrm VR128:$src1, addr:$src2)>;
4379 def : Pat<(v2f64 (movlp VR128:$src1, (load addr:$src2))),
4380 (MOVLPDrm VR128:$src1, addr:$src2)>;
4381 def : Pat<(v4i32 (movlp VR128:$src1, (load addr:$src2))),
4382 (MOVLPSrm VR128:$src1, addr:$src2)>;
4383 def : Pat<(v2i64 (movlp VR128:$src1, (load addr:$src2))),
4384 (MOVLPDrm VR128:$src1, addr:$src2)>;
4387 // (store (vector_shuffle (load addr), v2, <4, 5, 2, 3>), addr) using MOVLPS
4388 def : Pat<(store (v4f32 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
4389 (MOVLPSmr addr:$src1, VR128:$src2)>;
4390 def : Pat<(store (v2f64 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
4391 (MOVLPDmr addr:$src1, VR128:$src2)>;
4392 def : Pat<(store (v4i32 (movlp (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2)),
                 addr:$src1),
          (MOVLPSmr addr:$src1, VR128:$src2)>;
4395 def : Pat<(store (v2i64 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
4396 (MOVLPDmr addr:$src1, VR128:$src2)>;
4398 let AddedComplexity = 15 in {
4399 // Setting the lowest element in the vector.
4400 def : Pat<(v4i32 (movl VR128:$src1, VR128:$src2)),
4401 (MOVSSrr (v4i32 VR128:$src1),
4402 (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_ss))>;
4403 def : Pat<(v2i64 (movl VR128:$src1, VR128:$src2)),
4404 (MOVSDrr (v2i64 VR128:$src1),
4405 (EXTRACT_SUBREG (v2i64 VR128:$src2), sub_sd))>;
4407 // vector_shuffle v1, v2 <4, 5, 2, 3> using movsd
4408 def : Pat<(v4f32 (movlp VR128:$src1, VR128:$src2)),
4409 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG VR128:$src2, sub_sd))>,
4410 Requires<[HasSSE2]>;
4411 def : Pat<(v4i32 (movlp VR128:$src1, VR128:$src2)),
4412 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG VR128:$src2, sub_sd))>,
4413 Requires<[HasSSE2]>;
4416 // Set lowest element and zero upper elements.
4417 def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
4418 (MOVZPQILo2PQIrr VR128:$src)>, Requires<[HasSSE2]>;
4420 // Use movaps / movups for SSE integer load / store (one byte shorter).
4421 // The instructions selected below are then converted to MOVDQA/MOVDQU
4422 // during the SSE domain pass.
4423 let Predicates = [HasSSE1] in {
4424 def : Pat<(alignedloadv4i32 addr:$src),
4425 (MOVAPSrm addr:$src)>;
4426 def : Pat<(loadv4i32 addr:$src),
4427 (MOVUPSrm addr:$src)>;
4428 def : Pat<(alignedloadv2i64 addr:$src),
4429 (MOVAPSrm addr:$src)>;
4430 def : Pat<(loadv2i64 addr:$src),
4431 (MOVUPSrm addr:$src)>;
4433 def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
4434 (MOVAPSmr addr:$dst, VR128:$src)>;
4435 def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
4436 (MOVAPSmr addr:$dst, VR128:$src)>;
4437 def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
4438 (MOVAPSmr addr:$dst, VR128:$src)>;
4439 def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
4440 (MOVAPSmr addr:$dst, VR128:$src)>;
4441 def : Pat<(store (v2i64 VR128:$src), addr:$dst),
4442 (MOVUPSmr addr:$dst, VR128:$src)>;
4443 def : Pat<(store (v4i32 VR128:$src), addr:$dst),
4444 (MOVUPSmr addr:$dst, VR128:$src)>;
4445 def : Pat<(store (v8i16 VR128:$src), addr:$dst),
4446 (MOVUPSmr addr:$dst, VR128:$src)>;
4447 def : Pat<(store (v16i8 VR128:$src), addr:$dst),
4448 (MOVUPSmr addr:$dst, VR128:$src)>;
4451 // Use vmovaps/vmovups for AVX integer load/store.
4452 let Predicates = [HasAVX] in {
4453 // 128-bit load/store
4454 def : Pat<(alignedloadv4i32 addr:$src),
4455 (VMOVAPSrm addr:$src)>;
4456 def : Pat<(loadv4i32 addr:$src),
4457 (VMOVUPSrm addr:$src)>;
4458 def : Pat<(alignedloadv2i64 addr:$src),
4459 (VMOVAPSrm addr:$src)>;
4460 def : Pat<(loadv2i64 addr:$src),
4461 (VMOVUPSrm addr:$src)>;
4463 def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
4464 (VMOVAPSmr addr:$dst, VR128:$src)>;
4465 def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
4466 (VMOVAPSmr addr:$dst, VR128:$src)>;
4467 def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
4468 (VMOVAPSmr addr:$dst, VR128:$src)>;
4469 def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
4470 (VMOVAPSmr addr:$dst, VR128:$src)>;
4471 def : Pat<(store (v2i64 VR128:$src), addr:$dst),
4472 (VMOVUPSmr addr:$dst, VR128:$src)>;
4473 def : Pat<(store (v4i32 VR128:$src), addr:$dst),
4474 (VMOVUPSmr addr:$dst, VR128:$src)>;
4475 def : Pat<(store (v8i16 VR128:$src), addr:$dst),
4476 (VMOVUPSmr addr:$dst, VR128:$src)>;
4477 def : Pat<(store (v16i8 VR128:$src), addr:$dst),
4478 (VMOVUPSmr addr:$dst, VR128:$src)>;
4480 // 256-bit load/store
4481 def : Pat<(alignedloadv4i64 addr:$src),
4482 (VMOVAPSYrm addr:$src)>;
4483 def : Pat<(loadv4i64 addr:$src),
4484 (VMOVUPSYrm addr:$src)>;
4485 def : Pat<(alignedloadv8i32 addr:$src),
4486 (VMOVAPSYrm addr:$src)>;
4487 def : Pat<(loadv8i32 addr:$src),
4488 (VMOVUPSYrm addr:$src)>;
4489 def : Pat<(alignedstore (v4i64 VR256:$src), addr:$dst),
4490 (VMOVAPSYmr addr:$dst, VR256:$src)>;
4491 def : Pat<(alignedstore (v8i32 VR256:$src), addr:$dst),
4492 (VMOVAPSYmr addr:$dst, VR256:$src)>;
4493 def : Pat<(alignedstore (v16i16 VR256:$src), addr:$dst),
4494 (VMOVAPSYmr addr:$dst, VR256:$src)>;
4495 def : Pat<(alignedstore (v32i8 VR256:$src), addr:$dst),
4496 (VMOVAPSYmr addr:$dst, VR256:$src)>;
4497 def : Pat<(store (v4i64 VR256:$src), addr:$dst),
4498 (VMOVUPSYmr addr:$dst, VR256:$src)>;
4499 def : Pat<(store (v8i32 VR256:$src), addr:$dst),
4500 (VMOVUPSYmr addr:$dst, VR256:$src)>;
4501 def : Pat<(store (v16i16 VR256:$src), addr:$dst),
4502 (VMOVUPSYmr addr:$dst, VR256:$src)>;
4503 def : Pat<(store (v32i8 VR256:$src), addr:$dst),
4504 (VMOVUPSYmr addr:$dst, VR256:$src)>;
4507 //===----------------------------------------------------------------------===//
4508 // SSE4.1 - Packed Move with Sign/Zero Extend
4509 //===----------------------------------------------------------------------===//
4511 multiclass SS41I_binop_rm_int8<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
4512 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
4513 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4514 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
4516 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
4517 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
       [(set VR128:$dst,
         (IntId (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))]>,
       OpSize;
}
4523 let Predicates = [HasAVX] in {
defm VPMOVSXBW : SS41I_binop_rm_int8<0x20, "vpmovsxbw", int_x86_sse41_pmovsxbw>, VEX;
defm VPMOVSXWD : SS41I_binop_rm_int8<0x23, "vpmovsxwd", int_x86_sse41_pmovsxwd>, VEX;
defm VPMOVSXDQ : SS41I_binop_rm_int8<0x25, "vpmovsxdq", int_x86_sse41_pmovsxdq>, VEX;
defm VPMOVZXBW : SS41I_binop_rm_int8<0x30, "vpmovzxbw", int_x86_sse41_pmovzxbw>, VEX;
defm VPMOVZXWD : SS41I_binop_rm_int8<0x33, "vpmovzxwd", int_x86_sse41_pmovzxwd>, VEX;
defm VPMOVZXDQ : SS41I_binop_rm_int8<0x35, "vpmovzxdq", int_x86_sse41_pmovzxdq>, VEX;
}
4538 defm PMOVSXBW : SS41I_binop_rm_int8<0x20, "pmovsxbw", int_x86_sse41_pmovsxbw>;
4539 defm PMOVSXWD : SS41I_binop_rm_int8<0x23, "pmovsxwd", int_x86_sse41_pmovsxwd>;
4540 defm PMOVSXDQ : SS41I_binop_rm_int8<0x25, "pmovsxdq", int_x86_sse41_pmovsxdq>;
4541 defm PMOVZXBW : SS41I_binop_rm_int8<0x30, "pmovzxbw", int_x86_sse41_pmovzxbw>;
4542 defm PMOVZXWD : SS41I_binop_rm_int8<0x33, "pmovzxwd", int_x86_sse41_pmovzxwd>;
4543 defm PMOVZXDQ : SS41I_binop_rm_int8<0x35, "pmovzxdq", int_x86_sse41_pmovzxdq>;
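// pmovsx*/pmovzx* sign- or zero-extend the packed elements of a narrower
// source, e.g. pmovzxbw widens the low eight bytes to eight 16-bit lanes,
// which is why these memory forms only load 64 bits (i64mem).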
4545 // Common patterns involving scalar load.
4546 def : Pat<(int_x86_sse41_pmovsxbw (vzmovl_v2i64 addr:$src)),
4547 (PMOVSXBWrm addr:$src)>, Requires<[HasSSE41]>;
4548 def : Pat<(int_x86_sse41_pmovsxbw (vzload_v2i64 addr:$src)),
4549 (PMOVSXBWrm addr:$src)>, Requires<[HasSSE41]>;
4551 def : Pat<(int_x86_sse41_pmovsxwd (vzmovl_v2i64 addr:$src)),
4552 (PMOVSXWDrm addr:$src)>, Requires<[HasSSE41]>;
4553 def : Pat<(int_x86_sse41_pmovsxwd (vzload_v2i64 addr:$src)),
4554 (PMOVSXWDrm addr:$src)>, Requires<[HasSSE41]>;
4556 def : Pat<(int_x86_sse41_pmovsxdq (vzmovl_v2i64 addr:$src)),
4557 (PMOVSXDQrm addr:$src)>, Requires<[HasSSE41]>;
4558 def : Pat<(int_x86_sse41_pmovsxdq (vzload_v2i64 addr:$src)),
4559 (PMOVSXDQrm addr:$src)>, Requires<[HasSSE41]>;
4561 def : Pat<(int_x86_sse41_pmovzxbw (vzmovl_v2i64 addr:$src)),
4562 (PMOVZXBWrm addr:$src)>, Requires<[HasSSE41]>;
4563 def : Pat<(int_x86_sse41_pmovzxbw (vzload_v2i64 addr:$src)),
4564 (PMOVZXBWrm addr:$src)>, Requires<[HasSSE41]>;
4566 def : Pat<(int_x86_sse41_pmovzxwd (vzmovl_v2i64 addr:$src)),
4567 (PMOVZXWDrm addr:$src)>, Requires<[HasSSE41]>;
4568 def : Pat<(int_x86_sse41_pmovzxwd (vzload_v2i64 addr:$src)),
4569 (PMOVZXWDrm addr:$src)>, Requires<[HasSSE41]>;
4571 def : Pat<(int_x86_sse41_pmovzxdq (vzmovl_v2i64 addr:$src)),
4572 (PMOVZXDQrm addr:$src)>, Requires<[HasSSE41]>;
4573 def : Pat<(int_x86_sse41_pmovzxdq (vzload_v2i64 addr:$src)),
4574 (PMOVZXDQrm addr:$src)>, Requires<[HasSSE41]>;
4577 multiclass SS41I_binop_rm_int4<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
4578 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
4579 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4580 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
4582 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
4583 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4585 (IntId (bitconvert (v4i32 (scalar_to_vector (loadi32 addr:$src))))))]>,
4589 let Predicates = [HasAVX] in {
4590 defm VPMOVSXBD : SS41I_binop_rm_int4<0x21, "vpmovsxbd", int_x86_sse41_pmovsxbd>,
4592 defm VPMOVSXWQ : SS41I_binop_rm_int4<0x24, "vpmovsxwq", int_x86_sse41_pmovsxwq>,
4594 defm VPMOVZXBD : SS41I_binop_rm_int4<0x31, "vpmovzxbd", int_x86_sse41_pmovzxbd>,
4596 defm VPMOVZXWQ : SS41I_binop_rm_int4<0x34, "vpmovzxwq", int_x86_sse41_pmovzxwq>,
4600 defm PMOVSXBD : SS41I_binop_rm_int4<0x21, "pmovsxbd", int_x86_sse41_pmovsxbd>;
4601 defm PMOVSXWQ : SS41I_binop_rm_int4<0x24, "pmovsxwq", int_x86_sse41_pmovsxwq>;
4602 defm PMOVZXBD : SS41I_binop_rm_int4<0x31, "pmovzxbd", int_x86_sse41_pmovzxbd>;
4603 defm PMOVZXWQ : SS41I_binop_rm_int4<0x34, "pmovzxwq", int_x86_sse41_pmovzxwq>;
4605 // Common patterns involving scalar load
4606 def : Pat<(int_x86_sse41_pmovsxbd (vzmovl_v4i32 addr:$src)),
4607 (PMOVSXBDrm addr:$src)>, Requires<[HasSSE41]>;
4608 def : Pat<(int_x86_sse41_pmovsxwq (vzmovl_v4i32 addr:$src)),
4609 (PMOVSXWQrm addr:$src)>, Requires<[HasSSE41]>;
4611 def : Pat<(int_x86_sse41_pmovzxbd (vzmovl_v4i32 addr:$src)),
4612 (PMOVZXBDrm addr:$src)>, Requires<[HasSSE41]>;
4613 def : Pat<(int_x86_sse41_pmovzxwq (vzmovl_v4i32 addr:$src)),
4614 (PMOVZXWQrm addr:$src)>, Requires<[HasSSE41]>;
4617 multiclass SS41I_binop_rm_int2<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
4618 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
4619 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4620 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
// Expecting an i16 load any-extended to an i32 value.
4623 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i16mem:$src),
4624 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4625 [(set VR128:$dst, (IntId (bitconvert
4626 (v4i32 (scalar_to_vector (loadi16_anyext addr:$src))))))]>,
4630 let Predicates = [HasAVX] in {
4631 defm VPMOVSXBQ : SS41I_binop_rm_int2<0x22, "vpmovsxbq", int_x86_sse41_pmovsxbq>,
4633 defm VPMOVZXBQ : SS41I_binop_rm_int2<0x32, "vpmovzxbq", int_x86_sse41_pmovzxbq>,
4636 defm PMOVSXBQ : SS41I_binop_rm_int2<0x22, "pmovsxbq", int_x86_sse41_pmovsxbq>;
4637 defm PMOVZXBQ : SS41I_binop_rm_int2<0x32, "pmovzxbq", int_x86_sse41_pmovzxbq>;
4639 // Common patterns involving scalar load
4640 def : Pat<(int_x86_sse41_pmovsxbq
4641 (bitconvert (v4i32 (X86vzmovl
4642 (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
4643 (PMOVSXBQrm addr:$src)>, Requires<[HasSSE41]>;
4645 def : Pat<(int_x86_sse41_pmovzxbq
4646 (bitconvert (v4i32 (X86vzmovl
4647 (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
4648 (PMOVZXBQrm addr:$src)>, Requires<[HasSSE41]>;
4650 //===----------------------------------------------------------------------===//
4651 // SSE4.1 - Extract Instructions
4652 //===----------------------------------------------------------------------===//
/// SS41I_extract8 - SSE 4.1 extract 8 bits to 32 bit reg or 8 bit mem
4655 multiclass SS41I_extract8<bits<8> opc, string OpcodeStr> {
4656 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
4657 (ins VR128:$src1, i32i8imm:$src2),
4658 !strconcat(OpcodeStr,
4659 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4660 [(set GR32:$dst, (X86pextrb (v16i8 VR128:$src1), imm:$src2))]>,
4662 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4663 (ins i8mem:$dst, VR128:$src1, i32i8imm:$src2),
4664 !strconcat(OpcodeStr,
4665 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4668 // There's an AssertZext in the way of writing the store pattern
4669 // (store (i8 (trunc (X86pextrb (v16i8 VR128:$src1), imm:$src2))), addr:$dst)
4672 let Predicates = [HasAVX] in {
4673 defm VPEXTRB : SS41I_extract8<0x14, "vpextrb">, VEX;
4674 def VPEXTRBrr64 : SS4AIi8<0x14, MRMDestReg, (outs GR64:$dst),
4675 (ins VR128:$src1, i32i8imm:$src2),
4676 "vpextrb\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>, OpSize, VEX;
4679 defm PEXTRB : SS41I_extract8<0x14, "pextrb">;
4682 /// SS41I_extract16 - SSE 4.1 extract 16 bits to memory destination
4683 multiclass SS41I_extract16<bits<8> opc, string OpcodeStr> {
4684 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4685 (ins i16mem:$dst, VR128:$src1, i32i8imm:$src2),
4686 !strconcat(OpcodeStr,
4687 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4690 // There's an AssertZext in the way of writing the store pattern
4691 // (store (i16 (trunc (X86pextrw (v16i8 VR128:$src1), imm:$src2))), addr:$dst)
4694 let Predicates = [HasAVX] in
4695 defm VPEXTRW : SS41I_extract16<0x15, "vpextrw">, VEX;
4697 defm PEXTRW : SS41I_extract16<0x15, "pextrw">;
4700 /// SS41I_extract32 - SSE 4.1 extract 32 bits to int reg or memory destination
4701 multiclass SS41I_extract32<bits<8> opc, string OpcodeStr> {
4702 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
4703 (ins VR128:$src1, i32i8imm:$src2),
4704 !strconcat(OpcodeStr,
4705 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4707 (extractelt (v4i32 VR128:$src1), imm:$src2))]>, OpSize;
4708 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4709 (ins i32mem:$dst, VR128:$src1, i32i8imm:$src2),
4710 !strconcat(OpcodeStr,
4711 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4712 [(store (extractelt (v4i32 VR128:$src1), imm:$src2),
4713 addr:$dst)]>, OpSize;
4716 let Predicates = [HasAVX] in
4717 defm VPEXTRD : SS41I_extract32<0x16, "vpextrd">, VEX;
4719 defm PEXTRD : SS41I_extract32<0x16, "pextrd">;
/// SS41I_extract64 - SSE 4.1 extract 64 bits to int reg or memory destination
4722 multiclass SS41I_extract64<bits<8> opc, string OpcodeStr> {
4723 def rr : SS4AIi8<opc, MRMDestReg, (outs GR64:$dst),
4724 (ins VR128:$src1, i32i8imm:$src2),
4725 !strconcat(OpcodeStr,
4726 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4728 (extractelt (v2i64 VR128:$src1), imm:$src2))]>, OpSize, REX_W;
4729 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4730 (ins i64mem:$dst, VR128:$src1, i32i8imm:$src2),
4731 !strconcat(OpcodeStr,
4732 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4733 [(store (extractelt (v2i64 VR128:$src1), imm:$src2),
4734 addr:$dst)]>, OpSize, REX_W;
4737 let Predicates = [HasAVX] in
4738 defm VPEXTRQ : SS41I_extract64<0x16, "vpextrq">, VEX, VEX_W;
4740 defm PEXTRQ : SS41I_extract64<0x16, "pextrq">;
/// SS41I_extractf32 - SSE 4.1 extract 32 bits fp value to int reg or memory
/// destination
4744 multiclass SS41I_extractf32<bits<8> opc, string OpcodeStr> {
4745 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
4746 (ins VR128:$src1, i32i8imm:$src2),
4747 !strconcat(OpcodeStr,
4748 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4750 (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2))]>,
4752 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4753 (ins f32mem:$dst, VR128:$src1, i32i8imm:$src2),
4754 !strconcat(OpcodeStr,
4755 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4756 [(store (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2),
4757 addr:$dst)]>, OpSize;
4760 let Predicates = [HasAVX] in {
4761 defm VEXTRACTPS : SS41I_extractf32<0x17, "vextractps">, VEX;
4762 def VEXTRACTPSrr64 : SS4AIi8<0x17, MRMDestReg, (outs GR64:$dst),
4763 (ins VR128:$src1, i32i8imm:$src2),
4764 "vextractps \t{$src2, $src1, $dst|$dst, $src1, $src2}",
4767 defm EXTRACTPS : SS41I_extractf32<0x17, "extractps">;
4769 // Also match an EXTRACTPS store when the store is done as f32 instead of i32.
4770 def : Pat<(store (f32 (bitconvert (extractelt (bc_v4i32 (v4f32 VR128:$src1)),
4773 (EXTRACTPSmr addr:$dst, VR128:$src1, imm:$src2)>,
4774 Requires<[HasSSE41]>;
4776 //===----------------------------------------------------------------------===//
4777 // SSE4.1 - Insert Instructions
4778 //===----------------------------------------------------------------------===//
4780 multiclass SS41I_insert8<bits<8> opc, string asm, bit Is2Addr = 1> {
4781 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4782 (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
4784 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4786 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4788 (X86pinsrb VR128:$src1, GR32:$src2, imm:$src3))]>, OpSize;
4789 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4790 (ins VR128:$src1, i8mem:$src2, i32i8imm:$src3),
4792 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4794 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4796 (X86pinsrb VR128:$src1, (extloadi8 addr:$src2),
4797 imm:$src3))]>, OpSize;
4800 let Predicates = [HasAVX] in
4801 defm VPINSRB : SS41I_insert8<0x20, "vpinsrb", 0>, VEX_4V;
4802 let Constraints = "$src1 = $dst" in
4803 defm PINSRB : SS41I_insert8<0x20, "pinsrb">;
4805 multiclass SS41I_insert32<bits<8> opc, string asm, bit Is2Addr = 1> {
4806 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4807 (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
4809 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4811 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4813 (v4i32 (insertelt VR128:$src1, GR32:$src2, imm:$src3)))]>,
4815 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4816 (ins VR128:$src1, i32mem:$src2, i32i8imm:$src3),
4818 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4820 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4822 (v4i32 (insertelt VR128:$src1, (loadi32 addr:$src2),
4823 imm:$src3)))]>, OpSize;
4826 let Predicates = [HasAVX] in
4827 defm VPINSRD : SS41I_insert32<0x22, "vpinsrd", 0>, VEX_4V;
4828 let Constraints = "$src1 = $dst" in
4829 defm PINSRD : SS41I_insert32<0x22, "pinsrd">;
4831 multiclass SS41I_insert64<bits<8> opc, string asm, bit Is2Addr = 1> {
4832 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4833 (ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
4835 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4837 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4839 (v2i64 (insertelt VR128:$src1, GR64:$src2, imm:$src3)))]>,
4841 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4842 (ins VR128:$src1, i64mem:$src2, i32i8imm:$src3),
4844 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4846 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4848 (v2i64 (insertelt VR128:$src1, (loadi64 addr:$src2),
4849 imm:$src3)))]>, OpSize;
4852 let Predicates = [HasAVX] in
4853 defm VPINSRQ : SS41I_insert64<0x22, "vpinsrq", 0>, VEX_4V, VEX_W;
4854 let Constraints = "$src1 = $dst" in
4855 defm PINSRQ : SS41I_insert64<0x22, "pinsrq">, REX_W;
// insertps has a few different modes; the first two below are optimized inserts
// that won't zero arbitrary elements in the destination vector. The next one
// matches the intrinsic and could zero arbitrary elements in the target vector.
4861 multiclass SS41I_insertf32<bits<8> opc, string asm, bit Is2Addr = 1> {
4862 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4863 (ins VR128:$src1, VR128:$src2, u32u8imm:$src3),
4865 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4867 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4869 (X86insrtps VR128:$src1, VR128:$src2, imm:$src3))]>,
4871 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4872 (ins VR128:$src1, f32mem:$src2, u32u8imm:$src3),
4874 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4876 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4878 (X86insrtps VR128:$src1,
4879 (v4f32 (scalar_to_vector (loadf32 addr:$src2))),
4880 imm:$src3))]>, OpSize;
4883 let Constraints = "$src1 = $dst" in
4884 defm INSERTPS : SS41I_insertf32<0x21, "insertps">;
4885 let Predicates = [HasAVX] in
4886 defm VINSERTPS : SS41I_insertf32<0x21, "vinsertps", 0>, VEX_4V;
4888 def : Pat<(int_x86_sse41_insertps VR128:$src1, VR128:$src2, imm:$src3),
4889 (VINSERTPSrr VR128:$src1, VR128:$src2, imm:$src3)>,
4891 def : Pat<(int_x86_sse41_insertps VR128:$src1, VR128:$src2, imm:$src3),
4892 (INSERTPSrr VR128:$src1, VR128:$src2, imm:$src3)>,
4893 Requires<[HasSSE41]>;
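// The insertps immediate encodes the source element in bits 7:6, the
// destination slot in bits 5:4, and a zero mask in bits 3:0 that clears the
// selected destination elements.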
4895 //===----------------------------------------------------------------------===//
4896 // SSE4.1 - Round Instructions
4897 //===----------------------------------------------------------------------===//
4899 multiclass sse41_fp_unop_rm<bits<8> opcps, bits<8> opcpd, string OpcodeStr,
4900 X86MemOperand x86memop, RegisterClass RC,
4901 PatFrag mem_frag32, PatFrag mem_frag64,
4902 Intrinsic V4F32Int, Intrinsic V2F64Int> {
4903 // Intrinsic operation, reg.
4904 // Vector intrinsic operation, reg
4905 def PSr : SS4AIi8<opcps, MRMSrcReg,
4906 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
4907 !strconcat(OpcodeStr,
4908 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4909 [(set RC:$dst, (V4F32Int RC:$src1, imm:$src2))]>,
4912 // Vector intrinsic operation, mem
4913 def PSm : Ii8<opcps, MRMSrcMem,
4914 (outs RC:$dst), (ins f256mem:$src1, i32i8imm:$src2),
4915 !strconcat(OpcodeStr,
4916 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4918 (V4F32Int (mem_frag32 addr:$src1),imm:$src2))]>,
4920 Requires<[HasSSE41]>;
4922 // Vector intrinsic operation, reg
4923 def PDr : SS4AIi8<opcpd, MRMSrcReg,
4924 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
4925 !strconcat(OpcodeStr,
4926 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4927 [(set RC:$dst, (V2F64Int RC:$src1, imm:$src2))]>,
4930 // Vector intrinsic operation, mem
4931 def PDm : SS4AIi8<opcpd, MRMSrcMem,
4932 (outs RC:$dst), (ins f256mem:$src1, i32i8imm:$src2),
4933 !strconcat(OpcodeStr,
4934 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4936 (V2F64Int (mem_frag64 addr:$src1),imm:$src2))]>,
4940 multiclass sse41_fp_unop_rm_avx_p<bits<8> opcps, bits<8> opcpd,
4941 RegisterClass RC, X86MemOperand x86memop, string OpcodeStr> {
4942 // Intrinsic operation, reg.
4943 // Vector intrinsic operation, reg
4944 def PSr_AVX : SS4AIi8<opcps, MRMSrcReg,
4945 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
4946 !strconcat(OpcodeStr,
4947 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4950 // Vector intrinsic operation, mem
4951 def PSm_AVX : Ii8<opcps, MRMSrcMem,
4952 (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
4953 !strconcat(OpcodeStr,
4954 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4955 []>, TA, OpSize, Requires<[HasSSE41]>;
4957 // Vector intrinsic operation, reg
4958 def PDr_AVX : SS4AIi8<opcpd, MRMSrcReg,
4959 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
4960 !strconcat(OpcodeStr,
4961 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4964 // Vector intrinsic operation, mem
4965 def PDm_AVX : SS4AIi8<opcpd, MRMSrcMem,
4966 (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
4967 !strconcat(OpcodeStr,
4968 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4972 multiclass sse41_fp_binop_rm<bits<8> opcss, bits<8> opcsd,
4975 Intrinsic F64Int, bit Is2Addr = 1> {
4976 // Intrinsic operation, reg.
4977 def SSr : SS4AIi8<opcss, MRMSrcReg,
4978 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4980 !strconcat(OpcodeStr,
4981 "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4982 !strconcat(OpcodeStr,
4983 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4984 [(set VR128:$dst, (F32Int VR128:$src1, VR128:$src2, imm:$src3))]>,
4987 // Intrinsic operation, mem.
4988 def SSm : SS4AIi8<opcss, MRMSrcMem,
4989 (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2, i32i8imm:$src3),
4991 !strconcat(OpcodeStr,
4992 "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4993 !strconcat(OpcodeStr,
4994 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4996 (F32Int VR128:$src1, sse_load_f32:$src2, imm:$src3))]>,
4999 // Intrinsic operation, reg.
5000 def SDr : SS4AIi8<opcsd, MRMSrcReg,
5001 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
5003 !strconcat(OpcodeStr,
5004 "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
5005 !strconcat(OpcodeStr,
5006 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
5007 [(set VR128:$dst, (F64Int VR128:$src1, VR128:$src2, imm:$src3))]>,
5010 // Intrinsic operation, mem.
5011 def SDm : SS4AIi8<opcsd, MRMSrcMem,
5012 (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2, i32i8imm:$src3),
5014 !strconcat(OpcodeStr,
5015 "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
5016 !strconcat(OpcodeStr,
5017 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
5019 (F64Int VR128:$src1, sse_load_f64:$src2, imm:$src3))]>,
5023 multiclass sse41_fp_binop_rm_avx_s<bits<8> opcss, bits<8> opcsd,
5025 // Intrinsic operation, reg.
5026 def SSr_AVX : SS4AIi8<opcss, MRMSrcReg,
5027 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
5028 !strconcat(OpcodeStr,
5029 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
5032 // Intrinsic operation, mem.
5033 def SSm_AVX : SS4AIi8<opcss, MRMSrcMem,
5034 (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2, i32i8imm:$src3),
5035 !strconcat(OpcodeStr,
5036 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
5039 // Intrinsic operation, reg.
5040 def SDr_AVX : SS4AIi8<opcsd, MRMSrcReg,
5041 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
5042 !strconcat(OpcodeStr,
5043 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
5046 // Intrinsic operation, mem.
5047 def SDm_AVX : SS4AIi8<opcsd, MRMSrcMem,
5048 (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2, i32i8imm:$src3),
5049 !strconcat(OpcodeStr,
5050 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
5054 // FP round - roundss, roundps, roundsd, roundpd
5055 let Predicates = [HasAVX] in {
5057 defm VROUND : sse41_fp_unop_rm<0x08, 0x09, "vround", f128mem, VR128,
5058 memopv4f32, memopv2f64,
5059 int_x86_sse41_round_ps,
5060 int_x86_sse41_round_pd>, VEX;
5061 defm VROUNDY : sse41_fp_unop_rm<0x08, 0x09, "vround", f256mem, VR256,
5062 memopv8f32, memopv4f64,
5063 int_x86_avx_round_ps_256,
5064 int_x86_avx_round_pd_256>, VEX;
5065 defm VROUND : sse41_fp_binop_rm<0x0A, 0x0B, "vround",
5066 int_x86_sse41_round_ss,
5067 int_x86_sse41_round_sd, 0>, VEX_4V;
5069 // Instructions for the assembler
5070 defm VROUND : sse41_fp_unop_rm_avx_p<0x08, 0x09, VR128, f128mem, "vround">,
5072 defm VROUNDY : sse41_fp_unop_rm_avx_p<0x08, 0x09, VR256, f256mem, "vround">,
5074 defm VROUND : sse41_fp_binop_rm_avx_s<0x0A, 0x0B, "vround">, VEX_4V;
5077 defm ROUND : sse41_fp_unop_rm<0x08, 0x09, "round", f128mem, VR128,
5078 memopv4f32, memopv2f64,
5079 int_x86_sse41_round_ps, int_x86_sse41_round_pd>;
5080 let Constraints = "$src1 = $dst" in
5081 defm ROUND : sse41_fp_binop_rm<0x0A, 0x0B, "round",
5082 int_x86_sse41_round_ss, int_x86_sse41_round_sd>;
5084 //===----------------------------------------------------------------------===//
5085 // SSE4.1 - Packed Bit Test
5086 //===----------------------------------------------------------------------===//
// ptest: we lower to this instruction in X86ISelLowering, primarily from the
// Intel intrinsic that corresponds to it.
5090 let Defs = [EFLAGS], Predicates = [HasAVX] in {
5091 def VPTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
5092 "vptest\t{$src2, $src1|$src1, $src2}",
5093 [(set EFLAGS, (X86ptest VR128:$src1, (v4f32 VR128:$src2)))]>,
5095 def VPTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
5096 "vptest\t{$src2, $src1|$src1, $src2}",
5097 [(set EFLAGS,(X86ptest VR128:$src1, (memopv4f32 addr:$src2)))]>,
5100 def VPTESTYrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR256:$src1, VR256:$src2),
5101 "vptest\t{$src2, $src1|$src1, $src2}",
5102 [(set EFLAGS, (X86ptest VR256:$src1, (v4i64 VR256:$src2)))]>,
5104 def VPTESTYrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR256:$src1, i256mem:$src2),
5105 "vptest\t{$src2, $src1|$src1, $src2}",
5106 [(set EFLAGS,(X86ptest VR256:$src1, (memopv4i64 addr:$src2)))]>,
5110 let Defs = [EFLAGS] in {
5111 def PTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
5112 "ptest \t{$src2, $src1|$src1, $src2}",
5113 [(set EFLAGS, (X86ptest VR128:$src1, (v4f32 VR128:$src2)))]>,
5115 def PTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
5116 "ptest \t{$src2, $src1|$src1, $src2}",
5117 [(set EFLAGS, (X86ptest VR128:$src1, (memopv4f32 addr:$src2)))]>,
5121 // The bit test instructions below are AVX only
5122 multiclass avx_bittest<bits<8> opc, string OpcodeStr, RegisterClass RC,
5123 X86MemOperand x86memop, PatFrag mem_frag, ValueType vt> {
5124 def rr : SS48I<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
5125 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
5126 [(set EFLAGS, (X86testp RC:$src1, (vt RC:$src2)))]>, OpSize, VEX;
5127 def rm : SS48I<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
5128 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
5129 [(set EFLAGS, (X86testp RC:$src1, (mem_frag addr:$src2)))]>,
5133 let Defs = [EFLAGS], Predicates = [HasAVX] in {
5134 defm VTESTPS : avx_bittest<0x0E, "vtestps", VR128, f128mem, memopv4f32, v4f32>;
5135 defm VTESTPSY : avx_bittest<0x0E, "vtestps", VR256, f256mem, memopv8f32, v8f32>;
5136 defm VTESTPD : avx_bittest<0x0F, "vtestpd", VR128, f128mem, memopv2f64, v2f64>;
5137 defm VTESTPDY : avx_bittest<0x0F, "vtestpd", VR256, f256mem, memopv4f64, v4f64>;
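// Illustrative sketch only: the vtestps/vtestpd forms above are exposed in C
// as _mm_testz_ps / _mm256_testz_ps and friends in <immintrin.h>; they test
// only the sign bits of the packed elements (AVX required).
//
//   #include <immintrin.h>
//   int any_sign_bit_set(__m256 v) {
//     // vtestps sets ZF when no selected sign bit is set.
//     return !_mm256_testz_ps(v, v);
//   }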
5140 //===----------------------------------------------------------------------===//
5141 // SSE4.1 - Misc Instructions
5142 //===----------------------------------------------------------------------===//
5144 def POPCNT16rr : I<0xB8, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
5145 "popcnt{w}\t{$src, $dst|$dst, $src}",
5146 [(set GR16:$dst, (ctpop GR16:$src))]>, OpSize, XS;
5147 def POPCNT16rm : I<0xB8, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
5148 "popcnt{w}\t{$src, $dst|$dst, $src}",
5149 [(set GR16:$dst, (ctpop (loadi16 addr:$src)))]>, OpSize, XS;
5151 def POPCNT32rr : I<0xB8, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
5152 "popcnt{l}\t{$src, $dst|$dst, $src}",
5153 [(set GR32:$dst, (ctpop GR32:$src))]>, XS;
5154 def POPCNT32rm : I<0xB8, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
5155 "popcnt{l}\t{$src, $dst|$dst, $src}",
5156 [(set GR32:$dst, (ctpop (loadi32 addr:$src)))]>, XS;
5158 def POPCNT64rr : RI<0xB8, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
5159 "popcnt{q}\t{$src, $dst|$dst, $src}",
5160 [(set GR64:$dst, (ctpop GR64:$src))]>, XS;
5161 def POPCNT64rm : RI<0xB8, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
5162 "popcnt{q}\t{$src, $dst|$dst, $src}",
5163 [(set GR64:$dst, (ctpop (loadi64 addr:$src)))]>, XS;
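// Illustrative sketch only: these POPCNT definitions match the generic ctpop
// node, which is what _mm_popcnt_u32/_mm_popcnt_u64 from <nmmintrin.h> (and
// __builtin_popcount with the popcnt feature enabled) lower to.
//
//   #include <nmmintrin.h>
//   unsigned bits_set(unsigned x) {
//     return _mm_popcnt_u32(x);   // selects POPCNT32rr / POPCNT32rm
//   }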
5167 // SS41I_unop_rm_int_v16 - SSE 4.1 unary operator whose type is v8i16.
5168 multiclass SS41I_unop_rm_int_v16<bits<8> opc, string OpcodeStr,
5169 Intrinsic IntId128> {
5170 def rr128 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
5172 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5173 [(set VR128:$dst, (IntId128 VR128:$src))]>, OpSize;
5174 def rm128 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
5176 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5179 (bitconvert (memopv8i16 addr:$src))))]>, OpSize;
5182 let Predicates = [HasAVX] in
5183 defm VPHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "vphminposuw",
5184 int_x86_sse41_phminposuw>, VEX;
5185 defm PHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "phminposuw",
5186 int_x86_sse41_phminposuw>;
5188 /// SS41I_binop_rm_int - Simple SSE 4.1 binary operator
5189 multiclass SS41I_binop_rm_int<bits<8> opc, string OpcodeStr,
5190 Intrinsic IntId128, bit Is2Addr = 1> {
5191 let isCommutable = 1 in
5192 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
5193 (ins VR128:$src1, VR128:$src2),
5195 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5196 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5197 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>, OpSize;
5198 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
5199 (ins VR128:$src1, i128mem:$src2),
5201 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5202 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5204 (IntId128 VR128:$src1,
5205 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
5208 let Predicates = [HasAVX] in {
5209 let isCommutable = 0 in
5210 defm VPACKUSDW : SS41I_binop_rm_int<0x2B, "vpackusdw", int_x86_sse41_packusdw,
5212 defm VPCMPEQQ : SS41I_binop_rm_int<0x29, "vpcmpeqq", int_x86_sse41_pcmpeqq,
5214 defm VPMINSB : SS41I_binop_rm_int<0x38, "vpminsb", int_x86_sse41_pminsb,
5216 defm VPMINSD : SS41I_binop_rm_int<0x39, "vpminsd", int_x86_sse41_pminsd,
5218 defm VPMINUD : SS41I_binop_rm_int<0x3B, "vpminud", int_x86_sse41_pminud,
5220 defm VPMINUW : SS41I_binop_rm_int<0x3A, "vpminuw", int_x86_sse41_pminuw,
5222 defm VPMAXSB : SS41I_binop_rm_int<0x3C, "vpmaxsb", int_x86_sse41_pmaxsb,
5224 defm VPMAXSD : SS41I_binop_rm_int<0x3D, "vpmaxsd", int_x86_sse41_pmaxsd,
5226 defm VPMAXUD : SS41I_binop_rm_int<0x3F, "vpmaxud", int_x86_sse41_pmaxud,
5228 defm VPMAXUW : SS41I_binop_rm_int<0x3E, "vpmaxuw", int_x86_sse41_pmaxuw,
5230 defm VPMULDQ : SS41I_binop_rm_int<0x28, "vpmuldq", int_x86_sse41_pmuldq,
5233 def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, VR128:$src2)),
5234 (VPCMPEQQrr VR128:$src1, VR128:$src2)>;
5235 def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, (memop addr:$src2))),
5236 (VPCMPEQQrm VR128:$src1, addr:$src2)>;
5239 let Constraints = "$src1 = $dst" in {
5240 let isCommutable = 0 in
5241 defm PACKUSDW : SS41I_binop_rm_int<0x2B, "packusdw", int_x86_sse41_packusdw>;
5242 defm PCMPEQQ : SS41I_binop_rm_int<0x29, "pcmpeqq", int_x86_sse41_pcmpeqq>;
5243 defm PMINSB : SS41I_binop_rm_int<0x38, "pminsb", int_x86_sse41_pminsb>;
5244 defm PMINSD : SS41I_binop_rm_int<0x39, "pminsd", int_x86_sse41_pminsd>;
5245 defm PMINUD : SS41I_binop_rm_int<0x3B, "pminud", int_x86_sse41_pminud>;
5246 defm PMINUW : SS41I_binop_rm_int<0x3A, "pminuw", int_x86_sse41_pminuw>;
5247 defm PMAXSB : SS41I_binop_rm_int<0x3C, "pmaxsb", int_x86_sse41_pmaxsb>;
5248 defm PMAXSD : SS41I_binop_rm_int<0x3D, "pmaxsd", int_x86_sse41_pmaxsd>;
5249 defm PMAXUD : SS41I_binop_rm_int<0x3F, "pmaxud", int_x86_sse41_pmaxud>;
5250 defm PMAXUW : SS41I_binop_rm_int<0x3E, "pmaxuw", int_x86_sse41_pmaxuw>;
5251 defm PMULDQ : SS41I_binop_rm_int<0x28, "pmuldq", int_x86_sse41_pmuldq>;
5254 def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, VR128:$src2)),
5255 (PCMPEQQrr VR128:$src1, VR128:$src2)>;
5256 def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, (memop addr:$src2))),
5257 (PCMPEQQrm VR128:$src1, addr:$src2)>;
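// Illustrative sketch only: the packed min/max and pcmpeqq definitions above
// correspond to SSE4.1 intrinsics from <smmintrin.h>, e.g.:
//
//   #include <smmintrin.h>
//   __m128i clamp_signed_bytes(__m128i v, __m128i lo, __m128i hi) {
//     // pmaxsb followed by pminsb: clamp each signed byte into [lo, hi].
//     return _mm_min_epi8(_mm_max_epi8(v, lo), hi);
//   }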
5259 /// SS48I_binop_rm - Simple SSE41 binary operator.
5260 multiclass SS48I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
5261 ValueType OpVT, bit Is2Addr = 1> {
5262 let isCommutable = 1 in
5263 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
5264 (ins VR128:$src1, VR128:$src2),
5266 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5267 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5268 [(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]>,
5270 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
5271 (ins VR128:$src1, i128mem:$src2),
5273 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5274 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5275 [(set VR128:$dst, (OpNode VR128:$src1,
5276 (bc_v4i32 (memopv2i64 addr:$src2))))]>,
5280 let Predicates = [HasAVX] in
5281 defm VPMULLD : SS48I_binop_rm<0x40, "vpmulld", mul, v4i32, 0>, VEX_4V;
5282 let Constraints = "$src1 = $dst" in
5283 defm PMULLD : SS48I_binop_rm<0x40, "pmulld", mul, v4i32>;
5285 /// SS41I_binop_rmi_int - SSE 4.1 binary operator with 8-bit immediate
5286 multiclass SS41I_binop_rmi_int<bits<8> opc, string OpcodeStr,
5287 Intrinsic IntId, RegisterClass RC, PatFrag memop_frag,
5288 X86MemOperand x86memop, bit Is2Addr = 1> {
5289 let isCommutable = 1 in
5290 def rri : SS4AIi8<opc, MRMSrcReg, (outs RC:$dst),
5291 (ins RC:$src1, RC:$src2, u32u8imm:$src3),
5293 !strconcat(OpcodeStr,
5294 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
5295 !strconcat(OpcodeStr,
5296 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
5297 [(set RC:$dst, (IntId RC:$src1, RC:$src2, imm:$src3))]>,
5299 def rmi : SS4AIi8<opc, MRMSrcMem, (outs RC:$dst),
5300 (ins RC:$src1, x86memop:$src2, u32u8imm:$src3),
5302 !strconcat(OpcodeStr,
5303 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
5304 !strconcat(OpcodeStr,
5305 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
5308 (bitconvert (memop_frag addr:$src2)), imm:$src3))]>,
5312 let Predicates = [HasAVX] in {
5313 let isCommutable = 0 in {
5314 defm VBLENDPS : SS41I_binop_rmi_int<0x0C, "vblendps", int_x86_sse41_blendps,
5315 VR128, memopv16i8, i128mem, 0>, VEX_4V;
5316 defm VBLENDPD : SS41I_binop_rmi_int<0x0D, "vblendpd", int_x86_sse41_blendpd,
5317 VR128, memopv16i8, i128mem, 0>, VEX_4V;
5318 defm VBLENDPSY : SS41I_binop_rmi_int<0x0C, "vblendps",
5319 int_x86_avx_blend_ps_256, VR256, memopv32i8, i256mem, 0>, VEX_4V;
5320 defm VBLENDPDY : SS41I_binop_rmi_int<0x0D, "vblendpd",
5321 int_x86_avx_blend_pd_256, VR256, memopv32i8, i256mem, 0>, VEX_4V;
5322 defm VPBLENDW : SS41I_binop_rmi_int<0x0E, "vpblendw", int_x86_sse41_pblendw,
5323 VR128, memopv16i8, i128mem, 0>, VEX_4V;
5324 defm VMPSADBW : SS41I_binop_rmi_int<0x42, "vmpsadbw", int_x86_sse41_mpsadbw,
5325 VR128, memopv16i8, i128mem, 0>, VEX_4V;
5327 defm VDPPS : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_sse41_dpps,
5328 VR128, memopv16i8, i128mem, 0>, VEX_4V;
5329 defm VDPPD : SS41I_binop_rmi_int<0x41, "vdppd", int_x86_sse41_dppd,
5330 VR128, memopv16i8, i128mem, 0>, VEX_4V;
5331 defm VDPPSY : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_avx_dp_ps_256,
5332 VR256, memopv32i8, i256mem, 0>, VEX_4V;
5335 let Constraints = "$src1 = $dst" in {
5336 let isCommutable = 0 in {
5337 defm BLENDPS : SS41I_binop_rmi_int<0x0C, "blendps", int_x86_sse41_blendps,
5338 VR128, memopv16i8, i128mem>;
5339 defm BLENDPD : SS41I_binop_rmi_int<0x0D, "blendpd", int_x86_sse41_blendpd,
5340 VR128, memopv16i8, i128mem>;
5341 defm PBLENDW : SS41I_binop_rmi_int<0x0E, "pblendw", int_x86_sse41_pblendw,
5342 VR128, memopv16i8, i128mem>;
5343 defm MPSADBW : SS41I_binop_rmi_int<0x42, "mpsadbw", int_x86_sse41_mpsadbw,
5344 VR128, memopv16i8, i128mem>;
5346 defm DPPS : SS41I_binop_rmi_int<0x40, "dpps", int_x86_sse41_dpps,
5347 VR128, memopv16i8, i128mem>;
5348 defm DPPD : SS41I_binop_rmi_int<0x41, "dppd", int_x86_sse41_dppd,
5349 VR128, memopv16i8, i128mem>;
5352 /// SS41I_quaternary_int_avx - AVX SSE 4.1 intrinsics with 4 operands
5353 let Predicates = [HasAVX] in {
5354 multiclass SS41I_quaternary_int_avx<bits<8> opc, string OpcodeStr,
5355 RegisterClass RC, X86MemOperand x86memop,
5356 PatFrag mem_frag, Intrinsic IntId> {
5357 def rr : I<opc, MRMSrcReg, (outs RC:$dst),
5358 (ins RC:$src1, RC:$src2, RC:$src3),
5359 !strconcat(OpcodeStr,
5360 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
5361 [(set RC:$dst, (IntId RC:$src1, RC:$src2, RC:$src3))],
5362 SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM;
5364 def rm : I<opc, MRMSrcMem, (outs RC:$dst),
5365 (ins RC:$src1, x86memop:$src2, RC:$src3),
5366 !strconcat(OpcodeStr,
5367 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
5369 (IntId RC:$src1, (bitconvert (mem_frag addr:$src2)),
5371 SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM;
5375 defm VBLENDVPD : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR128, i128mem,
5376 memopv16i8, int_x86_sse41_blendvpd>;
5377 defm VBLENDVPS : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR128, i128mem,
5378 memopv16i8, int_x86_sse41_blendvps>;
5379 defm VPBLENDVB : SS41I_quaternary_int_avx<0x4C, "vpblendvb", VR128, i128mem,
5380 memopv16i8, int_x86_sse41_pblendvb>;
5381 defm VBLENDVPDY : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR256, i256mem,
5382 memopv32i8, int_x86_avx_blendv_pd_256>;
5383 defm VBLENDVPSY : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR256, i256mem,
5384 memopv32i8, int_x86_avx_blendv_ps_256>;
5386 /// SS41I_ternary_int - SSE 4.1 ternary operator
5387 let Uses = [XMM0], Constraints = "$src1 = $dst" in {
5388 multiclass SS41I_ternary_int<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
5389 def rr0 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
5390 (ins VR128:$src1, VR128:$src2),
5391 !strconcat(OpcodeStr,
5392 "\t{$src2, $dst|$dst, $src2}"),
5393 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2, XMM0))]>,
5396 def rm0 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
5397 (ins VR128:$src1, i128mem:$src2),
5398 !strconcat(OpcodeStr,
5399 "\t{$src2, $dst|$dst, $src2}"),
5402 (bitconvert (memopv16i8 addr:$src2)), XMM0))]>, OpSize;
5406 defm BLENDVPD : SS41I_ternary_int<0x15, "blendvpd", int_x86_sse41_blendvpd>;
5407 defm BLENDVPS : SS41I_ternary_int<0x14, "blendvps", int_x86_sse41_blendvps>;
5408 defm PBLENDVB : SS41I_ternary_int<0x10, "pblendvb", int_x86_sse41_pblendvb>;
5410 def : Pat<(X86pblendv VR128:$src1, VR128:$src2, XMM0),
5411 (PBLENDVBrr0 VR128:$src1, VR128:$src2)>;
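// Illustrative sketch only: the legacy (non-AVX) blendv forms use XMM0 as an
// implicit mask operand; in C the mask is an ordinary argument to
// _mm_blendv_ps / _mm_blendv_epi8 from <smmintrin.h>.
//
//   #include <smmintrin.h>
//   __m128 select_by_sign(__m128 a, __m128 b, __m128 mask) {
//     // Per lane: take b where the mask sign bit is set, otherwise a.
//     return _mm_blendv_ps(a, b, mask);
//   }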
5413 let Predicates = [HasAVX] in
5414 def VMOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
5415 "vmovntdqa\t{$src, $dst|$dst, $src}",
5416 [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
5418 def MOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
5419 "movntdqa\t{$src, $dst|$dst, $src}",
5420 [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
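// Illustrative sketch only: (v)movntdqa is the streaming (non-temporal) load,
// exposed as _mm_stream_load_si128 in <smmintrin.h>; it is mainly useful on
// write-combining memory.
//
//   #include <smmintrin.h>
//   __m128i stream_read(__m128i *p) {
//     return _mm_stream_load_si128(p);   // selects (V)MOVNTDQArm
//   }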
5423 //===----------------------------------------------------------------------===//
5424 // SSE4.2 - Compare Instructions
5425 //===----------------------------------------------------------------------===//
5427 /// SS42I_binop_rm_int - Simple SSE 4.2 binary operator
5428 multiclass SS42I_binop_rm_int<bits<8> opc, string OpcodeStr,
5429 Intrinsic IntId128, bit Is2Addr = 1> {
5430 def rr : SS428I<opc, MRMSrcReg, (outs VR128:$dst),
5431 (ins VR128:$src1, VR128:$src2),
5433 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5434 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5435 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
5437 def rm : SS428I<opc, MRMSrcMem, (outs VR128:$dst),
5438 (ins VR128:$src1, i128mem:$src2),
5440 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5441 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5443 (IntId128 VR128:$src1,
5444 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
5447 let Predicates = [HasAVX] in {
5448 defm VPCMPGTQ : SS42I_binop_rm_int<0x37, "vpcmpgtq", int_x86_sse42_pcmpgtq,
5451 def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, VR128:$src2)),
5452 (VPCMPGTQrr VR128:$src1, VR128:$src2)>;
5453 def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, (memop addr:$src2))),
5454 (VPCMPGTQrm VR128:$src1, addr:$src2)>;
5457 let Constraints = "$src1 = $dst" in
5458 defm PCMPGTQ : SS42I_binop_rm_int<0x37, "pcmpgtq", int_x86_sse42_pcmpgtq>;
5460 def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, VR128:$src2)),
5461 (PCMPGTQrr VR128:$src1, VR128:$src2)>;
5462 def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, (memop addr:$src2))),
5463 (PCMPGTQrm VR128:$src1, addr:$src2)>;
5465 //===----------------------------------------------------------------------===//
5466 // SSE4.2 - String/text Processing Instructions
5467 //===----------------------------------------------------------------------===//
5469 // Packed Compare Implicit Length Strings, Return Mask
5470 multiclass pseudo_pcmpistrm<string asm> {
5471 def REG : PseudoI<(outs VR128:$dst),
5472 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
5473 [(set VR128:$dst, (int_x86_sse42_pcmpistrm128 VR128:$src1, VR128:$src2,
5475 def MEM : PseudoI<(outs VR128:$dst),
5476 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
5477 [(set VR128:$dst, (int_x86_sse42_pcmpistrm128
5478 VR128:$src1, (load addr:$src2), imm:$src3))]>;
5481 let Defs = [EFLAGS], usesCustomInserter = 1 in {
5482 defm PCMPISTRM128 : pseudo_pcmpistrm<"#PCMPISTRM128">, Requires<[HasSSE42]>;
5483 defm VPCMPISTRM128 : pseudo_pcmpistrm<"#VPCMPISTRM128">, Requires<[HasAVX]>;
5486 let Defs = [XMM0, EFLAGS], Predicates = [HasAVX] in {
5487 def VPCMPISTRM128rr : SS42AI<0x62, MRMSrcReg, (outs),
5488 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
5489 "vpcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize, VEX;
5490 def VPCMPISTRM128rm : SS42AI<0x62, MRMSrcMem, (outs),
5491 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
5492 "vpcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize, VEX;
5495 let Defs = [XMM0, EFLAGS] in {
5496 def PCMPISTRM128rr : SS42AI<0x62, MRMSrcReg, (outs),
5497 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
5498 "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize;
5499 def PCMPISTRM128rm : SS42AI<0x62, MRMSrcMem, (outs),
5500 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
5501 "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize;
5504 // Packed Compare Explicit Length Strings, Return Mask
5505 multiclass pseudo_pcmpestrm<string asm> {
5506 def REG : PseudoI<(outs VR128:$dst),
5507 (ins VR128:$src1, VR128:$src3, i8imm:$src5),
5508 [(set VR128:$dst, (int_x86_sse42_pcmpestrm128
5509 VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5))]>;
5510 def MEM : PseudoI<(outs VR128:$dst),
5511 (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
5512 [(set VR128:$dst, (int_x86_sse42_pcmpestrm128
5513 VR128:$src1, EAX, (load addr:$src3), EDX, imm:$src5))]>;
5516 let Defs = [EFLAGS], Uses = [EAX, EDX], usesCustomInserter = 1 in {
5517 defm PCMPESTRM128 : pseudo_pcmpestrm<"#PCMPESTRM128">, Requires<[HasSSE42]>;
5518 defm VPCMPESTRM128 : pseudo_pcmpestrm<"#VPCMPESTRM128">, Requires<[HasAVX]>;
5521 let Predicates = [HasAVX],
5522 Defs = [XMM0, EFLAGS], Uses = [EAX, EDX] in {
5523 def VPCMPESTRM128rr : SS42AI<0x60, MRMSrcReg, (outs),
5524 (ins VR128:$src1, VR128:$src3, i8imm:$src5),
5525 "vpcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize, VEX;
5526 def VPCMPESTRM128rm : SS42AI<0x60, MRMSrcMem, (outs),
5527 (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
5528 "vpcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize, VEX;
5531 let Defs = [XMM0, EFLAGS], Uses = [EAX, EDX] in {
5532 def PCMPESTRM128rr : SS42AI<0x60, MRMSrcReg, (outs),
5533 (ins VR128:$src1, VR128:$src3, i8imm:$src5),
5534 "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize;
5535 def PCMPESTRM128rm : SS42AI<0x60, MRMSrcMem, (outs),
5536 (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
5537 "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize;
5540 // Packed Compare Implicit Length Strings, Return Index
5541 let Defs = [ECX, EFLAGS] in {
5542 multiclass SS42AI_pcmpistri<Intrinsic IntId128, string asm = "pcmpistri"> {
5543 def rr : SS42AI<0x63, MRMSrcReg, (outs),
5544 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
5545 !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
5546 [(set ECX, (IntId128 VR128:$src1, VR128:$src2, imm:$src3)),
5547 (implicit EFLAGS)]>, OpSize;
5548 def rm : SS42AI<0x63, MRMSrcMem, (outs),
5549 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
5550 !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
5551 [(set ECX, (IntId128 VR128:$src1, (load addr:$src2), imm:$src3)),
5552 (implicit EFLAGS)]>, OpSize;
5556 let Predicates = [HasAVX] in {
5557 defm VPCMPISTRI : SS42AI_pcmpistri<int_x86_sse42_pcmpistri128, "vpcmpistri">,
5559 defm VPCMPISTRIA : SS42AI_pcmpistri<int_x86_sse42_pcmpistria128, "vpcmpistri">,
5561 defm VPCMPISTRIC : SS42AI_pcmpistri<int_x86_sse42_pcmpistric128, "vpcmpistri">,
5563 defm VPCMPISTRIO : SS42AI_pcmpistri<int_x86_sse42_pcmpistrio128, "vpcmpistri">,
5565 defm VPCMPISTRIS : SS42AI_pcmpistri<int_x86_sse42_pcmpistris128, "vpcmpistri">,
5567 defm VPCMPISTRIZ : SS42AI_pcmpistri<int_x86_sse42_pcmpistriz128, "vpcmpistri">,
5571 defm PCMPISTRI : SS42AI_pcmpistri<int_x86_sse42_pcmpistri128>;
5572 defm PCMPISTRIA : SS42AI_pcmpistri<int_x86_sse42_pcmpistria128>;
5573 defm PCMPISTRIC : SS42AI_pcmpistri<int_x86_sse42_pcmpistric128>;
5574 defm PCMPISTRIO : SS42AI_pcmpistri<int_x86_sse42_pcmpistrio128>;
5575 defm PCMPISTRIS : SS42AI_pcmpistri<int_x86_sse42_pcmpistris128>;
5576 defm PCMPISTRIZ : SS42AI_pcmpistri<int_x86_sse42_pcmpistriz128>;
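// Illustrative sketch only: the implicit-length string compares are reached
// from C through _mm_cmpistri and related intrinsics in <nmmintrin.h>; the
// immediate selects element size, comparison kind, and result polarity.
//
//   #include <nmmintrin.h>
//   int first_match(__m128i needle, __m128i haystack) {
//     // Index (returned in ECX) of the first haystack byte that matches any
//     // needle byte, or 16 if there is no match.
//     return _mm_cmpistri(needle, haystack,
//                         _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY);
//   }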
5578 // Packed Compare Explicit Length Strings, Return Index
5579 let Defs = [ECX, EFLAGS], Uses = [EAX, EDX] in {
5580 multiclass SS42AI_pcmpestri<Intrinsic IntId128, string asm = "pcmpestri"> {
5581 def rr : SS42AI<0x61, MRMSrcReg, (outs),
5582 (ins VR128:$src1, VR128:$src3, i8imm:$src5),
5583 !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
5584 [(set ECX, (IntId128 VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5)),
5585 (implicit EFLAGS)]>, OpSize;
5586 def rm : SS42AI<0x61, MRMSrcMem, (outs),
5587 (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
5588 !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
5590 (IntId128 VR128:$src1, EAX, (load addr:$src3), EDX, imm:$src5)),
5591 (implicit EFLAGS)]>, OpSize;
5595 let Predicates = [HasAVX] in {
5596 defm VPCMPESTRI : SS42AI_pcmpestri<int_x86_sse42_pcmpestri128, "vpcmpestri">,
5598 defm VPCMPESTRIA : SS42AI_pcmpestri<int_x86_sse42_pcmpestria128, "vpcmpestri">,
5600 defm VPCMPESTRIC : SS42AI_pcmpestri<int_x86_sse42_pcmpestric128, "vpcmpestri">,
5602 defm VPCMPESTRIO : SS42AI_pcmpestri<int_x86_sse42_pcmpestrio128, "vpcmpestri">,
5604 defm VPCMPESTRIS : SS42AI_pcmpestri<int_x86_sse42_pcmpestris128, "vpcmpestri">,
5606 defm VPCMPESTRIZ : SS42AI_pcmpestri<int_x86_sse42_pcmpestriz128, "vpcmpestri">,
5610 defm PCMPESTRI : SS42AI_pcmpestri<int_x86_sse42_pcmpestri128>;
5611 defm PCMPESTRIA : SS42AI_pcmpestri<int_x86_sse42_pcmpestria128>;
5612 defm PCMPESTRIC : SS42AI_pcmpestri<int_x86_sse42_pcmpestric128>;
5613 defm PCMPESTRIO : SS42AI_pcmpestri<int_x86_sse42_pcmpestrio128>;
5614 defm PCMPESTRIS : SS42AI_pcmpestri<int_x86_sse42_pcmpestris128>;
5615 defm PCMPESTRIZ : SS42AI_pcmpestri<int_x86_sse42_pcmpestriz128>;
5617 //===----------------------------------------------------------------------===//
5618 // SSE4.2 - CRC Instructions
5619 //===----------------------------------------------------------------------===//
5621 // No CRC instructions have AVX equivalents
5623 // CRC intrinsic instructions.
5624 // The forms in this set differ only in the size of their register and memory operands.
5626 let Constraints = "$src1 = $dst" in {
5627 def CRC32r32m8 : SS42FI<0xF0, MRMSrcMem, (outs GR32:$dst),
5628 (ins GR32:$src1, i8mem:$src2),
5629 "crc32{b} \t{$src2, $src1|$src1, $src2}",
5631 (int_x86_sse42_crc32_32_8 GR32:$src1,
5632 (load addr:$src2)))]>;
5633 def CRC32r32r8 : SS42FI<0xF0, MRMSrcReg, (outs GR32:$dst),
5634 (ins GR32:$src1, GR8:$src2),
5635 "crc32{b} \t{$src2, $src1|$src1, $src2}",
5637 (int_x86_sse42_crc32_32_8 GR32:$src1, GR8:$src2))]>;
5638 def CRC32r32m16 : SS42FI<0xF1, MRMSrcMem, (outs GR32:$dst),
5639 (ins GR32:$src1, i16mem:$src2),
5640 "crc32{w} \t{$src2, $src1|$src1, $src2}",
5642 (int_x86_sse42_crc32_32_16 GR32:$src1,
5643 (load addr:$src2)))]>,
5645 def CRC32r32r16 : SS42FI<0xF1, MRMSrcReg, (outs GR32:$dst),
5646 (ins GR32:$src1, GR16:$src2),
5647 "crc32{w} \t{$src2, $src1|$src1, $src2}",
5649 (int_x86_sse42_crc32_32_16 GR32:$src1, GR16:$src2))]>,
5651 def CRC32r32m32 : SS42FI<0xF1, MRMSrcMem, (outs GR32:$dst),
5652 (ins GR32:$src1, i32mem:$src2),
5653 "crc32{l} \t{$src2, $src1|$src1, $src2}",
5655 (int_x86_sse42_crc32_32_32 GR32:$src1,
5656 (load addr:$src2)))]>;
5657 def CRC32r32r32 : SS42FI<0xF1, MRMSrcReg, (outs GR32:$dst),
5658 (ins GR32:$src1, GR32:$src2),
5659 "crc32{l} \t{$src2, $src1|$src1, $src2}",
5661 (int_x86_sse42_crc32_32_32 GR32:$src1, GR32:$src2))]>;
5662 def CRC32r64m8 : SS42FI<0xF0, MRMSrcMem, (outs GR64:$dst),
5663 (ins GR64:$src1, i8mem:$src2),
5664 "crc32{b} \t{$src2, $src1|$src1, $src2}",
5666 (int_x86_sse42_crc32_64_8 GR64:$src1,
5667 (load addr:$src2)))]>,
5669 def CRC32r64r8 : SS42FI<0xF0, MRMSrcReg, (outs GR64:$dst),
5670 (ins GR64:$src1, GR8:$src2),
5671 "crc32{b} \t{$src2, $src1|$src1, $src2}",
5673 (int_x86_sse42_crc32_64_8 GR64:$src1, GR8:$src2))]>,
5675 def CRC32r64m64 : SS42FI<0xF1, MRMSrcMem, (outs GR64:$dst),
5676 (ins GR64:$src1, i64mem:$src2),
5677 "crc32{q} \t{$src2, $src1|$src1, $src2}",
5679 (int_x86_sse42_crc32_64_64 GR64:$src1,
5680 (load addr:$src2)))]>,
5682 def CRC32r64r64 : SS42FI<0xF1, MRMSrcReg, (outs GR64:$dst),
5683 (ins GR64:$src1, GR64:$src2),
5684 "crc32{q} \t{$src2, $src1|$src1, $src2}",
5686 (int_x86_sse42_crc32_64_64 GR64:$src1, GR64:$src2))]>,
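// Illustrative sketch only: these definitions back the _mm_crc32_u8/u16/u32/u64
// intrinsics from <nmmintrin.h>, which compute CRC-32C (Castagnoli polynomial).
//
//   #include <nmmintrin.h>
//   #include <stddef.h>
//   unsigned crc32c_update(unsigned crc, const unsigned char *p, size_t n) {
//     for (size_t i = 0; i < n; ++i)
//       crc = _mm_crc32_u8(crc, p[i]);   // selects CRC32r32r8 / CRC32r32m8
//     return crc;
//   }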
5690 //===----------------------------------------------------------------------===//
5691 // AES-NI Instructions
5692 //===----------------------------------------------------------------------===//
5694 multiclass AESI_binop_rm_int<bits<8> opc, string OpcodeStr,
5695 Intrinsic IntId128, bit Is2Addr = 1> {
5696 def rr : AES8I<opc, MRMSrcReg, (outs VR128:$dst),
5697 (ins VR128:$src1, VR128:$src2),
5699 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5700 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5701 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
5703 def rm : AES8I<opc, MRMSrcMem, (outs VR128:$dst),
5704 (ins VR128:$src1, i128mem:$src2),
5706 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5707 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5709 (IntId128 VR128:$src1,
5710 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
5713 // Perform One Round of an AES Encryption/Decryption Flow
5714 let Predicates = [HasAVX, HasAES] in {
5715 defm VAESENC : AESI_binop_rm_int<0xDC, "vaesenc",
5716 int_x86_aesni_aesenc, 0>, VEX_4V;
5717 defm VAESENCLAST : AESI_binop_rm_int<0xDD, "vaesenclast",
5718 int_x86_aesni_aesenclast, 0>, VEX_4V;
5719 defm VAESDEC : AESI_binop_rm_int<0xDE, "vaesdec",
5720 int_x86_aesni_aesdec, 0>, VEX_4V;
5721 defm VAESDECLAST : AESI_binop_rm_int<0xDF, "vaesdeclast",
5722 int_x86_aesni_aesdeclast, 0>, VEX_4V;
5725 let Constraints = "$src1 = $dst" in {
5726 defm AESENC : AESI_binop_rm_int<0xDC, "aesenc",
5727 int_x86_aesni_aesenc>;
5728 defm AESENCLAST : AESI_binop_rm_int<0xDD, "aesenclast",
5729 int_x86_aesni_aesenclast>;
5730 defm AESDEC : AESI_binop_rm_int<0xDE, "aesdec",
5731 int_x86_aesni_aesdec>;
5732 defm AESDECLAST : AESI_binop_rm_int<0xDF, "aesdeclast",
5733 int_x86_aesni_aesdeclast>;
5736 def : Pat<(v2i64 (int_x86_aesni_aesenc VR128:$src1, VR128:$src2)),
5737 (AESENCrr VR128:$src1, VR128:$src2)>;
5738 def : Pat<(v2i64 (int_x86_aesni_aesenc VR128:$src1, (memop addr:$src2))),
5739 (AESENCrm VR128:$src1, addr:$src2)>;
5740 def : Pat<(v2i64 (int_x86_aesni_aesenclast VR128:$src1, VR128:$src2)),
5741 (AESENCLASTrr VR128:$src1, VR128:$src2)>;
5742 def : Pat<(v2i64 (int_x86_aesni_aesenclast VR128:$src1, (memop addr:$src2))),
5743 (AESENCLASTrm VR128:$src1, addr:$src2)>;
5744 def : Pat<(v2i64 (int_x86_aesni_aesdec VR128:$src1, VR128:$src2)),
5745 (AESDECrr VR128:$src1, VR128:$src2)>;
5746 def : Pat<(v2i64 (int_x86_aesni_aesdec VR128:$src1, (memop addr:$src2))),
5747 (AESDECrm VR128:$src1, addr:$src2)>;
5748 def : Pat<(v2i64 (int_x86_aesni_aesdeclast VR128:$src1, VR128:$src2)),
5749 (AESDECLASTrr VR128:$src1, VR128:$src2)>;
5750 def : Pat<(v2i64 (int_x86_aesni_aesdeclast VR128:$src1, (memop addr:$src2))),
5751 (AESDECLASTrm VR128:$src1, addr:$src2)>;
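// Illustrative sketch only: aesenc/aesenclast are exposed as _mm_aesenc_si128
// and _mm_aesenclast_si128 in <wmmintrin.h>. The last two rounds of an AES-128
// encryption, assuming the round keys are already expanded:
//
//   #include <wmmintrin.h>
//   __m128i last_two_rounds(__m128i state, __m128i rk9, __m128i rk10) {
//     state = _mm_aesenc_si128(state, rk9);       // full round
//     return _mm_aesenclast_si128(state, rk10);   // final round, no MixColumns
//   }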
5753 // Perform the AES InvMixColumn Transformation
5754 let Predicates = [HasAVX, HasAES] in {
5755 def VAESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
5757 "vaesimc\t{$src1, $dst|$dst, $src1}",
5759 (int_x86_aesni_aesimc VR128:$src1))]>,
5761 def VAESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
5762 (ins i128mem:$src1),
5763 "vaesimc\t{$src1, $dst|$dst, $src1}",
5765 (int_x86_aesni_aesimc (bitconvert (memopv2i64 addr:$src1))))]>,
5768 def AESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
5770 "aesimc\t{$src1, $dst|$dst, $src1}",
5772 (int_x86_aesni_aesimc VR128:$src1))]>,
5774 def AESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
5775 (ins i128mem:$src1),
5776 "aesimc\t{$src1, $dst|$dst, $src1}",
5778 (int_x86_aesni_aesimc (bitconvert (memopv2i64 addr:$src1))))]>,
5781 // AES Round Key Generation Assist
5782 let Predicates = [HasAVX, HasAES] in {
5783 def VAESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
5784 (ins VR128:$src1, i8imm:$src2),
5785 "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5787 (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
5789 def VAESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
5790 (ins i128mem:$src1, i8imm:$src2),
5791 "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5793 (int_x86_aesni_aeskeygenassist (bitconvert (memopv2i64 addr:$src1)),
5797 def AESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
5798 (ins VR128:$src1, i8imm:$src2),
5799 "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5801 (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
5803 def AESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
5804 (ins i128mem:$src1, i8imm:$src2),
5805 "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5807 (int_x86_aesni_aeskeygenassist (bitconvert (memopv2i64 addr:$src1)),
5811 //===----------------------------------------------------------------------===//
5812 // CLMUL Instructions
5813 //===----------------------------------------------------------------------===//
5815 // Carry-less Multiplication instructions
5816 let Constraints = "$src1 = $dst" in {
5817 def PCLMULQDQrr : CLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
5818 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
5819 "pclmulqdq\t{$src3, $src2, $dst|$dst, $src2, $src3}",
5822 def PCLMULQDQrm : CLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
5823 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
5824 "pclmulqdq\t{$src3, $src2, $dst|$dst, $src2, $src3}",
5828 // AVX carry-less Multiplication instructions
5829 def VPCLMULQDQrr : AVXCLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
5830 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
5831 "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
5834 def VPCLMULQDQrm : AVXCLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
5835 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
5836 "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
5840 multiclass pclmul_alias<string asm, int immop> {
5841 def : InstAlias<!strconcat("pclmul", asm,
5842 "dq {$src, $dst|$dst, $src}"),
5843 (PCLMULQDQrr VR128:$dst, VR128:$src, immop)>;
5845 def : InstAlias<!strconcat("pclmul", asm,
5846 "dq {$src, $dst|$dst, $src}"),
5847 (PCLMULQDQrm VR128:$dst, i128mem:$src, immop)>;
5849 def : InstAlias<!strconcat("vpclmul", asm,
5850 "dq {$src2, $src1, $dst|$dst, $src1, $src2}"),
5851 (VPCLMULQDQrr VR128:$dst, VR128:$src1, VR128:$src2, immop)>;
5853 def : InstAlias<!strconcat("vpclmul", asm,
5854 "dq {$src2, $src1, $dst|$dst, $src1, $src2}"),
5855 (VPCLMULQDQrm VR128:$dst, VR128:$src1, i128mem:$src2, immop)>;
5857 defm : pclmul_alias<"hqhq", 0x11>;
5858 defm : pclmul_alias<"hqlq", 0x01>;
5859 defm : pclmul_alias<"lqhq", 0x10>;
5860 defm : pclmul_alias<"lqlq", 0x00>;
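// Illustrative sketch only: pclmulqdq is exposed as _mm_clmulepi64_si128 in
// <wmmintrin.h>; the immediate selects which 64-bit half of each source feeds
// the carry-less multiply (the aliases above name the four combinations).
//
//   #include <wmmintrin.h>
//   __m128i clmul_low_halves(__m128i a, __m128i b) {
//     // imm 0x00 ("lqlq"): low qword of a times low qword of b.
//     return _mm_clmulepi64_si128(a, b, 0x00);
//   }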
5862 //===----------------------------------------------------------------------===//
5864 //===----------------------------------------------------------------------===//
5866 //===----------------------------------------------------------------------===//
5867 // VBROADCAST - Load from memory and broadcast to all elements of the
5868 // destination operand
5870 class avx_broadcast<bits<8> opc, string OpcodeStr, RegisterClass RC,
5871 X86MemOperand x86memop, Intrinsic Int> :
5872 AVX8I<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
5873 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5874 [(set RC:$dst, (Int addr:$src))]>, VEX;
5876 def VBROADCASTSS : avx_broadcast<0x18, "vbroadcastss", VR128, f32mem,
5877 int_x86_avx_vbroadcastss>;
5878 def VBROADCASTSSY : avx_broadcast<0x18, "vbroadcastss", VR256, f32mem,
5879 int_x86_avx_vbroadcastss_256>;
5880 def VBROADCASTSD : avx_broadcast<0x19, "vbroadcastsd", VR256, f64mem,
5881 int_x86_avx_vbroadcast_sd_256>;
5882 def VBROADCASTF128 : avx_broadcast<0x1A, "vbroadcastf128", VR256, f128mem,
5883 int_x86_avx_vbroadcastf128_pd_256>;
5885 def : Pat<(int_x86_avx_vbroadcastf128_ps_256 addr:$src),
5886 (VBROADCASTF128 addr:$src)>;
5888 def : Pat<(v8i32 (X86VBroadcast (loadi32 addr:$src))),
5889 (VBROADCASTSSY addr:$src)>;
5890 def : Pat<(v4i64 (X86VBroadcast (loadi64 addr:$src))),
5891 (VBROADCASTSD addr:$src)>;
5892 def : Pat<(v8f32 (X86VBroadcast (loadf32 addr:$src))),
5893 (VBROADCASTSSY addr:$src)>;
5894 def : Pat<(v4f64 (X86VBroadcast (loadf64 addr:$src))),
5895 (VBROADCASTSD addr:$src)>;
5897 def : Pat<(v4f32 (X86VBroadcast (loadf32 addr:$src))),
5898 (VBROADCASTSS addr:$src)>;
5899 def : Pat<(v4i32 (X86VBroadcast (loadi32 addr:$src))),
5900 (VBROADCASTSS addr:$src)>;
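// Illustrative sketch only: the vbroadcastss/sd/f128 forms correspond to
// _mm256_broadcast_ss, _mm256_broadcast_sd and _mm256_broadcast_ps/pd in
// <immintrin.h>; all of them take a memory operand (AVX required).
//
//   #include <immintrin.h>
//   __m256 splat8(const float *p) {
//     return _mm256_broadcast_ss(p);   // selects VBROADCASTSSY
//   }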
5902 //===----------------------------------------------------------------------===//
5903 // VINSERTF128 - Insert packed floating-point values
5905 def VINSERTF128rr : AVXAIi8<0x18, MRMSrcReg, (outs VR256:$dst),
5906 (ins VR256:$src1, VR128:$src2, i8imm:$src3),
5907 "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
5909 def VINSERTF128rm : AVXAIi8<0x18, MRMSrcMem, (outs VR256:$dst),
5910 (ins VR256:$src1, f128mem:$src2, i8imm:$src3),
5911 "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
5914 def : Pat<(int_x86_avx_vinsertf128_pd_256 VR256:$src1, VR128:$src2, imm:$src3),
5915 (VINSERTF128rr VR256:$src1, VR128:$src2, imm:$src3)>;
5916 def : Pat<(int_x86_avx_vinsertf128_ps_256 VR256:$src1, VR128:$src2, imm:$src3),
5917 (VINSERTF128rr VR256:$src1, VR128:$src2, imm:$src3)>;
5918 def : Pat<(int_x86_avx_vinsertf128_si_256 VR256:$src1, VR128:$src2, imm:$src3),
5919 (VINSERTF128rr VR256:$src1, VR128:$src2, imm:$src3)>;
5921 def : Pat<(vinsertf128_insert:$ins (v8f32 VR256:$src1), (v4f32 VR128:$src2),
5923 (VINSERTF128rr VR256:$src1, VR128:$src2,
5924 (INSERT_get_vinsertf128_imm VR256:$ins))>;
5925 def : Pat<(vinsertf128_insert:$ins (v4f64 VR256:$src1), (v2f64 VR128:$src2),
5927 (VINSERTF128rr VR256:$src1, VR128:$src2,
5928 (INSERT_get_vinsertf128_imm VR256:$ins))>;
5929 def : Pat<(vinsertf128_insert:$ins (v8i32 VR256:$src1), (v4i32 VR128:$src2),
5931 (VINSERTF128rr VR256:$src1, VR128:$src2,
5932 (INSERT_get_vinsertf128_imm VR256:$ins))>;
5933 def : Pat<(vinsertf128_insert:$ins (v4i64 VR256:$src1), (v2i64 VR128:$src2),
5935 (VINSERTF128rr VR256:$src1, VR128:$src2,
5936 (INSERT_get_vinsertf128_imm VR256:$ins))>;
5937 def : Pat<(vinsertf128_insert:$ins (v32i8 VR256:$src1), (v16i8 VR128:$src2),
5939 (VINSERTF128rr VR256:$src1, VR128:$src2,
5940 (INSERT_get_vinsertf128_imm VR256:$ins))>;
5941 def : Pat<(vinsertf128_insert:$ins (v16i16 VR256:$src1), (v8i16 VR128:$src2),
5943 (VINSERTF128rr VR256:$src1, VR128:$src2,
5944 (INSERT_get_vinsertf128_imm VR256:$ins))>;
5946 // Special COPY patterns
5947 def : Pat<(insert_subvector undef, (v2i64 VR128:$src), (i32 0)),
5948 (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
5949 def : Pat<(insert_subvector undef, (v2f64 VR128:$src), (i32 0)),
5950 (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
5951 def : Pat<(insert_subvector undef, (v4i32 VR128:$src), (i32 0)),
5952 (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
5953 def : Pat<(insert_subvector undef, (v4f32 VR128:$src), (i32 0)),
5954 (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
5955 def : Pat<(insert_subvector undef, (v8i16 VR128:$src), (i32 0)),
5956 (INSERT_SUBREG (v16i16 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
5957 def : Pat<(insert_subvector undef, (v16i8 VR128:$src), (i32 0)),
5958 (INSERT_SUBREG (v32i8 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
5960 //===----------------------------------------------------------------------===//
5961 // VEXTRACTF128 - Extract packed floating-point values
5963 def VEXTRACTF128rr : AVXAIi8<0x19, MRMDestReg, (outs VR128:$dst),
5964 (ins VR256:$src1, i8imm:$src2),
5965 "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5967 def VEXTRACTF128mr : AVXAIi8<0x19, MRMDestMem, (outs),
5968 (ins f128mem:$dst, VR256:$src1, i8imm:$src2),
5969 "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5972 def : Pat<(int_x86_avx_vextractf128_pd_256 VR256:$src1, imm:$src2),
5973 (VEXTRACTF128rr VR256:$src1, imm:$src2)>;
5974 def : Pat<(int_x86_avx_vextractf128_ps_256 VR256:$src1, imm:$src2),
5975 (VEXTRACTF128rr VR256:$src1, imm:$src2)>;
5976 def : Pat<(int_x86_avx_vextractf128_si_256 VR256:$src1, imm:$src2),
5977 (VEXTRACTF128rr VR256:$src1, imm:$src2)>;
5979 def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
5980 (v4f32 (VEXTRACTF128rr
5981 (v8f32 VR256:$src1),
5982 (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
5983 def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
5984 (v2f64 (VEXTRACTF128rr
5985 (v4f64 VR256:$src1),
5986 (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
5987 def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
5988 (v4i32 (VEXTRACTF128rr
5989 (v8i32 VR256:$src1),
5990 (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
5991 def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
5992 (v2i64 (VEXTRACTF128rr
5993 (v4i64 VR256:$src1),
5994 (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
5995 def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
5996 (v8i16 (VEXTRACTF128rr
5997 (v16i16 VR256:$src1),
5998 (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
5999 def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
6000 (v16i8 (VEXTRACTF128rr
6001 (v32i8 VR256:$src1),
6002 (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
6004 // Special COPY patterns
6005 def : Pat<(v4i32 (extract_subvector (v8i32 VR256:$src), (i32 0))),
6006 (v4i32 (EXTRACT_SUBREG (v8i32 VR256:$src), sub_xmm))>;
6007 def : Pat<(v4f32 (extract_subvector (v8f32 VR256:$src), (i32 0))),
6008 (v4f32 (EXTRACT_SUBREG (v8f32 VR256:$src), sub_xmm))>;
6010 def : Pat<(v2i64 (extract_subvector (v4i64 VR256:$src), (i32 0))),
6011 (v2i64 (EXTRACT_SUBREG (v4i64 VR256:$src), sub_xmm))>;
6012 def : Pat<(v2f64 (extract_subvector (v4f64 VR256:$src), (i32 0))),
6013 (v2f64 (EXTRACT_SUBREG (v4f64 VR256:$src), sub_xmm))>;
6015 def : Pat<(v8i16 (extract_subvector (v16i16 VR256:$src), (i32 0))),
6016 (v8i16 (EXTRACT_SUBREG (v16i16 VR256:$src), sub_xmm))>;
6017 def : Pat<(v16i8 (extract_subvector (v32i8 VR256:$src), (i32 0))),
6018 (v16i8 (EXTRACT_SUBREG (v32i8 VR256:$src), sub_xmm))>;
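// Illustrative sketch only: the 128-bit lane insert/extract instructions above
// are exposed as _mm256_insertf128_ps / _mm256_extractf128_ps (plus _pd and
// _si256 variants) in <immintrin.h>.
//
//   #include <immintrin.h>
//   __m256 swap_128bit_lanes(__m256 v) {
//     __m128 lo = _mm256_extractf128_ps(v, 0);
//     __m128 hi = _mm256_extractf128_ps(v, 1);
//     return _mm256_insertf128_ps(_mm256_castps128_ps256(hi), lo, 1);
//   }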
6021 //===----------------------------------------------------------------------===//
6022 // VMASKMOV - Conditional SIMD Packed Loads and Stores
6024 multiclass avx_movmask_rm<bits<8> opc_rm, bits<8> opc_mr, string OpcodeStr,
6025 Intrinsic IntLd, Intrinsic IntLd256,
6026 Intrinsic IntSt, Intrinsic IntSt256,
6027 PatFrag pf128, PatFrag pf256> {
6028 def rm : AVX8I<opc_rm, MRMSrcMem, (outs VR128:$dst),
6029 (ins VR128:$src1, f128mem:$src2),
6030 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6031 [(set VR128:$dst, (IntLd addr:$src2, VR128:$src1))]>,
6033 def Yrm : AVX8I<opc_rm, MRMSrcMem, (outs VR256:$dst),
6034 (ins VR256:$src1, f256mem:$src2),
6035 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6036 [(set VR256:$dst, (IntLd256 addr:$src2, VR256:$src1))]>,
6038 def mr : AVX8I<opc_mr, MRMDestMem, (outs),
6039 (ins f128mem:$dst, VR128:$src1, VR128:$src2),
6040 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6041 [(IntSt addr:$dst, VR128:$src1, VR128:$src2)]>, VEX_4V;
6042 def Ymr : AVX8I<opc_mr, MRMDestMem, (outs),
6043 (ins f256mem:$dst, VR256:$src1, VR256:$src2),
6044 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6045 [(IntSt256 addr:$dst, VR256:$src1, VR256:$src2)]>, VEX_4V;
6048 defm VMASKMOVPS : avx_movmask_rm<0x2C, 0x2E, "vmaskmovps",
6049 int_x86_avx_maskload_ps,
6050 int_x86_avx_maskload_ps_256,
6051 int_x86_avx_maskstore_ps,
6052 int_x86_avx_maskstore_ps_256,
6053 memopv4f32, memopv8f32>;
6054 defm VMASKMOVPD : avx_movmask_rm<0x2D, 0x2F, "vmaskmovpd",
6055 int_x86_avx_maskload_pd,
6056 int_x86_avx_maskload_pd_256,
6057 int_x86_avx_maskstore_pd,
6058 int_x86_avx_maskstore_pd_256,
6059 memopv2f64, memopv4f64>;
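// Illustrative sketch only: vmaskmovps/pd are exposed as _mm256_maskload_ps
// and _mm256_maskstore_ps (plus 128-bit and _pd variants) in <immintrin.h>;
// the sign bit of each mask element enables the corresponding lane.
//
//   #include <immintrin.h>
//   __m256 masked_load(const float *p, __m256i mask) {
//     // Lanes with a clear mask sign bit read as zero and do not fault.
//     return _mm256_maskload_ps(p, mask);
//   }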
6061 //===----------------------------------------------------------------------===//
6062 // VPERMIL - Permute Single and Double Floating-Point Values
6064 multiclass avx_permil<bits<8> opc_rm, bits<8> opc_rmi, string OpcodeStr,
6065 RegisterClass RC, X86MemOperand x86memop_f,
6066 X86MemOperand x86memop_i, PatFrag f_frag, PatFrag i_frag,
6067 Intrinsic IntVar, Intrinsic IntImm> {
6068 def rr : AVX8I<opc_rm, MRMSrcReg, (outs RC:$dst),
6069 (ins RC:$src1, RC:$src2),
6070 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6071 [(set RC:$dst, (IntVar RC:$src1, RC:$src2))]>, VEX_4V;
6072 def rm : AVX8I<opc_rm, MRMSrcMem, (outs RC:$dst),
6073 (ins RC:$src1, x86memop_i:$src2),
6074 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6075 [(set RC:$dst, (IntVar RC:$src1, (i_frag addr:$src2)))]>, VEX_4V;
6077 def ri : AVXAIi8<opc_rmi, MRMSrcReg, (outs RC:$dst),
6078 (ins RC:$src1, i8imm:$src2),
6079 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6080 [(set RC:$dst, (IntImm RC:$src1, imm:$src2))]>, VEX;
6081 def mi : AVXAIi8<opc_rmi, MRMSrcMem, (outs RC:$dst),
6082 (ins x86memop_f:$src1, i8imm:$src2),
6083 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6084 [(set RC:$dst, (IntImm (f_frag addr:$src1), imm:$src2))]>, VEX;
6087 defm VPERMILPS : avx_permil<0x0C, 0x04, "vpermilps", VR128, f128mem, i128mem,
6088 memopv4f32, memopv4i32,
6089 int_x86_avx_vpermilvar_ps,
6090 int_x86_avx_vpermil_ps>;
6091 defm VPERMILPSY : avx_permil<0x0C, 0x04, "vpermilps", VR256, f256mem, i256mem,
6092 memopv8f32, memopv8i32,
6093 int_x86_avx_vpermilvar_ps_256,
6094 int_x86_avx_vpermil_ps_256>;
6095 defm VPERMILPD : avx_permil<0x0D, 0x05, "vpermilpd", VR128, f128mem, i128mem,
6096 memopv2f64, memopv2i64,
6097 int_x86_avx_vpermilvar_pd,
6098 int_x86_avx_vpermil_pd>;
6099 defm VPERMILPDY : avx_permil<0x0D, 0x05, "vpermilpd", VR256, f256mem, i256mem,
6100 memopv4f64, memopv4i64,
6101 int_x86_avx_vpermilvar_pd_256,
6102 int_x86_avx_vpermil_pd_256>;
6104 def : Pat<(v8f32 (X86VPermilpsy VR256:$src1, (i8 imm:$imm))),
6105 (VPERMILPSYri VR256:$src1, imm:$imm)>;
6106 def : Pat<(v4f64 (X86VPermilpdy VR256:$src1, (i8 imm:$imm))),
6107 (VPERMILPDYri VR256:$src1, imm:$imm)>;
6108 def : Pat<(v8i32 (X86VPermilpsy VR256:$src1, (i8 imm:$imm))),
6109 (VPERMILPSYri VR256:$src1, imm:$imm)>;
6110 def : Pat<(v4i64 (X86VPermilpdy VR256:$src1, (i8 imm:$imm))),
6111 (VPERMILPDYri VR256:$src1, imm:$imm)>;
6113 //===----------------------------------------------------------------------===//
6114 // VPERM2F128 - Permute Floating-Point Values in 128-bit chunks
6116 def VPERM2F128rr : AVXAIi8<0x06, MRMSrcReg, (outs VR256:$dst),
6117 (ins VR256:$src1, VR256:$src2, i8imm:$src3),
6118 "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
6120 def VPERM2F128rm : AVXAIi8<0x06, MRMSrcMem, (outs VR256:$dst),
6121 (ins VR256:$src1, f256mem:$src2, i8imm:$src3),
6122 "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
6125 def : Pat<(int_x86_avx_vperm2f128_ps_256 VR256:$src1, VR256:$src2, imm:$src3),
6126 (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$src3)>;
6127 def : Pat<(int_x86_avx_vperm2f128_pd_256 VR256:$src1, VR256:$src2, imm:$src3),
6128 (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$src3)>;
6129 def : Pat<(int_x86_avx_vperm2f128_si_256 VR256:$src1, VR256:$src2, imm:$src3),
6130 (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$src3)>;
6132 def : Pat<(int_x86_avx_vperm2f128_ps_256
6133 VR256:$src1, (memopv8f32 addr:$src2), imm:$src3),
6134 (VPERM2F128rm VR256:$src1, addr:$src2, imm:$src3)>;
6135 def : Pat<(int_x86_avx_vperm2f128_pd_256
6136 VR256:$src1, (memopv4f64 addr:$src2), imm:$src3),
6137 (VPERM2F128rm VR256:$src1, addr:$src2, imm:$src3)>;
6138 def : Pat<(int_x86_avx_vperm2f128_si_256
6139 VR256:$src1, (memopv8i32 addr:$src2), imm:$src3),
6140 (VPERM2F128rm VR256:$src1, addr:$src2, imm:$src3)>;
6142 def : Pat<(v8f32 (X86VPerm2f128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
6143 (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
6144 def : Pat<(v8i32 (X86VPerm2f128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
6145 (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
6146 def : Pat<(v4i64 (X86VPerm2f128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
6147 (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
6148 def : Pat<(v4f64 (X86VPerm2f128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
6149 (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
6150 def : Pat<(v32i8 (X86VPerm2f128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
6151 (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
6152 def : Pat<(v16i16 (X86VPerm2f128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
6153 (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
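// Illustrative sketch only: vperm2f128 is exposed as
// _mm256_permute2f128_ps/_pd/_si256 in <immintrin.h>; each nibble of the
// immediate selects one 128-bit source lane for the corresponding result lane.
//
//   #include <immintrin.h>
//   __m256 swap_halves(__m256 v) {
//     // imm 0x01: result.lo = v.hi, result.hi = v.lo.
//     return _mm256_permute2f128_ps(v, v, 0x01);
//   }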
6155 //===----------------------------------------------------------------------===//
6156 // VZERO - Zero YMM registers
6158 let Defs = [YMM0, YMM1, YMM2, YMM3, YMM4, YMM5, YMM6, YMM7,
6159 YMM8, YMM9, YMM10, YMM11, YMM12, YMM13, YMM14, YMM15] in {
6160 // Zero All YMM registers
6161 def VZEROALL : I<0x77, RawFrm, (outs), (ins), "vzeroall",
6162 [(int_x86_avx_vzeroall)]>, TB, VEX, VEX_L, Requires<[HasAVX]>;
6166 // Zero Upper bits of YMM registers
6167 def VZEROUPPER : I<0x77, RawFrm, (outs), (ins), "vzeroupper",
6168 [(int_x86_avx_vzeroupper)]>, TB, VEX, Requires<[HasAVX]>;
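// Illustrative sketch only: vzeroupper/vzeroall are exposed as
// _mm256_zeroupper() and _mm256_zeroall() in <immintrin.h>. Compilers insert
// vzeroupper automatically at transitions from 256-bit AVX code to legacy SSE
// code to avoid the state-transition penalty; calling it by hand is rare.
//
//   #include <immintrin.h>
//   void end_of_avx_region(void) {
//     _mm256_zeroupper();   // selects VZEROUPPER
//   }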
6170 //===----------------------------------------------------------------------===//
6171 // SSE Shuffle pattern fragments
6172 //===----------------------------------------------------------------------===//
6174 // This is part of a "work in progress" refactoring. The idea is that all
6175 // vector shuffles will be translated into target-specific nodes and matched
6176 // directly by the patterns below (which can be changed along the way).
6177 // The AVX versions of some, but not all, of them are described here; more
6178 // should come in the near future.
6180 // Shuffle with MOVLHPD
6181 def : Pat<(v2f64 (X86Movlhpd VR128:$src1,
6182 (scalar_to_vector (loadf64 addr:$src2)))),
6183 (MOVHPDrm VR128:$src1, addr:$src2)>;
6185 // FIXME: Instead of X86Unpcklpd, there should be an X86Movlhpd here. The problem
6186 // arises during lowering, where the load fold cannot be recognized because the
6187 // load has two uses through a bitcast. One use disappears at isel time, and the
6188 // fold opportunity reappears.
6189 def : Pat<(v2f64 (X86Unpcklpd VR128:$src1,
6190 (scalar_to_vector (loadf64 addr:$src2)))),
6191 (MOVHPDrm VR128:$src1, addr:$src2)>;
6193 // Shuffle with MOVSS
6194 def : Pat<(v4f32 (X86Movss VR128:$src1, (scalar_to_vector FR32:$src2))),
6195 (MOVSSrr VR128:$src1, FR32:$src2)>;
6196 def : Pat<(v4i32 (X86Movss VR128:$src1, VR128:$src2)),
6197 (MOVSSrr (v4i32 VR128:$src1),
6198 (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_ss))>;
6199 def : Pat<(v4f32 (X86Movss VR128:$src1, VR128:$src2)),
6200 (MOVSSrr (v4f32 VR128:$src1),
6201 (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_ss))>;
6203 // Shuffle with MOVSD
6204 def : Pat<(v2f64 (X86Movsd VR128:$src1, (scalar_to_vector FR64:$src2))),
6205 (MOVSDrr VR128:$src1, FR64:$src2)>;
6206 def : Pat<(v2i64 (X86Movsd VR128:$src1, VR128:$src2)),
6207 (MOVSDrr (v2i64 VR128:$src1),
6208 (EXTRACT_SUBREG (v2i64 VR128:$src2), sub_sd))>;
6209 def : Pat<(v2f64 (X86Movsd VR128:$src1, VR128:$src2)),
6210 (MOVSDrr (v2f64 VR128:$src1),
6211 (EXTRACT_SUBREG (v2f64 VR128:$src2), sub_sd))>;
6212 def : Pat<(v4f32 (X86Movsd VR128:$src1, VR128:$src2)),
6213 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_sd))>;
6214 def : Pat<(v4i32 (X86Movsd VR128:$src1, VR128:$src2)),
6215 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_sd))>;
6217 // Shuffle with MOVLPS
6218 def : Pat<(v4f32 (X86Movlps VR128:$src1, (load addr:$src2))),
6219 (MOVLPSrm VR128:$src1, addr:$src2)>;
6220 def : Pat<(v4i32 (X86Movlps VR128:$src1, (load addr:$src2))),
6221 (MOVLPSrm VR128:$src1, addr:$src2)>;
6222 def : Pat<(X86Movlps VR128:$src1,
6223 (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
6224 (MOVLPSrm VR128:$src1, addr:$src2)>;
6225 // FIXME: Instead of an X86Movlps there should be an X86Movsd here. The problem
6226 // arises during lowering, where the load fold cannot be recognized because the
6227 // load has two uses through a bitcast. One use disappears at isel time, and the
6228 // fold opportunity reappears.
6229 def : Pat<(v4f32 (X86Movlps VR128:$src1, VR128:$src2)),
6230 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_sd))>;
6232 def : Pat<(v4i32 (X86Movlps VR128:$src1, VR128:$src2)),
6233 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_sd))>;
6235 // Shuffle with MOVLPD
6236 def : Pat<(v2f64 (X86Movlpd VR128:$src1, (load addr:$src2))),
6237 (MOVLPDrm VR128:$src1, addr:$src2)>;
6238 def : Pat<(v2i64 (X86Movlpd VR128:$src1, (load addr:$src2))),
6239 (MOVLPDrm VR128:$src1, addr:$src2)>;
6240 def : Pat<(v2f64 (X86Movlpd VR128:$src1,
6241 (scalar_to_vector (loadf64 addr:$src2)))),
6242 (MOVLPDrm VR128:$src1, addr:$src2)>;
6244 // Extra patterns to match stores with MOVHPS/PD and MOVLPS/PD
6245 def : Pat<(store (f64 (vector_extract
6246 (v2f64 (X86Unpckhps VR128:$src, (undef))), (iPTR 0))),addr:$dst),
6247 (MOVHPSmr addr:$dst, VR128:$src)>;
6248 def : Pat<(store (f64 (vector_extract
6249 (v2f64 (X86Unpckhpd VR128:$src, (undef))), (iPTR 0))),addr:$dst),
6250 (MOVHPDmr addr:$dst, VR128:$src)>;
6252 def : Pat<(store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)),addr:$src1),
6253 (MOVLPSmr addr:$src1, VR128:$src2)>;
6254 def : Pat<(store (v4i32 (X86Movlps
6255 (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2)), addr:$src1),
6256 (MOVLPSmr addr:$src1, VR128:$src2)>;
6258 def : Pat<(store (v2f64 (X86Movlpd (load addr:$src1), VR128:$src2)),addr:$src1),
6259 (MOVLPDmr addr:$src1, VR128:$src2)>;
6260 def : Pat<(store (v2i64 (X86Movlpd (load addr:$src1), VR128:$src2)),addr:$src1),
6261 (MOVLPDmr addr:$src1, VR128:$src2)>;