1 //====- X86InstrSSE.td - Describe the X86 Instruction Set --*- tablegen -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file describes the X86 SSE instruction set, defining the instructions,
11 // and properties of the instructions which are needed for code generation,
12 // machine code emission, and analysis.
14 //===----------------------------------------------------------------------===//
17 //===----------------------------------------------------------------------===//
18 // SSE 1 & 2 Instructions Classes
19 //===----------------------------------------------------------------------===//
21 /// sse12_fp_scalar - SSE 1 & 2 scalar instructions class
22 multiclass sse12_fp_scalar<bits<8> opc, string OpcodeStr, SDNode OpNode,
23 RegisterClass RC, X86MemOperand x86memop,
25 let isCommutable = 1 in {
26 def rr : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
28 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
29 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
30 [(set RC:$dst, (OpNode RC:$src1, RC:$src2))]>;
32 def rm : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
34 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
35 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
36 [(set RC:$dst, (OpNode RC:$src1, (load addr:$src2)))]>;
39 /// sse12_fp_scalar_int - SSE 1 & 2 scalar instructions intrinsics class
40 multiclass sse12_fp_scalar_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
41 string asm, string SSEVer, string FPSizeStr,
42 Operand memopr, ComplexPattern mem_cpat,
44 def rr_Int : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
46 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
47 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
48 [(set RC:$dst, (!cast<Intrinsic>(
49 !strconcat("int_x86_sse", SSEVer, "_", OpcodeStr, FPSizeStr))
50 RC:$src1, RC:$src2))]>;
51 def rm_Int : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, memopr:$src2),
53 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
54 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
55 [(set RC:$dst, (!cast<Intrinsic>(!strconcat("int_x86_sse",
56 SSEVer, "_", OpcodeStr, FPSizeStr))
57 RC:$src1, mem_cpat:$src2))]>;
60 /// sse12_fp_packed - SSE 1 & 2 packed instructions class
61 multiclass sse12_fp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
62 RegisterClass RC, ValueType vt,
63 X86MemOperand x86memop, PatFrag mem_frag,
64 Domain d, bit Is2Addr = 1> {
65 let isCommutable = 1 in
66 def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
68 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
69 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
70 [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], d>;
72 def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
74 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
75 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
76 [(set RC:$dst, (OpNode RC:$src1, (mem_frag addr:$src2)))], d>;
79 /// sse12_fp_packed_logical_rm - SSE 1 & 2 packed instructions class
80 multiclass sse12_fp_packed_logical_rm<bits<8> opc, RegisterClass RC, Domain d,
81 string OpcodeStr, X86MemOperand x86memop,
82 list<dag> pat_rr, list<dag> pat_rm,
84 let isCommutable = 1 in
85 def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
87 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
88 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
90 def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
92 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
93 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
97 /// sse12_fp_packed_int - SSE 1 & 2 packed instructions intrinsics class
98 multiclass sse12_fp_packed_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
99 string asm, string SSEVer, string FPSizeStr,
100 X86MemOperand x86memop, PatFrag mem_frag,
101 Domain d, bit Is2Addr = 1> {
102 def rr_Int : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
104 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
105 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
106 [(set RC:$dst, (!cast<Intrinsic>(
107 !strconcat("int_x86_", SSEVer, "_", OpcodeStr, FPSizeStr))
108 RC:$src1, RC:$src2))], d>;
109 def rm_Int : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1,x86memop:$src2),
111 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
112 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
113 [(set RC:$dst, (!cast<Intrinsic>(
114 !strconcat("int_x86_", SSEVer, "_", OpcodeStr, FPSizeStr))
115 RC:$src1, (mem_frag addr:$src2)))], d>;
118 //===----------------------------------------------------------------------===//
119 // SSE 1 & 2 - Move Instructions
120 //===----------------------------------------------------------------------===//
122 class sse12_move_rr<RegisterClass RC, ValueType vt, string asm> :
123 SI<0x10, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, RC:$src2), asm,
124 [(set (vt VR128:$dst), (movl VR128:$src1, (scalar_to_vector RC:$src2)))]>;
126 // Loading from memory automatically zeroing upper bits.
127 class sse12_move_rm<RegisterClass RC, X86MemOperand x86memop,
128 PatFrag mem_pat, string OpcodeStr> :
129 SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
130 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
131 [(set RC:$dst, (mem_pat addr:$src))]>;
133 // Move Instructions. Register-to-register movss/movsd is not used for FR32/64
134 // register copies because it's a partial register update; FsMOVAPSrr/FsMOVAPDrr
135 // is used instead. Register-to-register movss/movsd is not modeled as an
136 // INSERT_SUBREG because INSERT_SUBREG requires that the insert be implementable
137 // in terms of a copy, and just mentioned, we don't use movss/movsd for copies.
138 let isAsmParserOnly = 1 in {
139 def VMOVSSrr : sse12_move_rr<FR32, v4f32,
140 "movss\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XS, VEX_4V;
141 def VMOVSDrr : sse12_move_rr<FR64, v2f64,
142 "movsd\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XD, VEX_4V;
144 let canFoldAsLoad = 1, isReMaterializable = 1 in {
145 def VMOVSSrm : sse12_move_rm<FR32, f32mem, loadf32, "movss">, XS, VEX;
147 let AddedComplexity = 20 in
148 def VMOVSDrm : sse12_move_rm<FR64, f64mem, loadf64, "movsd">, XD, VEX;
152 let Constraints = "$src1 = $dst" in {
153 def MOVSSrr : sse12_move_rr<FR32, v4f32,
154 "movss\t{$src2, $dst|$dst, $src2}">, XS;
155 def MOVSDrr : sse12_move_rr<FR64, v2f64,
156 "movsd\t{$src2, $dst|$dst, $src2}">, XD;
159 let canFoldAsLoad = 1, isReMaterializable = 1 in {
160 def MOVSSrm : sse12_move_rm<FR32, f32mem, loadf32, "movss">, XS;
162 let AddedComplexity = 20 in
163 def MOVSDrm : sse12_move_rm<FR64, f64mem, loadf64, "movsd">, XD;
166 let AddedComplexity = 15 in {
167 // Extract the low 32-bit value from one vector and insert it into another.
168 def : Pat<(v4f32 (movl VR128:$src1, VR128:$src2)),
169 (MOVSSrr (v4f32 VR128:$src1),
170 (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_ss))>;
171 // Extract the low 64-bit value from one vector and insert it into another.
172 def : Pat<(v2f64 (movl VR128:$src1, VR128:$src2)),
173 (MOVSDrr (v2f64 VR128:$src1),
174 (EXTRACT_SUBREG (v2f64 VR128:$src2), sub_sd))>;
177 // Implicitly promote a 32-bit scalar to a vector.
178 def : Pat<(v4f32 (scalar_to_vector FR32:$src)),
179 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src, sub_ss)>;
180 // Implicitly promote a 64-bit scalar to a vector.
181 def : Pat<(v2f64 (scalar_to_vector FR64:$src)),
182 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src, sub_sd)>;
183 // Implicitly promote a 32-bit scalar to a vector.
184 def : Pat<(v8f32 (scalar_to_vector FR32:$src)),
185 (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)), FR32:$src, sub_ss)>;
186 // Implicitly promote a 64-bit scalar to a vector.
187 def : Pat<(v4f64 (scalar_to_vector FR64:$src)),
188 (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)), FR64:$src, sub_sd)>;
190 let AddedComplexity = 20 in {
191 // MOVSSrm zeros the high parts of the register; represent this
192 // with SUBREG_TO_REG.
193 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
194 (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
195 def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
196 (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
197 def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
198 (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
199 // MOVSDrm zeros the high parts of the register; represent this
200 // with SUBREG_TO_REG.
201 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
202 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
203 def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
204 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
205 def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
206 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
207 def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
208 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
209 def : Pat<(v2f64 (X86vzload addr:$src)),
210 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
213 // Store scalar value to memory.
214 def MOVSSmr : SSI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
215 "movss\t{$src, $dst|$dst, $src}",
216 [(store FR32:$src, addr:$dst)]>;
217 def MOVSDmr : SDI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
218 "movsd\t{$src, $dst|$dst, $src}",
219 [(store FR64:$src, addr:$dst)]>;
221 let isAsmParserOnly = 1 in {
222 def VMOVSSmr : SI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
223 "movss\t{$src, $dst|$dst, $src}",
224 [(store FR32:$src, addr:$dst)]>, XS, VEX;
225 def VMOVSDmr : SI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
226 "movsd\t{$src, $dst|$dst, $src}",
227 [(store FR64:$src, addr:$dst)]>, XD, VEX;
230 // Extract and store.
231 def : Pat<(store (f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
234 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
235 def : Pat<(store (f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
238 (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd))>;
240 // Move Aligned/Unaligned floating point values
241 multiclass sse12_mov_packed<bits<8> opc, RegisterClass RC,
242 X86MemOperand x86memop, PatFrag ld_frag,
243 string asm, Domain d,
244 bit IsReMaterializable = 1> {
245 let neverHasSideEffects = 1 in
246 def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
247 !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [], d>;
248 let canFoldAsLoad = 1, isReMaterializable = IsReMaterializable in
249 def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
250 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
251 [(set RC:$dst, (ld_frag addr:$src))], d>;
254 let isAsmParserOnly = 1 in {
255 defm VMOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
256 "movaps", SSEPackedSingle>, VEX;
257 defm VMOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
258 "movapd", SSEPackedDouble>, OpSize, VEX;
259 defm VMOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
260 "movups", SSEPackedSingle>, VEX;
261 defm VMOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
262 "movupd", SSEPackedDouble, 0>, OpSize, VEX;
264 defm VMOVAPSY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv8f32,
265 "movaps", SSEPackedSingle>, VEX;
266 defm VMOVAPDY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv4f64,
267 "movapd", SSEPackedDouble>, OpSize, VEX;
268 defm VMOVUPSY : sse12_mov_packed<0x10, VR256, f256mem, loadv8f32,
269 "movups", SSEPackedSingle>, VEX;
270 defm VMOVUPDY : sse12_mov_packed<0x10, VR256, f256mem, loadv4f64,
271 "movupd", SSEPackedDouble, 0>, OpSize, VEX;
273 defm MOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
274 "movaps", SSEPackedSingle>, TB;
275 defm MOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
276 "movapd", SSEPackedDouble>, TB, OpSize;
277 defm MOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
278 "movups", SSEPackedSingle>, TB;
279 defm MOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
280 "movupd", SSEPackedDouble, 0>, TB, OpSize;
282 let isAsmParserOnly = 1 in {
283 def VMOVAPSmr : VPSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
284 "movaps\t{$src, $dst|$dst, $src}",
285 [(alignedstore (v4f32 VR128:$src), addr:$dst)]>, VEX;
286 def VMOVAPDmr : VPDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
287 "movapd\t{$src, $dst|$dst, $src}",
288 [(alignedstore (v2f64 VR128:$src), addr:$dst)]>, VEX;
289 def VMOVUPSmr : VPSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
290 "movups\t{$src, $dst|$dst, $src}",
291 [(store (v4f32 VR128:$src), addr:$dst)]>, VEX;
292 def VMOVUPDmr : VPDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
293 "movupd\t{$src, $dst|$dst, $src}",
294 [(store (v2f64 VR128:$src), addr:$dst)]>, VEX;
295 def VMOVAPSYmr : VPSI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
296 "movaps\t{$src, $dst|$dst, $src}",
297 [(alignedstore (v8f32 VR256:$src), addr:$dst)]>, VEX;
298 def VMOVAPDYmr : VPDI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
299 "movapd\t{$src, $dst|$dst, $src}",
300 [(alignedstore (v4f64 VR256:$src), addr:$dst)]>, VEX;
301 def VMOVUPSYmr : VPSI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
302 "movups\t{$src, $dst|$dst, $src}",
303 [(store (v8f32 VR256:$src), addr:$dst)]>, VEX;
304 def VMOVUPDYmr : VPDI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
305 "movupd\t{$src, $dst|$dst, $src}",
306 [(store (v4f64 VR256:$src), addr:$dst)]>, VEX;
309 def : Pat<(int_x86_avx_loadu_ps_256 addr:$src), (VMOVUPSYrm addr:$src)>;
310 def : Pat<(int_x86_avx_storeu_ps_256 addr:$dst, VR256:$src),
311 (VMOVUPSYmr addr:$dst, VR256:$src)>;
313 def : Pat<(int_x86_avx_loadu_pd_256 addr:$src), (VMOVUPDYrm addr:$src)>;
314 def : Pat<(int_x86_avx_storeu_pd_256 addr:$dst, VR256:$src),
315 (VMOVUPDYmr addr:$dst, VR256:$src)>;
317 def MOVAPSmr : PSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
318 "movaps\t{$src, $dst|$dst, $src}",
319 [(alignedstore (v4f32 VR128:$src), addr:$dst)]>;
320 def MOVAPDmr : PDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
321 "movapd\t{$src, $dst|$dst, $src}",
322 [(alignedstore (v2f64 VR128:$src), addr:$dst)]>;
323 def MOVUPSmr : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
324 "movups\t{$src, $dst|$dst, $src}",
325 [(store (v4f32 VR128:$src), addr:$dst)]>;
326 def MOVUPDmr : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
327 "movupd\t{$src, $dst|$dst, $src}",
328 [(store (v2f64 VR128:$src), addr:$dst)]>;
330 // Intrinsic forms of MOVUPS/D load and store
331 let isAsmParserOnly = 1 in {
332 let canFoldAsLoad = 1, isReMaterializable = 1 in
333 def VMOVUPSrm_Int : VPSI<0x10, MRMSrcMem, (outs VR128:$dst),
335 "movups\t{$src, $dst|$dst, $src}",
336 [(set VR128:$dst, (int_x86_sse_loadu_ps addr:$src))]>, VEX;
337 def VMOVUPDrm_Int : VPDI<0x10, MRMSrcMem, (outs VR128:$dst),
339 "movupd\t{$src, $dst|$dst, $src}",
340 [(set VR128:$dst, (int_x86_sse2_loadu_pd addr:$src))]>, VEX;
341 def VMOVUPSmr_Int : VPSI<0x11, MRMDestMem, (outs),
342 (ins f128mem:$dst, VR128:$src),
343 "movups\t{$src, $dst|$dst, $src}",
344 [(int_x86_sse_storeu_ps addr:$dst, VR128:$src)]>, VEX;
345 def VMOVUPDmr_Int : VPDI<0x11, MRMDestMem, (outs),
346 (ins f128mem:$dst, VR128:$src),
347 "movupd\t{$src, $dst|$dst, $src}",
348 [(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>, VEX;
350 let canFoldAsLoad = 1, isReMaterializable = 1 in
351 def MOVUPSrm_Int : PSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
352 "movups\t{$src, $dst|$dst, $src}",
353 [(set VR128:$dst, (int_x86_sse_loadu_ps addr:$src))]>;
354 def MOVUPDrm_Int : PDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
355 "movupd\t{$src, $dst|$dst, $src}",
356 [(set VR128:$dst, (int_x86_sse2_loadu_pd addr:$src))]>;
358 def MOVUPSmr_Int : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
359 "movups\t{$src, $dst|$dst, $src}",
360 [(int_x86_sse_storeu_ps addr:$dst, VR128:$src)]>;
361 def MOVUPDmr_Int : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
362 "movupd\t{$src, $dst|$dst, $src}",
363 [(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>;
365 // Move Low/High packed floating point values
366 multiclass sse12_mov_hilo_packed<bits<8>opc, RegisterClass RC,
367 PatFrag mov_frag, string base_opc,
369 def PSrm : PI<opc, MRMSrcMem,
370 (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
371 !strconcat(base_opc, "s", asm_opr),
374 (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))))],
375 SSEPackedSingle>, TB;
377 def PDrm : PI<opc, MRMSrcMem,
378 (outs RC:$dst), (ins RC:$src1, f64mem:$src2),
379 !strconcat(base_opc, "d", asm_opr),
380 [(set RC:$dst, (v2f64 (mov_frag RC:$src1,
381 (scalar_to_vector (loadf64 addr:$src2)))))],
382 SSEPackedDouble>, TB, OpSize;
385 let isAsmParserOnly = 1, AddedComplexity = 20 in {
386 defm VMOVL : sse12_mov_hilo_packed<0x12, VR128, movlp, "movlp",
387 "\t{$src2, $src1, $dst|$dst, $src1, $src2}">, VEX_4V;
388 defm VMOVH : sse12_mov_hilo_packed<0x16, VR128, movlhps, "movhp",
389 "\t{$src2, $src1, $dst|$dst, $src1, $src2}">, VEX_4V;
391 let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
392 defm MOVL : sse12_mov_hilo_packed<0x12, VR128, movlp, "movlp",
393 "\t{$src2, $dst|$dst, $src2}">;
394 defm MOVH : sse12_mov_hilo_packed<0x16, VR128, movlhps, "movhp",
395 "\t{$src2, $dst|$dst, $src2}">;
398 let isAsmParserOnly = 1 in {
399 def VMOVLPSmr : VPSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
400 "movlps\t{$src, $dst|$dst, $src}",
401 [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
402 (iPTR 0))), addr:$dst)]>, VEX;
403 def VMOVLPDmr : VPDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
404 "movlpd\t{$src, $dst|$dst, $src}",
405 [(store (f64 (vector_extract (v2f64 VR128:$src),
406 (iPTR 0))), addr:$dst)]>, VEX;
408 def MOVLPSmr : PSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
409 "movlps\t{$src, $dst|$dst, $src}",
410 [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
411 (iPTR 0))), addr:$dst)]>;
412 def MOVLPDmr : PDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
413 "movlpd\t{$src, $dst|$dst, $src}",
414 [(store (f64 (vector_extract (v2f64 VR128:$src),
415 (iPTR 0))), addr:$dst)]>;
417 // v2f64 extract element 1 is always custom lowered to unpack high to low
418 // and extract element 0 so the non-store version isn't too horrible.
419 let isAsmParserOnly = 1 in {
420 def VMOVHPSmr : VPSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
421 "movhps\t{$src, $dst|$dst, $src}",
422 [(store (f64 (vector_extract
423 (unpckh (bc_v2f64 (v4f32 VR128:$src)),
424 (undef)), (iPTR 0))), addr:$dst)]>,
426 def VMOVHPDmr : VPDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
427 "movhpd\t{$src, $dst|$dst, $src}",
428 [(store (f64 (vector_extract
429 (v2f64 (unpckh VR128:$src, (undef))),
430 (iPTR 0))), addr:$dst)]>,
433 def MOVHPSmr : PSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
434 "movhps\t{$src, $dst|$dst, $src}",
435 [(store (f64 (vector_extract
436 (unpckh (bc_v2f64 (v4f32 VR128:$src)),
437 (undef)), (iPTR 0))), addr:$dst)]>;
438 def MOVHPDmr : PDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
439 "movhpd\t{$src, $dst|$dst, $src}",
440 [(store (f64 (vector_extract
441 (v2f64 (unpckh VR128:$src, (undef))),
442 (iPTR 0))), addr:$dst)]>;
444 let isAsmParserOnly = 1, AddedComplexity = 20 in {
445 def VMOVLHPSrr : VPSI<0x16, MRMSrcReg, (outs VR128:$dst),
446 (ins VR128:$src1, VR128:$src2),
447 "movlhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
449 (v4f32 (movlhps VR128:$src1, VR128:$src2)))]>,
451 def VMOVHLPSrr : VPSI<0x12, MRMSrcReg, (outs VR128:$dst),
452 (ins VR128:$src1, VR128:$src2),
453 "movhlps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
455 (v4f32 (movhlps VR128:$src1, VR128:$src2)))]>,
458 let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
459 def MOVLHPSrr : PSI<0x16, MRMSrcReg, (outs VR128:$dst),
460 (ins VR128:$src1, VR128:$src2),
461 "movlhps\t{$src2, $dst|$dst, $src2}",
463 (v4f32 (movlhps VR128:$src1, VR128:$src2)))]>;
464 def MOVHLPSrr : PSI<0x12, MRMSrcReg, (outs VR128:$dst),
465 (ins VR128:$src1, VR128:$src2),
466 "movhlps\t{$src2, $dst|$dst, $src2}",
468 (v4f32 (movhlps VR128:$src1, VR128:$src2)))]>;
471 def : Pat<(movlhps VR128:$src1, (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
472 (MOVHPSrm (v4i32 VR128:$src1), addr:$src2)>;
473 let AddedComplexity = 20 in {
474 def : Pat<(v4f32 (movddup VR128:$src, (undef))),
475 (MOVLHPSrr (v4f32 VR128:$src), (v4f32 VR128:$src))>;
476 def : Pat<(v2i64 (movddup VR128:$src, (undef))),
477 (MOVLHPSrr (v2i64 VR128:$src), (v2i64 VR128:$src))>;
480 //===----------------------------------------------------------------------===//
481 // SSE 1 & 2 - Conversion Instructions
482 //===----------------------------------------------------------------------===//
484 multiclass sse12_cvt_s<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
485 SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
487 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
488 [(set DstRC:$dst, (OpNode SrcRC:$src))]>;
489 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
490 [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))]>;
493 multiclass sse12_cvt_s_np<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
494 X86MemOperand x86memop, string asm> {
495 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
497 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
501 multiclass sse12_cvt_p<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
502 SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
503 string asm, Domain d> {
504 def rr : PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
505 [(set DstRC:$dst, (OpNode SrcRC:$src))], d>;
506 def rm : PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
507 [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))], d>;
510 multiclass sse12_vcvt_avx<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
511 X86MemOperand x86memop, string asm> {
512 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src),
513 !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>;
514 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
515 (ins DstRC:$src1, x86memop:$src),
516 !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>;
519 let isAsmParserOnly = 1 in {
520 defm VCVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
521 "cvttss2si\t{$src, $dst|$dst, $src}">, XS, VEX;
522 defm VCVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
523 "cvttss2si\t{$src, $dst|$dst, $src}">, XS, VEX,
525 defm VCVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
526 "cvttsd2si\t{$src, $dst|$dst, $src}">, XD, VEX;
527 defm VCVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
528 "cvttsd2si\t{$src, $dst|$dst, $src}">, XD,
531 // The assembler can recognize rr 64-bit instructions by seeing a rxx
532 // register, but the same isn't true when only using memory operands,
533 // provide other assembly "l" and "q" forms to address this explicitly
534 // where appropriate to do so.
535 defm VCVTSI2SS : sse12_vcvt_avx<0x2A, GR32, FR32, i32mem, "cvtsi2ss">, XS,
537 defm VCVTSI2SS64 : sse12_vcvt_avx<0x2A, GR64, FR32, i64mem, "cvtsi2ss{q}">, XS,
539 defm VCVTSI2SD : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sd">, XD,
541 defm VCVTSI2SDL : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sd{l}">, XD,
543 defm VCVTSI2SD64 : sse12_vcvt_avx<0x2A, GR64, FR64, i64mem, "cvtsi2sd{q}">, XD,
547 defm CVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
548 "cvttss2si\t{$src, $dst|$dst, $src}">, XS;
549 defm CVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
550 "cvttss2si{q}\t{$src, $dst|$dst, $src}">, XS, REX_W;
551 defm CVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
552 "cvttsd2si\t{$src, $dst|$dst, $src}">, XD;
553 defm CVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
554 "cvttsd2si{q}\t{$src, $dst|$dst, $src}">, XD, REX_W;
555 defm CVTSI2SS : sse12_cvt_s<0x2A, GR32, FR32, sint_to_fp, i32mem, loadi32,
556 "cvtsi2ss\t{$src, $dst|$dst, $src}">, XS;
557 defm CVTSI2SS64 : sse12_cvt_s<0x2A, GR64, FR32, sint_to_fp, i64mem, loadi64,
558 "cvtsi2ss{q}\t{$src, $dst|$dst, $src}">, XS, REX_W;
559 defm CVTSI2SD : sse12_cvt_s<0x2A, GR32, FR64, sint_to_fp, i32mem, loadi32,
560 "cvtsi2sd\t{$src, $dst|$dst, $src}">, XD;
561 defm CVTSI2SD64 : sse12_cvt_s<0x2A, GR64, FR64, sint_to_fp, i64mem, loadi64,
562 "cvtsi2sd{q}\t{$src, $dst|$dst, $src}">, XD, REX_W;
564 // Conversion Instructions Intrinsics - Match intrinsics which expect MM
565 // and/or XMM operand(s).
567 multiclass sse12_cvt_sint<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
568 Intrinsic Int, X86MemOperand x86memop, PatFrag ld_frag,
570 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
571 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
572 [(set DstRC:$dst, (Int SrcRC:$src))]>;
573 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
574 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
575 [(set DstRC:$dst, (Int (ld_frag addr:$src)))]>;
578 multiclass sse12_cvt_sint_3addr<bits<8> opc, RegisterClass SrcRC,
579 RegisterClass DstRC, Intrinsic Int, X86MemOperand x86memop,
580 PatFrag ld_frag, string asm, bit Is2Addr = 1> {
581 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src2),
583 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
584 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
585 [(set DstRC:$dst, (Int DstRC:$src1, SrcRC:$src2))]>;
586 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
587 (ins DstRC:$src1, x86memop:$src2),
589 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
590 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
591 [(set DstRC:$dst, (Int DstRC:$src1, (ld_frag addr:$src2)))]>;
594 let isAsmParserOnly = 1 in {
595 defm Int_VCVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si,
596 f32mem, load, "cvtss2si">, XS, VEX;
597 defm Int_VCVTSS2SI64 : sse12_cvt_sint<0x2D, VR128, GR64,
598 int_x86_sse_cvtss2si64, f32mem, load, "cvtss2si">,
600 defm Int_VCVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
601 f128mem, load, "cvtsd2si">, XD, VEX;
602 defm Int_VCVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64,
603 int_x86_sse2_cvtsd2si64, f128mem, load, "cvtsd2si">,
606 // FIXME: The asm matcher has a hack to ignore instructions with _Int and Int_
607 // Get rid of this hack or rename the intrinsics, there are several
608 // intructions that only match with the intrinsic form, why create duplicates
609 // to let them be recognized by the assembler?
610 defm VCVTSD2SI_alt : sse12_cvt_s_np<0x2D, FR64, GR32, f64mem,
611 "cvtsd2si\t{$src, $dst|$dst, $src}">, XD, VEX;
612 defm VCVTSD2SI64 : sse12_cvt_s_np<0x2D, FR64, GR64, f64mem,
613 "cvtsd2si\t{$src, $dst|$dst, $src}">, XD, VEX, VEX_W;
615 defm Int_CVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si,
616 f32mem, load, "cvtss2si">, XS;
617 defm Int_CVTSS2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse_cvtss2si64,
618 f32mem, load, "cvtss2si{q}">, XS, REX_W;
619 defm CVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
620 f128mem, load, "cvtsd2si{l}">, XD;
621 defm CVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse2_cvtsd2si64,
622 f128mem, load, "cvtsd2si{q}">, XD, REX_W;
625 let isAsmParserOnly = 1 in {
626 defm Int_VCVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
627 int_x86_sse_cvtsi2ss, i32mem, loadi32, "cvtsi2ss", 0>, XS, VEX_4V;
628 defm Int_VCVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
629 int_x86_sse_cvtsi642ss, i64mem, loadi64, "cvtsi2ss", 0>, XS, VEX_4V,
631 defm Int_VCVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
632 int_x86_sse2_cvtsi2sd, i32mem, loadi32, "cvtsi2sd", 0>, XD, VEX_4V;
633 defm Int_VCVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
634 int_x86_sse2_cvtsi642sd, i64mem, loadi64, "cvtsi2sd", 0>, XD,
638 let Constraints = "$src1 = $dst" in {
639 defm Int_CVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
640 int_x86_sse_cvtsi2ss, i32mem, loadi32,
642 defm Int_CVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
643 int_x86_sse_cvtsi642ss, i64mem, loadi64,
644 "cvtsi2ss{q}">, XS, REX_W;
645 defm Int_CVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
646 int_x86_sse2_cvtsi2sd, i32mem, loadi32,
648 defm Int_CVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
649 int_x86_sse2_cvtsi642sd, i64mem, loadi64,
650 "cvtsi2sd">, XD, REX_W;
655 // Aliases for intrinsics
656 let isAsmParserOnly = 1 in {
657 defm Int_VCVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
658 f32mem, load, "cvttss2si">, XS, VEX;
659 defm Int_VCVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
660 int_x86_sse_cvttss2si64, f32mem, load,
661 "cvttss2si">, XS, VEX, VEX_W;
662 defm Int_VCVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
663 f128mem, load, "cvttsd2si">, XD, VEX;
664 defm Int_VCVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
665 int_x86_sse2_cvttsd2si64, f128mem, load,
666 "cvttsd2si">, XD, VEX, VEX_W;
668 defm Int_CVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
669 f32mem, load, "cvttss2si">, XS;
670 defm Int_CVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
671 int_x86_sse_cvttss2si64, f32mem, load,
672 "cvttss2si{q}">, XS, REX_W;
673 defm Int_CVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
674 f128mem, load, "cvttsd2si">, XD;
675 defm Int_CVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
676 int_x86_sse2_cvttsd2si64, f128mem, load,
677 "cvttsd2si{q}">, XD, REX_W;
679 let isAsmParserOnly = 1, Pattern = []<dag> in {
680 defm VCVTSS2SI : sse12_cvt_s<0x2D, FR32, GR32, undef, f32mem, load,
681 "cvtss2si{l}\t{$src, $dst|$dst, $src}">, XS, VEX;
682 defm VCVTSS2SI64 : sse12_cvt_s<0x2D, FR32, GR64, undef, f32mem, load,
683 "cvtss2si\t{$src, $dst|$dst, $src}">, XS, VEX,
685 defm VCVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, undef, i128mem, load,
686 "cvtdq2ps\t{$src, $dst|$dst, $src}",
687 SSEPackedSingle>, TB, VEX;
688 defm VCVTDQ2PSY : sse12_cvt_p<0x5B, VR256, VR256, undef, i256mem, load,
689 "cvtdq2ps\t{$src, $dst|$dst, $src}",
690 SSEPackedSingle>, TB, VEX;
692 let Pattern = []<dag> in {
693 defm CVTSS2SI : sse12_cvt_s<0x2D, FR32, GR32, undef, f32mem, load /*dummy*/,
694 "cvtss2si{l}\t{$src, $dst|$dst, $src}">, XS;
695 defm CVTSS2SI64 : sse12_cvt_s<0x2D, FR32, GR64, undef, f32mem, load /*dummy*/,
696 "cvtss2si{q}\t{$src, $dst|$dst, $src}">, XS, REX_W;
697 defm CVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, undef, i128mem, load /*dummy*/,
698 "cvtdq2ps\t{$src, $dst|$dst, $src}",
699 SSEPackedSingle>, TB; /* PD SSE3 form is avaiable */
704 // Convert scalar double to scalar single
705 let isAsmParserOnly = 1 in {
706 def VCVTSD2SSrr : VSDI<0x5A, MRMSrcReg, (outs FR32:$dst),
707 (ins FR64:$src1, FR64:$src2),
708 "cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
710 def VCVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst),
711 (ins FR64:$src1, f64mem:$src2),
712 "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
713 []>, XD, Requires<[HasAVX, OptForSize]>, VEX_4V;
715 def CVTSD2SSrr : SDI<0x5A, MRMSrcReg, (outs FR32:$dst), (ins FR64:$src),
716 "cvtsd2ss\t{$src, $dst|$dst, $src}",
717 [(set FR32:$dst, (fround FR64:$src))]>;
718 def CVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst), (ins f64mem:$src),
719 "cvtsd2ss\t{$src, $dst|$dst, $src}",
720 [(set FR32:$dst, (fround (loadf64 addr:$src)))]>, XD,
721 Requires<[HasSSE2, OptForSize]>;
723 let isAsmParserOnly = 1 in
724 defm Int_VCVTSD2SS: sse12_cvt_sint_3addr<0x5A, VR128, VR128,
725 int_x86_sse2_cvtsd2ss, f64mem, load, "cvtsd2ss", 0>,
727 let Constraints = "$src1 = $dst" in
728 defm Int_CVTSD2SS: sse12_cvt_sint_3addr<0x5A, VR128, VR128,
729 int_x86_sse2_cvtsd2ss, f64mem, load, "cvtsd2ss">, XS;
731 // Convert scalar single to scalar double
732 let isAsmParserOnly = 1 in { // SSE2 instructions with XS prefix
733 def VCVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst),
734 (ins FR32:$src1, FR32:$src2),
735 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
736 []>, XS, Requires<[HasAVX]>, VEX_4V;
737 def VCVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst),
738 (ins FR32:$src1, f32mem:$src2),
739 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
740 []>, XS, VEX_4V, Requires<[HasAVX, OptForSize]>;
742 def CVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst), (ins FR32:$src),
743 "cvtss2sd\t{$src, $dst|$dst, $src}",
744 [(set FR64:$dst, (fextend FR32:$src))]>, XS,
746 def CVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst), (ins f32mem:$src),
747 "cvtss2sd\t{$src, $dst|$dst, $src}",
748 [(set FR64:$dst, (extloadf32 addr:$src))]>, XS,
749 Requires<[HasSSE2, OptForSize]>;
751 let isAsmParserOnly = 1 in {
752 def Int_VCVTSS2SDrr: I<0x5A, MRMSrcReg,
753 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
754 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
755 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
756 VR128:$src2))]>, XS, VEX_4V,
758 def Int_VCVTSS2SDrm: I<0x5A, MRMSrcMem,
759 (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2),
760 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
761 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
762 (load addr:$src2)))]>, XS, VEX_4V,
765 let Constraints = "$src1 = $dst" in { // SSE2 instructions with XS prefix
766 def Int_CVTSS2SDrr: I<0x5A, MRMSrcReg,
767 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
768 "cvtss2sd\t{$src2, $dst|$dst, $src2}",
769 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
772 def Int_CVTSS2SDrm: I<0x5A, MRMSrcMem,
773 (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2),
774 "cvtss2sd\t{$src2, $dst|$dst, $src2}",
775 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
776 (load addr:$src2)))]>, XS,
780 def : Pat<(extloadf32 addr:$src),
781 (CVTSS2SDrr (MOVSSrm addr:$src))>,
782 Requires<[HasSSE2, OptForSpeed]>;
784 // Convert doubleword to packed single/double fp
785 let isAsmParserOnly = 1 in { // SSE2 instructions without OpSize prefix
786 def Int_VCVTDQ2PSrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
787 "vcvtdq2ps\t{$src, $dst|$dst, $src}",
788 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))]>,
789 TB, VEX, Requires<[HasAVX]>;
790 def Int_VCVTDQ2PSrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
791 "vcvtdq2ps\t{$src, $dst|$dst, $src}",
792 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps
793 (bitconvert (memopv2i64 addr:$src))))]>,
794 TB, VEX, Requires<[HasAVX]>;
796 def Int_CVTDQ2PSrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
797 "cvtdq2ps\t{$src, $dst|$dst, $src}",
798 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))]>,
799 TB, Requires<[HasSSE2]>;
800 def Int_CVTDQ2PSrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
801 "cvtdq2ps\t{$src, $dst|$dst, $src}",
802 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps
803 (bitconvert (memopv2i64 addr:$src))))]>,
804 TB, Requires<[HasSSE2]>;
806 // FIXME: why the non-intrinsic version is described as SSE3?
807 let isAsmParserOnly = 1 in { // SSE2 instructions with XS prefix
808 def Int_VCVTDQ2PDrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
809 "vcvtdq2pd\t{$src, $dst|$dst, $src}",
810 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))]>,
811 XS, VEX, Requires<[HasAVX]>;
812 def Int_VCVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
813 "vcvtdq2pd\t{$src, $dst|$dst, $src}",
814 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd
815 (bitconvert (memopv2i64 addr:$src))))]>,
816 XS, VEX, Requires<[HasAVX]>;
818 def Int_CVTDQ2PDrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
819 "cvtdq2pd\t{$src, $dst|$dst, $src}",
820 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))]>,
821 XS, Requires<[HasSSE2]>;
822 def Int_CVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
823 "cvtdq2pd\t{$src, $dst|$dst, $src}",
824 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd
825 (bitconvert (memopv2i64 addr:$src))))]>,
826 XS, Requires<[HasSSE2]>;
829 // Convert packed single/double fp to doubleword
830 let isAsmParserOnly = 1 in {
831 def VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
832 "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
833 def VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
834 "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
835 def VCVTPS2DQYrr : VPDI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
836 "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
837 def VCVTPS2DQYrm : VPDI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
838 "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
840 def CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
841 "cvtps2dq\t{$src, $dst|$dst, $src}", []>;
842 def CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
843 "cvtps2dq\t{$src, $dst|$dst, $src}", []>;
845 let isAsmParserOnly = 1 in {
846 def Int_VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
847 "cvtps2dq\t{$src, $dst|$dst, $src}",
848 [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))]>,
850 def Int_VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst),
852 "cvtps2dq\t{$src, $dst|$dst, $src}",
853 [(set VR128:$dst, (int_x86_sse2_cvtps2dq
854 (memop addr:$src)))]>, VEX;
856 def Int_CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
857 "cvtps2dq\t{$src, $dst|$dst, $src}",
858 [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))]>;
859 def Int_CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
860 "cvtps2dq\t{$src, $dst|$dst, $src}",
861 [(set VR128:$dst, (int_x86_sse2_cvtps2dq
862 (memop addr:$src)))]>;
864 let isAsmParserOnly = 1 in { // SSE2 packed instructions with XD prefix
865 def Int_VCVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
866 "vcvtpd2dq\t{$src, $dst|$dst, $src}",
867 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
868 XD, VEX, Requires<[HasAVX]>;
869 def Int_VCVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
870 "vcvtpd2dq\t{$src, $dst|$dst, $src}",
871 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq
872 (memop addr:$src)))]>,
873 XD, VEX, Requires<[HasAVX]>;
875 def Int_CVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
876 "cvtpd2dq\t{$src, $dst|$dst, $src}",
877 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
878 XD, Requires<[HasSSE2]>;
879 def Int_CVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
880 "cvtpd2dq\t{$src, $dst|$dst, $src}",
881 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq
882 (memop addr:$src)))]>,
883 XD, Requires<[HasSSE2]>;
886 // Convert with truncation packed single/double fp to doubleword
887 let isAsmParserOnly = 1 in { // SSE2 packed instructions with XS prefix
888 def VCVTTPS2DQrr : VSSI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
889 "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
890 def VCVTTPS2DQrm : VSSI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
891 "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
892 def VCVTTPS2DQYrr : VSSI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
893 "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
894 def VCVTTPS2DQYrm : VSSI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
895 "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
897 def CVTTPS2DQrr : SSI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
898 "cvttps2dq\t{$src, $dst|$dst, $src}",
900 (int_x86_sse2_cvttps2dq VR128:$src))]>;
901 def CVTTPS2DQrm : SSI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
902 "cvttps2dq\t{$src, $dst|$dst, $src}",
904 (int_x86_sse2_cvttps2dq (memop addr:$src)))]>;
907 let isAsmParserOnly = 1 in {
908 def Int_VCVTTPS2DQrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
909 "vcvttps2dq\t{$src, $dst|$dst, $src}",
911 (int_x86_sse2_cvttps2dq VR128:$src))]>,
912 XS, VEX, Requires<[HasAVX]>;
913 def Int_VCVTTPS2DQrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
914 "vcvttps2dq\t{$src, $dst|$dst, $src}",
915 [(set VR128:$dst, (int_x86_sse2_cvttps2dq
916 (memop addr:$src)))]>,
917 XS, VEX, Requires<[HasAVX]>;
920 let isAsmParserOnly = 1 in {
921 def Int_VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst),
923 "cvttpd2dq\t{$src, $dst|$dst, $src}",
924 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))]>,
926 def Int_VCVTTPD2DQrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst),
928 "cvttpd2dq\t{$src, $dst|$dst, $src}",
929 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
930 (memop addr:$src)))]>, VEX;
932 def CVTTPD2DQrr : PDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
933 "cvttpd2dq\t{$src, $dst|$dst, $src}",
934 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))]>;
935 def CVTTPD2DQrm : PDI<0xE6, MRMSrcMem, (outs VR128:$dst),(ins f128mem:$src),
936 "cvttpd2dq\t{$src, $dst|$dst, $src}",
937 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
938 (memop addr:$src)))]>;
940 let isAsmParserOnly = 1 in {
941 // The assembler can recognize rr 256-bit instructions by seeing a ymm
942 // register, but the same isn't true when using memory operands instead.
943 // Provide other assembly rr and rm forms to address this explicitly.
944 def VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
945 "cvttpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
946 def VCVTTPD2DQXrYr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
947 "cvttpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
950 def VCVTTPD2DQXrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
951 "cvttpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
952 def VCVTTPD2DQXrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
953 "cvttpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
956 def VCVTTPD2DQYrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
957 "cvttpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX;
958 def VCVTTPD2DQYrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
959 "cvttpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
962 // Convert packed single to packed double
963 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
964 // SSE2 instructions without OpSize prefix
965 def VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
966 "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
967 def VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
968 "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
969 def VCVTPS2PDYrr : I<0x5A, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
970 "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
971 def VCVTPS2PDYrm : I<0x5A, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
972 "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
974 def CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
975 "cvtps2pd\t{$src, $dst|$dst, $src}", []>, TB;
976 def CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
977 "cvtps2pd\t{$src, $dst|$dst, $src}", []>, TB;
979 let isAsmParserOnly = 1 in {
980 def Int_VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
981 "vcvtps2pd\t{$src, $dst|$dst, $src}",
982 [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>,
983 VEX, Requires<[HasAVX]>;
984 def Int_VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
985 "vcvtps2pd\t{$src, $dst|$dst, $src}",
986 [(set VR128:$dst, (int_x86_sse2_cvtps2pd
987 (load addr:$src)))]>,
988 VEX, Requires<[HasAVX]>;
990 def Int_CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
991 "cvtps2pd\t{$src, $dst|$dst, $src}",
992 [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>,
993 TB, Requires<[HasSSE2]>;
994 def Int_CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
995 "cvtps2pd\t{$src, $dst|$dst, $src}",
996 [(set VR128:$dst, (int_x86_sse2_cvtps2pd
997 (load addr:$src)))]>,
998 TB, Requires<[HasSSE2]>;
1000 // Convert packed double to packed single
1001 let isAsmParserOnly = 1 in {
1002 // The assembler can recognize rr 256-bit instructions by seeing a ymm
1003 // register, but the same isn't true when using memory operands instead.
1004 // Provide other assembly rr and rm forms to address this explicitly.
1005 def VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1006 "cvtpd2ps\t{$src, $dst|$dst, $src}", []>, VEX;
1007 def VCVTPD2PSXrYr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
1008 "cvtpd2ps\t{$src, $dst|$dst, $src}", []>, VEX;
1011 def VCVTPD2PSXrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1012 "cvtpd2psx\t{$src, $dst|$dst, $src}", []>, VEX;
1013 def VCVTPD2PSXrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1014 "cvtpd2psx\t{$src, $dst|$dst, $src}", []>, VEX;
1017 def VCVTPD2PSYrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
1018 "cvtpd2psy\t{$src, $dst|$dst, $src}", []>, VEX;
1019 def VCVTPD2PSYrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
1020 "cvtpd2psy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
1022 def CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1023 "cvtpd2ps\t{$src, $dst|$dst, $src}", []>;
1024 def CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1025 "cvtpd2ps\t{$src, $dst|$dst, $src}", []>;
1028 let isAsmParserOnly = 1 in {
1029 def Int_VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1030 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1031 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>;
1032 def Int_VCVTPD2PSrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst),
1034 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1035 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps
1036 (memop addr:$src)))]>;
1038 def Int_CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1039 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1040 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>;
1041 def Int_CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1042 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1043 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps
1044 (memop addr:$src)))]>;
1046 // AVX 256-bit register conversion intrinsics
1047 // FIXME: Migrate SSE conversion intrinsics matching to use patterns as below
1048 // whenever possible to avoid declaring two versions of each one.
1049 def : Pat<(int_x86_avx_cvtdq2_ps_256 VR256:$src),
1050 (VCVTDQ2PSYrr VR256:$src)>;
1051 def : Pat<(int_x86_avx_cvtdq2_ps_256 (memopv8i32 addr:$src)),
1052 (VCVTDQ2PSYrm addr:$src)>;
1054 def : Pat<(int_x86_avx_cvt_pd2_ps_256 VR256:$src),
1055 (VCVTPD2PSYrr VR256:$src)>;
1056 def : Pat<(int_x86_avx_cvt_pd2_ps_256 (memopv4f64 addr:$src)),
1057 (VCVTPD2PSYrm addr:$src)>;
1059 def : Pat<(int_x86_avx_cvt_ps2dq_256 VR256:$src),
1060 (VCVTPS2DQYrr VR256:$src)>;
1061 def : Pat<(int_x86_avx_cvt_ps2dq_256 (memopv8f32 addr:$src)),
1062 (VCVTPS2DQYrm addr:$src)>;
1064 def : Pat<(int_x86_avx_cvt_ps2_pd_256 VR128:$src),
1065 (VCVTPS2PDYrr VR128:$src)>;
1066 def : Pat<(int_x86_avx_cvt_ps2_pd_256 (memopv4f32 addr:$src)),
1067 (VCVTPS2PDYrm addr:$src)>;
1069 def : Pat<(int_x86_avx_cvtt_pd2dq_256 VR256:$src),
1070 (VCVTTPD2DQYrr VR256:$src)>;
1071 def : Pat<(int_x86_avx_cvtt_pd2dq_256 (memopv4f64 addr:$src)),
1072 (VCVTTPD2DQYrm addr:$src)>;
1074 def : Pat<(int_x86_avx_cvtt_ps2dq_256 VR256:$src),
1075 (VCVTTPS2DQYrr VR256:$src)>;
1076 def : Pat<(int_x86_avx_cvtt_ps2dq_256 (memopv8f32 addr:$src)),
1077 (VCVTTPS2DQYrm addr:$src)>;
1079 //===----------------------------------------------------------------------===//
1080 // SSE 1 & 2 - Compare Instructions
1081 //===----------------------------------------------------------------------===//
1083 // sse12_cmp_scalar - sse 1 & 2 compare scalar instructions
1084 multiclass sse12_cmp_scalar<RegisterClass RC, X86MemOperand x86memop,
1085 string asm, string asm_alt> {
1086 def rr : SIi8<0xC2, MRMSrcReg,
1087 (outs RC:$dst), (ins RC:$src1, RC:$src, SSECC:$cc),
1090 def rm : SIi8<0xC2, MRMSrcMem,
1091 (outs RC:$dst), (ins RC:$src1, x86memop:$src, SSECC:$cc),
1093 // Accept explicit immediate argument form instead of comparison code.
1094 let isAsmParserOnly = 1 in {
1095 def rr_alt : SIi8<0xC2, MRMSrcReg,
1096 (outs RC:$dst), (ins RC:$src1, RC:$src, i8imm:$src2),
1099 def rm_alt : SIi8<0xC2, MRMSrcMem,
1100 (outs RC:$dst), (ins RC:$src1, x86memop:$src, i8imm:$src2),
1105 let neverHasSideEffects = 1, isAsmParserOnly = 1 in {
1106 defm VCMPSS : sse12_cmp_scalar<FR32, f32mem,
1107 "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}",
1108 "cmpss\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}">,
1110 defm VCMPSD : sse12_cmp_scalar<FR64, f64mem,
1111 "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}",
1112 "cmpsd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}">,
1116 let Constraints = "$src1 = $dst", neverHasSideEffects = 1 in {
1117 defm CMPSS : sse12_cmp_scalar<FR32, f32mem,
1118 "cmp${cc}ss\t{$src, $dst|$dst, $src}",
1119 "cmpss\t{$src2, $src, $dst|$dst, $src, $src2}">, XS;
1120 defm CMPSD : sse12_cmp_scalar<FR64, f64mem,
1121 "cmp${cc}sd\t{$src, $dst|$dst, $src}",
1122 "cmpsd\t{$src2, $src, $dst|$dst, $src, $src2}">, XD;
1125 multiclass sse12_cmp_scalar_int<RegisterClass RC, X86MemOperand x86memop,
1126 Intrinsic Int, string asm> {
1127 def rr : SIi8<0xC2, MRMSrcReg, (outs VR128:$dst),
1128 (ins VR128:$src1, VR128:$src, SSECC:$cc), asm,
1129 [(set VR128:$dst, (Int VR128:$src1,
1130 VR128:$src, imm:$cc))]>;
1131 def rm : SIi8<0xC2, MRMSrcMem, (outs VR128:$dst),
1132 (ins VR128:$src1, f32mem:$src, SSECC:$cc), asm,
1133 [(set VR128:$dst, (Int VR128:$src1,
1134 (load addr:$src), imm:$cc))]>;
1137 // Aliases to match intrinsics which expect XMM operand(s).
1138 let isAsmParserOnly = 1 in {
1139 defm Int_VCMPSS : sse12_cmp_scalar_int<VR128, f32mem, int_x86_sse_cmp_ss,
1140 "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}">,
1142 defm Int_VCMPSD : sse12_cmp_scalar_int<VR128, f64mem, int_x86_sse2_cmp_sd,
1143 "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}">,
1146 let Constraints = "$src1 = $dst" in {
1147 defm Int_CMPSS : sse12_cmp_scalar_int<VR128, f32mem, int_x86_sse_cmp_ss,
1148 "cmp${cc}ss\t{$src, $dst|$dst, $src}">, XS;
1149 defm Int_CMPSD : sse12_cmp_scalar_int<VR128, f64mem, int_x86_sse2_cmp_sd,
1150 "cmp${cc}sd\t{$src, $dst|$dst, $src}">, XD;
1154 // sse12_ord_cmp - Unordered/Ordered scalar fp compare and set EFLAGS
1155 multiclass sse12_ord_cmp<bits<8> opc, RegisterClass RC, SDNode OpNode,
1156 ValueType vt, X86MemOperand x86memop,
1157 PatFrag ld_frag, string OpcodeStr, Domain d> {
1158 def rr: PI<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
1159 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
1160 [(set EFLAGS, (OpNode (vt RC:$src1), RC:$src2))], d>;
1161 def rm: PI<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
1162 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
1163 [(set EFLAGS, (OpNode (vt RC:$src1),
1164 (ld_frag addr:$src2)))], d>;
1167 let Defs = [EFLAGS] in {
1168 let isAsmParserOnly = 1 in {
1169 defm VUCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
1170 "ucomiss", SSEPackedSingle>, VEX;
1171 defm VUCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
1172 "ucomisd", SSEPackedDouble>, OpSize, VEX;
1173 let Pattern = []<dag> in {
1174 defm VCOMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
1175 "comiss", SSEPackedSingle>, VEX;
1176 defm VCOMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
1177 "comisd", SSEPackedDouble>, OpSize, VEX;
1180 defm Int_VUCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
1181 load, "ucomiss", SSEPackedSingle>, VEX;
1182 defm Int_VUCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
1183 load, "ucomisd", SSEPackedDouble>, OpSize, VEX;
1185 defm Int_VCOMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem,
1186 load, "comiss", SSEPackedSingle>, VEX;
1187 defm Int_VCOMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem,
1188 load, "comisd", SSEPackedDouble>, OpSize, VEX;
1190 defm UCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
1191 "ucomiss", SSEPackedSingle>, TB;
1192 defm UCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
1193 "ucomisd", SSEPackedDouble>, TB, OpSize;
1195 let Pattern = []<dag> in {
1196 defm COMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
1197 "comiss", SSEPackedSingle>, TB;
1198 defm COMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
1199 "comisd", SSEPackedDouble>, TB, OpSize;
1202 defm Int_UCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
1203 load, "ucomiss", SSEPackedSingle>, TB;
1204 defm Int_UCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
1205 load, "ucomisd", SSEPackedDouble>, TB, OpSize;
1207 defm Int_COMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem, load,
1208 "comiss", SSEPackedSingle>, TB;
1209 defm Int_COMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem, load,
1210 "comisd", SSEPackedDouble>, TB, OpSize;
1211 } // Defs = [EFLAGS]
1213 // sse12_cmp_packed - sse 1 & 2 compared packed instructions
1214 multiclass sse12_cmp_packed<RegisterClass RC, X86MemOperand x86memop,
1215 Intrinsic Int, string asm, string asm_alt,
1217 def rri : PIi8<0xC2, MRMSrcReg,
1218 (outs RC:$dst), (ins RC:$src1, RC:$src, SSECC:$cc), asm,
1219 [(set RC:$dst, (Int RC:$src1, RC:$src, imm:$cc))], d>;
1220 def rmi : PIi8<0xC2, MRMSrcMem,
1221 (outs RC:$dst), (ins RC:$src1, f128mem:$src, SSECC:$cc), asm,
1222 [(set RC:$dst, (Int RC:$src1, (memop addr:$src), imm:$cc))], d>;
1223 // Accept explicit immediate argument form instead of comparison code.
1224 let isAsmParserOnly = 1 in {
1225 def rri_alt : PIi8<0xC2, MRMSrcReg,
1226 (outs RC:$dst), (ins RC:$src1, RC:$src, i8imm:$src2),
1228 def rmi_alt : PIi8<0xC2, MRMSrcMem,
1229 (outs RC:$dst), (ins RC:$src1, f128mem:$src, i8imm:$src2),
1234 let isAsmParserOnly = 1 in {
1235 defm VCMPPS : sse12_cmp_packed<VR128, f128mem, int_x86_sse_cmp_ps,
1236 "cmp${cc}ps\t{$src, $src1, $dst|$dst, $src1, $src}",
1237 "cmpps\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1238 SSEPackedSingle>, VEX_4V;
1239 defm VCMPPD : sse12_cmp_packed<VR128, f128mem, int_x86_sse2_cmp_pd,
1240 "cmp${cc}pd\t{$src, $src1, $dst|$dst, $src1, $src}",
1241 "cmppd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1242 SSEPackedDouble>, OpSize, VEX_4V;
1243 defm VCMPPSY : sse12_cmp_packed<VR256, f256mem, int_x86_avx_cmp_ps_256,
1244 "cmp${cc}ps\t{$src, $src1, $dst|$dst, $src1, $src}",
1245 "cmpps\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1246 SSEPackedSingle>, VEX_4V;
1247 defm VCMPPDY : sse12_cmp_packed<VR256, f256mem, int_x86_avx_cmp_pd_256,
1248 "cmp${cc}pd\t{$src, $src1, $dst|$dst, $src1, $src}",
1249 "cmppd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1250 SSEPackedDouble>, OpSize, VEX_4V;
1252 let Constraints = "$src1 = $dst" in {
1253 defm CMPPS : sse12_cmp_packed<VR128, f128mem, int_x86_sse_cmp_ps,
1254 "cmp${cc}ps\t{$src, $dst|$dst, $src}",
1255 "cmpps\t{$src2, $src, $dst|$dst, $src, $src2}",
1256 SSEPackedSingle>, TB;
1257 defm CMPPD : sse12_cmp_packed<VR128, f128mem, int_x86_sse2_cmp_pd,
1258 "cmp${cc}pd\t{$src, $dst|$dst, $src}",
1259 "cmppd\t{$src2, $src, $dst|$dst, $src, $src2}",
1260 SSEPackedDouble>, TB, OpSize;
1263 def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
1264 (CMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>;
1265 def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), (memop addr:$src2), imm:$cc)),
1266 (CMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>;
1267 def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
1268 (CMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
1269 def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), (memop addr:$src2), imm:$cc)),
1270 (CMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;
1272 //===----------------------------------------------------------------------===//
1273 // SSE 1 & 2 - Shuffle Instructions
1274 //===----------------------------------------------------------------------===//
1276 /// sse12_shuffle - sse 1 & 2 shuffle instructions
1277 multiclass sse12_shuffle<RegisterClass RC, X86MemOperand x86memop,
1278 ValueType vt, string asm, PatFrag mem_frag,
1279 Domain d, bit IsConvertibleToThreeAddress = 0> {
1280 def rmi : PIi8<0xC6, MRMSrcMem, (outs RC:$dst),
1281 (ins RC:$src1, f128mem:$src2, i8imm:$src3), asm,
1282 [(set RC:$dst, (vt (shufp:$src3
1283 RC:$src1, (mem_frag addr:$src2))))], d>;
1284 let isConvertibleToThreeAddress = IsConvertibleToThreeAddress in
1285 def rri : PIi8<0xC6, MRMSrcReg, (outs RC:$dst),
1286 (ins RC:$src1, RC:$src2, i8imm:$src3), asm,
1288 (vt (shufp:$src3 RC:$src1, RC:$src2)))], d>;
1291 let isAsmParserOnly = 1 in {
1292 defm VSHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
1293 "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
1294 memopv4f32, SSEPackedSingle>, VEX_4V;
1295 defm VSHUFPSY : sse12_shuffle<VR256, f256mem, v8f32,
1296 "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
1297 memopv8f32, SSEPackedSingle>, VEX_4V;
1298 defm VSHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
1299 "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src2, $src2, $src3}",
1300 memopv2f64, SSEPackedDouble>, OpSize, VEX_4V;
1301 defm VSHUFPDY : sse12_shuffle<VR256, f256mem, v4f64,
1302 "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src2, $src2, $src3}",
1303 memopv4f64, SSEPackedDouble>, OpSize, VEX_4V;
1306 let Constraints = "$src1 = $dst" in {
1307 defm SHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
1308 "shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}",
1309 memopv4f32, SSEPackedSingle, 1 /* cvt to pshufd */>,
1311 defm SHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
1312 "shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}",
1313 memopv2f64, SSEPackedDouble>, TB, OpSize;
1316 //===----------------------------------------------------------------------===//
1317 // SSE 1 & 2 - Unpack Instructions
1318 //===----------------------------------------------------------------------===//
1320 /// sse12_unpack_interleave - sse 1 & 2 unpack and interleave
1321 multiclass sse12_unpack_interleave<bits<8> opc, PatFrag OpNode, ValueType vt,
1322 PatFrag mem_frag, RegisterClass RC,
1323 X86MemOperand x86memop, string asm,
1325 def rr : PI<opc, MRMSrcReg,
1326 (outs RC:$dst), (ins RC:$src1, RC:$src2),
1328 (vt (OpNode RC:$src1, RC:$src2)))], d>;
1329 def rm : PI<opc, MRMSrcMem,
1330 (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
1332 (vt (OpNode RC:$src1,
1333 (mem_frag addr:$src2))))], d>;
1336 let AddedComplexity = 10 in {
1337 let isAsmParserOnly = 1 in {
1338 defm VUNPCKHPS: sse12_unpack_interleave<0x15, unpckh, v4f32, memopv4f32,
1339 VR128, f128mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1340 SSEPackedSingle>, VEX_4V;
1341 defm VUNPCKHPD: sse12_unpack_interleave<0x15, unpckh, v2f64, memopv2f64,
1342 VR128, f128mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1343 SSEPackedDouble>, OpSize, VEX_4V;
1344 defm VUNPCKLPS: sse12_unpack_interleave<0x14, unpckl, v4f32, memopv4f32,
1345 VR128, f128mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1346 SSEPackedSingle>, VEX_4V;
1347 defm VUNPCKLPD: sse12_unpack_interleave<0x14, unpckl, v2f64, memopv2f64,
1348 VR128, f128mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1349 SSEPackedDouble>, OpSize, VEX_4V;
1351 defm VUNPCKHPSY: sse12_unpack_interleave<0x15, unpckh, v8f32, memopv8f32,
1352 VR256, f256mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1353 SSEPackedSingle>, VEX_4V;
1354 defm VUNPCKHPDY: sse12_unpack_interleave<0x15, unpckh, v4f64, memopv4f64,
1355 VR256, f256mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1356 SSEPackedDouble>, OpSize, VEX_4V;
1357 defm VUNPCKLPSY: sse12_unpack_interleave<0x14, unpckl, v8f32, memopv8f32,
1358 VR256, f256mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1359 SSEPackedSingle>, VEX_4V;
1360 defm VUNPCKLPDY: sse12_unpack_interleave<0x14, unpckl, v4f64, memopv4f64,
1361 VR256, f256mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1362 SSEPackedDouble>, OpSize, VEX_4V;
1365 let Constraints = "$src1 = $dst" in {
1366 defm UNPCKHPS: sse12_unpack_interleave<0x15, unpckh, v4f32, memopv4f32,
1367 VR128, f128mem, "unpckhps\t{$src2, $dst|$dst, $src2}",
1368 SSEPackedSingle>, TB;
1369 defm UNPCKHPD: sse12_unpack_interleave<0x15, unpckh, v2f64, memopv2f64,
1370 VR128, f128mem, "unpckhpd\t{$src2, $dst|$dst, $src2}",
1371 SSEPackedDouble>, TB, OpSize;
1372 defm UNPCKLPS: sse12_unpack_interleave<0x14, unpckl, v4f32, memopv4f32,
1373 VR128, f128mem, "unpcklps\t{$src2, $dst|$dst, $src2}",
1374 SSEPackedSingle>, TB;
1375 defm UNPCKLPD: sse12_unpack_interleave<0x14, unpckl, v2f64, memopv2f64,
1376 VR128, f128mem, "unpcklpd\t{$src2, $dst|$dst, $src2}",
1377 SSEPackedDouble>, TB, OpSize;
1378 } // Constraints = "$src1 = $dst"
1379 } // AddedComplexity
1381 //===----------------------------------------------------------------------===//
1382 // SSE 1 & 2 - Extract Floating-Point Sign mask
1383 //===----------------------------------------------------------------------===//
1385 /// sse12_extr_sign_mask - sse 1 & 2 unpack and interleave
1386 multiclass sse12_extr_sign_mask<RegisterClass RC, Intrinsic Int, string asm,
1388 def rr32 : PI<0x50, MRMSrcReg, (outs GR32:$dst), (ins RC:$src),
1389 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
1390 [(set GR32:$dst, (Int RC:$src))], d>;
1391 def rr64 : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins RC:$src),
1392 !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [], d>, REX_W;
1396 defm MOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps, "movmskps",
1397 SSEPackedSingle>, TB;
1398 defm MOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd, "movmskpd",
1399 SSEPackedDouble>, TB, OpSize;
1401 let isAsmParserOnly = 1 in {
1402 defm VMOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps,
1403 "movmskps", SSEPackedSingle>, VEX;
1404 defm VMOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd,
1405 "movmskpd", SSEPackedDouble>, OpSize,
1407 defm VMOVMSKPSY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_ps_256,
1408 "movmskps", SSEPackedSingle>, VEX;
1409 defm VMOVMSKPDY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_pd_256,
1410 "movmskpd", SSEPackedDouble>, OpSize,
1414 def VMOVMSKPSr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
1415 "movmskps\t{$src, $dst|$dst, $src}", [], SSEPackedSingle>, VEX;
1416 def VMOVMSKPDr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
1417 "movmskpd\t{$src, $dst|$dst, $src}", [], SSEPackedDouble>, OpSize,
1419 def VMOVMSKPSYr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
1420 "movmskps\t{$src, $dst|$dst, $src}", [], SSEPackedSingle>, VEX;
1421 def VMOVMSKPDYr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
1422 "movmskpd\t{$src, $dst|$dst, $src}", [], SSEPackedDouble>, OpSize,
1426 //===----------------------------------------------------------------------===//
1427 // SSE 1 & 2 - Misc aliasing of packed SSE 1 & 2 instructions
1428 //===----------------------------------------------------------------------===//
1430 // Aliases of packed SSE1 & SSE2 instructions for scalar use. These all have
1431 // names that start with 'Fs'.
1433 // Alias instructions that map fld0 to pxor for sse.
1434 let isReMaterializable = 1, isAsCheapAsAMove = 1, isCodeGenOnly = 1,
1435 canFoldAsLoad = 1 in {
1436 // FIXME: Set encoding to pseudo!
1437 def FsFLD0SS : I<0xEF, MRMInitReg, (outs FR32:$dst), (ins), "",
1438 [(set FR32:$dst, fp32imm0)]>,
1439 Requires<[HasSSE1]>, TB, OpSize;
1440 def FsFLD0SD : I<0xEF, MRMInitReg, (outs FR64:$dst), (ins), "",
1441 [(set FR64:$dst, fpimm0)]>,
1442 Requires<[HasSSE2]>, TB, OpSize;
1445 // Alias instruction to do FR32 or FR64 reg-to-reg copy using movaps. Upper
1446 // bits are disregarded.
1447 let neverHasSideEffects = 1 in {
1448 def FsMOVAPSrr : PSI<0x28, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
1449 "movaps\t{$src, $dst|$dst, $src}", []>;
1450 def FsMOVAPDrr : PDI<0x28, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
1451 "movapd\t{$src, $dst|$dst, $src}", []>;
1454 // Alias instruction to load FR32 or FR64 from f128mem using movaps. Upper
1455 // bits are disregarded.
1456 let canFoldAsLoad = 1, isReMaterializable = 1 in {
1457 def FsMOVAPSrm : PSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
1458 "movaps\t{$src, $dst|$dst, $src}",
1459 [(set FR32:$dst, (alignedloadfsf32 addr:$src))]>;
1460 def FsMOVAPDrm : PDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
1461 "movapd\t{$src, $dst|$dst, $src}",
1462 [(set FR64:$dst, (alignedloadfsf64 addr:$src))]>;
1465 //===----------------------------------------------------------------------===//
1466 // SSE 1 & 2 - Logical Instructions
1467 //===----------------------------------------------------------------------===//
1469 /// sse12_fp_alias_pack_logical - SSE 1 & 2 aliased packed FP logical ops
1471 multiclass sse12_fp_alias_pack_logical<bits<8> opc, string OpcodeStr,
1473 let isAsmParserOnly = 1 in {
1474 defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
1475 FR32, f32, f128mem, memopfsf32, SSEPackedSingle, 0>, VEX_4V;
1477 defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
1478 FR64, f64, f128mem, memopfsf64, SSEPackedDouble, 0>, OpSize, VEX_4V;
1481 let Constraints = "$src1 = $dst" in {
1482 defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, FR32,
1483 f32, f128mem, memopfsf32, SSEPackedSingle>, TB;
1485 defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, FR64,
1486 f64, f128mem, memopfsf64, SSEPackedDouble>, TB, OpSize;
1490 // Alias bitwise logical operations using SSE logical ops on packed FP values.
1491 let mayLoad = 0 in {
1492 defm FsAND : sse12_fp_alias_pack_logical<0x54, "and", X86fand>;
1493 defm FsOR : sse12_fp_alias_pack_logical<0x56, "or", X86for>;
1494 defm FsXOR : sse12_fp_alias_pack_logical<0x57, "xor", X86fxor>;
1497 let neverHasSideEffects = 1, Pattern = []<dag>, isCommutable = 0 in
1498 defm FsANDN : sse12_fp_alias_pack_logical<0x55, "andn", undef>;
1500 /// sse12_fp_packed_logical - SSE 1 & 2 packed FP logical ops
1502 multiclass sse12_fp_packed_logical<bits<8> opc, string OpcodeStr,
1503 SDNode OpNode, int HasPat = 0,
1504 list<list<dag>> Pattern = []> {
1505 let isAsmParserOnly = 1, Pattern = []<dag> in {
1506 defm V#NAME#PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
1507 !strconcat(OpcodeStr, "ps"), f128mem,
1508 !if(HasPat, Pattern[0], // rr
1509 [(set VR128:$dst, (v2i64 (OpNode VR128:$src1,
1511 !if(HasPat, Pattern[2], // rm
1512 [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
1513 (memopv2i64 addr:$src2)))]), 0>,
1516 defm V#NAME#PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
1517 !strconcat(OpcodeStr, "pd"), f128mem,
1518 !if(HasPat, Pattern[1], // rr
1519 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1522 !if(HasPat, Pattern[3], // rm
1523 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1524 (memopv2i64 addr:$src2)))]), 0>,
1527 let Constraints = "$src1 = $dst" in {
1528 defm PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
1529 !strconcat(OpcodeStr, "ps"), f128mem,
1530 !if(HasPat, Pattern[0], // rr
1531 [(set VR128:$dst, (v2i64 (OpNode VR128:$src1,
1533 !if(HasPat, Pattern[2], // rm
1534 [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
1535 (memopv2i64 addr:$src2)))])>, TB;
1537 defm PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
1538 !strconcat(OpcodeStr, "pd"), f128mem,
1539 !if(HasPat, Pattern[1], // rr
1540 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1543 !if(HasPat, Pattern[3], // rm
1544 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1545 (memopv2i64 addr:$src2)))])>,
1550 /// sse12_fp_packed_logical_y - AVX 256-bit SSE 1 & 2 logical ops forms
1552 let isAsmParserOnly = 1 in {
1553 multiclass sse12_fp_packed_logical_y<bits<8> opc, string OpcodeStr> {
1554 defm PSY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedSingle,
1555 !strconcat(OpcodeStr, "ps"), f256mem, [], [], 0>, VEX_4V;
1557 defm PDY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedDouble,
1558 !strconcat(OpcodeStr, "pd"), f256mem, [], [], 0>, OpSize, VEX_4V;
1562 // AVX 256-bit packed logical ops forms
1563 defm VAND : sse12_fp_packed_logical_y<0x54, "and">;
1564 defm VOR : sse12_fp_packed_logical_y<0x56, "or">;
1565 defm VXOR : sse12_fp_packed_logical_y<0x57, "xor">;
1566 let isCommutable = 0 in
1567 defm VANDN : sse12_fp_packed_logical_y<0x55, "andn">;
1569 defm AND : sse12_fp_packed_logical<0x54, "and", and>;
1570 defm OR : sse12_fp_packed_logical<0x56, "or", or>;
1571 defm XOR : sse12_fp_packed_logical<0x57, "xor", xor>;
1572 let isCommutable = 0 in
1573 defm ANDN : sse12_fp_packed_logical<0x55, "andn", undef /* dummy */, 1, [
1575 [(set VR128:$dst, (v2i64 (and (xor VR128:$src1,
1576 (bc_v2i64 (v4i32 immAllOnesV))),
1579 [(set VR128:$dst, (and (vnot (bc_v2i64 (v2f64 VR128:$src1))),
1580 (bc_v2i64 (v2f64 VR128:$src2))))],
1582 [(set VR128:$dst, (v2i64 (and (xor (bc_v2i64 (v4f32 VR128:$src1)),
1583 (bc_v2i64 (v4i32 immAllOnesV))),
1584 (memopv2i64 addr:$src2))))],
1586 [(set VR128:$dst, (and (vnot (bc_v2i64 (v2f64 VR128:$src1))),
1587 (memopv2i64 addr:$src2)))]]>;
1589 //===----------------------------------------------------------------------===//
1590 // SSE 1 & 2 - Arithmetic Instructions
1591 //===----------------------------------------------------------------------===//
1593 /// basic_sse12_fp_binop_xxx - SSE 1 & 2 binops come in both scalar and
1596 /// In addition, we also have a special variant of the scalar form here to
1597 /// represent the associated intrinsic operation. This form is unlike the
1598 /// plain scalar form, in that it takes an entire vector (instead of a scalar)
1599 /// and leaves the top elements unmodified (therefore these cannot be commuted).
1601 /// These three forms can each be reg+reg or reg+mem.
1604 /// FIXME: once all 256-bit intrinsics are matched, cleanup and refactor those
1606 multiclass basic_sse12_fp_binop_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
1608 defm SS : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "ss"),
1609 OpNode, FR32, f32mem, Is2Addr>, XS;
1610 defm SD : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "sd"),
1611 OpNode, FR64, f64mem, Is2Addr>, XD;
1614 multiclass basic_sse12_fp_binop_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
1616 let mayLoad = 0 in {
1617 defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR128,
1618 v4f32, f128mem, memopv4f32, SSEPackedSingle, Is2Addr>, TB;
1619 defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR128,
1620 v2f64, f128mem, memopv2f64, SSEPackedDouble, Is2Addr>, TB, OpSize;
1624 multiclass basic_sse12_fp_binop_p_y<bits<8> opc, string OpcodeStr,
1626 let mayLoad = 0 in {
1627 defm PSY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR256,
1628 v8f32, f256mem, memopv8f32, SSEPackedSingle, 0>, TB;
1629 defm PDY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR256,
1630 v4f64, f256mem, memopv4f64, SSEPackedDouble, 0>, TB, OpSize;
1634 multiclass basic_sse12_fp_binop_s_int<bits<8> opc, string OpcodeStr,
1636 defm SS : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
1637 !strconcat(OpcodeStr, "ss"), "", "_ss", ssmem, sse_load_f32, Is2Addr>, XS;
1638 defm SD : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
1639 !strconcat(OpcodeStr, "sd"), "2", "_sd", sdmem, sse_load_f64, Is2Addr>, XD;
1642 multiclass basic_sse12_fp_binop_p_int<bits<8> opc, string OpcodeStr,
1644 defm PS : sse12_fp_packed_int<opc, OpcodeStr, VR128,
1645 !strconcat(OpcodeStr, "ps"), "sse", "_ps", f128mem, memopv4f32,
1646 SSEPackedSingle, Is2Addr>, TB;
1648 defm PD : sse12_fp_packed_int<opc, OpcodeStr, VR128,
1649 !strconcat(OpcodeStr, "pd"), "sse2", "_pd", f128mem, memopv2f64,
1650 SSEPackedDouble, Is2Addr>, TB, OpSize;
1653 multiclass basic_sse12_fp_binop_p_y_int<bits<8> opc, string OpcodeStr> {
1654 defm PSY : sse12_fp_packed_int<opc, OpcodeStr, VR256,
1655 !strconcat(OpcodeStr, "ps"), "avx", "_ps_256", f256mem, memopv8f32,
1656 SSEPackedSingle, 0>, TB;
1658 defm PDY : sse12_fp_packed_int<opc, OpcodeStr, VR256,
1659 !strconcat(OpcodeStr, "pd"), "avx", "_pd_256", f256mem, memopv4f64,
1660 SSEPackedDouble, 0>, TB, OpSize;
1663 // Binary Arithmetic instructions
1664 let isAsmParserOnly = 1 in {
1665 defm VADD : basic_sse12_fp_binop_s<0x58, "add", fadd, 0>,
1666 basic_sse12_fp_binop_s_int<0x58, "add", 0>,
1667 basic_sse12_fp_binop_p<0x58, "add", fadd, 0>,
1668 basic_sse12_fp_binop_p_y<0x58, "add", fadd>, VEX_4V;
1669 defm VMUL : basic_sse12_fp_binop_s<0x59, "mul", fmul, 0>,
1670 basic_sse12_fp_binop_s_int<0x59, "mul", 0>,
1671 basic_sse12_fp_binop_p<0x59, "mul", fmul, 0>,
1672 basic_sse12_fp_binop_p_y<0x59, "mul", fmul>, VEX_4V;
1674 let isCommutable = 0 in {
1675 defm VSUB : basic_sse12_fp_binop_s<0x5C, "sub", fsub, 0>,
1676 basic_sse12_fp_binop_s_int<0x5C, "sub", 0>,
1677 basic_sse12_fp_binop_p<0x5C, "sub", fsub, 0>,
1678 basic_sse12_fp_binop_p_y<0x5C, "sub", fsub>, VEX_4V;
1679 defm VDIV : basic_sse12_fp_binop_s<0x5E, "div", fdiv, 0>,
1680 basic_sse12_fp_binop_s_int<0x5E, "div", 0>,
1681 basic_sse12_fp_binop_p<0x5E, "div", fdiv, 0>,
1682 basic_sse12_fp_binop_p_y<0x5E, "div", fdiv>, VEX_4V;
1683 defm VMAX : basic_sse12_fp_binop_s<0x5F, "max", X86fmax, 0>,
1684 basic_sse12_fp_binop_s_int<0x5F, "max", 0>,
1685 basic_sse12_fp_binop_p<0x5F, "max", X86fmax, 0>,
1686 basic_sse12_fp_binop_p_int<0x5F, "max", 0>,
1687 basic_sse12_fp_binop_p_y<0x5F, "max", X86fmax>,
1688 basic_sse12_fp_binop_p_y_int<0x5F, "max">, VEX_4V;
1689 defm VMIN : basic_sse12_fp_binop_s<0x5D, "min", X86fmin, 0>,
1690 basic_sse12_fp_binop_s_int<0x5D, "min", 0>,
1691 basic_sse12_fp_binop_p<0x5D, "min", X86fmin, 0>,
1692 basic_sse12_fp_binop_p_int<0x5D, "min", 0>,
1693 basic_sse12_fp_binop_p_y_int<0x5D, "min">,
1694 basic_sse12_fp_binop_p_y<0x5D, "min", X86fmin>, VEX_4V;
1698 let Constraints = "$src1 = $dst" in {
1699 defm ADD : basic_sse12_fp_binop_s<0x58, "add", fadd>,
1700 basic_sse12_fp_binop_p<0x58, "add", fadd>,
1701 basic_sse12_fp_binop_s_int<0x58, "add">;
1702 defm MUL : basic_sse12_fp_binop_s<0x59, "mul", fmul>,
1703 basic_sse12_fp_binop_p<0x59, "mul", fmul>,
1704 basic_sse12_fp_binop_s_int<0x59, "mul">;
1706 let isCommutable = 0 in {
1707 defm SUB : basic_sse12_fp_binop_s<0x5C, "sub", fsub>,
1708 basic_sse12_fp_binop_p<0x5C, "sub", fsub>,
1709 basic_sse12_fp_binop_s_int<0x5C, "sub">;
1710 defm DIV : basic_sse12_fp_binop_s<0x5E, "div", fdiv>,
1711 basic_sse12_fp_binop_p<0x5E, "div", fdiv>,
1712 basic_sse12_fp_binop_s_int<0x5E, "div">;
1713 defm MAX : basic_sse12_fp_binop_s<0x5F, "max", X86fmax>,
1714 basic_sse12_fp_binop_p<0x5F, "max", X86fmax>,
1715 basic_sse12_fp_binop_s_int<0x5F, "max">,
1716 basic_sse12_fp_binop_p_int<0x5F, "max">;
1717 defm MIN : basic_sse12_fp_binop_s<0x5D, "min", X86fmin>,
1718 basic_sse12_fp_binop_p<0x5D, "min", X86fmin>,
1719 basic_sse12_fp_binop_s_int<0x5D, "min">,
1720 basic_sse12_fp_binop_p_int<0x5D, "min">;
1725 /// In addition, we also have a special variant of the scalar form here to
1726 /// represent the associated intrinsic operation. This form is unlike the
1727 /// plain scalar form, in that it takes an entire vector (instead of a
1728 /// scalar) and leaves the top elements undefined.
1730 /// And, we have a special variant form for a full-vector intrinsic form.
1732 /// sse1_fp_unop_s - SSE1 unops in scalar form.
1733 multiclass sse1_fp_unop_s<bits<8> opc, string OpcodeStr,
1734 SDNode OpNode, Intrinsic F32Int> {
1735 def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
1736 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
1737 [(set FR32:$dst, (OpNode FR32:$src))]>;
1738 // For scalar unary operations, fold a load into the operation
1739 // only in OptForSize mode. It eliminates an instruction, but it also
1740 // eliminates a whole-register clobber (the load), so it introduces a
1741 // partial register update condition.
1742 def SSm : I<opc, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
1743 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
1744 [(set FR32:$dst, (OpNode (load addr:$src)))]>, XS,
1745 Requires<[HasSSE1, OptForSize]>;
1746 def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1747 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
1748 [(set VR128:$dst, (F32Int VR128:$src))]>;
1749 def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins ssmem:$src),
1750 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
1751 [(set VR128:$dst, (F32Int sse_load_f32:$src))]>;
1754 /// sse1_fp_unop_s_avx - AVX SSE1 unops in scalar form.
1755 multiclass sse1_fp_unop_s_avx<bits<8> opc, string OpcodeStr,
1756 SDNode OpNode, Intrinsic F32Int> {
1757 def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
1758 !strconcat(OpcodeStr,
1759 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
1760 def SSm : I<opc, MRMSrcMem, (outs FR32:$dst), (ins FR32:$src1, f32mem:$src2),
1761 !strconcat(OpcodeStr,
1762 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1763 []>, XS, Requires<[HasAVX, OptForSize]>;
1764 def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1765 !strconcat(OpcodeStr,
1766 "ss\t{$src, $dst, $dst|$dst, $dst, $src}"),
1767 [(set VR128:$dst, (F32Int VR128:$src))]>;
1768 def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins ssmem:$src),
1769 !strconcat(OpcodeStr,
1770 "ss\t{$src, $dst, $dst|$dst, $dst, $src}"),
1771 [(set VR128:$dst, (F32Int sse_load_f32:$src))]>;
1774 /// sse1_fp_unop_p - SSE1 unops in packed form.
1775 multiclass sse1_fp_unop_p<bits<8> opc, string OpcodeStr, SDNode OpNode> {
1776 def PSr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1777 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1778 [(set VR128:$dst, (v4f32 (OpNode VR128:$src)))]>;
1779 def PSm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1780 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1781 [(set VR128:$dst, (OpNode (memopv4f32 addr:$src)))]>;
1784 /// sse1_fp_unop_p_y - AVX 256-bit SSE1 unops in packed form.
1785 multiclass sse1_fp_unop_p_y<bits<8> opc, string OpcodeStr, SDNode OpNode> {
1786 def PSYr : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
1787 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1788 [(set VR256:$dst, (v8f32 (OpNode VR256:$src)))]>;
1789 def PSYm : PSI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
1790 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1791 [(set VR256:$dst, (OpNode (memopv8f32 addr:$src)))]>;
1794 /// sse1_fp_unop_p_int - SSE1 intrinsics unops in packed forms.
1795 multiclass sse1_fp_unop_p_int<bits<8> opc, string OpcodeStr,
1796 Intrinsic V4F32Int> {
1797 def PSr_Int : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1798 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1799 [(set VR128:$dst, (V4F32Int VR128:$src))]>;
1800 def PSm_Int : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1801 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1802 [(set VR128:$dst, (V4F32Int (memopv4f32 addr:$src)))]>;
1805 /// sse1_fp_unop_p_y_int - AVX 256-bit intrinsics unops in packed forms.
1806 multiclass sse1_fp_unop_p_y_int<bits<8> opc, string OpcodeStr,
1807 Intrinsic V4F32Int> {
1808 def PSYr_Int : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
1809 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1810 [(set VR256:$dst, (V4F32Int VR256:$src))]>;
1811 def PSYm_Int : PSI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
1812 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1813 [(set VR256:$dst, (V4F32Int (memopv8f32 addr:$src)))]>;
1816 /// sse2_fp_unop_s - SSE2 unops in scalar form.
1817 multiclass sse2_fp_unop_s<bits<8> opc, string OpcodeStr,
1818 SDNode OpNode, Intrinsic F64Int> {
1819 def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
1820 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1821 [(set FR64:$dst, (OpNode FR64:$src))]>;
1822 // See the comments in sse1_fp_unop_s for why this is OptForSize.
1823 def SDm : I<opc, MRMSrcMem, (outs FR64:$dst), (ins f64mem:$src),
1824 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1825 [(set FR64:$dst, (OpNode (load addr:$src)))]>, XD,
1826 Requires<[HasSSE2, OptForSize]>;
1827 def SDr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1828 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1829 [(set VR128:$dst, (F64Int VR128:$src))]>;
1830 def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins sdmem:$src),
1831 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1832 [(set VR128:$dst, (F64Int sse_load_f64:$src))]>;
1835 /// sse2_fp_unop_s_avx - AVX SSE2 unops in scalar form.
1836 multiclass sse2_fp_unop_s_avx<bits<8> opc, string OpcodeStr,
1837 SDNode OpNode, Intrinsic F64Int> {
1838 def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
1839 !strconcat(OpcodeStr,
1840 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
1841 def SDm : SDI<opc, MRMSrcMem, (outs FR64:$dst),
1842 (ins FR64:$src1, f64mem:$src2),
1843 !strconcat(OpcodeStr,
1844 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
1845 def SDr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1846 !strconcat(OpcodeStr, "sd\t{$src, $dst, $dst|$dst, $dst, $src}"),
1847 [(set VR128:$dst, (F64Int VR128:$src))]>;
1848 def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins sdmem:$src),
1849 !strconcat(OpcodeStr, "sd\t{$src, $dst, $dst|$dst, $dst, $src}"),
1850 [(set VR128:$dst, (F64Int sse_load_f64:$src))]>;
1853 /// sse2_fp_unop_p - SSE2 unops in vector forms.
1854 multiclass sse2_fp_unop_p<bits<8> opc, string OpcodeStr,
1856 def PDr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1857 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1858 [(set VR128:$dst, (v2f64 (OpNode VR128:$src)))]>;
1859 def PDm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1860 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1861 [(set VR128:$dst, (OpNode (memopv2f64 addr:$src)))]>;
1864 /// sse2_fp_unop_p_y - AVX SSE2 256-bit unops in vector forms.
1865 multiclass sse2_fp_unop_p_y<bits<8> opc, string OpcodeStr, SDNode OpNode> {
1866 def PDYr : PDI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
1867 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1868 [(set VR256:$dst, (v4f64 (OpNode VR256:$src)))]>;
1869 def PDYm : PDI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
1870 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1871 [(set VR256:$dst, (OpNode (memopv4f64 addr:$src)))]>;
1874 /// sse2_fp_unop_p_int - SSE2 intrinsic unops in vector forms.
1875 multiclass sse2_fp_unop_p_int<bits<8> opc, string OpcodeStr,
1876 Intrinsic V2F64Int> {
1877 def PDr_Int : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1878 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1879 [(set VR128:$dst, (V2F64Int VR128:$src))]>;
1880 def PDm_Int : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1881 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1882 [(set VR128:$dst, (V2F64Int (memopv2f64 addr:$src)))]>;
1885 /// sse2_fp_unop_p_y_int - AVX 256-bit intrinsic unops in vector forms.
1886 multiclass sse2_fp_unop_p_y_int<bits<8> opc, string OpcodeStr,
1887 Intrinsic V2F64Int> {
1888 def PDYr_Int : PDI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
1889 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1890 [(set VR256:$dst, (V2F64Int VR256:$src))]>;
1891 def PDYm_Int : PDI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
1892 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1893 [(set VR256:$dst, (V2F64Int (memopv4f64 addr:$src)))]>;
1896 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
1898 defm VSQRT : sse1_fp_unop_s_avx<0x51, "vsqrt", fsqrt, int_x86_sse_sqrt_ss>,
1899 sse2_fp_unop_s_avx<0x51, "vsqrt", fsqrt, int_x86_sse2_sqrt_sd>,
1902 defm VSQRT : sse1_fp_unop_p<0x51, "vsqrt", fsqrt>,
1903 sse2_fp_unop_p<0x51, "vsqrt", fsqrt>,
1904 sse1_fp_unop_p_y<0x51, "vsqrt", fsqrt>,
1905 sse2_fp_unop_p_y<0x51, "vsqrt", fsqrt>,
1906 sse1_fp_unop_p_int<0x51, "vsqrt", int_x86_sse_sqrt_ps>,
1907 sse2_fp_unop_p_int<0x51, "vsqrt", int_x86_sse2_sqrt_pd>,
1908 sse1_fp_unop_p_y_int<0x51, "vsqrt", int_x86_avx_sqrt_ps_256>,
1909 sse2_fp_unop_p_y_int<0x51, "vsqrt", int_x86_avx_sqrt_pd_256>,
1912 // Reciprocal approximations. Note that these typically require refinement
1913 // in order to obtain suitable precision.
1914 defm VRSQRT : sse1_fp_unop_s_avx<0x52, "vrsqrt", X86frsqrt,
1915 int_x86_sse_rsqrt_ss>, VEX_4V;
1916 defm VRSQRT : sse1_fp_unop_p<0x52, "vrsqrt", X86frsqrt>,
1917 sse1_fp_unop_p_y<0x52, "vrsqrt", X86frsqrt>,
1918 sse1_fp_unop_p_y_int<0x52, "vrsqrt", int_x86_avx_rsqrt_ps_256>,
1919 sse1_fp_unop_p_int<0x52, "vrsqrt", int_x86_sse_rsqrt_ps>, VEX;
1921 defm VRCP : sse1_fp_unop_s_avx<0x53, "vrcp", X86frcp, int_x86_sse_rcp_ss>,
1923 defm VRCP : sse1_fp_unop_p<0x53, "vrcp", X86frcp>,
1924 sse1_fp_unop_p_y<0x53, "vrcp", X86frcp>,
1925 sse1_fp_unop_p_y_int<0x53, "vrcp", int_x86_avx_rcp_ps_256>,
1926 sse1_fp_unop_p_int<0x53, "vrcp", int_x86_sse_rcp_ps>, VEX;
1930 defm SQRT : sse1_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse_sqrt_ss>,
1931 sse1_fp_unop_p<0x51, "sqrt", fsqrt>,
1932 sse1_fp_unop_p_int<0x51, "sqrt", int_x86_sse_sqrt_ps>,
1933 sse2_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse2_sqrt_sd>,
1934 sse2_fp_unop_p<0x51, "sqrt", fsqrt>,
1935 sse2_fp_unop_p_int<0x51, "sqrt", int_x86_sse2_sqrt_pd>;
1937 // Reciprocal approximations. Note that these typically require refinement
1938 // in order to obtain suitable precision.
1939 defm RSQRT : sse1_fp_unop_s<0x52, "rsqrt", X86frsqrt, int_x86_sse_rsqrt_ss>,
1940 sse1_fp_unop_p<0x52, "rsqrt", X86frsqrt>,
1941 sse1_fp_unop_p_int<0x52, "rsqrt", int_x86_sse_rsqrt_ps>;
1942 defm RCP : sse1_fp_unop_s<0x53, "rcp", X86frcp, int_x86_sse_rcp_ss>,
1943 sse1_fp_unop_p<0x53, "rcp", X86frcp>,
1944 sse1_fp_unop_p_int<0x53, "rcp", int_x86_sse_rcp_ps>;
1946 // There is no f64 version of the reciprocal approximation instructions.
1948 //===----------------------------------------------------------------------===//
1949 // SSE 1 & 2 - Non-temporal stores
1950 //===----------------------------------------------------------------------===//
1952 let isAsmParserOnly = 1 in {
1953 def VMOVNTPSmr_Int : VPSI<0x2B, MRMDestMem, (outs),
1954 (ins i128mem:$dst, VR128:$src),
1955 "movntps\t{$src, $dst|$dst, $src}",
1956 [(int_x86_sse_movnt_ps addr:$dst, VR128:$src)]>, VEX;
1957 def VMOVNTPDmr_Int : VPDI<0x2B, MRMDestMem, (outs),
1958 (ins i128mem:$dst, VR128:$src),
1959 "movntpd\t{$src, $dst|$dst, $src}",
1960 [(int_x86_sse2_movnt_pd addr:$dst, VR128:$src)]>, VEX;
1962 let ExeDomain = SSEPackedInt in
1963 def VMOVNTDQmr_Int : VPDI<0xE7, MRMDestMem, (outs),
1964 (ins f128mem:$dst, VR128:$src),
1965 "movntdq\t{$src, $dst|$dst, $src}",
1966 [(int_x86_sse2_movnt_dq addr:$dst, VR128:$src)]>, VEX;
1968 let AddedComplexity = 400 in { // Prefer non-temporal versions
1969 def VMOVNTPSmr : VPSI<0x2B, MRMDestMem, (outs),
1970 (ins f128mem:$dst, VR128:$src),
1971 "movntps\t{$src, $dst|$dst, $src}",
1972 [(alignednontemporalstore (v4f32 VR128:$src),
1974 def VMOVNTPDmr : VPDI<0x2B, MRMDestMem, (outs),
1975 (ins f128mem:$dst, VR128:$src),
1976 "movntpd\t{$src, $dst|$dst, $src}",
1977 [(alignednontemporalstore (v2f64 VR128:$src),
1979 def VMOVNTDQ_64mr : VPDI<0xE7, MRMDestMem, (outs),
1980 (ins f128mem:$dst, VR128:$src),
1981 "movntdq\t{$src, $dst|$dst, $src}",
1982 [(alignednontemporalstore (v2f64 VR128:$src),
1984 let ExeDomain = SSEPackedInt in
1985 def VMOVNTDQmr : VPDI<0xE7, MRMDestMem, (outs),
1986 (ins f128mem:$dst, VR128:$src),
1987 "movntdq\t{$src, $dst|$dst, $src}",
1988 [(alignednontemporalstore (v4f32 VR128:$src),
1991 def VMOVNTPSYmr : VPSI<0x2B, MRMDestMem, (outs),
1992 (ins f256mem:$dst, VR256:$src),
1993 "movntps\t{$src, $dst|$dst, $src}",
1994 [(alignednontemporalstore (v8f32 VR256:$src),
1996 def VMOVNTPDYmr : VPDI<0x2B, MRMDestMem, (outs),
1997 (ins f256mem:$dst, VR256:$src),
1998 "movntpd\t{$src, $dst|$dst, $src}",
1999 [(alignednontemporalstore (v4f64 VR256:$src),
2001 def VMOVNTDQY_64mr : VPDI<0xE7, MRMDestMem, (outs),
2002 (ins f256mem:$dst, VR256:$src),
2003 "movntdq\t{$src, $dst|$dst, $src}",
2004 [(alignednontemporalstore (v4f64 VR256:$src),
2006 let ExeDomain = SSEPackedInt in
2007 def VMOVNTDQYmr : VPDI<0xE7, MRMDestMem, (outs),
2008 (ins f256mem:$dst, VR256:$src),
2009 "movntdq\t{$src, $dst|$dst, $src}",
2010 [(alignednontemporalstore (v8f32 VR256:$src),
2015 def : Pat<(int_x86_avx_movnt_dq_256 addr:$dst, VR256:$src),
2016 (VMOVNTDQYmr addr:$dst, VR256:$src)>;
2017 def : Pat<(int_x86_avx_movnt_pd_256 addr:$dst, VR256:$src),
2018 (VMOVNTPDYmr addr:$dst, VR256:$src)>;
2019 def : Pat<(int_x86_avx_movnt_ps_256 addr:$dst, VR256:$src),
2020 (VMOVNTPSYmr addr:$dst, VR256:$src)>;
2022 def MOVNTPSmr_Int : PSI<0x2B, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2023 "movntps\t{$src, $dst|$dst, $src}",
2024 [(int_x86_sse_movnt_ps addr:$dst, VR128:$src)]>;
2025 def MOVNTPDmr_Int : PDI<0x2B, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2026 "movntpd\t{$src, $dst|$dst, $src}",
2027 [(int_x86_sse2_movnt_pd addr:$dst, VR128:$src)]>;
2029 let ExeDomain = SSEPackedInt in
2030 def MOVNTDQmr_Int : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2031 "movntdq\t{$src, $dst|$dst, $src}",
2032 [(int_x86_sse2_movnt_dq addr:$dst, VR128:$src)]>;
2034 let AddedComplexity = 400 in { // Prefer non-temporal versions
2035 def MOVNTPSmr : PSI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2036 "movntps\t{$src, $dst|$dst, $src}",
2037 [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)]>;
2038 def MOVNTPDmr : PDI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2039 "movntpd\t{$src, $dst|$dst, $src}",
2040 [(alignednontemporalstore(v2f64 VR128:$src), addr:$dst)]>;
2042 def MOVNTDQ_64mr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2043 "movntdq\t{$src, $dst|$dst, $src}",
2044 [(alignednontemporalstore (v2f64 VR128:$src), addr:$dst)]>;
2046 let ExeDomain = SSEPackedInt in
2047 def MOVNTDQmr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2048 "movntdq\t{$src, $dst|$dst, $src}",
2049 [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)]>;
2051 // There is no AVX form for instructions below this point
2052 def MOVNTImr : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
2053 "movnti\t{$src, $dst|$dst, $src}",
2054 [(nontemporalstore (i32 GR32:$src), addr:$dst)]>,
2055 TB, Requires<[HasSSE2]>;
2057 def MOVNTI_64mr : RI<0xC3, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
2058 "movnti\t{$src, $dst|$dst, $src}",
2059 [(nontemporalstore (i64 GR64:$src), addr:$dst)]>,
2060 TB, Requires<[HasSSE2]>;
2063 def MOVNTImr_Int : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
2064 "movnti\t{$src, $dst|$dst, $src}",
2065 [(int_x86_sse2_movnt_i addr:$dst, GR32:$src)]>,
2066 TB, Requires<[HasSSE2]>;
2068 //===----------------------------------------------------------------------===//
2069 // SSE 1 & 2 - Misc Instructions (No AVX form)
2070 //===----------------------------------------------------------------------===//
2072 // Prefetch intrinsic.
2073 def PREFETCHT0 : PSI<0x18, MRM1m, (outs), (ins i8mem:$src),
2074 "prefetcht0\t$src", [(prefetch addr:$src, imm, (i32 3))]>;
2075 def PREFETCHT1 : PSI<0x18, MRM2m, (outs), (ins i8mem:$src),
2076 "prefetcht1\t$src", [(prefetch addr:$src, imm, (i32 2))]>;
2077 def PREFETCHT2 : PSI<0x18, MRM3m, (outs), (ins i8mem:$src),
2078 "prefetcht2\t$src", [(prefetch addr:$src, imm, (i32 1))]>;
2079 def PREFETCHNTA : PSI<0x18, MRM0m, (outs), (ins i8mem:$src),
2080 "prefetchnta\t$src", [(prefetch addr:$src, imm, (i32 0))]>;
2082 // Load, store, and memory fence
2083 def SFENCE : I<0xAE, MRM_F8, (outs), (ins), "sfence", [(int_x86_sse_sfence)]>,
2084 TB, Requires<[HasSSE1]>;
2085 def : Pat<(X86SFence), (SFENCE)>;
2087 // Alias instructions that map zero vector to pxor / xorp* for sse.
2088 // We set canFoldAsLoad because this can be converted to a constant-pool
2089 // load of an all-zeros value if folding it would be beneficial.
2090 // FIXME: Change encoding to pseudo! This is blocked right now by the x86
2091 // JIT implementation, it does not expand the instructions below like
2092 // X86MCInstLower does.
2093 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
2094 isCodeGenOnly = 1 in {
2095 def V_SET0PS : PSI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
2096 [(set VR128:$dst, (v4f32 immAllZerosV))]>;
2097 def V_SET0PD : PDI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
2098 [(set VR128:$dst, (v2f64 immAllZerosV))]>;
2099 let ExeDomain = SSEPackedInt in
2100 def V_SET0PI : PDI<0xEF, MRMInitReg, (outs VR128:$dst), (ins), "",
2101 [(set VR128:$dst, (v4i32 immAllZerosV))]>;
2104 // The same as done above but for AVX. The 128-bit versions are the
2105 // same, but re-encoded. The 256-bit does not support PI version.
2106 // FIXME: Change encoding to pseudo! This is blocked right now by the x86
2107 // JIT implementatioan, it does not expand the instructions below like
2108 // X86MCInstLower does.
2109 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
2110 isCodeGenOnly = 1, Predicates = [HasAVX] in {
2111 def AVX_SET0PS : PSI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
2112 [(set VR128:$dst, (v4f32 immAllZerosV))]>, VEX_4V;
2113 def AVX_SET0PD : PDI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
2114 [(set VR128:$dst, (v2f64 immAllZerosV))]>, VEX_4V;
2115 def AVX_SET0PSY : PSI<0x57, MRMInitReg, (outs VR256:$dst), (ins), "",
2116 [(set VR256:$dst, (v8f32 immAllZerosV))]>, VEX_4V;
2117 def AVX_SET0PDY : PDI<0x57, MRMInitReg, (outs VR256:$dst), (ins), "",
2118 [(set VR256:$dst, (v4f64 immAllZerosV))]>, VEX_4V;
2119 let ExeDomain = SSEPackedInt in
2120 def AVX_SET0PI : PDI<0xEF, MRMInitReg, (outs VR128:$dst), (ins), "",
2121 [(set VR128:$dst, (v4i32 immAllZerosV))]>;
2124 def : Pat<(v2i64 immAllZerosV), (V_SET0PI)>;
2125 def : Pat<(v8i16 immAllZerosV), (V_SET0PI)>;
2126 def : Pat<(v16i8 immAllZerosV), (V_SET0PI)>;
2128 def : Pat<(f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
2129 (f32 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
2131 //===----------------------------------------------------------------------===//
2132 // SSE 1 & 2 - Load/Store XCSR register
2133 //===----------------------------------------------------------------------===//
2135 let isAsmParserOnly = 1 in {
2136 def VLDMXCSR : VPSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
2137 "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>, VEX;
2138 def VSTMXCSR : VPSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
2139 "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>, VEX;
2142 def LDMXCSR : PSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
2143 "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>;
2144 def STMXCSR : PSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
2145 "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>;
2147 //===---------------------------------------------------------------------===//
2148 // SSE2 - Move Aligned/Unaligned Packed Integer Instructions
2149 //===---------------------------------------------------------------------===//
2151 let ExeDomain = SSEPackedInt in { // SSE integer instructions
2153 let isAsmParserOnly = 1 in {
2154 let neverHasSideEffects = 1 in {
2155 def VMOVDQArr : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2156 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2157 def VMOVDQAYrr : VPDI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
2158 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2160 def VMOVDQUrr : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2161 "movdqu\t{$src, $dst|$dst, $src}", []>, XS, VEX;
2162 def VMOVDQUYrr : VPDI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
2163 "movdqu\t{$src, $dst|$dst, $src}", []>, XS, VEX;
2165 let canFoldAsLoad = 1, mayLoad = 1 in {
2166 def VMOVDQArm : VPDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2167 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2168 def VMOVDQAYrm : VPDI<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
2169 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2170 let Predicates = [HasAVX] in {
2171 def VMOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2172 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
2173 def VMOVDQUYrm : I<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
2174 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
2178 let mayStore = 1 in {
2179 def VMOVDQAmr : VPDI<0x7F, MRMDestMem, (outs),
2180 (ins i128mem:$dst, VR128:$src),
2181 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2182 def VMOVDQAYmr : VPDI<0x7F, MRMDestMem, (outs),
2183 (ins i256mem:$dst, VR256:$src),
2184 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2185 let Predicates = [HasAVX] in {
2186 def VMOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2187 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
2188 def VMOVDQUYmr : I<0x7F, MRMDestMem, (outs), (ins i256mem:$dst, VR256:$src),
2189 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
2194 let neverHasSideEffects = 1 in
2195 def MOVDQArr : PDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2196 "movdqa\t{$src, $dst|$dst, $src}", []>;
2198 let canFoldAsLoad = 1, mayLoad = 1 in {
2199 def MOVDQArm : PDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2200 "movdqa\t{$src, $dst|$dst, $src}",
2201 [/*(set VR128:$dst, (alignedloadv2i64 addr:$src))*/]>;
2202 def MOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2203 "movdqu\t{$src, $dst|$dst, $src}",
2204 [/*(set VR128:$dst, (loadv2i64 addr:$src))*/]>,
2205 XS, Requires<[HasSSE2]>;
2208 let mayStore = 1 in {
2209 def MOVDQAmr : PDI<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2210 "movdqa\t{$src, $dst|$dst, $src}",
2211 [/*(alignedstore (v2i64 VR128:$src), addr:$dst)*/]>;
2212 def MOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2213 "movdqu\t{$src, $dst|$dst, $src}",
2214 [/*(store (v2i64 VR128:$src), addr:$dst)*/]>,
2215 XS, Requires<[HasSSE2]>;
2218 // Intrinsic forms of MOVDQU load and store
2219 let isAsmParserOnly = 1 in {
2220 let canFoldAsLoad = 1 in
2221 def VMOVDQUrm_Int : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2222 "vmovdqu\t{$src, $dst|$dst, $src}",
2223 [(set VR128:$dst, (int_x86_sse2_loadu_dq addr:$src))]>,
2224 XS, VEX, Requires<[HasAVX]>;
2225 def VMOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2226 "vmovdqu\t{$src, $dst|$dst, $src}",
2227 [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
2228 XS, VEX, Requires<[HasAVX]>;
2231 let canFoldAsLoad = 1 in
2232 def MOVDQUrm_Int : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2233 "movdqu\t{$src, $dst|$dst, $src}",
2234 [(set VR128:$dst, (int_x86_sse2_loadu_dq addr:$src))]>,
2235 XS, Requires<[HasSSE2]>;
2236 def MOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2237 "movdqu\t{$src, $dst|$dst, $src}",
2238 [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
2239 XS, Requires<[HasSSE2]>;
2241 } // ExeDomain = SSEPackedInt
2243 def : Pat<(int_x86_avx_loadu_dq_256 addr:$src), (VMOVDQUYrm addr:$src)>;
2244 def : Pat<(int_x86_avx_storeu_dq_256 addr:$dst, VR256:$src),
2245 (VMOVDQUYmr addr:$dst, VR256:$src)>;
2247 //===---------------------------------------------------------------------===//
2248 // SSE2 - Packed Integer Arithmetic Instructions
2249 //===---------------------------------------------------------------------===//
2251 let ExeDomain = SSEPackedInt in { // SSE integer instructions
2253 multiclass PDI_binop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId,
2254 bit IsCommutable = 0, bit Is2Addr = 1> {
2255 let isCommutable = IsCommutable in
2256 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2257 (ins VR128:$src1, VR128:$src2),
2259 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2260 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2261 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
2262 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2263 (ins VR128:$src1, i128mem:$src2),
2265 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2266 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2267 [(set VR128:$dst, (IntId VR128:$src1,
2268 (bitconvert (memopv2i64 addr:$src2))))]>;
2271 multiclass PDI_binop_rmi_int<bits<8> opc, bits<8> opc2, Format ImmForm,
2272 string OpcodeStr, Intrinsic IntId,
2273 Intrinsic IntId2, bit Is2Addr = 1> {
2274 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2275 (ins VR128:$src1, VR128:$src2),
2277 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2278 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2279 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
2280 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2281 (ins VR128:$src1, i128mem:$src2),
2283 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2284 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2285 [(set VR128:$dst, (IntId VR128:$src1,
2286 (bitconvert (memopv2i64 addr:$src2))))]>;
2287 def ri : PDIi8<opc2, ImmForm, (outs VR128:$dst),
2288 (ins VR128:$src1, i32i8imm:$src2),
2290 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2291 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2292 [(set VR128:$dst, (IntId2 VR128:$src1, (i32 imm:$src2)))]>;
2295 /// PDI_binop_rm - Simple SSE2 binary operator.
2296 multiclass PDI_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
2297 ValueType OpVT, bit IsCommutable = 0, bit Is2Addr = 1> {
2298 let isCommutable = IsCommutable in
2299 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2300 (ins VR128:$src1, VR128:$src2),
2302 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2303 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2304 [(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]>;
2305 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2306 (ins VR128:$src1, i128mem:$src2),
2308 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2309 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2310 [(set VR128:$dst, (OpVT (OpNode VR128:$src1,
2311 (bitconvert (memopv2i64 addr:$src2)))))]>;
2314 /// PDI_binop_rm_v2i64 - Simple SSE2 binary operator whose type is v2i64.
2316 /// FIXME: we could eliminate this and use PDI_binop_rm instead if tblgen knew
2317 /// to collapse (bitconvert VT to VT) into its operand.
2319 multiclass PDI_binop_rm_v2i64<bits<8> opc, string OpcodeStr, SDNode OpNode,
2320 bit IsCommutable = 0, bit Is2Addr = 1> {
2321 let isCommutable = IsCommutable in
2322 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2323 (ins VR128:$src1, VR128:$src2),
2325 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2326 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2327 [(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))]>;
2328 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2329 (ins VR128:$src1, i128mem:$src2),
2331 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2332 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2333 [(set VR128:$dst, (OpNode VR128:$src1, (memopv2i64 addr:$src2)))]>;
2336 } // ExeDomain = SSEPackedInt
2338 // 128-bit Integer Arithmetic
2340 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
2341 defm VPADDB : PDI_binop_rm<0xFC, "vpaddb", add, v16i8, 1, 0 /*3addr*/>, VEX_4V;
2342 defm VPADDW : PDI_binop_rm<0xFD, "vpaddw", add, v8i16, 1, 0>, VEX_4V;
2343 defm VPADDD : PDI_binop_rm<0xFE, "vpaddd", add, v4i32, 1, 0>, VEX_4V;
2344 defm VPADDQ : PDI_binop_rm_v2i64<0xD4, "vpaddq", add, 1, 0>, VEX_4V;
2345 defm VPMULLW : PDI_binop_rm<0xD5, "vpmullw", mul, v8i16, 1, 0>, VEX_4V;
2346 defm VPSUBB : PDI_binop_rm<0xF8, "vpsubb", sub, v16i8, 0, 0>, VEX_4V;
2347 defm VPSUBW : PDI_binop_rm<0xF9, "vpsubw", sub, v8i16, 0, 0>, VEX_4V;
2348 defm VPSUBD : PDI_binop_rm<0xFA, "vpsubd", sub, v4i32, 0, 0>, VEX_4V;
2349 defm VPSUBQ : PDI_binop_rm_v2i64<0xFB, "vpsubq", sub, 0, 0>, VEX_4V;
2352 defm VPSUBSB : PDI_binop_rm_int<0xE8, "vpsubsb" , int_x86_sse2_psubs_b, 0, 0>,
2354 defm VPSUBSW : PDI_binop_rm_int<0xE9, "vpsubsw" , int_x86_sse2_psubs_w, 0, 0>,
2356 defm VPSUBUSB : PDI_binop_rm_int<0xD8, "vpsubusb", int_x86_sse2_psubus_b, 0, 0>,
2358 defm VPSUBUSW : PDI_binop_rm_int<0xD9, "vpsubusw", int_x86_sse2_psubus_w, 0, 0>,
2360 defm VPADDSB : PDI_binop_rm_int<0xEC, "vpaddsb" , int_x86_sse2_padds_b, 1, 0>,
2362 defm VPADDSW : PDI_binop_rm_int<0xED, "vpaddsw" , int_x86_sse2_padds_w, 1, 0>,
2364 defm VPADDUSB : PDI_binop_rm_int<0xDC, "vpaddusb", int_x86_sse2_paddus_b, 1, 0>,
2366 defm VPADDUSW : PDI_binop_rm_int<0xDD, "vpaddusw", int_x86_sse2_paddus_w, 1, 0>,
2368 defm VPMULHUW : PDI_binop_rm_int<0xE4, "vpmulhuw", int_x86_sse2_pmulhu_w, 1, 0>,
2370 defm VPMULHW : PDI_binop_rm_int<0xE5, "vpmulhw" , int_x86_sse2_pmulh_w, 1, 0>,
2372 defm VPMULUDQ : PDI_binop_rm_int<0xF4, "vpmuludq", int_x86_sse2_pmulu_dq, 1, 0>,
2374 defm VPMADDWD : PDI_binop_rm_int<0xF5, "vpmaddwd", int_x86_sse2_pmadd_wd, 1, 0>,
2376 defm VPAVGB : PDI_binop_rm_int<0xE0, "vpavgb", int_x86_sse2_pavg_b, 1, 0>,
2378 defm VPAVGW : PDI_binop_rm_int<0xE3, "vpavgw", int_x86_sse2_pavg_w, 1, 0>,
2380 defm VPMINUB : PDI_binop_rm_int<0xDA, "vpminub", int_x86_sse2_pminu_b, 1, 0>,
2382 defm VPMINSW : PDI_binop_rm_int<0xEA, "vpminsw", int_x86_sse2_pmins_w, 1, 0>,
2384 defm VPMAXUB : PDI_binop_rm_int<0xDE, "vpmaxub", int_x86_sse2_pmaxu_b, 1, 0>,
2386 defm VPMAXSW : PDI_binop_rm_int<0xEE, "vpmaxsw", int_x86_sse2_pmaxs_w, 1, 0>,
2388 defm VPSADBW : PDI_binop_rm_int<0xF6, "vpsadbw", int_x86_sse2_psad_bw, 1, 0>,
2392 let Constraints = "$src1 = $dst" in {
2393 defm PADDB : PDI_binop_rm<0xFC, "paddb", add, v16i8, 1>;
2394 defm PADDW : PDI_binop_rm<0xFD, "paddw", add, v8i16, 1>;
2395 defm PADDD : PDI_binop_rm<0xFE, "paddd", add, v4i32, 1>;
2396 defm PADDQ : PDI_binop_rm_v2i64<0xD4, "paddq", add, 1>;
2397 defm PMULLW : PDI_binop_rm<0xD5, "pmullw", mul, v8i16, 1>;
2398 defm PSUBB : PDI_binop_rm<0xF8, "psubb", sub, v16i8>;
2399 defm PSUBW : PDI_binop_rm<0xF9, "psubw", sub, v8i16>;
2400 defm PSUBD : PDI_binop_rm<0xFA, "psubd", sub, v4i32>;
2401 defm PSUBQ : PDI_binop_rm_v2i64<0xFB, "psubq", sub>;
2404 defm PSUBSB : PDI_binop_rm_int<0xE8, "psubsb" , int_x86_sse2_psubs_b>;
2405 defm PSUBSW : PDI_binop_rm_int<0xE9, "psubsw" , int_x86_sse2_psubs_w>;
2406 defm PSUBUSB : PDI_binop_rm_int<0xD8, "psubusb", int_x86_sse2_psubus_b>;
2407 defm PSUBUSW : PDI_binop_rm_int<0xD9, "psubusw", int_x86_sse2_psubus_w>;
2408 defm PADDSB : PDI_binop_rm_int<0xEC, "paddsb" , int_x86_sse2_padds_b, 1>;
2409 defm PADDSW : PDI_binop_rm_int<0xED, "paddsw" , int_x86_sse2_padds_w, 1>;
2410 defm PADDUSB : PDI_binop_rm_int<0xDC, "paddusb", int_x86_sse2_paddus_b, 1>;
2411 defm PADDUSW : PDI_binop_rm_int<0xDD, "paddusw", int_x86_sse2_paddus_w, 1>;
2412 defm PMULHUW : PDI_binop_rm_int<0xE4, "pmulhuw", int_x86_sse2_pmulhu_w, 1>;
2413 defm PMULHW : PDI_binop_rm_int<0xE5, "pmulhw" , int_x86_sse2_pmulh_w, 1>;
2414 defm PMULUDQ : PDI_binop_rm_int<0xF4, "pmuludq", int_x86_sse2_pmulu_dq, 1>;
2415 defm PMADDWD : PDI_binop_rm_int<0xF5, "pmaddwd", int_x86_sse2_pmadd_wd, 1>;
2416 defm PAVGB : PDI_binop_rm_int<0xE0, "pavgb", int_x86_sse2_pavg_b, 1>;
2417 defm PAVGW : PDI_binop_rm_int<0xE3, "pavgw", int_x86_sse2_pavg_w, 1>;
2418 defm PMINUB : PDI_binop_rm_int<0xDA, "pminub", int_x86_sse2_pminu_b, 1>;
2419 defm PMINSW : PDI_binop_rm_int<0xEA, "pminsw", int_x86_sse2_pmins_w, 1>;
2420 defm PMAXUB : PDI_binop_rm_int<0xDE, "pmaxub", int_x86_sse2_pmaxu_b, 1>;
2421 defm PMAXSW : PDI_binop_rm_int<0xEE, "pmaxsw", int_x86_sse2_pmaxs_w, 1>;
2422 defm PSADBW : PDI_binop_rm_int<0xF6, "psadbw", int_x86_sse2_psad_bw, 1>;
2424 } // Constraints = "$src1 = $dst"
2426 //===---------------------------------------------------------------------===//
2427 // SSE2 - Packed Integer Logical Instructions
2428 //===---------------------------------------------------------------------===//
2430 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
2431 defm VPSLLW : PDI_binop_rmi_int<0xF1, 0x71, MRM6r, "vpsllw",
2432 int_x86_sse2_psll_w, int_x86_sse2_pslli_w, 0>,
2434 defm VPSLLD : PDI_binop_rmi_int<0xF2, 0x72, MRM6r, "vpslld",
2435 int_x86_sse2_psll_d, int_x86_sse2_pslli_d, 0>,
2437 defm VPSLLQ : PDI_binop_rmi_int<0xF3, 0x73, MRM6r, "vpsllq",
2438 int_x86_sse2_psll_q, int_x86_sse2_pslli_q, 0>,
2441 defm VPSRLW : PDI_binop_rmi_int<0xD1, 0x71, MRM2r, "vpsrlw",
2442 int_x86_sse2_psrl_w, int_x86_sse2_psrli_w, 0>,
2444 defm VPSRLD : PDI_binop_rmi_int<0xD2, 0x72, MRM2r, "vpsrld",
2445 int_x86_sse2_psrl_d, int_x86_sse2_psrli_d, 0>,
2447 defm VPSRLQ : PDI_binop_rmi_int<0xD3, 0x73, MRM2r, "vpsrlq",
2448 int_x86_sse2_psrl_q, int_x86_sse2_psrli_q, 0>,
2451 defm VPSRAW : PDI_binop_rmi_int<0xE1, 0x71, MRM4r, "vpsraw",
2452 int_x86_sse2_psra_w, int_x86_sse2_psrai_w, 0>,
2454 defm VPSRAD : PDI_binop_rmi_int<0xE2, 0x72, MRM4r, "vpsrad",
2455 int_x86_sse2_psra_d, int_x86_sse2_psrai_d, 0>,
2458 defm VPAND : PDI_binop_rm_v2i64<0xDB, "vpand", and, 1, 0>, VEX_4V;
2459 defm VPOR : PDI_binop_rm_v2i64<0xEB, "vpor" , or, 1, 0>, VEX_4V;
2460 defm VPXOR : PDI_binop_rm_v2i64<0xEF, "vpxor", xor, 1, 0>, VEX_4V;
2462 let ExeDomain = SSEPackedInt in {
2463 let neverHasSideEffects = 1 in {
2464 // 128-bit logical shifts.
2465 def VPSLLDQri : PDIi8<0x73, MRM7r,
2466 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2467 "vpslldq\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
2469 def VPSRLDQri : PDIi8<0x73, MRM3r,
2470 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2471 "vpsrldq\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
2473 // PSRADQri doesn't exist in SSE[1-3].
2475 def VPANDNrr : PDI<0xDF, MRMSrcReg,
2476 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2477 "vpandn\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2478 [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
2479 VR128:$src2)))]>, VEX_4V;
2481 def VPANDNrm : PDI<0xDF, MRMSrcMem,
2482 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2483 "vpandn\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2484 [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
2485 (memopv2i64 addr:$src2))))]>,
2490 let Constraints = "$src1 = $dst" in {
2491 defm PSLLW : PDI_binop_rmi_int<0xF1, 0x71, MRM6r, "psllw",
2492 int_x86_sse2_psll_w, int_x86_sse2_pslli_w>;
2493 defm PSLLD : PDI_binop_rmi_int<0xF2, 0x72, MRM6r, "pslld",
2494 int_x86_sse2_psll_d, int_x86_sse2_pslli_d>;
2495 defm PSLLQ : PDI_binop_rmi_int<0xF3, 0x73, MRM6r, "psllq",
2496 int_x86_sse2_psll_q, int_x86_sse2_pslli_q>;
2498 defm PSRLW : PDI_binop_rmi_int<0xD1, 0x71, MRM2r, "psrlw",
2499 int_x86_sse2_psrl_w, int_x86_sse2_psrli_w>;
2500 defm PSRLD : PDI_binop_rmi_int<0xD2, 0x72, MRM2r, "psrld",
2501 int_x86_sse2_psrl_d, int_x86_sse2_psrli_d>;
2502 defm PSRLQ : PDI_binop_rmi_int<0xD3, 0x73, MRM2r, "psrlq",
2503 int_x86_sse2_psrl_q, int_x86_sse2_psrli_q>;
2505 defm PSRAW : PDI_binop_rmi_int<0xE1, 0x71, MRM4r, "psraw",
2506 int_x86_sse2_psra_w, int_x86_sse2_psrai_w>;
2507 defm PSRAD : PDI_binop_rmi_int<0xE2, 0x72, MRM4r, "psrad",
2508 int_x86_sse2_psra_d, int_x86_sse2_psrai_d>;
2510 defm PAND : PDI_binop_rm_v2i64<0xDB, "pand", and, 1>;
2511 defm POR : PDI_binop_rm_v2i64<0xEB, "por" , or, 1>;
2512 defm PXOR : PDI_binop_rm_v2i64<0xEF, "pxor", xor, 1>;
2514 let ExeDomain = SSEPackedInt in {
2515 let neverHasSideEffects = 1 in {
2516 // 128-bit logical shifts.
2517 def PSLLDQri : PDIi8<0x73, MRM7r,
2518 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2519 "pslldq\t{$src2, $dst|$dst, $src2}", []>;
2520 def PSRLDQri : PDIi8<0x73, MRM3r,
2521 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2522 "psrldq\t{$src2, $dst|$dst, $src2}", []>;
2523 // PSRADQri doesn't exist in SSE[1-3].
2525 def PANDNrr : PDI<0xDF, MRMSrcReg,
2526 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2527 "pandn\t{$src2, $dst|$dst, $src2}",
2528 [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
2531 def PANDNrm : PDI<0xDF, MRMSrcMem,
2532 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2533 "pandn\t{$src2, $dst|$dst, $src2}",
2534 [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
2535 (memopv2i64 addr:$src2))))]>;
2537 } // Constraints = "$src1 = $dst"
2539 let Predicates = [HasAVX] in {
2540 def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
2541 (v2i64 (VPSLLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2542 def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
2543 (v2i64 (VPSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2544 def : Pat<(int_x86_sse2_psll_dq_bs VR128:$src1, imm:$src2),
2545 (v2i64 (VPSLLDQri VR128:$src1, imm:$src2))>;
2546 def : Pat<(int_x86_sse2_psrl_dq_bs VR128:$src1, imm:$src2),
2547 (v2i64 (VPSRLDQri VR128:$src1, imm:$src2))>;
2548 def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
2549 (v2f64 (VPSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2551 // Shift up / down and insert zero's.
2552 def : Pat<(v2i64 (X86vshl VR128:$src, (i8 imm:$amt))),
2553 (v2i64 (VPSLLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
2554 def : Pat<(v2i64 (X86vshr VR128:$src, (i8 imm:$amt))),
2555 (v2i64 (VPSRLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
2558 let Predicates = [HasSSE2] in {
2559 def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
2560 (v2i64 (PSLLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2561 def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
2562 (v2i64 (PSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2563 def : Pat<(int_x86_sse2_psll_dq_bs VR128:$src1, imm:$src2),
2564 (v2i64 (PSLLDQri VR128:$src1, imm:$src2))>;
2565 def : Pat<(int_x86_sse2_psrl_dq_bs VR128:$src1, imm:$src2),
2566 (v2i64 (PSRLDQri VR128:$src1, imm:$src2))>;
2567 def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
2568 (v2f64 (PSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2570 // Shift up / down and insert zero's.
2571 def : Pat<(v2i64 (X86vshl VR128:$src, (i8 imm:$amt))),
2572 (v2i64 (PSLLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
2573 def : Pat<(v2i64 (X86vshr VR128:$src, (i8 imm:$amt))),
2574 (v2i64 (PSRLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
2577 //===---------------------------------------------------------------------===//
2578 // SSE2 - Packed Integer Comparison Instructions
2579 //===---------------------------------------------------------------------===//
2581 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
2582 defm VPCMPEQB : PDI_binop_rm_int<0x74, "vpcmpeqb", int_x86_sse2_pcmpeq_b, 1,
2584 defm VPCMPEQW : PDI_binop_rm_int<0x75, "vpcmpeqw", int_x86_sse2_pcmpeq_w, 1,
2586 defm VPCMPEQD : PDI_binop_rm_int<0x76, "vpcmpeqd", int_x86_sse2_pcmpeq_d, 1,
2588 defm VPCMPGTB : PDI_binop_rm_int<0x64, "vpcmpgtb", int_x86_sse2_pcmpgt_b, 0,
2590 defm VPCMPGTW : PDI_binop_rm_int<0x65, "vpcmpgtw", int_x86_sse2_pcmpgt_w, 0,
2592 defm VPCMPGTD : PDI_binop_rm_int<0x66, "vpcmpgtd", int_x86_sse2_pcmpgt_d, 0,
2596 let Constraints = "$src1 = $dst" in {
2597 defm PCMPEQB : PDI_binop_rm_int<0x74, "pcmpeqb", int_x86_sse2_pcmpeq_b, 1>;
2598 defm PCMPEQW : PDI_binop_rm_int<0x75, "pcmpeqw", int_x86_sse2_pcmpeq_w, 1>;
2599 defm PCMPEQD : PDI_binop_rm_int<0x76, "pcmpeqd", int_x86_sse2_pcmpeq_d, 1>;
2600 defm PCMPGTB : PDI_binop_rm_int<0x64, "pcmpgtb", int_x86_sse2_pcmpgt_b>;
2601 defm PCMPGTW : PDI_binop_rm_int<0x65, "pcmpgtw", int_x86_sse2_pcmpgt_w>;
2602 defm PCMPGTD : PDI_binop_rm_int<0x66, "pcmpgtd", int_x86_sse2_pcmpgt_d>;
2603 } // Constraints = "$src1 = $dst"
2605 def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, VR128:$src2)),
2606 (PCMPEQBrr VR128:$src1, VR128:$src2)>;
2607 def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, (memop addr:$src2))),
2608 (PCMPEQBrm VR128:$src1, addr:$src2)>;
2609 def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, VR128:$src2)),
2610 (PCMPEQWrr VR128:$src1, VR128:$src2)>;
2611 def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, (memop addr:$src2))),
2612 (PCMPEQWrm VR128:$src1, addr:$src2)>;
2613 def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, VR128:$src2)),
2614 (PCMPEQDrr VR128:$src1, VR128:$src2)>;
2615 def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, (memop addr:$src2))),
2616 (PCMPEQDrm VR128:$src1, addr:$src2)>;
2618 def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, VR128:$src2)),
2619 (PCMPGTBrr VR128:$src1, VR128:$src2)>;
2620 def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, (memop addr:$src2))),
2621 (PCMPGTBrm VR128:$src1, addr:$src2)>;
2622 def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, VR128:$src2)),
2623 (PCMPGTWrr VR128:$src1, VR128:$src2)>;
2624 def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, (memop addr:$src2))),
2625 (PCMPGTWrm VR128:$src1, addr:$src2)>;
2626 def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, VR128:$src2)),
2627 (PCMPGTDrr VR128:$src1, VR128:$src2)>;
2628 def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, (memop addr:$src2))),
2629 (PCMPGTDrm VR128:$src1, addr:$src2)>;
2631 //===---------------------------------------------------------------------===//
2632 // SSE2 - Packed Integer Pack Instructions
2633 //===---------------------------------------------------------------------===//
2635 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
2636 defm VPACKSSWB : PDI_binop_rm_int<0x63, "vpacksswb", int_x86_sse2_packsswb_128,
2638 defm VPACKSSDW : PDI_binop_rm_int<0x6B, "vpackssdw", int_x86_sse2_packssdw_128,
2640 defm VPACKUSWB : PDI_binop_rm_int<0x67, "vpackuswb", int_x86_sse2_packuswb_128,
2644 let Constraints = "$src1 = $dst" in {
2645 defm PACKSSWB : PDI_binop_rm_int<0x63, "packsswb", int_x86_sse2_packsswb_128>;
2646 defm PACKSSDW : PDI_binop_rm_int<0x6B, "packssdw", int_x86_sse2_packssdw_128>;
2647 defm PACKUSWB : PDI_binop_rm_int<0x67, "packuswb", int_x86_sse2_packuswb_128>;
2648 } // Constraints = "$src1 = $dst"
2650 //===---------------------------------------------------------------------===//
2651 // SSE2 - Packed Integer Shuffle Instructions
2652 //===---------------------------------------------------------------------===//
2654 let ExeDomain = SSEPackedInt in {
2655 multiclass sse2_pshuffle<string OpcodeStr, ValueType vt, PatFrag pshuf_frag,
2657 def ri : Ii8<0x70, MRMSrcReg,
2658 (outs VR128:$dst), (ins VR128:$src1, i8imm:$src2),
2659 !strconcat(OpcodeStr,
2660 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2661 [(set VR128:$dst, (vt (pshuf_frag:$src2 VR128:$src1,
2663 def mi : Ii8<0x70, MRMSrcMem,
2664 (outs VR128:$dst), (ins i128mem:$src1, i8imm:$src2),
2665 !strconcat(OpcodeStr,
2666 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2667 [(set VR128:$dst, (vt (pshuf_frag:$src2
2668 (bc_frag (memopv2i64 addr:$src1)),
2671 } // ExeDomain = SSEPackedInt
2673 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
2674 let AddedComplexity = 5 in
2675 defm VPSHUFD : sse2_pshuffle<"vpshufd", v4i32, pshufd, bc_v4i32>, OpSize,
2678 // SSE2 with ImmT == Imm8 and XS prefix.
2679 defm VPSHUFHW : sse2_pshuffle<"vpshufhw", v8i16, pshufhw, bc_v8i16>, XS,
2682 // SSE2 with ImmT == Imm8 and XD prefix.
2683 defm VPSHUFLW : sse2_pshuffle<"vpshuflw", v8i16, pshuflw, bc_v8i16>, XD,
2687 let Predicates = [HasSSE2] in {
2688 let AddedComplexity = 5 in
2689 defm PSHUFD : sse2_pshuffle<"pshufd", v4i32, pshufd, bc_v4i32>, TB, OpSize;
2691 // SSE2 with ImmT == Imm8 and XS prefix.
2692 defm PSHUFHW : sse2_pshuffle<"pshufhw", v8i16, pshufhw, bc_v8i16>, XS;
2694 // SSE2 with ImmT == Imm8 and XD prefix.
2695 defm PSHUFLW : sse2_pshuffle<"pshuflw", v8i16, pshuflw, bc_v8i16>, XD;
2698 //===---------------------------------------------------------------------===//
2699 // SSE2 - Packed Integer Unpack Instructions
2700 //===---------------------------------------------------------------------===//
2702 let ExeDomain = SSEPackedInt in {
2703 multiclass sse2_unpack<bits<8> opc, string OpcodeStr, ValueType vt,
2704 PatFrag unp_frag, PatFrag bc_frag, bit Is2Addr = 1> {
2705 def rr : PDI<opc, MRMSrcReg,
2706 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2708 !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
2709 !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2710 [(set VR128:$dst, (vt (unp_frag VR128:$src1, VR128:$src2)))]>;
2711 def rm : PDI<opc, MRMSrcMem,
2712 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2714 !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
2715 !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2716 [(set VR128:$dst, (unp_frag VR128:$src1,
2717 (bc_frag (memopv2i64
2721 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
2722 defm VPUNPCKLBW : sse2_unpack<0x60, "vpunpcklbw", v16i8, unpckl, bc_v16i8,
2724 defm VPUNPCKLWD : sse2_unpack<0x61, "vpunpcklwd", v8i16, unpckl, bc_v8i16,
2726 defm VPUNPCKLDQ : sse2_unpack<0x62, "vpunpckldq", v4i32, unpckl, bc_v4i32,
2729 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
2730 /// knew to collapse (bitconvert VT to VT) into its operand.
2731 def VPUNPCKLQDQrr : PDI<0x6C, MRMSrcReg,
2732 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2733 "vpunpcklqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2735 (v2i64 (unpckl VR128:$src1, VR128:$src2)))]>, VEX_4V;
2736 def VPUNPCKLQDQrm : PDI<0x6C, MRMSrcMem,
2737 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2738 "vpunpcklqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2740 (v2i64 (unpckl VR128:$src1,
2741 (memopv2i64 addr:$src2))))]>, VEX_4V;
2743 defm VPUNPCKHBW : sse2_unpack<0x68, "vpunpckhbw", v16i8, unpckh, bc_v16i8,
2745 defm VPUNPCKHWD : sse2_unpack<0x69, "vpunpckhwd", v8i16, unpckh, bc_v8i16,
2747 defm VPUNPCKHDQ : sse2_unpack<0x6A, "vpunpckhdq", v4i32, unpckh, bc_v4i32,
2750 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
2751 /// knew to collapse (bitconvert VT to VT) into its operand.
2752 def VPUNPCKHQDQrr : PDI<0x6D, MRMSrcReg,
2753 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2754 "vpunpckhqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2756 (v2i64 (unpckh VR128:$src1, VR128:$src2)))]>, VEX_4V;
2757 def VPUNPCKHQDQrm : PDI<0x6D, MRMSrcMem,
2758 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2759 "vpunpckhqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2761 (v2i64 (unpckh VR128:$src1,
2762 (memopv2i64 addr:$src2))))]>, VEX_4V;
2765 let Constraints = "$src1 = $dst" in {
2766 defm PUNPCKLBW : sse2_unpack<0x60, "punpcklbw", v16i8, unpckl, bc_v16i8>;
2767 defm PUNPCKLWD : sse2_unpack<0x61, "punpcklwd", v8i16, unpckl, bc_v8i16>;
2768 defm PUNPCKLDQ : sse2_unpack<0x62, "punpckldq", v4i32, unpckl, bc_v4i32>;
2770 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
2771 /// knew to collapse (bitconvert VT to VT) into its operand.
2772 def PUNPCKLQDQrr : PDI<0x6C, MRMSrcReg,
2773 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2774 "punpcklqdq\t{$src2, $dst|$dst, $src2}",
2776 (v2i64 (unpckl VR128:$src1, VR128:$src2)))]>;
2777 def PUNPCKLQDQrm : PDI<0x6C, MRMSrcMem,
2778 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2779 "punpcklqdq\t{$src2, $dst|$dst, $src2}",
2781 (v2i64 (unpckl VR128:$src1,
2782 (memopv2i64 addr:$src2))))]>;
2784 defm PUNPCKHBW : sse2_unpack<0x68, "punpckhbw", v16i8, unpckh, bc_v16i8>;
2785 defm PUNPCKHWD : sse2_unpack<0x69, "punpckhwd", v8i16, unpckh, bc_v8i16>;
2786 defm PUNPCKHDQ : sse2_unpack<0x6A, "punpckhdq", v4i32, unpckh, bc_v4i32>;
2788 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
2789 /// knew to collapse (bitconvert VT to VT) into its operand.
2790 def PUNPCKHQDQrr : PDI<0x6D, MRMSrcReg,
2791 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2792 "punpckhqdq\t{$src2, $dst|$dst, $src2}",
2794 (v2i64 (unpckh VR128:$src1, VR128:$src2)))]>;
2795 def PUNPCKHQDQrm : PDI<0x6D, MRMSrcMem,
2796 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2797 "punpckhqdq\t{$src2, $dst|$dst, $src2}",
2799 (v2i64 (unpckh VR128:$src1,
2800 (memopv2i64 addr:$src2))))]>;
2803 } // ExeDomain = SSEPackedInt
2805 //===---------------------------------------------------------------------===//
2806 // SSE2 - Packed Integer Extract and Insert
2807 //===---------------------------------------------------------------------===//
2809 let ExeDomain = SSEPackedInt in {
2810 multiclass sse2_pinsrw<bit Is2Addr = 1> {
2811 def rri : Ii8<0xC4, MRMSrcReg,
2812 (outs VR128:$dst), (ins VR128:$src1,
2813 GR32:$src2, i32i8imm:$src3),
2815 "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
2816 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
2818 (X86pinsrw VR128:$src1, GR32:$src2, imm:$src3))]>;
2819 def rmi : Ii8<0xC4, MRMSrcMem,
2820 (outs VR128:$dst), (ins VR128:$src1,
2821 i16mem:$src2, i32i8imm:$src3),
2823 "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
2824 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
2826 (X86pinsrw VR128:$src1, (extloadi16 addr:$src2),
2831 let isAsmParserOnly = 1, Predicates = [HasAVX] in
2832 def VPEXTRWri : Ii8<0xC5, MRMSrcReg,
2833 (outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
2834 "vpextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2835 [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
2836 imm:$src2))]>, OpSize, VEX;
2837 def PEXTRWri : PDIi8<0xC5, MRMSrcReg,
2838 (outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
2839 "pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2840 [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
2844 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
2845 defm VPINSRW : sse2_pinsrw<0>, OpSize, VEX_4V;
2846 def VPINSRWrr64i : Ii8<0xC4, MRMSrcReg, (outs VR128:$dst),
2847 (ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
2848 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
2849 []>, OpSize, VEX_4V;
2852 let Constraints = "$src1 = $dst" in
2853 defm PINSRW : sse2_pinsrw, TB, OpSize, Requires<[HasSSE2]>;
2855 } // ExeDomain = SSEPackedInt
2857 //===---------------------------------------------------------------------===//
2858 // SSE2 - Packed Mask Creation
2859 //===---------------------------------------------------------------------===//
2861 let ExeDomain = SSEPackedInt in {
2863 let isAsmParserOnly = 1 in {
2864 def VPMOVMSKBrr : VPDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
2865 "pmovmskb\t{$src, $dst|$dst, $src}",
2866 [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>, VEX;
2867 def VPMOVMSKBr64r : VPDI<0xD7, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
2868 "pmovmskb\t{$src, $dst|$dst, $src}", []>, VEX;
2870 def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
2871 "pmovmskb\t{$src, $dst|$dst, $src}",
2872 [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>;
2874 } // ExeDomain = SSEPackedInt
2876 //===---------------------------------------------------------------------===//
2877 // SSE2 - Conditional Store
2878 //===---------------------------------------------------------------------===//
2880 let ExeDomain = SSEPackedInt in {
2882 let isAsmParserOnly = 1 in {
2884 def VMASKMOVDQU : VPDI<0xF7, MRMSrcReg, (outs),
2885 (ins VR128:$src, VR128:$mask),
2886 "maskmovdqu\t{$mask, $src|$src, $mask}",
2887 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>, VEX;
2889 def VMASKMOVDQU64 : VPDI<0xF7, MRMSrcReg, (outs),
2890 (ins VR128:$src, VR128:$mask),
2891 "maskmovdqu\t{$mask, $src|$src, $mask}",
2892 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)]>, VEX;
2896 def MASKMOVDQU : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
2897 "maskmovdqu\t{$mask, $src|$src, $mask}",
2898 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>;
2900 def MASKMOVDQU64 : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
2901 "maskmovdqu\t{$mask, $src|$src, $mask}",
2902 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)]>;
2904 } // ExeDomain = SSEPackedInt
2906 //===---------------------------------------------------------------------===//
2907 // SSE2 - Move Doubleword
2908 //===---------------------------------------------------------------------===//
2910 // Move Int Doubleword to Packed Double Int
2911 let isAsmParserOnly = 1 in {
2912 def VMOVDI2PDIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
2913 "movd\t{$src, $dst|$dst, $src}",
2915 (v4i32 (scalar_to_vector GR32:$src)))]>, VEX;
2916 def VMOVDI2PDIrm : VPDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
2917 "movd\t{$src, $dst|$dst, $src}",
2919 (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>,
2922 def MOVDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
2923 "movd\t{$src, $dst|$dst, $src}",
2925 (v4i32 (scalar_to_vector GR32:$src)))]>;
2926 def MOVDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
2927 "movd\t{$src, $dst|$dst, $src}",
2929 (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>;
2930 def MOV64toPQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
2931 "mov{d|q}\t{$src, $dst|$dst, $src}",
2933 (v2i64 (scalar_to_vector GR64:$src)))]>;
2934 def MOV64toSDrr : RPDI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
2935 "mov{d|q}\t{$src, $dst|$dst, $src}",
2936 [(set FR64:$dst, (bitconvert GR64:$src))]>;
2939 // Move Int Doubleword to Single Scalar
2940 let isAsmParserOnly = 1 in {
2941 def VMOVDI2SSrr : VPDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
2942 "movd\t{$src, $dst|$dst, $src}",
2943 [(set FR32:$dst, (bitconvert GR32:$src))]>, VEX;
2945 def VMOVDI2SSrm : VPDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
2946 "movd\t{$src, $dst|$dst, $src}",
2947 [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>,
2950 def MOVDI2SSrr : PDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
2951 "movd\t{$src, $dst|$dst, $src}",
2952 [(set FR32:$dst, (bitconvert GR32:$src))]>;
2954 def MOVDI2SSrm : PDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
2955 "movd\t{$src, $dst|$dst, $src}",
2956 [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>;
2958 // Move Packed Doubleword Int to Packed Double Int
2959 let isAsmParserOnly = 1 in {
2960 def VMOVPDI2DIrr : VPDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
2961 "movd\t{$src, $dst|$dst, $src}",
2962 [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
2964 def VMOVPDI2DImr : VPDI<0x7E, MRMDestMem, (outs),
2965 (ins i32mem:$dst, VR128:$src),
2966 "movd\t{$src, $dst|$dst, $src}",
2967 [(store (i32 (vector_extract (v4i32 VR128:$src),
2968 (iPTR 0))), addr:$dst)]>, VEX;
2970 def MOVPDI2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
2971 "movd\t{$src, $dst|$dst, $src}",
2972 [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
2974 def MOVPDI2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR128:$src),
2975 "movd\t{$src, $dst|$dst, $src}",
2976 [(store (i32 (vector_extract (v4i32 VR128:$src),
2977 (iPTR 0))), addr:$dst)]>;
2979 def MOVPQIto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
2980 "mov{d|q}\t{$src, $dst|$dst, $src}",
2981 [(set GR64:$dst, (vector_extract (v2i64 VR128:$src),
2983 def MOV64toSDrm : S3SI<0x7E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
2984 "movq\t{$src, $dst|$dst, $src}",
2985 [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))]>;
2987 def MOVSDto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
2988 "mov{d|q}\t{$src, $dst|$dst, $src}",
2989 [(set GR64:$dst, (bitconvert FR64:$src))]>;
2990 def MOVSDto64mr : RPDI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
2991 "movq\t{$src, $dst|$dst, $src}",
2992 [(store (i64 (bitconvert FR64:$src)), addr:$dst)]>;
2994 // Move Scalar Single to Double Int
2995 let isAsmParserOnly = 1 in {
2996 def VMOVSS2DIrr : VPDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
2997 "movd\t{$src, $dst|$dst, $src}",
2998 [(set GR32:$dst, (bitconvert FR32:$src))]>, VEX;
2999 def VMOVSS2DImr : VPDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
3000 "movd\t{$src, $dst|$dst, $src}",
3001 [(store (i32 (bitconvert FR32:$src)), addr:$dst)]>, VEX;
3003 def MOVSS2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
3004 "movd\t{$src, $dst|$dst, $src}",
3005 [(set GR32:$dst, (bitconvert FR32:$src))]>;
3006 def MOVSS2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
3007 "movd\t{$src, $dst|$dst, $src}",
3008 [(store (i32 (bitconvert FR32:$src)), addr:$dst)]>;
3010 // movd / movq to XMM register zero-extends
3011 let AddedComplexity = 15, isAsmParserOnly = 1 in {
3012 def VMOVZDI2PDIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
3013 "movd\t{$src, $dst|$dst, $src}",
3014 [(set VR128:$dst, (v4i32 (X86vzmovl
3015 (v4i32 (scalar_to_vector GR32:$src)))))]>,
3017 def VMOVZQI2PQIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
3018 "mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only
3019 [(set VR128:$dst, (v2i64 (X86vzmovl
3020 (v2i64 (scalar_to_vector GR64:$src)))))]>,
3023 let AddedComplexity = 15 in {
3024 def MOVZDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
3025 "movd\t{$src, $dst|$dst, $src}",
3026 [(set VR128:$dst, (v4i32 (X86vzmovl
3027 (v4i32 (scalar_to_vector GR32:$src)))))]>;
3028 def MOVZQI2PQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
3029 "mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only
3030 [(set VR128:$dst, (v2i64 (X86vzmovl
3031 (v2i64 (scalar_to_vector GR64:$src)))))]>;
3034 let AddedComplexity = 20 in {
3035 let isAsmParserOnly = 1 in
3036 def VMOVZDI2PDIrm : VPDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
3037 "movd\t{$src, $dst|$dst, $src}",
3039 (v4i32 (X86vzmovl (v4i32 (scalar_to_vector
3040 (loadi32 addr:$src))))))]>,
3042 def MOVZDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
3043 "movd\t{$src, $dst|$dst, $src}",
3045 (v4i32 (X86vzmovl (v4i32 (scalar_to_vector
3046 (loadi32 addr:$src))))))]>;
3048 def : Pat<(v4i32 (X86vzmovl (loadv4i32 addr:$src))),
3049 (MOVZDI2PDIrm addr:$src)>;
3050 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
3051 (MOVZDI2PDIrm addr:$src)>;
3052 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
3053 (MOVZDI2PDIrm addr:$src)>;
3056 //===---------------------------------------------------------------------===//
3057 // SSE2 - Move Quadword
3058 //===---------------------------------------------------------------------===//
3060 // Move Quadword Int to Packed Quadword Int
3061 let isAsmParserOnly = 1 in
3062 def VMOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3063 "vmovq\t{$src, $dst|$dst, $src}",
3065 (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
3066 VEX, Requires<[HasAVX]>;
3067 def MOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3068 "movq\t{$src, $dst|$dst, $src}",
3070 (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
3071 Requires<[HasSSE2]>; // SSE2 instruction with XS Prefix
3073 // Move Packed Quadword Int to Quadword Int
3074 let isAsmParserOnly = 1 in
3075 def VMOVPQI2QImr : VPDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3076 "movq\t{$src, $dst|$dst, $src}",
3077 [(store (i64 (vector_extract (v2i64 VR128:$src),
3078 (iPTR 0))), addr:$dst)]>, VEX;
3079 def MOVPQI2QImr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3080 "movq\t{$src, $dst|$dst, $src}",
3081 [(store (i64 (vector_extract (v2i64 VR128:$src),
3082 (iPTR 0))), addr:$dst)]>;
3084 def : Pat<(f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
3085 (f64 (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd))>;
3087 // Store / copy lower 64-bits of a XMM register.
3088 let isAsmParserOnly = 1 in
3089 def VMOVLQ128mr : VPDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3090 "movq\t{$src, $dst|$dst, $src}",
3091 [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>, VEX;
3092 def MOVLQ128mr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3093 "movq\t{$src, $dst|$dst, $src}",
3094 [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>;
3096 let AddedComplexity = 20, isAsmParserOnly = 1 in
3097 def VMOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3098 "vmovq\t{$src, $dst|$dst, $src}",
3100 (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
3101 (loadi64 addr:$src))))))]>,
3102 XS, VEX, Requires<[HasAVX]>;
3104 let AddedComplexity = 20 in {
3105 def MOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3106 "movq\t{$src, $dst|$dst, $src}",
3108 (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
3109 (loadi64 addr:$src))))))]>,
3110 XS, Requires<[HasSSE2]>;
3112 def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
3113 (MOVZQI2PQIrm addr:$src)>;
3114 def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4f32 addr:$src)))),
3115 (MOVZQI2PQIrm addr:$src)>;
3116 def : Pat<(v2i64 (X86vzload addr:$src)), (MOVZQI2PQIrm addr:$src)>;
3119 // Moving from XMM to XMM and clear upper 64 bits. Note, there is a bug in
3120 // IA32 document. movq xmm1, xmm2 does clear the high bits.
3121 let isAsmParserOnly = 1, AddedComplexity = 15 in
3122 def VMOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3123 "vmovq\t{$src, $dst|$dst, $src}",
3124 [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
3125 XS, VEX, Requires<[HasAVX]>;
3126 let AddedComplexity = 15 in
3127 def MOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3128 "movq\t{$src, $dst|$dst, $src}",
3129 [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
3130 XS, Requires<[HasSSE2]>;
3132 let AddedComplexity = 20, isAsmParserOnly = 1 in
3133 def VMOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3134 "vmovq\t{$src, $dst|$dst, $src}",
3135 [(set VR128:$dst, (v2i64 (X86vzmovl
3136 (loadv2i64 addr:$src))))]>,
3137 XS, VEX, Requires<[HasAVX]>;
3138 let AddedComplexity = 20 in {
3139 def MOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3140 "movq\t{$src, $dst|$dst, $src}",
3141 [(set VR128:$dst, (v2i64 (X86vzmovl
3142 (loadv2i64 addr:$src))))]>,
3143 XS, Requires<[HasSSE2]>;
3145 def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4i32 addr:$src)))),
3146 (MOVZPQILo2PQIrm addr:$src)>;
3149 // Instructions to match in the assembler
3150 let isAsmParserOnly = 1 in {
3151 def VMOVQs64rr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
3152 "movq\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
3153 def VMOVQd64rr : VPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
3154 "movq\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
3155 // Recognize "movd" with GR64 destination, but encode as a "movq"
3156 def VMOVQd64rr_alt : VPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
3157 "movd\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
3160 // Instructions for the disassembler
3161 // xr = XMM register
3164 let isAsmParserOnly = 1, Predicates = [HasAVX] in
3165 def VMOVQxrxr: I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3166 "vmovq\t{$src, $dst|$dst, $src}", []>, VEX, XS;
3167 def MOVQxrxr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3168 "movq\t{$src, $dst|$dst, $src}", []>, XS;
3170 //===---------------------------------------------------------------------===//
3171 // SSE2 - Misc Instructions
3172 //===---------------------------------------------------------------------===//
3175 def CLFLUSH : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
3176 "clflush\t$src", [(int_x86_sse2_clflush addr:$src)]>,
3177 TB, Requires<[HasSSE2]>;
3179 // Load, store, and memory fence
3180 def LFENCE : I<0xAE, MRM_E8, (outs), (ins),
3181 "lfence", [(int_x86_sse2_lfence)]>, TB, Requires<[HasSSE2]>;
3182 def MFENCE : I<0xAE, MRM_F0, (outs), (ins),
3183 "mfence", [(int_x86_sse2_mfence)]>, TB, Requires<[HasSSE2]>;
3184 def : Pat<(X86LFence), (LFENCE)>;
3185 def : Pat<(X86MFence), (MFENCE)>;
3188 // Pause. This "instruction" is encoded as "rep; nop", so even though it
3189 // was introduced with SSE2, it's backward compatible.
3190 def PAUSE : I<0x90, RawFrm, (outs), (ins), "pause", []>, REP;
3192 // Alias instructions that map zero vector to pxor / xorp* for sse.
3193 // We set canFoldAsLoad because this can be converted to a constant-pool
3194 // load of an all-ones value if folding it would be beneficial.
3195 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
3196 isCodeGenOnly = 1, ExeDomain = SSEPackedInt in
3197 // FIXME: Change encoding to pseudo.
3198 def V_SETALLONES : PDI<0x76, MRMInitReg, (outs VR128:$dst), (ins), "",
3199 [(set VR128:$dst, (v4i32 immAllOnesV))]>;
3201 //===---------------------------------------------------------------------===//
3202 // SSE3 - Conversion Instructions
3203 //===---------------------------------------------------------------------===//
3205 // Convert Packed Double FP to Packed DW Integers
3206 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
3207 // The assembler can recognize rr 256-bit instructions by seeing a ymm
3208 // register, but the same isn't true when using memory operands instead.
3209 // Provide other assembly rr and rm forms to address this explicitly.
3210 def VCVTPD2DQrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3211 "vcvtpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
3212 def VCVTPD2DQXrYr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
3213 "vcvtpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
3216 def VCVTPD2DQXrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3217 "vcvtpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
3218 def VCVTPD2DQXrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3219 "vcvtpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
3222 def VCVTPD2DQYrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
3223 "vcvtpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX;
3224 def VCVTPD2DQYrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
3225 "vcvtpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
3228 def CVTPD2DQrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3229 "cvtpd2dq\t{$src, $dst|$dst, $src}", []>;
3230 def CVTPD2DQrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3231 "cvtpd2dq\t{$src, $dst|$dst, $src}", []>;
3233 // Convert Packed DW Integers to Packed Double FP
3234 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
3235 def VCVTDQ2PDrm : S3SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3236 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
3237 def VCVTDQ2PDrr : S3SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3238 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
3239 def VCVTDQ2PDYrm : S3SI<0xE6, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
3240 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
3241 def VCVTDQ2PDYrr : S3SI<0xE6, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
3242 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
3245 def CVTDQ2PDrm : S3SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3246 "cvtdq2pd\t{$src, $dst|$dst, $src}", []>;
3247 def CVTDQ2PDrr : S3SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3248 "cvtdq2pd\t{$src, $dst|$dst, $src}", []>;
3250 // AVX 256-bit register conversion intrinsics
3251 def : Pat<(int_x86_avx_cvtdq2_pd_256 VR128:$src),
3252 (VCVTDQ2PDYrr VR128:$src)>;
3253 def : Pat<(int_x86_avx_cvtdq2_pd_256 (memopv4i32 addr:$src)),
3254 (VCVTDQ2PDYrm addr:$src)>;
3256 def : Pat<(int_x86_avx_cvt_pd2dq_256 VR256:$src),
3257 (VCVTPD2DQYrr VR256:$src)>;
3258 def : Pat<(int_x86_avx_cvt_pd2dq_256 (memopv4f64 addr:$src)),
3259 (VCVTPD2DQYrm addr:$src)>;
3261 //===---------------------------------------------------------------------===//
3262 // SSE3 - Move Instructions
3263 //===---------------------------------------------------------------------===//
3265 // Replicate Single FP
3266 multiclass sse3_replicate_sfp<bits<8> op, PatFrag rep_frag, string OpcodeStr> {
3267 def rr : S3SI<op, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3268 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3269 [(set VR128:$dst, (v4f32 (rep_frag
3270 VR128:$src, (undef))))]>;
3271 def rm : S3SI<op, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3272 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3273 [(set VR128:$dst, (rep_frag
3274 (memopv4f32 addr:$src), (undef)))]>;
3277 multiclass sse3_replicate_sfp_y<bits<8> op, PatFrag rep_frag,
3279 def rr : S3SI<op, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
3280 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>;
3281 def rm : S3SI<op, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
3282 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>;
3285 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
3286 // FIXME: Merge above classes when we have patterns for the ymm version
3287 defm VMOVSHDUP : sse3_replicate_sfp<0x16, movshdup, "vmovshdup">, VEX;
3288 defm VMOVSLDUP : sse3_replicate_sfp<0x12, movsldup, "vmovsldup">, VEX;
3289 defm VMOVSHDUPY : sse3_replicate_sfp_y<0x16, movshdup, "vmovshdup">, VEX;
3290 defm VMOVSLDUPY : sse3_replicate_sfp_y<0x12, movsldup, "vmovsldup">, VEX;
3292 defm MOVSHDUP : sse3_replicate_sfp<0x16, movshdup, "movshdup">;
3293 defm MOVSLDUP : sse3_replicate_sfp<0x12, movsldup, "movsldup">;
3295 // Replicate Double FP
3296 multiclass sse3_replicate_dfp<string OpcodeStr> {
3297 def rr : S3DI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3298 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3299 [(set VR128:$dst,(v2f64 (movddup VR128:$src, (undef))))]>;
3300 def rm : S3DI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
3301 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3303 (v2f64 (movddup (scalar_to_vector (loadf64 addr:$src)),
3307 multiclass sse3_replicate_dfp_y<string OpcodeStr> {
3308 def rr : S3DI<0x12, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
3309 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3311 def rm : S3DI<0x12, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
3312 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3316 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
3317 // FIXME: Merge above classes when we have patterns for the ymm version
3318 defm VMOVDDUP : sse3_replicate_dfp<"vmovddup">, VEX;
3319 defm VMOVDDUPY : sse3_replicate_dfp_y<"vmovddup">, VEX;
3321 defm MOVDDUP : sse3_replicate_dfp<"movddup">;
3323 // Move Unaligned Integer
3324 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
3325 def VLDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3326 "vlddqu\t{$src, $dst|$dst, $src}",
3327 [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>, VEX;
3328 def VLDDQUYrm : S3DI<0xF0, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
3329 "vlddqu\t{$src, $dst|$dst, $src}",
3330 [(set VR256:$dst, (int_x86_avx_ldu_dq_256 addr:$src))]>, VEX;
3332 def LDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3333 "lddqu\t{$src, $dst|$dst, $src}",
3334 [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>;
3336 def : Pat<(movddup (bc_v2f64 (v2i64 (scalar_to_vector (loadi64 addr:$src)))),
3338 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3340 // Several Move patterns
3341 let AddedComplexity = 5 in {
3342 def : Pat<(movddup (memopv2f64 addr:$src), (undef)),
3343 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3344 def : Pat<(movddup (bc_v4f32 (memopv2f64 addr:$src)), (undef)),
3345 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3346 def : Pat<(movddup (memopv2i64 addr:$src), (undef)),
3347 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3348 def : Pat<(movddup (bc_v4i32 (memopv2i64 addr:$src)), (undef)),
3349 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3352 // vector_shuffle v1, <undef> <1, 1, 3, 3>
3353 let AddedComplexity = 15 in
3354 def : Pat<(v4i32 (movshdup VR128:$src, (undef))),
3355 (MOVSHDUPrr VR128:$src)>, Requires<[HasSSE3]>;
3356 let AddedComplexity = 20 in
3357 def : Pat<(v4i32 (movshdup (bc_v4i32 (memopv2i64 addr:$src)), (undef))),
3358 (MOVSHDUPrm addr:$src)>, Requires<[HasSSE3]>;
3360 // vector_shuffle v1, <undef> <0, 0, 2, 2>
3361 let AddedComplexity = 15 in
3362 def : Pat<(v4i32 (movsldup VR128:$src, (undef))),
3363 (MOVSLDUPrr VR128:$src)>, Requires<[HasSSE3]>;
3364 let AddedComplexity = 20 in
3365 def : Pat<(v4i32 (movsldup (bc_v4i32 (memopv2i64 addr:$src)), (undef))),
3366 (MOVSLDUPrm addr:$src)>, Requires<[HasSSE3]>;
3368 //===---------------------------------------------------------------------===//
3369 // SSE3 - Arithmetic
3370 //===---------------------------------------------------------------------===//
3372 multiclass sse3_addsub<Intrinsic Int, string OpcodeStr, RegisterClass RC,
3373 X86MemOperand x86memop, bit Is2Addr = 1> {
3374 def rr : I<0xD0, MRMSrcReg,
3375 (outs RC:$dst), (ins RC:$src1, RC:$src2),
3377 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3378 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3379 [(set RC:$dst, (Int RC:$src1, RC:$src2))]>;
3380 def rm : I<0xD0, MRMSrcMem,
3381 (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
3383 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3384 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3385 [(set RC:$dst, (Int RC:$src1, (memop addr:$src2)))]>;
3388 let isAsmParserOnly = 1, Predicates = [HasAVX],
3389 ExeDomain = SSEPackedDouble in {
3390 defm VADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "vaddsubps", VR128,
3391 f128mem, 0>, XD, VEX_4V;
3392 defm VADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "vaddsubpd", VR128,
3393 f128mem, 0>, OpSize, VEX_4V;
3394 defm VADDSUBPSY : sse3_addsub<int_x86_avx_addsub_ps_256, "vaddsubps", VR256,
3395 f256mem, 0>, XD, VEX_4V;
3396 defm VADDSUBPDY : sse3_addsub<int_x86_avx_addsub_pd_256, "vaddsubpd", VR256,
3397 f256mem, 0>, OpSize, VEX_4V;
3399 let Constraints = "$src1 = $dst", Predicates = [HasSSE3],
3400 ExeDomain = SSEPackedDouble in {
3401 defm ADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "addsubps", VR128,
3403 defm ADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "addsubpd", VR128,
3404 f128mem>, TB, OpSize;
3407 //===---------------------------------------------------------------------===//
3408 // SSE3 Instructions
3409 //===---------------------------------------------------------------------===//
3412 multiclass S3D_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
3413 X86MemOperand x86memop, Intrinsic IntId, bit Is2Addr = 1> {
3414 def rr : S3DI<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
3416 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3417 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3418 [(set RC:$dst, (vt (IntId RC:$src1, RC:$src2)))]>;
3420 def rm : S3DI<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
3422 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3423 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3424 [(set RC:$dst, (vt (IntId RC:$src1, (memop addr:$src2))))]>;
3426 multiclass S3_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
3427 X86MemOperand x86memop, Intrinsic IntId, bit Is2Addr = 1> {
3428 def rr : S3I<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
3430 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3431 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3432 [(set RC:$dst, (vt (IntId RC:$src1, RC:$src2)))]>;
3434 def rm : S3I<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
3436 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3437 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3438 [(set RC:$dst, (vt (IntId RC:$src1, (memop addr:$src2))))]>;
3441 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
3442 defm VHADDPS : S3D_Int<0x7C, "vhaddps", v4f32, VR128, f128mem,
3443 int_x86_sse3_hadd_ps, 0>, VEX_4V;
3444 defm VHADDPD : S3_Int <0x7C, "vhaddpd", v2f64, VR128, f128mem,
3445 int_x86_sse3_hadd_pd, 0>, VEX_4V;
3446 defm VHSUBPS : S3D_Int<0x7D, "vhsubps", v4f32, VR128, f128mem,
3447 int_x86_sse3_hsub_ps, 0>, VEX_4V;
3448 defm VHSUBPD : S3_Int <0x7D, "vhsubpd", v2f64, VR128, f128mem,
3449 int_x86_sse3_hsub_pd, 0>, VEX_4V;
3450 defm VHADDPSY : S3D_Int<0x7C, "vhaddps", v8f32, VR256, f256mem,
3451 int_x86_avx_hadd_ps_256, 0>, VEX_4V;
3452 defm VHADDPDY : S3_Int <0x7C, "vhaddpd", v4f64, VR256, f256mem,
3453 int_x86_avx_hadd_pd_256, 0>, VEX_4V;
3454 defm VHSUBPSY : S3D_Int<0x7D, "vhsubps", v8f32, VR256, f256mem,
3455 int_x86_avx_hsub_ps_256, 0>, VEX_4V;
3456 defm VHSUBPDY : S3_Int <0x7D, "vhsubpd", v4f64, VR256, f256mem,
3457 int_x86_avx_hsub_pd_256, 0>, VEX_4V;
3460 let Constraints = "$src1 = $dst" in {
3461 defm HADDPS : S3D_Int<0x7C, "haddps", v4f32, VR128, f128mem,
3462 int_x86_sse3_hadd_ps>;
3463 defm HADDPD : S3_Int<0x7C, "haddpd", v2f64, VR128, f128mem,
3464 int_x86_sse3_hadd_pd>;
3465 defm HSUBPS : S3D_Int<0x7D, "hsubps", v4f32, VR128, f128mem,
3466 int_x86_sse3_hsub_ps>;
3467 defm HSUBPD : S3_Int<0x7D, "hsubpd", v2f64, VR128, f128mem,
3468 int_x86_sse3_hsub_pd>;
3471 //===---------------------------------------------------------------------===//
3472 // SSSE3 - Packed Absolute Instructions
3473 //===---------------------------------------------------------------------===//
3476 /// SS3I_unop_rm_int - Simple SSSE3 unary op whose type can be v*{i8,i16,i32}.
3477 multiclass SS3I_unop_rm_int<bits<8> opc, string OpcodeStr,
3478 PatFrag mem_frag128, Intrinsic IntId128> {
3479 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
3481 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3482 [(set VR128:$dst, (IntId128 VR128:$src))]>,
3485 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
3487 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3490 (bitconvert (mem_frag128 addr:$src))))]>, OpSize;
3493 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
3494 defm VPABSB : SS3I_unop_rm_int<0x1C, "vpabsb", memopv16i8,
3495 int_x86_ssse3_pabs_b_128>, VEX;
3496 defm VPABSW : SS3I_unop_rm_int<0x1D, "vpabsw", memopv8i16,
3497 int_x86_ssse3_pabs_w_128>, VEX;
3498 defm VPABSD : SS3I_unop_rm_int<0x1E, "vpabsd", memopv4i32,
3499 int_x86_ssse3_pabs_d_128>, VEX;
3502 defm PABSB : SS3I_unop_rm_int<0x1C, "pabsb", memopv16i8,
3503 int_x86_ssse3_pabs_b_128>;
3504 defm PABSW : SS3I_unop_rm_int<0x1D, "pabsw", memopv8i16,
3505 int_x86_ssse3_pabs_w_128>;
3506 defm PABSD : SS3I_unop_rm_int<0x1E, "pabsd", memopv4i32,
3507 int_x86_ssse3_pabs_d_128>;
3509 //===---------------------------------------------------------------------===//
3510 // SSSE3 - Packed Binary Operator Instructions
3511 //===---------------------------------------------------------------------===//
3513 /// SS3I_binop_rm_int - Simple SSSE3 bin op whose type can be v*{i8,i16,i32}.
3514 multiclass SS3I_binop_rm_int<bits<8> opc, string OpcodeStr,
3515 PatFrag mem_frag128, Intrinsic IntId128,
3517 let isCommutable = 1 in
3518 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
3519 (ins VR128:$src1, VR128:$src2),
3521 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3522 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3523 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
3525 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
3526 (ins VR128:$src1, i128mem:$src2),
3528 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3529 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3531 (IntId128 VR128:$src1,
3532 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
3535 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
3536 let isCommutable = 0 in {
3537 defm VPHADDW : SS3I_binop_rm_int<0x01, "vphaddw", memopv8i16,
3538 int_x86_ssse3_phadd_w_128, 0>, VEX_4V;
3539 defm VPHADDD : SS3I_binop_rm_int<0x02, "vphaddd", memopv4i32,
3540 int_x86_ssse3_phadd_d_128, 0>, VEX_4V;
3541 defm VPHADDSW : SS3I_binop_rm_int<0x03, "vphaddsw", memopv8i16,
3542 int_x86_ssse3_phadd_sw_128, 0>, VEX_4V;
3543 defm VPHSUBW : SS3I_binop_rm_int<0x05, "vphsubw", memopv8i16,
3544 int_x86_ssse3_phsub_w_128, 0>, VEX_4V;
3545 defm VPHSUBD : SS3I_binop_rm_int<0x06, "vphsubd", memopv4i32,
3546 int_x86_ssse3_phsub_d_128, 0>, VEX_4V;
3547 defm VPHSUBSW : SS3I_binop_rm_int<0x07, "vphsubsw", memopv8i16,
3548 int_x86_ssse3_phsub_sw_128, 0>, VEX_4V;
3549 defm VPMADDUBSW : SS3I_binop_rm_int<0x04, "vpmaddubsw", memopv16i8,
3550 int_x86_ssse3_pmadd_ub_sw_128, 0>, VEX_4V;
3551 defm VPSHUFB : SS3I_binop_rm_int<0x00, "vpshufb", memopv16i8,
3552 int_x86_ssse3_pshuf_b_128, 0>, VEX_4V;
3553 defm VPSIGNB : SS3I_binop_rm_int<0x08, "vpsignb", memopv16i8,
3554 int_x86_ssse3_psign_b_128, 0>, VEX_4V;
3555 defm VPSIGNW : SS3I_binop_rm_int<0x09, "vpsignw", memopv8i16,
3556 int_x86_ssse3_psign_w_128, 0>, VEX_4V;
3557 defm VPSIGND : SS3I_binop_rm_int<0x0A, "vpsignd", memopv4i32,
3558 int_x86_ssse3_psign_d_128, 0>, VEX_4V;
3560 defm VPMULHRSW : SS3I_binop_rm_int<0x0B, "vpmulhrsw", memopv8i16,
3561 int_x86_ssse3_pmul_hr_sw_128, 0>, VEX_4V;
3564 // None of these have i8 immediate fields.
3565 let ImmT = NoImm, Constraints = "$src1 = $dst" in {
3566 let isCommutable = 0 in {
3567 defm PHADDW : SS3I_binop_rm_int<0x01, "phaddw", memopv8i16,
3568 int_x86_ssse3_phadd_w_128>;
3569 defm PHADDD : SS3I_binop_rm_int<0x02, "phaddd", memopv4i32,
3570 int_x86_ssse3_phadd_d_128>;
3571 defm PHADDSW : SS3I_binop_rm_int<0x03, "phaddsw", memopv8i16,
3572 int_x86_ssse3_phadd_sw_128>;
3573 defm PHSUBW : SS3I_binop_rm_int<0x05, "phsubw", memopv8i16,
3574 int_x86_ssse3_phsub_w_128>;
3575 defm PHSUBD : SS3I_binop_rm_int<0x06, "phsubd", memopv4i32,
3576 int_x86_ssse3_phsub_d_128>;
3577 defm PHSUBSW : SS3I_binop_rm_int<0x07, "phsubsw", memopv8i16,
3578 int_x86_ssse3_phsub_sw_128>;
3579 defm PMADDUBSW : SS3I_binop_rm_int<0x04, "pmaddubsw", memopv16i8,
3580 int_x86_ssse3_pmadd_ub_sw_128>;
3581 defm PSHUFB : SS3I_binop_rm_int<0x00, "pshufb", memopv16i8,
3582 int_x86_ssse3_pshuf_b_128>;
3583 defm PSIGNB : SS3I_binop_rm_int<0x08, "psignb", memopv16i8,
3584 int_x86_ssse3_psign_b_128>;
3585 defm PSIGNW : SS3I_binop_rm_int<0x09, "psignw", memopv8i16,
3586 int_x86_ssse3_psign_w_128>;
3587 defm PSIGND : SS3I_binop_rm_int<0x0A, "psignd", memopv4i32,
3588 int_x86_ssse3_psign_d_128>;
3590 defm PMULHRSW : SS3I_binop_rm_int<0x0B, "pmulhrsw", memopv8i16,
3591 int_x86_ssse3_pmul_hr_sw_128>;
3594 def : Pat<(X86pshufb VR128:$src, VR128:$mask),
3595 (PSHUFBrr128 VR128:$src, VR128:$mask)>, Requires<[HasSSSE3]>;
3596 def : Pat<(X86pshufb VR128:$src, (bc_v16i8 (memopv2i64 addr:$mask))),
3597 (PSHUFBrm128 VR128:$src, addr:$mask)>, Requires<[HasSSSE3]>;
3599 //===---------------------------------------------------------------------===//
3600 // SSSE3 - Packed Align Instruction Patterns
3601 //===---------------------------------------------------------------------===//
3603 multiclass ssse3_palign<string asm, bit Is2Addr = 1> {
3604 def R128rr : SS3AI<0x0F, MRMSrcReg, (outs VR128:$dst),
3605 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
3607 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3609 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
3611 def R128rm : SS3AI<0x0F, MRMSrcMem, (outs VR128:$dst),
3612 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
3614 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3616 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
3620 let isAsmParserOnly = 1, Predicates = [HasAVX] in
3621 defm VPALIGN : ssse3_palign<"vpalignr", 0>, VEX_4V;
3622 let Constraints = "$src1 = $dst" in
3623 defm PALIGN : ssse3_palign<"palignr">;
3625 let AddedComplexity = 5 in {
3626 def : Pat<(v4i32 (palign:$src3 VR128:$src1, VR128:$src2)),
3627 (PALIGNR128rr VR128:$src2, VR128:$src1,
3628 (SHUFFLE_get_palign_imm VR128:$src3))>,
3629 Requires<[HasSSSE3]>;
3630 def : Pat<(v4f32 (palign:$src3 VR128:$src1, VR128:$src2)),
3631 (PALIGNR128rr VR128:$src2, VR128:$src1,
3632 (SHUFFLE_get_palign_imm VR128:$src3))>,
3633 Requires<[HasSSSE3]>;
3634 def : Pat<(v8i16 (palign:$src3 VR128:$src1, VR128:$src2)),
3635 (PALIGNR128rr VR128:$src2, VR128:$src1,
3636 (SHUFFLE_get_palign_imm VR128:$src3))>,
3637 Requires<[HasSSSE3]>;
3638 def : Pat<(v16i8 (palign:$src3 VR128:$src1, VR128:$src2)),
3639 (PALIGNR128rr VR128:$src2, VR128:$src1,
3640 (SHUFFLE_get_palign_imm VR128:$src3))>,
3641 Requires<[HasSSSE3]>;
3644 //===---------------------------------------------------------------------===//
3645 // SSSE3 Misc Instructions
3646 //===---------------------------------------------------------------------===//
3648 // Thread synchronization
3649 let usesCustomInserter = 1 in {
3650 def MONITOR : PseudoI<(outs), (ins i32mem:$src1, GR32:$src2, GR32:$src3),
3651 [(int_x86_sse3_monitor addr:$src1, GR32:$src2, GR32:$src3)]>;
3652 def MWAIT : PseudoI<(outs), (ins GR32:$src1, GR32:$src2),
3653 [(int_x86_sse3_mwait GR32:$src1, GR32:$src2)]>;
3656 let Uses = [EAX, ECX, EDX] in
3657 def MONITORrrr : I<0x01, MRM_C8, (outs), (ins), "monitor", []>, TB,
3658 Requires<[HasSSE3]>;
3659 let Uses = [ECX, EAX] in
3660 def MWAITrr : I<0x01, MRM_C9, (outs), (ins), "mwait", []>, TB,
3661 Requires<[HasSSE3]>;
3663 //===---------------------------------------------------------------------===//
3664 // Non-Instruction Patterns
3665 //===---------------------------------------------------------------------===//
3667 // extload f32 -> f64. This matches load+fextend because we have a hack in
3668 // the isel (PreprocessForFPConvert) that can introduce loads after dag
3670 // Since these loads aren't folded into the fextend, we have to match it
3672 let Predicates = [HasSSE2] in
3673 def : Pat<(fextend (loadf32 addr:$src)),
3674 (CVTSS2SDrm addr:$src)>;
3677 let Predicates = [HasSSE2] in {
3678 def : Pat<(v2i64 (bitconvert (v4i32 VR128:$src))), (v2i64 VR128:$src)>;
3679 def : Pat<(v2i64 (bitconvert (v8i16 VR128:$src))), (v2i64 VR128:$src)>;
3680 def : Pat<(v2i64 (bitconvert (v16i8 VR128:$src))), (v2i64 VR128:$src)>;
3681 def : Pat<(v2i64 (bitconvert (v2f64 VR128:$src))), (v2i64 VR128:$src)>;
3682 def : Pat<(v2i64 (bitconvert (v4f32 VR128:$src))), (v2i64 VR128:$src)>;
3683 def : Pat<(v4i32 (bitconvert (v2i64 VR128:$src))), (v4i32 VR128:$src)>;
3684 def : Pat<(v4i32 (bitconvert (v8i16 VR128:$src))), (v4i32 VR128:$src)>;
3685 def : Pat<(v4i32 (bitconvert (v16i8 VR128:$src))), (v4i32 VR128:$src)>;
3686 def : Pat<(v4i32 (bitconvert (v2f64 VR128:$src))), (v4i32 VR128:$src)>;
3687 def : Pat<(v4i32 (bitconvert (v4f32 VR128:$src))), (v4i32 VR128:$src)>;
3688 def : Pat<(v8i16 (bitconvert (v2i64 VR128:$src))), (v8i16 VR128:$src)>;
3689 def : Pat<(v8i16 (bitconvert (v4i32 VR128:$src))), (v8i16 VR128:$src)>;
3690 def : Pat<(v8i16 (bitconvert (v16i8 VR128:$src))), (v8i16 VR128:$src)>;
3691 def : Pat<(v8i16 (bitconvert (v2f64 VR128:$src))), (v8i16 VR128:$src)>;
3692 def : Pat<(v8i16 (bitconvert (v4f32 VR128:$src))), (v8i16 VR128:$src)>;
3693 def : Pat<(v16i8 (bitconvert (v2i64 VR128:$src))), (v16i8 VR128:$src)>;
3694 def : Pat<(v16i8 (bitconvert (v4i32 VR128:$src))), (v16i8 VR128:$src)>;
3695 def : Pat<(v16i8 (bitconvert (v8i16 VR128:$src))), (v16i8 VR128:$src)>;
3696 def : Pat<(v16i8 (bitconvert (v2f64 VR128:$src))), (v16i8 VR128:$src)>;
3697 def : Pat<(v16i8 (bitconvert (v4f32 VR128:$src))), (v16i8 VR128:$src)>;
3698 def : Pat<(v4f32 (bitconvert (v2i64 VR128:$src))), (v4f32 VR128:$src)>;
3699 def : Pat<(v4f32 (bitconvert (v4i32 VR128:$src))), (v4f32 VR128:$src)>;
3700 def : Pat<(v4f32 (bitconvert (v8i16 VR128:$src))), (v4f32 VR128:$src)>;
3701 def : Pat<(v4f32 (bitconvert (v16i8 VR128:$src))), (v4f32 VR128:$src)>;
3702 def : Pat<(v4f32 (bitconvert (v2f64 VR128:$src))), (v4f32 VR128:$src)>;
3703 def : Pat<(v2f64 (bitconvert (v2i64 VR128:$src))), (v2f64 VR128:$src)>;
3704 def : Pat<(v2f64 (bitconvert (v4i32 VR128:$src))), (v2f64 VR128:$src)>;
3705 def : Pat<(v2f64 (bitconvert (v8i16 VR128:$src))), (v2f64 VR128:$src)>;
3706 def : Pat<(v2f64 (bitconvert (v16i8 VR128:$src))), (v2f64 VR128:$src)>;
3707 def : Pat<(v2f64 (bitconvert (v4f32 VR128:$src))), (v2f64 VR128:$src)>;
3710 // Move scalar to XMM zero-extended
3711 // movd to XMM register zero-extends
3712 let AddedComplexity = 15 in {
3713 // Zeroing a VR128 then do a MOVS{S|D} to the lower bits.
3714 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64:$src)))),
3715 (MOVSDrr (v2f64 (V_SET0PS)), FR64:$src)>;
3716 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32:$src)))),
3717 (MOVSSrr (v4f32 (V_SET0PS)), FR32:$src)>;
3718 def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
3719 (MOVSSrr (v4f32 (V_SET0PS)),
3720 (f32 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss)))>;
3721 def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
3722 (MOVSSrr (v4i32 (V_SET0PI)),
3723 (EXTRACT_SUBREG (v4i32 VR128:$src), sub_ss))>;
3726 // Splat v2f64 / v2i64
3727 let AddedComplexity = 10 in {
3728 def : Pat<(splat_lo (v2f64 VR128:$src), (undef)),
3729 (UNPCKLPDrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
3730 def : Pat<(unpckh (v2f64 VR128:$src), (undef)),
3731 (UNPCKHPDrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
3732 def : Pat<(splat_lo (v2i64 VR128:$src), (undef)),
3733 (PUNPCKLQDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
3734 def : Pat<(unpckh (v2i64 VR128:$src), (undef)),
3735 (PUNPCKHQDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
3738 // Special unary SHUFPSrri case.
3739 def : Pat<(v4f32 (pshufd:$src3 VR128:$src1, (undef))),
3740 (SHUFPSrri VR128:$src1, VR128:$src1,
3741 (SHUFFLE_get_shuf_imm VR128:$src3))>;
3742 let AddedComplexity = 5 in
3743 def : Pat<(v4f32 (pshufd:$src2 VR128:$src1, (undef))),
3744 (PSHUFDri VR128:$src1, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3745 Requires<[HasSSE2]>;
3746 // Special unary SHUFPDrri case.
3747 def : Pat<(v2i64 (pshufd:$src3 VR128:$src1, (undef))),
3748 (SHUFPDrri VR128:$src1, VR128:$src1,
3749 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3750 Requires<[HasSSE2]>;
3751 // Special unary SHUFPDrri case.
3752 def : Pat<(v2f64 (pshufd:$src3 VR128:$src1, (undef))),
3753 (SHUFPDrri VR128:$src1, VR128:$src1,
3754 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3755 Requires<[HasSSE2]>;
3756 // Unary v4f32 shuffle with PSHUF* in order to fold a load.
3757 def : Pat<(pshufd:$src2 (bc_v4i32 (memopv4f32 addr:$src1)), (undef)),
3758 (PSHUFDmi addr:$src1, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3759 Requires<[HasSSE2]>;
3761 // Special binary v4i32 shuffle cases with SHUFPS.
3762 def : Pat<(v4i32 (shufp:$src3 VR128:$src1, (v4i32 VR128:$src2))),
3763 (SHUFPSrri VR128:$src1, VR128:$src2,
3764 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3765 Requires<[HasSSE2]>;
3766 def : Pat<(v4i32 (shufp:$src3 VR128:$src1, (bc_v4i32 (memopv2i64 addr:$src2)))),
3767 (SHUFPSrmi VR128:$src1, addr:$src2,
3768 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3769 Requires<[HasSSE2]>;
3770 // Special binary v2i64 shuffle cases using SHUFPDrri.
3771 def : Pat<(v2i64 (shufp:$src3 VR128:$src1, VR128:$src2)),
3772 (SHUFPDrri VR128:$src1, VR128:$src2,
3773 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3774 Requires<[HasSSE2]>;
3776 // vector_shuffle v1, <undef>, <0, 0, 1, 1, ...>
3777 let AddedComplexity = 15 in {
3778 def : Pat<(v4i32 (unpckl_undef:$src2 VR128:$src, (undef))),
3779 (PSHUFDri VR128:$src, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3780 Requires<[OptForSpeed, HasSSE2]>;
3781 def : Pat<(v4f32 (unpckl_undef:$src2 VR128:$src, (undef))),
3782 (PSHUFDri VR128:$src, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3783 Requires<[OptForSpeed, HasSSE2]>;
3785 let AddedComplexity = 10 in {
3786 def : Pat<(v4f32 (unpckl_undef VR128:$src, (undef))),
3787 (UNPCKLPSrr VR128:$src, VR128:$src)>;
3788 def : Pat<(v16i8 (unpckl_undef VR128:$src, (undef))),
3789 (PUNPCKLBWrr VR128:$src, VR128:$src)>;
3790 def : Pat<(v8i16 (unpckl_undef VR128:$src, (undef))),
3791 (PUNPCKLWDrr VR128:$src, VR128:$src)>;
3792 def : Pat<(v4i32 (unpckl_undef VR128:$src, (undef))),
3793 (PUNPCKLDQrr VR128:$src, VR128:$src)>;
3796 // vector_shuffle v1, <undef>, <2, 2, 3, 3, ...>
3797 let AddedComplexity = 15 in {
3798 def : Pat<(v4i32 (unpckh_undef:$src2 VR128:$src, (undef))),
3799 (PSHUFDri VR128:$src, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3800 Requires<[OptForSpeed, HasSSE2]>;
3801 def : Pat<(v4f32 (unpckh_undef:$src2 VR128:$src, (undef))),
3802 (PSHUFDri VR128:$src, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3803 Requires<[OptForSpeed, HasSSE2]>;
3805 let AddedComplexity = 10 in {
3806 def : Pat<(v4f32 (unpckh_undef VR128:$src, (undef))),
3807 (UNPCKHPSrr VR128:$src, VR128:$src)>;
3808 def : Pat<(v16i8 (unpckh_undef VR128:$src, (undef))),
3809 (PUNPCKHBWrr VR128:$src, VR128:$src)>;
3810 def : Pat<(v8i16 (unpckh_undef VR128:$src, (undef))),
3811 (PUNPCKHWDrr VR128:$src, VR128:$src)>;
3812 def : Pat<(v4i32 (unpckh_undef VR128:$src, (undef))),
3813 (PUNPCKHDQrr VR128:$src, VR128:$src)>;
3816 let AddedComplexity = 20 in {
3817 // vector_shuffle v1, v2 <0, 1, 4, 5> using MOVLHPS
3818 def : Pat<(v4i32 (movlhps VR128:$src1, VR128:$src2)),
3819 (MOVLHPSrr VR128:$src1, VR128:$src2)>;
3821 // vector_shuffle v1, v2 <6, 7, 2, 3> using MOVHLPS
3822 def : Pat<(v4i32 (movhlps VR128:$src1, VR128:$src2)),
3823 (MOVHLPSrr VR128:$src1, VR128:$src2)>;
3825 // vector_shuffle v1, undef <2, ?, ?, ?> using MOVHLPS
3826 def : Pat<(v4f32 (movhlps_undef VR128:$src1, (undef))),
3827 (MOVHLPSrr VR128:$src1, VR128:$src1)>;
3828 def : Pat<(v4i32 (movhlps_undef VR128:$src1, (undef))),
3829 (MOVHLPSrr VR128:$src1, VR128:$src1)>;
3832 let AddedComplexity = 20 in {
3833 // vector_shuffle v1, (load v2) <4, 5, 2, 3> using MOVLPS
3834 def : Pat<(v4f32 (movlp VR128:$src1, (load addr:$src2))),
3835 (MOVLPSrm VR128:$src1, addr:$src2)>;
3836 def : Pat<(v2f64 (movlp VR128:$src1, (load addr:$src2))),
3837 (MOVLPDrm VR128:$src1, addr:$src2)>;
3838 def : Pat<(v4i32 (movlp VR128:$src1, (load addr:$src2))),
3839 (MOVLPSrm VR128:$src1, addr:$src2)>;
3840 def : Pat<(v2i64 (movlp VR128:$src1, (load addr:$src2))),
3841 (MOVLPDrm VR128:$src1, addr:$src2)>;
3844 // (store (vector_shuffle (load addr), v2, <4, 5, 2, 3>), addr) using MOVLPS
3845 def : Pat<(store (v4f32 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
3846 (MOVLPSmr addr:$src1, VR128:$src2)>;
3847 def : Pat<(store (v2f64 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
3848 (MOVLPDmr addr:$src1, VR128:$src2)>;
3849 def : Pat<(store (v4i32 (movlp (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2)),
3851 (MOVLPSmr addr:$src1, VR128:$src2)>;
3852 def : Pat<(store (v2i64 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
3853 (MOVLPDmr addr:$src1, VR128:$src2)>;
3855 let AddedComplexity = 15 in {
3856 // Setting the lowest element in the vector.
3857 def : Pat<(v4i32 (movl VR128:$src1, VR128:$src2)),
3858 (MOVSSrr (v4i32 VR128:$src1),
3859 (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_ss))>;
3860 def : Pat<(v2i64 (movl VR128:$src1, VR128:$src2)),
3861 (MOVSDrr (v2i64 VR128:$src1),
3862 (EXTRACT_SUBREG (v2i64 VR128:$src2), sub_sd))>;
3864 // vector_shuffle v1, v2 <4, 5, 2, 3> using movsd
3865 def : Pat<(v4f32 (movlp VR128:$src1, VR128:$src2)),
3866 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG VR128:$src2, sub_sd))>,
3867 Requires<[HasSSE2]>;
3868 def : Pat<(v4i32 (movlp VR128:$src1, VR128:$src2)),
3869 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG VR128:$src2, sub_sd))>,
3870 Requires<[HasSSE2]>;
3873 // vector_shuffle v1, v2 <4, 5, 2, 3> using SHUFPSrri (we prefer movsd, but
3874 // fall back to this for SSE1)
3875 def : Pat<(v4f32 (movlp:$src3 VR128:$src1, (v4f32 VR128:$src2))),
3876 (SHUFPSrri VR128:$src2, VR128:$src1,
3877 (SHUFFLE_get_shuf_imm VR128:$src3))>;
3879 // Set lowest element and zero upper elements.
3880 def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
3881 (MOVZPQILo2PQIrr VR128:$src)>, Requires<[HasSSE2]>;
3883 // Some special case pandn patterns.
3884 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v4i32 immAllOnesV))),
3886 (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
3887 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v8i16 immAllOnesV))),
3889 (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
3890 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v16i8 immAllOnesV))),
3892 (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
3894 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v4i32 immAllOnesV))),
3895 (memop addr:$src2))),
3896 (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
3897 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v8i16 immAllOnesV))),
3898 (memop addr:$src2))),
3899 (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
3900 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v16i8 immAllOnesV))),
3901 (memop addr:$src2))),
3902 (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
3904 // vector -> vector casts
3905 def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
3906 (Int_CVTDQ2PSrr VR128:$src)>, Requires<[HasSSE2]>;
3907 def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
3908 (CVTTPS2DQrr VR128:$src)>, Requires<[HasSSE2]>;
3910 // Use movaps / movups for SSE integer load / store (one byte shorter).
3911 let Predicates = [HasSSE1] in {
3912 def : Pat<(alignedloadv4i32 addr:$src),
3913 (MOVAPSrm addr:$src)>;
3914 def : Pat<(loadv4i32 addr:$src),
3915 (MOVUPSrm addr:$src)>;
3916 def : Pat<(alignedloadv2i64 addr:$src),
3917 (MOVAPSrm addr:$src)>;
3918 def : Pat<(loadv2i64 addr:$src),
3919 (MOVUPSrm addr:$src)>;
3921 def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
3922 (MOVAPSmr addr:$dst, VR128:$src)>;
3923 def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
3924 (MOVAPSmr addr:$dst, VR128:$src)>;
3925 def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
3926 (MOVAPSmr addr:$dst, VR128:$src)>;
3927 def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
3928 (MOVAPSmr addr:$dst, VR128:$src)>;
3929 def : Pat<(store (v2i64 VR128:$src), addr:$dst),
3930 (MOVUPSmr addr:$dst, VR128:$src)>;
3931 def : Pat<(store (v4i32 VR128:$src), addr:$dst),
3932 (MOVUPSmr addr:$dst, VR128:$src)>;
3933 def : Pat<(store (v8i16 VR128:$src), addr:$dst),
3934 (MOVUPSmr addr:$dst, VR128:$src)>;
3935 def : Pat<(store (v16i8 VR128:$src), addr:$dst),
3936 (MOVUPSmr addr:$dst, VR128:$src)>;
3939 // Use vmovaps/vmovups for AVX 128-bit integer load/store (one byte shorter).
3940 let Predicates = [HasAVX] in {
3941 def : Pat<(alignedloadv4i32 addr:$src),
3942 (VMOVAPSrm addr:$src)>;
3943 def : Pat<(loadv4i32 addr:$src),
3944 (VMOVUPSrm addr:$src)>;
3945 def : Pat<(alignedloadv2i64 addr:$src),
3946 (VMOVAPSrm addr:$src)>;
3947 def : Pat<(loadv2i64 addr:$src),
3948 (VMOVUPSrm addr:$src)>;
3950 def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
3951 (VMOVAPSmr addr:$dst, VR128:$src)>;
3952 def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
3953 (VMOVAPSmr addr:$dst, VR128:$src)>;
3954 def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
3955 (VMOVAPSmr addr:$dst, VR128:$src)>;
3956 def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
3957 (VMOVAPSmr addr:$dst, VR128:$src)>;
3958 def : Pat<(store (v2i64 VR128:$src), addr:$dst),
3959 (VMOVUPSmr addr:$dst, VR128:$src)>;
3960 def : Pat<(store (v4i32 VR128:$src), addr:$dst),
3961 (VMOVUPSmr addr:$dst, VR128:$src)>;
3962 def : Pat<(store (v8i16 VR128:$src), addr:$dst),
3963 (VMOVUPSmr addr:$dst, VR128:$src)>;
3964 def : Pat<(store (v16i8 VR128:$src), addr:$dst),
3965 (VMOVUPSmr addr:$dst, VR128:$src)>;
3968 //===----------------------------------------------------------------------===//
3969 // SSE4.1 - Packed Move with Sign/Zero Extend
3970 //===----------------------------------------------------------------------===//
3972 multiclass SS41I_binop_rm_int8<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
3973 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3974 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3975 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
3977 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3978 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3980 (IntId (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))]>,
3984 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
3985 defm VPMOVSXBW : SS41I_binop_rm_int8<0x20, "vpmovsxbw", int_x86_sse41_pmovsxbw>,
3987 defm VPMOVSXWD : SS41I_binop_rm_int8<0x23, "vpmovsxwd", int_x86_sse41_pmovsxwd>,
3989 defm VPMOVSXDQ : SS41I_binop_rm_int8<0x25, "vpmovsxdq", int_x86_sse41_pmovsxdq>,
3991 defm VPMOVZXBW : SS41I_binop_rm_int8<0x30, "vpmovzxbw", int_x86_sse41_pmovzxbw>,
3993 defm VPMOVZXWD : SS41I_binop_rm_int8<0x33, "vpmovzxwd", int_x86_sse41_pmovzxwd>,
3995 defm VPMOVZXDQ : SS41I_binop_rm_int8<0x35, "vpmovzxdq", int_x86_sse41_pmovzxdq>,
3999 defm PMOVSXBW : SS41I_binop_rm_int8<0x20, "pmovsxbw", int_x86_sse41_pmovsxbw>;
4000 defm PMOVSXWD : SS41I_binop_rm_int8<0x23, "pmovsxwd", int_x86_sse41_pmovsxwd>;
4001 defm PMOVSXDQ : SS41I_binop_rm_int8<0x25, "pmovsxdq", int_x86_sse41_pmovsxdq>;
4002 defm PMOVZXBW : SS41I_binop_rm_int8<0x30, "pmovzxbw", int_x86_sse41_pmovzxbw>;
4003 defm PMOVZXWD : SS41I_binop_rm_int8<0x33, "pmovzxwd", int_x86_sse41_pmovzxwd>;
4004 defm PMOVZXDQ : SS41I_binop_rm_int8<0x35, "pmovzxdq", int_x86_sse41_pmovzxdq>;
4006 // Common patterns involving scalar load.
4007 def : Pat<(int_x86_sse41_pmovsxbw (vzmovl_v2i64 addr:$src)),
4008 (PMOVSXBWrm addr:$src)>, Requires<[HasSSE41]>;
4009 def : Pat<(int_x86_sse41_pmovsxbw (vzload_v2i64 addr:$src)),
4010 (PMOVSXBWrm addr:$src)>, Requires<[HasSSE41]>;
4012 def : Pat<(int_x86_sse41_pmovsxwd (vzmovl_v2i64 addr:$src)),
4013 (PMOVSXWDrm addr:$src)>, Requires<[HasSSE41]>;
4014 def : Pat<(int_x86_sse41_pmovsxwd (vzload_v2i64 addr:$src)),
4015 (PMOVSXWDrm addr:$src)>, Requires<[HasSSE41]>;
4017 def : Pat<(int_x86_sse41_pmovsxdq (vzmovl_v2i64 addr:$src)),
4018 (PMOVSXDQrm addr:$src)>, Requires<[HasSSE41]>;
4019 def : Pat<(int_x86_sse41_pmovsxdq (vzload_v2i64 addr:$src)),
4020 (PMOVSXDQrm addr:$src)>, Requires<[HasSSE41]>;
4022 def : Pat<(int_x86_sse41_pmovzxbw (vzmovl_v2i64 addr:$src)),
4023 (PMOVZXBWrm addr:$src)>, Requires<[HasSSE41]>;
4024 def : Pat<(int_x86_sse41_pmovzxbw (vzload_v2i64 addr:$src)),
4025 (PMOVZXBWrm addr:$src)>, Requires<[HasSSE41]>;
4027 def : Pat<(int_x86_sse41_pmovzxwd (vzmovl_v2i64 addr:$src)),
4028 (PMOVZXWDrm addr:$src)>, Requires<[HasSSE41]>;
4029 def : Pat<(int_x86_sse41_pmovzxwd (vzload_v2i64 addr:$src)),
4030 (PMOVZXWDrm addr:$src)>, Requires<[HasSSE41]>;
4032 def : Pat<(int_x86_sse41_pmovzxdq (vzmovl_v2i64 addr:$src)),
4033 (PMOVZXDQrm addr:$src)>, Requires<[HasSSE41]>;
4034 def : Pat<(int_x86_sse41_pmovzxdq (vzload_v2i64 addr:$src)),
4035 (PMOVZXDQrm addr:$src)>, Requires<[HasSSE41]>;
4038 multiclass SS41I_binop_rm_int4<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
4039 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
4040 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4041 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
4043 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
4044 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4046 (IntId (bitconvert (v4i32 (scalar_to_vector (loadi32 addr:$src))))))]>,
4050 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
4051 defm VPMOVSXBD : SS41I_binop_rm_int4<0x21, "vpmovsxbd", int_x86_sse41_pmovsxbd>,
4053 defm VPMOVSXWQ : SS41I_binop_rm_int4<0x24, "vpmovsxwq", int_x86_sse41_pmovsxwq>,
4055 defm VPMOVZXBD : SS41I_binop_rm_int4<0x31, "vpmovzxbd", int_x86_sse41_pmovzxbd>,
4057 defm VPMOVZXWQ : SS41I_binop_rm_int4<0x34, "vpmovzxwq", int_x86_sse41_pmovzxwq>,
4061 defm PMOVSXBD : SS41I_binop_rm_int4<0x21, "pmovsxbd", int_x86_sse41_pmovsxbd>;
4062 defm PMOVSXWQ : SS41I_binop_rm_int4<0x24, "pmovsxwq", int_x86_sse41_pmovsxwq>;
4063 defm PMOVZXBD : SS41I_binop_rm_int4<0x31, "pmovzxbd", int_x86_sse41_pmovzxbd>;
4064 defm PMOVZXWQ : SS41I_binop_rm_int4<0x34, "pmovzxwq", int_x86_sse41_pmovzxwq>;
4066 // Common patterns involving scalar load
4067 def : Pat<(int_x86_sse41_pmovsxbd (vzmovl_v4i32 addr:$src)),
4068 (PMOVSXBDrm addr:$src)>, Requires<[HasSSE41]>;
4069 def : Pat<(int_x86_sse41_pmovsxwq (vzmovl_v4i32 addr:$src)),
4070 (PMOVSXWQrm addr:$src)>, Requires<[HasSSE41]>;
4072 def : Pat<(int_x86_sse41_pmovzxbd (vzmovl_v4i32 addr:$src)),
4073 (PMOVZXBDrm addr:$src)>, Requires<[HasSSE41]>;
4074 def : Pat<(int_x86_sse41_pmovzxwq (vzmovl_v4i32 addr:$src)),
4075 (PMOVZXWQrm addr:$src)>, Requires<[HasSSE41]>;
4078 multiclass SS41I_binop_rm_int2<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
4079 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
4080 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4081 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
4083 // Expecting a i16 load any extended to i32 value.
4084 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i16mem:$src),
4085 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4086 [(set VR128:$dst, (IntId (bitconvert
4087 (v4i32 (scalar_to_vector (loadi16_anyext addr:$src))))))]>,
4091 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
4092 defm VPMOVSXBQ : SS41I_binop_rm_int2<0x22, "vpmovsxbq", int_x86_sse41_pmovsxbq>,
4094 defm VPMOVZXBQ : SS41I_binop_rm_int2<0x32, "vpmovzxbq", int_x86_sse41_pmovzxbq>,
4097 defm PMOVSXBQ : SS41I_binop_rm_int2<0x22, "pmovsxbq", int_x86_sse41_pmovsxbq>;
4098 defm PMOVZXBQ : SS41I_binop_rm_int2<0x32, "pmovzxbq", int_x86_sse41_pmovzxbq>;
4100 // Common patterns involving scalar load
4101 def : Pat<(int_x86_sse41_pmovsxbq
4102 (bitconvert (v4i32 (X86vzmovl
4103 (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
4104 (PMOVSXBQrm addr:$src)>, Requires<[HasSSE41]>;
4106 def : Pat<(int_x86_sse41_pmovzxbq
4107 (bitconvert (v4i32 (X86vzmovl
4108 (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
4109 (PMOVZXBQrm addr:$src)>, Requires<[HasSSE41]>;
4111 //===----------------------------------------------------------------------===//
4112 // SSE4.1 - Extract Instructions
4113 //===----------------------------------------------------------------------===//
4115 /// SS41I_binop_ext8 - SSE 4.1 extract 8 bits to 32 bit reg or 8 bit mem
4116 multiclass SS41I_extract8<bits<8> opc, string OpcodeStr> {
4117 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
4118 (ins VR128:$src1, i32i8imm:$src2),
4119 !strconcat(OpcodeStr,
4120 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4121 [(set GR32:$dst, (X86pextrb (v16i8 VR128:$src1), imm:$src2))]>,
4123 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4124 (ins i8mem:$dst, VR128:$src1, i32i8imm:$src2),
4125 !strconcat(OpcodeStr,
4126 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4129 // There's an AssertZext in the way of writing the store pattern
4130 // (store (i8 (trunc (X86pextrb (v16i8 VR128:$src1), imm:$src2))), addr:$dst)
4133 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
4134 defm VPEXTRB : SS41I_extract8<0x14, "vpextrb">, VEX;
4135 def VPEXTRBrr64 : SS4AIi8<0x14, MRMDestReg, (outs GR64:$dst),
4136 (ins VR128:$src1, i32i8imm:$src2),
4137 "vpextrb\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>, OpSize, VEX;
4140 defm PEXTRB : SS41I_extract8<0x14, "pextrb">;
4143 /// SS41I_extract16 - SSE 4.1 extract 16 bits to memory destination
4144 multiclass SS41I_extract16<bits<8> opc, string OpcodeStr> {
4145 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4146 (ins i16mem:$dst, VR128:$src1, i32i8imm:$src2),
4147 !strconcat(OpcodeStr,
4148 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4151 // There's an AssertZext in the way of writing the store pattern
4152 // (store (i16 (trunc (X86pextrw (v16i8 VR128:$src1), imm:$src2))), addr:$dst)
4155 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4156 defm VPEXTRW : SS41I_extract16<0x15, "vpextrw">, VEX;
4158 defm PEXTRW : SS41I_extract16<0x15, "pextrw">;
4161 /// SS41I_extract32 - SSE 4.1 extract 32 bits to int reg or memory destination
4162 multiclass SS41I_extract32<bits<8> opc, string OpcodeStr> {
4163 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
4164 (ins VR128:$src1, i32i8imm:$src2),
4165 !strconcat(OpcodeStr,
4166 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4168 (extractelt (v4i32 VR128:$src1), imm:$src2))]>, OpSize;
4169 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4170 (ins i32mem:$dst, VR128:$src1, i32i8imm:$src2),
4171 !strconcat(OpcodeStr,
4172 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4173 [(store (extractelt (v4i32 VR128:$src1), imm:$src2),
4174 addr:$dst)]>, OpSize;
4177 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4178 defm VPEXTRD : SS41I_extract32<0x16, "vpextrd">, VEX;
4180 defm PEXTRD : SS41I_extract32<0x16, "pextrd">;
4182 /// SS41I_extract32 - SSE 4.1 extract 32 bits to int reg or memory destination
4183 multiclass SS41I_extract64<bits<8> opc, string OpcodeStr> {
4184 def rr : SS4AIi8<opc, MRMDestReg, (outs GR64:$dst),
4185 (ins VR128:$src1, i32i8imm:$src2),
4186 !strconcat(OpcodeStr,
4187 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4189 (extractelt (v2i64 VR128:$src1), imm:$src2))]>, OpSize, REX_W;
4190 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4191 (ins i64mem:$dst, VR128:$src1, i32i8imm:$src2),
4192 !strconcat(OpcodeStr,
4193 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4194 [(store (extractelt (v2i64 VR128:$src1), imm:$src2),
4195 addr:$dst)]>, OpSize, REX_W;
4198 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4199 defm VPEXTRQ : SS41I_extract64<0x16, "vpextrq">, VEX, VEX_W;
4201 defm PEXTRQ : SS41I_extract64<0x16, "pextrq">;
4203 /// SS41I_extractf32 - SSE 4.1 extract 32 bits fp value to int reg or memory
4205 multiclass SS41I_extractf32<bits<8> opc, string OpcodeStr> {
4206 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
4207 (ins VR128:$src1, i32i8imm:$src2),
4208 !strconcat(OpcodeStr,
4209 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4211 (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2))]>,
4213 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4214 (ins f32mem:$dst, VR128:$src1, i32i8imm:$src2),
4215 !strconcat(OpcodeStr,
4216 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4217 [(store (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2),
4218 addr:$dst)]>, OpSize;
4221 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
4222 defm VEXTRACTPS : SS41I_extractf32<0x17, "vextractps">, VEX;
4223 def VEXTRACTPSrr64 : SS4AIi8<0x17, MRMDestReg, (outs GR64:$dst),
4224 (ins VR128:$src1, i32i8imm:$src2),
4225 "vextractps \t{$src2, $src1, $dst|$dst, $src1, $src2}",
4228 defm EXTRACTPS : SS41I_extractf32<0x17, "extractps">;
4230 // Also match an EXTRACTPS store when the store is done as f32 instead of i32.
4231 def : Pat<(store (f32 (bitconvert (extractelt (bc_v4i32 (v4f32 VR128:$src1)),
4234 (EXTRACTPSmr addr:$dst, VR128:$src1, imm:$src2)>,
4235 Requires<[HasSSE41]>;
4237 //===----------------------------------------------------------------------===//
4238 // SSE4.1 - Insert Instructions
4239 //===----------------------------------------------------------------------===//
4241 multiclass SS41I_insert8<bits<8> opc, string asm, bit Is2Addr = 1> {
4242 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4243 (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
4245 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4247 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4249 (X86pinsrb VR128:$src1, GR32:$src2, imm:$src3))]>, OpSize;
4250 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4251 (ins VR128:$src1, i8mem:$src2, i32i8imm:$src3),
4253 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4255 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4257 (X86pinsrb VR128:$src1, (extloadi8 addr:$src2),
4258 imm:$src3))]>, OpSize;
4261 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4262 defm VPINSRB : SS41I_insert8<0x20, "vpinsrb", 0>, VEX_4V;
4263 let Constraints = "$src1 = $dst" in
4264 defm PINSRB : SS41I_insert8<0x20, "pinsrb">;
4266 multiclass SS41I_insert32<bits<8> opc, string asm, bit Is2Addr = 1> {
4267 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4268 (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
4270 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4272 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4274 (v4i32 (insertelt VR128:$src1, GR32:$src2, imm:$src3)))]>,
4276 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4277 (ins VR128:$src1, i32mem:$src2, i32i8imm:$src3),
4279 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4281 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4283 (v4i32 (insertelt VR128:$src1, (loadi32 addr:$src2),
4284 imm:$src3)))]>, OpSize;
4287 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4288 defm VPINSRD : SS41I_insert32<0x22, "vpinsrd", 0>, VEX_4V;
4289 let Constraints = "$src1 = $dst" in
4290 defm PINSRD : SS41I_insert32<0x22, "pinsrd">;
4292 multiclass SS41I_insert64<bits<8> opc, string asm, bit Is2Addr = 1> {
4293 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4294 (ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
4296 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4298 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4300 (v2i64 (insertelt VR128:$src1, GR64:$src2, imm:$src3)))]>,
4302 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4303 (ins VR128:$src1, i64mem:$src2, i32i8imm:$src3),
4305 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4307 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4309 (v2i64 (insertelt VR128:$src1, (loadi64 addr:$src2),
4310 imm:$src3)))]>, OpSize;
4313 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4314 defm VPINSRQ : SS41I_insert64<0x22, "vpinsrq", 0>, VEX_4V, VEX_W;
4315 let Constraints = "$src1 = $dst" in
4316 defm PINSRQ : SS41I_insert64<0x22, "pinsrq">, REX_W;
4318 // insertps has a few different modes, there's the first two here below which
4319 // are optimized inserts that won't zero arbitrary elements in the destination
4320 // vector. The next one matches the intrinsic and could zero arbitrary elements
4321 // in the target vector.
4322 multiclass SS41I_insertf32<bits<8> opc, string asm, bit Is2Addr = 1> {
4323 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4324 (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4326 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4328 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4330 (X86insrtps VR128:$src1, VR128:$src2, imm:$src3))]>,
4332 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4333 (ins VR128:$src1, f32mem:$src2, i32i8imm:$src3),
4335 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4337 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4339 (X86insrtps VR128:$src1,
4340 (v4f32 (scalar_to_vector (loadf32 addr:$src2))),
4341 imm:$src3))]>, OpSize;
4344 let Constraints = "$src1 = $dst" in
4345 defm INSERTPS : SS41I_insertf32<0x21, "insertps">;
4346 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4347 defm VINSERTPS : SS41I_insertf32<0x21, "vinsertps", 0>, VEX_4V;
4349 def : Pat<(int_x86_sse41_insertps VR128:$src1, VR128:$src2, imm:$src3),
4350 (VINSERTPSrr VR128:$src1, VR128:$src2, imm:$src3)>,
4352 def : Pat<(int_x86_sse41_insertps VR128:$src1, VR128:$src2, imm:$src3),
4353 (INSERTPSrr VR128:$src1, VR128:$src2, imm:$src3)>,
4354 Requires<[HasSSE41]>;
4356 //===----------------------------------------------------------------------===//
4357 // SSE4.1 - Round Instructions
4358 //===----------------------------------------------------------------------===//
4360 multiclass sse41_fp_unop_rm<bits<8> opcps, bits<8> opcpd, string OpcodeStr,
4361 X86MemOperand x86memop, RegisterClass RC,
4362 PatFrag mem_frag32, PatFrag mem_frag64,
4363 Intrinsic V4F32Int, Intrinsic V2F64Int> {
4364 // Intrinsic operation, reg.
4365 // Vector intrinsic operation, reg
4366 def PSr : SS4AIi8<opcps, MRMSrcReg,
4367 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
4368 !strconcat(OpcodeStr,
4369 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4370 [(set RC:$dst, (V4F32Int RC:$src1, imm:$src2))]>,
4373 // Vector intrinsic operation, mem
4374 def PSm : Ii8<opcps, MRMSrcMem,
4375 (outs RC:$dst), (ins f256mem:$src1, i32i8imm:$src2),
4376 !strconcat(OpcodeStr,
4377 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4379 (V4F32Int (mem_frag32 addr:$src1),imm:$src2))]>,
4381 Requires<[HasSSE41]>;
4383 // Vector intrinsic operation, reg
4384 def PDr : SS4AIi8<opcpd, MRMSrcReg,
4385 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
4386 !strconcat(OpcodeStr,
4387 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4388 [(set RC:$dst, (V2F64Int RC:$src1, imm:$src2))]>,
4391 // Vector intrinsic operation, mem
4392 def PDm : SS4AIi8<opcpd, MRMSrcMem,
4393 (outs RC:$dst), (ins f256mem:$src1, i32i8imm:$src2),
4394 !strconcat(OpcodeStr,
4395 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4397 (V2F64Int (mem_frag64 addr:$src1),imm:$src2))]>,
4401 multiclass sse41_fp_unop_rm_avx_p<bits<8> opcps, bits<8> opcpd,
4402 RegisterClass RC, X86MemOperand x86memop, string OpcodeStr> {
4403 // Intrinsic operation, reg.
4404 // Vector intrinsic operation, reg
4405 def PSr_AVX : SS4AIi8<opcps, MRMSrcReg,
4406 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
4407 !strconcat(OpcodeStr,
4408 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4411 // Vector intrinsic operation, mem
4412 def PSm_AVX : Ii8<opcps, MRMSrcMem,
4413 (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
4414 !strconcat(OpcodeStr,
4415 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4416 []>, TA, OpSize, Requires<[HasSSE41]>;
4418 // Vector intrinsic operation, reg
4419 def PDr_AVX : SS4AIi8<opcpd, MRMSrcReg,
4420 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
4421 !strconcat(OpcodeStr,
4422 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4425 // Vector intrinsic operation, mem
4426 def PDm_AVX : SS4AIi8<opcpd, MRMSrcMem,
4427 (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
4428 !strconcat(OpcodeStr,
4429 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4433 multiclass sse41_fp_binop_rm<bits<8> opcss, bits<8> opcsd,
4436 Intrinsic F64Int, bit Is2Addr = 1> {
4437 // Intrinsic operation, reg.
4438 def SSr : SS4AIi8<opcss, MRMSrcReg,
4439 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4441 !strconcat(OpcodeStr,
4442 "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4443 !strconcat(OpcodeStr,
4444 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4445 [(set VR128:$dst, (F32Int VR128:$src1, VR128:$src2, imm:$src3))]>,
4448 // Intrinsic operation, mem.
4449 def SSm : SS4AIi8<opcss, MRMSrcMem,
4450 (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2, i32i8imm:$src3),
4452 !strconcat(OpcodeStr,
4453 "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4454 !strconcat(OpcodeStr,
4455 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4457 (F32Int VR128:$src1, sse_load_f32:$src2, imm:$src3))]>,
4460 // Intrinsic operation, reg.
4461 def SDr : SS4AIi8<opcsd, MRMSrcReg,
4462 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4464 !strconcat(OpcodeStr,
4465 "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4466 !strconcat(OpcodeStr,
4467 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4468 [(set VR128:$dst, (F64Int VR128:$src1, VR128:$src2, imm:$src3))]>,
4471 // Intrinsic operation, mem.
4472 def SDm : SS4AIi8<opcsd, MRMSrcMem,
4473 (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2, i32i8imm:$src3),
4475 !strconcat(OpcodeStr,
4476 "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4477 !strconcat(OpcodeStr,
4478 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4480 (F64Int VR128:$src1, sse_load_f64:$src2, imm:$src3))]>,
4484 multiclass sse41_fp_binop_rm_avx_s<bits<8> opcss, bits<8> opcsd,
4486 // Intrinsic operation, reg.
4487 def SSr_AVX : SS4AIi8<opcss, MRMSrcReg,
4488 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4489 !strconcat(OpcodeStr,
4490 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4493 // Intrinsic operation, mem.
4494 def SSm_AVX : SS4AIi8<opcss, MRMSrcMem,
4495 (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2, i32i8imm:$src3),
4496 !strconcat(OpcodeStr,
4497 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4500 // Intrinsic operation, reg.
4501 def SDr_AVX : SS4AIi8<opcsd, MRMSrcReg,
4502 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4503 !strconcat(OpcodeStr,
4504 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4507 // Intrinsic operation, mem.
4508 def SDm_AVX : SS4AIi8<opcsd, MRMSrcMem,
4509 (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2, i32i8imm:$src3),
4510 !strconcat(OpcodeStr,
4511 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4515 // FP round - roundss, roundps, roundsd, roundpd
4516 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
4518 defm VROUND : sse41_fp_unop_rm<0x08, 0x09, "vround", f128mem, VR128,
4519 memopv4f32, memopv2f64,
4520 int_x86_sse41_round_ps,
4521 int_x86_sse41_round_pd>, VEX;
4522 defm VROUNDY : sse41_fp_unop_rm<0x08, 0x09, "vround", f256mem, VR256,
4523 memopv8f32, memopv4f64,
4524 int_x86_avx_round_ps_256,
4525 int_x86_avx_round_pd_256>, VEX;
4526 defm VROUND : sse41_fp_binop_rm<0x0A, 0x0B, "vround",
4527 int_x86_sse41_round_ss,
4528 int_x86_sse41_round_sd, 0>, VEX_4V;
4530 // Instructions for the assembler
4531 defm VROUND : sse41_fp_unop_rm_avx_p<0x08, 0x09, VR128, f128mem, "vround">,
4533 defm VROUNDY : sse41_fp_unop_rm_avx_p<0x08, 0x09, VR256, f256mem, "vround">,
4535 defm VROUND : sse41_fp_binop_rm_avx_s<0x0A, 0x0B, "vround">, VEX_4V;
4538 defm ROUND : sse41_fp_unop_rm<0x08, 0x09, "round", f128mem, VR128,
4539 memopv4f32, memopv2f64,
4540 int_x86_sse41_round_ps, int_x86_sse41_round_pd>;
4541 let Constraints = "$src1 = $dst" in
4542 defm ROUND : sse41_fp_binop_rm<0x0A, 0x0B, "round",
4543 int_x86_sse41_round_ss, int_x86_sse41_round_sd>;
4545 //===----------------------------------------------------------------------===//
4546 // SSE4.1 - Packed Bit Test
4547 //===----------------------------------------------------------------------===//
4549 // ptest instruction we'll lower to this in X86ISelLowering primarily from
4550 // the intel intrinsic that corresponds to this.
4551 let Defs = [EFLAGS], isAsmParserOnly = 1, Predicates = [HasAVX] in {
4552 def VPTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
4553 "vptest\t{$src2, $src1|$src1, $src2}",
4554 [(set EFLAGS, (X86ptest VR128:$src1, (v4f32 VR128:$src2)))]>,
4556 def VPTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
4557 "vptest\t{$src2, $src1|$src1, $src2}",
4558 [(set EFLAGS,(X86ptest VR128:$src1, (memopv4f32 addr:$src2)))]>,
4561 def VPTESTYrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR256:$src1, VR256:$src2),
4562 "vptest\t{$src2, $src1|$src1, $src2}",
4563 [(set EFLAGS, (X86ptest VR256:$src1, (v4i64 VR256:$src2)))]>,
4565 def VPTESTYrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR256:$src1, i256mem:$src2),
4566 "vptest\t{$src2, $src1|$src1, $src2}",
4567 [(set EFLAGS,(X86ptest VR256:$src1, (memopv4i64 addr:$src2)))]>,
4571 let Defs = [EFLAGS] in {
4572 def PTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
4573 "ptest \t{$src2, $src1|$src1, $src2}",
4574 [(set EFLAGS, (X86ptest VR128:$src1, (v4f32 VR128:$src2)))]>,
4576 def PTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
4577 "ptest \t{$src2, $src1|$src1, $src2}",
4578 [(set EFLAGS, (X86ptest VR128:$src1, (memopv4f32 addr:$src2)))]>,
4582 // The bit test instructions below are AVX only
4583 multiclass avx_bittest<bits<8> opc, string OpcodeStr, RegisterClass RC,
4584 X86MemOperand x86memop, PatFrag mem_frag, ValueType vt> {
4585 def rr : SS48I<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
4586 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
4587 [(set EFLAGS, (X86testp RC:$src1, (vt RC:$src2)))]>, OpSize, VEX;
4588 def rm : SS48I<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
4589 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
4590 [(set EFLAGS, (X86testp RC:$src1, (mem_frag addr:$src2)))]>,
4594 let Defs = [EFLAGS], isAsmParserOnly = 1, Predicates = [HasAVX] in {
4595 defm VTESTPS : avx_bittest<0x0E, "vtestps", VR128, f128mem, memopv4f32, v4f32>;
4596 defm VTESTPSY : avx_bittest<0x0E, "vtestps", VR256, f256mem, memopv8f32, v8f32>;
4597 defm VTESTPD : avx_bittest<0x0F, "vtestpd", VR128, f128mem, memopv2f64, v2f64>;
4598 defm VTESTPDY : avx_bittest<0x0F, "vtestpd", VR256, f256mem, memopv4f64, v4f64>;
4601 //===----------------------------------------------------------------------===//
4602 // SSE4.1 - Misc Instructions
4603 //===----------------------------------------------------------------------===//
4605 def POPCNT16rr : I<0xB8, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
4606 "popcnt{w}\t{$src, $dst|$dst, $src}",
4607 [(set GR16:$dst, (ctpop GR16:$src))]>, OpSize, XS;
4608 def POPCNT16rm : I<0xB8, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
4609 "popcnt{w}\t{$src, $dst|$dst, $src}",
4610 [(set GR16:$dst, (ctpop (loadi16 addr:$src)))]>, OpSize, XS;
4612 def POPCNT32rr : I<0xB8, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
4613 "popcnt{l}\t{$src, $dst|$dst, $src}",
4614 [(set GR32:$dst, (ctpop GR32:$src))]>, XS;
4615 def POPCNT32rm : I<0xB8, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
4616 "popcnt{l}\t{$src, $dst|$dst, $src}",
4617 [(set GR32:$dst, (ctpop (loadi32 addr:$src)))]>, XS;
4619 def POPCNT64rr : RI<0xB8, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
4620 "popcnt{q}\t{$src, $dst|$dst, $src}",
4621 [(set GR64:$dst, (ctpop GR64:$src))]>, XS;
4622 def POPCNT64rm : RI<0xB8, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
4623 "popcnt{q}\t{$src, $dst|$dst, $src}",
4624 [(set GR64:$dst, (ctpop (loadi64 addr:$src)))]>, XS;
4628 // SS41I_unop_rm_int_v16 - SSE 4.1 unary operator whose type is v8i16.
4629 multiclass SS41I_unop_rm_int_v16<bits<8> opc, string OpcodeStr,
4630 Intrinsic IntId128> {
4631 def rr128 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
4633 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4634 [(set VR128:$dst, (IntId128 VR128:$src))]>, OpSize;
4635 def rm128 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
4637 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4640 (bitconvert (memopv8i16 addr:$src))))]>, OpSize;
4643 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4644 defm VPHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "vphminposuw",
4645 int_x86_sse41_phminposuw>, VEX;
4646 defm PHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "phminposuw",
4647 int_x86_sse41_phminposuw>;
4649 /// SS41I_binop_rm_int - Simple SSE 4.1 binary operator
4650 multiclass SS41I_binop_rm_int<bits<8> opc, string OpcodeStr,
4651 Intrinsic IntId128, bit Is2Addr = 1> {
4652 let isCommutable = 1 in
4653 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
4654 (ins VR128:$src1, VR128:$src2),
4656 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4657 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4658 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>, OpSize;
4659 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
4660 (ins VR128:$src1, i128mem:$src2),
4662 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4663 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4665 (IntId128 VR128:$src1,
4666 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
4669 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
4670 let isCommutable = 0 in
4671 defm VPACKUSDW : SS41I_binop_rm_int<0x2B, "vpackusdw", int_x86_sse41_packusdw,
4673 defm VPCMPEQQ : SS41I_binop_rm_int<0x29, "vpcmpeqq", int_x86_sse41_pcmpeqq,
4675 defm VPMINSB : SS41I_binop_rm_int<0x38, "vpminsb", int_x86_sse41_pminsb,
4677 defm VPMINSD : SS41I_binop_rm_int<0x39, "vpminsd", int_x86_sse41_pminsd,
4679 defm VPMINUD : SS41I_binop_rm_int<0x3B, "vpminud", int_x86_sse41_pminud,
4681 defm VPMINUW : SS41I_binop_rm_int<0x3A, "vpminuw", int_x86_sse41_pminuw,
4683 defm VPMAXSB : SS41I_binop_rm_int<0x3C, "vpmaxsb", int_x86_sse41_pmaxsb,
4685 defm VPMAXSD : SS41I_binop_rm_int<0x3D, "vpmaxsd", int_x86_sse41_pmaxsd,
4687 defm VPMAXUD : SS41I_binop_rm_int<0x3F, "vpmaxud", int_x86_sse41_pmaxud,
4689 defm VPMAXUW : SS41I_binop_rm_int<0x3E, "vpmaxuw", int_x86_sse41_pmaxuw,
4691 defm VPMULDQ : SS41I_binop_rm_int<0x28, "vpmuldq", int_x86_sse41_pmuldq,
4695 let Constraints = "$src1 = $dst" in {
4696 let isCommutable = 0 in
4697 defm PACKUSDW : SS41I_binop_rm_int<0x2B, "packusdw", int_x86_sse41_packusdw>;
4698 defm PCMPEQQ : SS41I_binop_rm_int<0x29, "pcmpeqq", int_x86_sse41_pcmpeqq>;
4699 defm PMINSB : SS41I_binop_rm_int<0x38, "pminsb", int_x86_sse41_pminsb>;
4700 defm PMINSD : SS41I_binop_rm_int<0x39, "pminsd", int_x86_sse41_pminsd>;
4701 defm PMINUD : SS41I_binop_rm_int<0x3B, "pminud", int_x86_sse41_pminud>;
4702 defm PMINUW : SS41I_binop_rm_int<0x3A, "pminuw", int_x86_sse41_pminuw>;
4703 defm PMAXSB : SS41I_binop_rm_int<0x3C, "pmaxsb", int_x86_sse41_pmaxsb>;
4704 defm PMAXSD : SS41I_binop_rm_int<0x3D, "pmaxsd", int_x86_sse41_pmaxsd>;
4705 defm PMAXUD : SS41I_binop_rm_int<0x3F, "pmaxud", int_x86_sse41_pmaxud>;
4706 defm PMAXUW : SS41I_binop_rm_int<0x3E, "pmaxuw", int_x86_sse41_pmaxuw>;
4707 defm PMULDQ : SS41I_binop_rm_int<0x28, "pmuldq", int_x86_sse41_pmuldq>;
4710 def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, VR128:$src2)),
4711 (PCMPEQQrr VR128:$src1, VR128:$src2)>;
4712 def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, (memop addr:$src2))),
4713 (PCMPEQQrm VR128:$src1, addr:$src2)>;
4715 /// SS48I_binop_rm - Simple SSE41 binary operator.
4716 multiclass SS48I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
4717 ValueType OpVT, bit Is2Addr = 1> {
4718 let isCommutable = 1 in
4719 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
4720 (ins VR128:$src1, VR128:$src2),
4722 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4723 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4724 [(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]>,
4726 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
4727 (ins VR128:$src1, i128mem:$src2),
4729 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4730 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4731 [(set VR128:$dst, (OpNode VR128:$src1,
4732 (bc_v4i32 (memopv2i64 addr:$src2))))]>,
4736 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4737 defm VPMULLD : SS48I_binop_rm<0x40, "vpmulld", mul, v4i32, 0>, VEX_4V;
4738 let Constraints = "$src1 = $dst" in
4739 defm PMULLD : SS48I_binop_rm<0x40, "pmulld", mul, v4i32>;
4741 /// SS41I_binop_rmi_int - SSE 4.1 binary operator with 8-bit immediate
4742 multiclass SS41I_binop_rmi_int<bits<8> opc, string OpcodeStr,
4743 Intrinsic IntId, RegisterClass RC, PatFrag memop_frag,
4744 X86MemOperand x86memop, bit Is2Addr = 1> {
4745 let isCommutable = 1 in
4746 def rri : SS4AIi8<opc, MRMSrcReg, (outs RC:$dst),
4747 (ins RC:$src1, RC:$src2, i32i8imm:$src3),
4749 !strconcat(OpcodeStr,
4750 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4751 !strconcat(OpcodeStr,
4752 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4753 [(set RC:$dst, (IntId RC:$src1, RC:$src2, imm:$src3))]>,
4755 def rmi : SS4AIi8<opc, MRMSrcMem, (outs RC:$dst),
4756 (ins RC:$src1, x86memop:$src2, i32i8imm:$src3),
4758 !strconcat(OpcodeStr,
4759 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4760 !strconcat(OpcodeStr,
4761 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4764 (bitconvert (memop_frag addr:$src2)), imm:$src3))]>,
4768 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
4769 let isCommutable = 0 in {
4770 defm VBLENDPS : SS41I_binop_rmi_int<0x0C, "vblendps", int_x86_sse41_blendps,
4771 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4772 defm VBLENDPD : SS41I_binop_rmi_int<0x0D, "vblendpd", int_x86_sse41_blendpd,
4773 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4774 defm VBLENDPSY : SS41I_binop_rmi_int<0x0C, "vblendps",
4775 int_x86_avx_blend_ps_256, VR256, memopv32i8, i256mem, 0>, VEX_4V;
4776 defm VBLENDPDY : SS41I_binop_rmi_int<0x0D, "vblendpd",
4777 int_x86_avx_blend_pd_256, VR256, memopv32i8, i256mem, 0>, VEX_4V;
4778 defm VPBLENDW : SS41I_binop_rmi_int<0x0E, "vpblendw", int_x86_sse41_pblendw,
4779 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4780 defm VMPSADBW : SS41I_binop_rmi_int<0x42, "vmpsadbw", int_x86_sse41_mpsadbw,
4781 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4783 defm VDPPS : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_sse41_dpps,
4784 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4785 defm VDPPD : SS41I_binop_rmi_int<0x41, "vdppd", int_x86_sse41_dppd,
4786 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4787 defm VDPPSY : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_avx_dp_ps_256,
4788 VR256, memopv32i8, i256mem, 0>, VEX_4V;
4791 let Constraints = "$src1 = $dst" in {
4792 let isCommutable = 0 in {
4793 defm BLENDPS : SS41I_binop_rmi_int<0x0C, "blendps", int_x86_sse41_blendps,
4794 VR128, memopv16i8, i128mem>;
4795 defm BLENDPD : SS41I_binop_rmi_int<0x0D, "blendpd", int_x86_sse41_blendpd,
4796 VR128, memopv16i8, i128mem>;
4797 defm PBLENDW : SS41I_binop_rmi_int<0x0E, "pblendw", int_x86_sse41_pblendw,
4798 VR128, memopv16i8, i128mem>;
4799 defm MPSADBW : SS41I_binop_rmi_int<0x42, "mpsadbw", int_x86_sse41_mpsadbw,
4800 VR128, memopv16i8, i128mem>;
4802 defm DPPS : SS41I_binop_rmi_int<0x40, "dpps", int_x86_sse41_dpps,
4803 VR128, memopv16i8, i128mem>;
4804 defm DPPD : SS41I_binop_rmi_int<0x41, "dppd", int_x86_sse41_dppd,
4805 VR128, memopv16i8, i128mem>;
4808 /// SS41I_quaternary_int_avx - AVX SSE 4.1 with 4 operators
4809 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
4810 multiclass SS41I_quaternary_int_avx<bits<8> opc, string OpcodeStr,
4811 RegisterClass RC, X86MemOperand x86memop,
4812 PatFrag mem_frag, Intrinsic IntId> {
4813 def rr : I<opc, MRMSrcReg, (outs RC:$dst),
4814 (ins RC:$src1, RC:$src2, RC:$src3),
4815 !strconcat(OpcodeStr,
4816 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4817 [(set RC:$dst, (IntId RC:$src1, RC:$src2, RC:$src3))],
4818 SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM;
4820 def rm : I<opc, MRMSrcMem, (outs RC:$dst),
4821 (ins RC:$src1, x86memop:$src2, RC:$src3),
4822 !strconcat(OpcodeStr,
4823 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4825 (IntId RC:$src1, (bitconvert (mem_frag addr:$src2)),
4827 SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM;
4831 defm VBLENDVPD : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR128, i128mem,
4832 memopv16i8, int_x86_sse41_blendvpd>;
4833 defm VBLENDVPS : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR128, i128mem,
4834 memopv16i8, int_x86_sse41_blendvps>;
4835 defm VPBLENDVB : SS41I_quaternary_int_avx<0x4C, "vpblendvb", VR128, i128mem,
4836 memopv16i8, int_x86_sse41_pblendvb>;
4837 defm VBLENDVPDY : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR256, i256mem,
4838 memopv32i8, int_x86_avx_blendv_pd_256>;
4839 defm VBLENDVPSY : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR256, i256mem,
4840 memopv32i8, int_x86_avx_blendv_ps_256>;
4842 /// SS41I_ternary_int - SSE 4.1 ternary operator
4843 let Uses = [XMM0], Constraints = "$src1 = $dst" in {
4844 multiclass SS41I_ternary_int<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
4845 def rr0 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
4846 (ins VR128:$src1, VR128:$src2),
4847 !strconcat(OpcodeStr,
4848 "\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}"),
4849 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2, XMM0))]>,
4852 def rm0 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
4853 (ins VR128:$src1, i128mem:$src2),
4854 !strconcat(OpcodeStr,
4855 "\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}"),
4858 (bitconvert (memopv16i8 addr:$src2)), XMM0))]>, OpSize;
4862 defm BLENDVPD : SS41I_ternary_int<0x15, "blendvpd", int_x86_sse41_blendvpd>;
4863 defm BLENDVPS : SS41I_ternary_int<0x14, "blendvps", int_x86_sse41_blendvps>;
4864 defm PBLENDVB : SS41I_ternary_int<0x10, "pblendvb", int_x86_sse41_pblendvb>;
4866 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4867 def VMOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
4868 "vmovntdqa\t{$src, $dst|$dst, $src}",
4869 [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
4871 def MOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
4872 "movntdqa\t{$src, $dst|$dst, $src}",
4873 [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
4876 //===----------------------------------------------------------------------===//
4877 // SSE4.2 - Compare Instructions
4878 //===----------------------------------------------------------------------===//
4880 /// SS42I_binop_rm_int - Simple SSE 4.2 binary operator
4881 multiclass SS42I_binop_rm_int<bits<8> opc, string OpcodeStr,
4882 Intrinsic IntId128, bit Is2Addr = 1> {
4883 def rr : SS428I<opc, MRMSrcReg, (outs VR128:$dst),
4884 (ins VR128:$src1, VR128:$src2),
4886 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4887 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4888 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
4890 def rm : SS428I<opc, MRMSrcMem, (outs VR128:$dst),
4891 (ins VR128:$src1, i128mem:$src2),
4893 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4894 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4896 (IntId128 VR128:$src1,
4897 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
4900 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4901 defm VPCMPGTQ : SS42I_binop_rm_int<0x37, "vpcmpgtq", int_x86_sse42_pcmpgtq,
4903 let Constraints = "$src1 = $dst" in
4904 defm PCMPGTQ : SS42I_binop_rm_int<0x37, "pcmpgtq", int_x86_sse42_pcmpgtq>;
4906 def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, VR128:$src2)),
4907 (PCMPGTQrr VR128:$src1, VR128:$src2)>;
4908 def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, (memop addr:$src2))),
4909 (PCMPGTQrm VR128:$src1, addr:$src2)>;
4911 //===----------------------------------------------------------------------===//
4912 // SSE4.2 - String/text Processing Instructions
4913 //===----------------------------------------------------------------------===//
4915 // Packed Compare Implicit Length Strings, Return Mask
4916 multiclass pseudo_pcmpistrm<string asm> {
4917 def REG : PseudoI<(outs VR128:$dst),
4918 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
4919 [(set VR128:$dst, (int_x86_sse42_pcmpistrm128 VR128:$src1, VR128:$src2,
4921 def MEM : PseudoI<(outs VR128:$dst),
4922 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
4923 [(set VR128:$dst, (int_x86_sse42_pcmpistrm128
4924 VR128:$src1, (load addr:$src2), imm:$src3))]>;
4927 let Defs = [EFLAGS], usesCustomInserter = 1 in {
4928 defm PCMPISTRM128 : pseudo_pcmpistrm<"#PCMPISTRM128">, Requires<[HasSSE42]>;
4929 defm VPCMPISTRM128 : pseudo_pcmpistrm<"#VPCMPISTRM128">, Requires<[HasAVX]>;
4932 let Defs = [XMM0, EFLAGS], isAsmParserOnly = 1,
4933 Predicates = [HasAVX] in {
4934 def VPCMPISTRM128rr : SS42AI<0x62, MRMSrcReg, (outs),
4935 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
4936 "vpcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize, VEX;
4937 def VPCMPISTRM128rm : SS42AI<0x62, MRMSrcMem, (outs),
4938 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
4939 "vpcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize, VEX;
4942 let Defs = [XMM0, EFLAGS] in {
4943 def PCMPISTRM128rr : SS42AI<0x62, MRMSrcReg, (outs),
4944 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
4945 "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize;
4946 def PCMPISTRM128rm : SS42AI<0x62, MRMSrcMem, (outs),
4947 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
4948 "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize;
4951 // Packed Compare Explicit Length Strings, Return Mask
4952 multiclass pseudo_pcmpestrm<string asm> {
4953 def REG : PseudoI<(outs VR128:$dst),
4954 (ins VR128:$src1, VR128:$src3, i8imm:$src5),
4955 [(set VR128:$dst, (int_x86_sse42_pcmpestrm128
4956 VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5))]>;
4957 def MEM : PseudoI<(outs VR128:$dst),
4958 (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
4959 [(set VR128:$dst, (int_x86_sse42_pcmpestrm128
4960 VR128:$src1, EAX, (load addr:$src3), EDX, imm:$src5))]>;
4963 let Defs = [EFLAGS], Uses = [EAX, EDX], usesCustomInserter = 1 in {
4964 defm PCMPESTRM128 : pseudo_pcmpestrm<"#PCMPESTRM128">, Requires<[HasSSE42]>;
4965 defm VPCMPESTRM128 : pseudo_pcmpestrm<"#VPCMPESTRM128">, Requires<[HasAVX]>;
4968 let isAsmParserOnly = 1, Predicates = [HasAVX],
4969 Defs = [XMM0, EFLAGS], Uses = [EAX, EDX] in {
4970 def VPCMPESTRM128rr : SS42AI<0x60, MRMSrcReg, (outs),
4971 (ins VR128:$src1, VR128:$src3, i8imm:$src5),
4972 "vpcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize, VEX;
4973 def VPCMPESTRM128rm : SS42AI<0x60, MRMSrcMem, (outs),
4974 (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
4975 "vpcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize, VEX;
4978 let Defs = [XMM0, EFLAGS], Uses = [EAX, EDX] in {
4979 def PCMPESTRM128rr : SS42AI<0x60, MRMSrcReg, (outs),
4980 (ins VR128:$src1, VR128:$src3, i8imm:$src5),
4981 "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize;
4982 def PCMPESTRM128rm : SS42AI<0x60, MRMSrcMem, (outs),
4983 (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
4984 "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize;
4987 // Packed Compare Implicit Length Strings, Return Index
4988 let Defs = [ECX, EFLAGS] in {
4989 multiclass SS42AI_pcmpistri<Intrinsic IntId128, string asm = "pcmpistri"> {
4990 def rr : SS42AI<0x63, MRMSrcReg, (outs),
4991 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
4992 !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
4993 [(set ECX, (IntId128 VR128:$src1, VR128:$src2, imm:$src3)),
4994 (implicit EFLAGS)]>, OpSize;
4995 def rm : SS42AI<0x63, MRMSrcMem, (outs),
4996 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
4997 !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
4998 [(set ECX, (IntId128 VR128:$src1, (load addr:$src2), imm:$src3)),
4999 (implicit EFLAGS)]>, OpSize;
5003 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
5004 defm VPCMPISTRI : SS42AI_pcmpistri<int_x86_sse42_pcmpistri128, "vpcmpistri">,
5006 defm VPCMPISTRIA : SS42AI_pcmpistri<int_x86_sse42_pcmpistria128, "vpcmpistri">,
5008 defm VPCMPISTRIC : SS42AI_pcmpistri<int_x86_sse42_pcmpistric128, "vpcmpistri">,
5010 defm VPCMPISTRIO : SS42AI_pcmpistri<int_x86_sse42_pcmpistrio128, "vpcmpistri">,
5012 defm VPCMPISTRIS : SS42AI_pcmpistri<int_x86_sse42_pcmpistris128, "vpcmpistri">,
5014 defm VPCMPISTRIZ : SS42AI_pcmpistri<int_x86_sse42_pcmpistriz128, "vpcmpistri">,
5018 defm PCMPISTRI : SS42AI_pcmpistri<int_x86_sse42_pcmpistri128>;
5019 defm PCMPISTRIA : SS42AI_pcmpistri<int_x86_sse42_pcmpistria128>;
5020 defm PCMPISTRIC : SS42AI_pcmpistri<int_x86_sse42_pcmpistric128>;
5021 defm PCMPISTRIO : SS42AI_pcmpistri<int_x86_sse42_pcmpistrio128>;
5022 defm PCMPISTRIS : SS42AI_pcmpistri<int_x86_sse42_pcmpistris128>;
5023 defm PCMPISTRIZ : SS42AI_pcmpistri<int_x86_sse42_pcmpistriz128>;
5025 // Packed Compare Explicit Length Strings, Return Index
5026 let Defs = [ECX, EFLAGS], Uses = [EAX, EDX] in {
5027 multiclass SS42AI_pcmpestri<Intrinsic IntId128, string asm = "pcmpestri"> {
5028 def rr : SS42AI<0x61, MRMSrcReg, (outs),
5029 (ins VR128:$src1, VR128:$src3, i8imm:$src5),
5030 !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
5031 [(set ECX, (IntId128 VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5)),
5032 (implicit EFLAGS)]>, OpSize;
5033 def rm : SS42AI<0x61, MRMSrcMem, (outs),
5034 (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
5035 !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
5037 (IntId128 VR128:$src1, EAX, (load addr:$src3), EDX, imm:$src5)),
5038 (implicit EFLAGS)]>, OpSize;
5042 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
5043 defm VPCMPESTRI : SS42AI_pcmpestri<int_x86_sse42_pcmpestri128, "vpcmpestri">,
5045 defm VPCMPESTRIA : SS42AI_pcmpestri<int_x86_sse42_pcmpestria128, "vpcmpestri">,
5047 defm VPCMPESTRIC : SS42AI_pcmpestri<int_x86_sse42_pcmpestric128, "vpcmpestri">,
5049 defm VPCMPESTRIO : SS42AI_pcmpestri<int_x86_sse42_pcmpestrio128, "vpcmpestri">,
5051 defm VPCMPESTRIS : SS42AI_pcmpestri<int_x86_sse42_pcmpestris128, "vpcmpestri">,
5053 defm VPCMPESTRIZ : SS42AI_pcmpestri<int_x86_sse42_pcmpestriz128, "vpcmpestri">,
5057 defm PCMPESTRI : SS42AI_pcmpestri<int_x86_sse42_pcmpestri128>;
5058 defm PCMPESTRIA : SS42AI_pcmpestri<int_x86_sse42_pcmpestria128>;
5059 defm PCMPESTRIC : SS42AI_pcmpestri<int_x86_sse42_pcmpestric128>;
5060 defm PCMPESTRIO : SS42AI_pcmpestri<int_x86_sse42_pcmpestrio128>;
5061 defm PCMPESTRIS : SS42AI_pcmpestri<int_x86_sse42_pcmpestris128>;
5062 defm PCMPESTRIZ : SS42AI_pcmpestri<int_x86_sse42_pcmpestriz128>;
5064 //===----------------------------------------------------------------------===//
5065 // SSE4.2 - CRC Instructions
5066 //===----------------------------------------------------------------------===//
5068 // No CRC instructions have AVX equivalents
5070 // crc intrinsic instruction
5071 // This set of instructions are only rm, the only difference is the size
5073 let Constraints = "$src1 = $dst" in {
5074 def CRC32m8 : SS42FI<0xF0, MRMSrcMem, (outs GR32:$dst),
5075 (ins GR32:$src1, i8mem:$src2),
5076 "crc32{b} \t{$src2, $src1|$src1, $src2}",
5078 (int_x86_sse42_crc32_8 GR32:$src1,
5079 (load addr:$src2)))]>;
5080 def CRC32r8 : SS42FI<0xF0, MRMSrcReg, (outs GR32:$dst),
5081 (ins GR32:$src1, GR8:$src2),
5082 "crc32{b} \t{$src2, $src1|$src1, $src2}",
5084 (int_x86_sse42_crc32_8 GR32:$src1, GR8:$src2))]>;
5085 def CRC32m16 : SS42FI<0xF1, MRMSrcMem, (outs GR32:$dst),
5086 (ins GR32:$src1, i16mem:$src2),
5087 "crc32{w} \t{$src2, $src1|$src1, $src2}",
5089 (int_x86_sse42_crc32_16 GR32:$src1,
5090 (load addr:$src2)))]>,
5092 def CRC32r16 : SS42FI<0xF1, MRMSrcReg, (outs GR32:$dst),
5093 (ins GR32:$src1, GR16:$src2),
5094 "crc32{w} \t{$src2, $src1|$src1, $src2}",
5096 (int_x86_sse42_crc32_16 GR32:$src1, GR16:$src2))]>,
5098 def CRC32m32 : SS42FI<0xF1, MRMSrcMem, (outs GR32:$dst),
5099 (ins GR32:$src1, i32mem:$src2),
5100 "crc32{l} \t{$src2, $src1|$src1, $src2}",
5102 (int_x86_sse42_crc32_32 GR32:$src1,
5103 (load addr:$src2)))]>;
5104 def CRC32r32 : SS42FI<0xF1, MRMSrcReg, (outs GR32:$dst),
5105 (ins GR32:$src1, GR32:$src2),
5106 "crc32{l} \t{$src2, $src1|$src1, $src2}",
5108 (int_x86_sse42_crc32_32 GR32:$src1, GR32:$src2))]>;
5109 def CRC64m8 : SS42FI<0xF0, MRMSrcMem, (outs GR64:$dst),
5110 (ins GR64:$src1, i8mem:$src2),
5111 "crc32{b} \t{$src2, $src1|$src1, $src2}",
5113 (int_x86_sse42_crc64_8 GR64:$src1,
5114 (load addr:$src2)))]>,
5116 def CRC64r8 : SS42FI<0xF0, MRMSrcReg, (outs GR64:$dst),
5117 (ins GR64:$src1, GR8:$src2),
5118 "crc32{b} \t{$src2, $src1|$src1, $src2}",
5120 (int_x86_sse42_crc64_8 GR64:$src1, GR8:$src2))]>,
5122 def CRC64m64 : SS42FI<0xF1, MRMSrcMem, (outs GR64:$dst),
5123 (ins GR64:$src1, i64mem:$src2),
5124 "crc32{q} \t{$src2, $src1|$src1, $src2}",
5126 (int_x86_sse42_crc64_64 GR64:$src1,
5127 (load addr:$src2)))]>,
5129 def CRC64r64 : SS42FI<0xF1, MRMSrcReg, (outs GR64:$dst),
5130 (ins GR64:$src1, GR64:$src2),
5131 "crc32{q} \t{$src2, $src1|$src1, $src2}",
5133 (int_x86_sse42_crc64_64 GR64:$src1, GR64:$src2))]>,
5137 //===----------------------------------------------------------------------===//
5138 // AES-NI Instructions
5139 //===----------------------------------------------------------------------===//
5141 multiclass AESI_binop_rm_int<bits<8> opc, string OpcodeStr,
5142 Intrinsic IntId128, bit Is2Addr = 1> {
5143 def rr : AES8I<opc, MRMSrcReg, (outs VR128:$dst),
5144 (ins VR128:$src1, VR128:$src2),
5146 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5147 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5148 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
5150 def rm : AES8I<opc, MRMSrcMem, (outs VR128:$dst),
5151 (ins VR128:$src1, i128mem:$src2),
5153 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5154 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5156 (IntId128 VR128:$src1,
5157 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
5160 // Perform One Round of an AES Encryption/Decryption Flow
5161 let isAsmParserOnly = 1, Predicates = [HasAVX, HasAES] in {
5162 defm VAESENC : AESI_binop_rm_int<0xDC, "vaesenc",
5163 int_x86_aesni_aesenc, 0>, VEX_4V;
5164 defm VAESENCLAST : AESI_binop_rm_int<0xDD, "vaesenclast",
5165 int_x86_aesni_aesenclast, 0>, VEX_4V;
5166 defm VAESDEC : AESI_binop_rm_int<0xDE, "vaesdec",
5167 int_x86_aesni_aesdec, 0>, VEX_4V;
5168 defm VAESDECLAST : AESI_binop_rm_int<0xDF, "vaesdeclast",
5169 int_x86_aesni_aesdeclast, 0>, VEX_4V;
5172 let Constraints = "$src1 = $dst" in {
5173 defm AESENC : AESI_binop_rm_int<0xDC, "aesenc",
5174 int_x86_aesni_aesenc>;
5175 defm AESENCLAST : AESI_binop_rm_int<0xDD, "aesenclast",
5176 int_x86_aesni_aesenclast>;
5177 defm AESDEC : AESI_binop_rm_int<0xDE, "aesdec",
5178 int_x86_aesni_aesdec>;
5179 defm AESDECLAST : AESI_binop_rm_int<0xDF, "aesdeclast",
5180 int_x86_aesni_aesdeclast>;
5183 def : Pat<(v2i64 (int_x86_aesni_aesenc VR128:$src1, VR128:$src2)),
5184 (AESENCrr VR128:$src1, VR128:$src2)>;
5185 def : Pat<(v2i64 (int_x86_aesni_aesenc VR128:$src1, (memop addr:$src2))),
5186 (AESENCrm VR128:$src1, addr:$src2)>;
5187 def : Pat<(v2i64 (int_x86_aesni_aesenclast VR128:$src1, VR128:$src2)),
5188 (AESENCLASTrr VR128:$src1, VR128:$src2)>;
5189 def : Pat<(v2i64 (int_x86_aesni_aesenclast VR128:$src1, (memop addr:$src2))),
5190 (AESENCLASTrm VR128:$src1, addr:$src2)>;
5191 def : Pat<(v2i64 (int_x86_aesni_aesdec VR128:$src1, VR128:$src2)),
5192 (AESDECrr VR128:$src1, VR128:$src2)>;
5193 def : Pat<(v2i64 (int_x86_aesni_aesdec VR128:$src1, (memop addr:$src2))),
5194 (AESDECrm VR128:$src1, addr:$src2)>;
5195 def : Pat<(v2i64 (int_x86_aesni_aesdeclast VR128:$src1, VR128:$src2)),
5196 (AESDECLASTrr VR128:$src1, VR128:$src2)>;
5197 def : Pat<(v2i64 (int_x86_aesni_aesdeclast VR128:$src1, (memop addr:$src2))),
5198 (AESDECLASTrm VR128:$src1, addr:$src2)>;
5200 // Perform the AES InvMixColumn Transformation
5201 let isAsmParserOnly = 1, Predicates = [HasAVX, HasAES] in {
5202 def VAESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
5204 "vaesimc\t{$src1, $dst|$dst, $src1}",
5206 (int_x86_aesni_aesimc VR128:$src1))]>,
5208 def VAESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
5209 (ins i128mem:$src1),
5210 "vaesimc\t{$src1, $dst|$dst, $src1}",
5212 (int_x86_aesni_aesimc (bitconvert (memopv2i64 addr:$src1))))]>,
5215 def AESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
5217 "aesimc\t{$src1, $dst|$dst, $src1}",
5219 (int_x86_aesni_aesimc VR128:$src1))]>,
5221 def AESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
5222 (ins i128mem:$src1),
5223 "aesimc\t{$src1, $dst|$dst, $src1}",
5225 (int_x86_aesni_aesimc (bitconvert (memopv2i64 addr:$src1))))]>,
5228 // AES Round Key Generation Assist
5229 let isAsmParserOnly = 1, Predicates = [HasAVX, HasAES] in {
5230 def VAESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
5231 (ins VR128:$src1, i8imm:$src2),
5232 "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5234 (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
5236 def VAESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
5237 (ins i128mem:$src1, i8imm:$src2),
5238 "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5240 (int_x86_aesni_aeskeygenassist (bitconvert (memopv2i64 addr:$src1)),
5244 def AESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
5245 (ins VR128:$src1, i8imm:$src2),
5246 "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5248 (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
5250 def AESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
5251 (ins i128mem:$src1, i8imm:$src2),
5252 "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5254 (int_x86_aesni_aeskeygenassist (bitconvert (memopv2i64 addr:$src1)),
5258 //===----------------------------------------------------------------------===//
5259 // CLMUL Instructions
5260 //===----------------------------------------------------------------------===//
5262 // Only the AVX version of CLMUL instructions are described here.
5264 // Carry-less Multiplication instructions
5265 let isAsmParserOnly = 1 in {
5266 def VPCLMULQDQrr : CLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
5267 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
5268 "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
5271 def VPCLMULQDQrm : CLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
5272 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
5273 "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
5277 multiclass avx_vpclmul<string asm> {
5278 def rr : I<0, Pseudo, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
5279 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5282 def rm : I<0, Pseudo, (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
5283 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5286 defm VPCLMULHQHQDQ : avx_vpclmul<"vpclmulhqhqdq">;
5287 defm VPCLMULHQLQDQ : avx_vpclmul<"vpclmulhqlqdq">;
5288 defm VPCLMULLQHQDQ : avx_vpclmul<"vpclmullqhqdq">;
5289 defm VPCLMULLQLQDQ : avx_vpclmul<"vpclmullqlqdq">;
5291 } // isAsmParserOnly
5293 //===----------------------------------------------------------------------===//
5295 //===----------------------------------------------------------------------===//
5297 let isAsmParserOnly = 1 in {
5299 // Load from memory and broadcast to all elements of the destination operand
5300 class avx_broadcast<bits<8> opc, string OpcodeStr, RegisterClass RC,
5301 X86MemOperand x86memop, Intrinsic Int> :
5302 AVX8I<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
5303 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5304 [(set RC:$dst, (Int addr:$src))]>, VEX;
5306 def VBROADCASTSS : avx_broadcast<0x18, "vbroadcastss", VR128, f32mem,
5307 int_x86_avx_vbroadcastss>;
5308 def VBROADCASTSSY : avx_broadcast<0x18, "vbroadcastss", VR256, f32mem,
5309 int_x86_avx_vbroadcastss_256>;
5310 def VBROADCASTSD : avx_broadcast<0x19, "vbroadcastsd", VR256, f64mem,
5311 int_x86_avx_vbroadcast_sd_256>;
5312 def VBROADCASTF128 : avx_broadcast<0x1A, "vbroadcastf128", VR256, f128mem,
5313 int_x86_avx_vbroadcastf128_pd_256>;
5315 // Insert packed floating-point values
5316 def VINSERTF128rr : AVXAIi8<0x18, MRMSrcReg, (outs VR256:$dst),
5317 (ins VR256:$src1, VR128:$src2, i8imm:$src3),
5318 "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
5320 def VINSERTF128rm : AVXAIi8<0x18, MRMSrcMem, (outs VR256:$dst),
5321 (ins VR256:$src1, f128mem:$src2, i8imm:$src3),
5322 "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
5325 // Extract packed floating-point values
5326 def VEXTRACTF128rr : AVXAIi8<0x19, MRMDestReg, (outs VR128:$dst),
5327 (ins VR256:$src1, i8imm:$src2),
5328 "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5330 def VEXTRACTF128mr : AVXAIi8<0x19, MRMDestMem, (outs),
5331 (ins f128mem:$dst, VR256:$src1, i8imm:$src2),
5332 "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5335 // Conditional SIMD Packed Loads and Stores
5336 multiclass avx_movmask_rm<bits<8> opc_rm, bits<8> opc_mr, string OpcodeStr,
5337 Intrinsic IntLd, Intrinsic IntLd256,
5338 Intrinsic IntSt, Intrinsic IntSt256,
5339 PatFrag pf128, PatFrag pf256> {
5340 def rm : AVX8I<opc_rm, MRMSrcMem, (outs VR128:$dst),
5341 (ins VR128:$src1, f128mem:$src2),
5342 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5343 [(set VR128:$dst, (IntLd addr:$src2, VR128:$src1))]>,
5345 def Yrm : AVX8I<opc_rm, MRMSrcMem, (outs VR256:$dst),
5346 (ins VR256:$src1, f256mem:$src2),
5347 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5348 [(set VR256:$dst, (IntLd256 addr:$src2, VR256:$src1))]>,
5350 def mr : AVX8I<opc_mr, MRMDestMem, (outs),
5351 (ins f128mem:$dst, VR128:$src1, VR128:$src2),
5352 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5353 [(IntSt addr:$dst, VR128:$src1, VR128:$src2)]>, VEX_4V;
5354 def Ymr : AVX8I<opc_mr, MRMDestMem, (outs),
5355 (ins f256mem:$dst, VR256:$src1, VR256:$src2),
5356 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5357 [(IntSt256 addr:$dst, VR256:$src1, VR256:$src2)]>, VEX_4V;
5360 defm VMASKMOVPS : avx_movmask_rm<0x2C, 0x2E, "vmaskmovps",
5361 int_x86_avx_maskload_ps,
5362 int_x86_avx_maskload_ps_256,
5363 int_x86_avx_maskstore_ps,
5364 int_x86_avx_maskstore_ps_256,
5365 memopv4f32, memopv8f32>;
5366 defm VMASKMOVPD : avx_movmask_rm<0x2D, 0x2F, "vmaskmovpd",
5367 int_x86_avx_maskload_pd,
5368 int_x86_avx_maskload_pd_256,
5369 int_x86_avx_maskstore_pd,
5370 int_x86_avx_maskstore_pd_256,
5371 memopv2f64, memopv4f64>;
5373 // Permute Floating-Point Values
5374 multiclass avx_permil<bits<8> opc_rm, bits<8> opc_rmi, string OpcodeStr,
5375 RegisterClass RC, X86MemOperand x86memop_f,
5376 X86MemOperand x86memop_i, PatFrag f_frag, PatFrag i_frag,
5377 Intrinsic IntVar, Intrinsic IntImm> {
5378 def rr : AVX8I<opc_rm, MRMSrcReg, (outs RC:$dst),
5379 (ins RC:$src1, RC:$src2),
5380 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5381 [(set RC:$dst, (IntVar RC:$src1, RC:$src2))]>, VEX_4V;
5382 def rm : AVX8I<opc_rm, MRMSrcMem, (outs RC:$dst),
5383 (ins RC:$src1, x86memop_i:$src2),
5384 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5385 [(set RC:$dst, (IntVar RC:$src1, (i_frag addr:$src2)))]>, VEX_4V;
5387 def ri : AVXAIi8<opc_rmi, MRMSrcReg, (outs RC:$dst),
5388 (ins RC:$src1, i8imm:$src2),
5389 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5390 [(set RC:$dst, (IntImm RC:$src1, imm:$src2))]>, VEX;
5391 def mi : AVXAIi8<opc_rmi, MRMSrcMem, (outs RC:$dst),
5392 (ins x86memop_f:$src1, i8imm:$src2),
5393 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5394 [(set RC:$dst, (IntImm (f_frag addr:$src1), imm:$src2))]>, VEX;
5397 defm VPERMILPS : avx_permil<0x0C, 0x04, "vpermilps", VR128, f128mem, i128mem,
5398 memopv4f32, memopv4i32,
5399 int_x86_avx_vpermilvar_ps,
5400 int_x86_avx_vpermil_ps>;
5401 defm VPERMILPSY : avx_permil<0x0C, 0x04, "vpermilps", VR256, f256mem, i256mem,
5402 memopv8f32, memopv8i32,
5403 int_x86_avx_vpermilvar_ps_256,
5404 int_x86_avx_vpermil_ps_256>;
5405 defm VPERMILPD : avx_permil<0x0D, 0x05, "vpermilpd", VR128, f128mem, i128mem,
5406 memopv2f64, memopv2i64,
5407 int_x86_avx_vpermilvar_pd,
5408 int_x86_avx_vpermil_pd>;
5409 defm VPERMILPDY : avx_permil<0x0D, 0x05, "vpermilpd", VR256, f256mem, i256mem,
5410 memopv4f64, memopv4i64,
5411 int_x86_avx_vpermilvar_pd_256,
5412 int_x86_avx_vpermil_pd_256>;
5414 def VPERM2F128rr : AVXAIi8<0x06, MRMSrcReg, (outs VR256:$dst),
5415 (ins VR256:$src1, VR256:$src2, i8imm:$src3),
5416 "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
5418 def VPERM2F128rm : AVXAIi8<0x06, MRMSrcMem, (outs VR256:$dst),
5419 (ins VR256:$src1, f256mem:$src2, i8imm:$src3),
5420 "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
5423 // Zero All YMM registers
5424 def VZEROALL : I<0x77, RawFrm, (outs), (ins), "vzeroall",
5425 [(int_x86_avx_vzeroall)]>, VEX, VEX_L, Requires<[HasAVX]>;
5427 // Zero Upper bits of YMM registers
5428 def VZEROUPPER : I<0x77, RawFrm, (outs), (ins), "vzeroupper",
5429 [(int_x86_avx_vzeroupper)]>, VEX, Requires<[HasAVX]>;
5431 } // isAsmParserOnly
5433 def : Pat<(int_x86_avx_vinsertf128_pd_256 VR256:$src1, VR128:$src2, imm:$src3),
5434 (VINSERTF128rr VR256:$src1, VR128:$src2, imm:$src3)>;
5435 def : Pat<(int_x86_avx_vinsertf128_ps_256 VR256:$src1, VR128:$src2, imm:$src3),
5436 (VINSERTF128rr VR256:$src1, VR128:$src2, imm:$src3)>;
5437 def : Pat<(int_x86_avx_vinsertf128_si_256 VR256:$src1, VR128:$src2, imm:$src3),
5438 (VINSERTF128rr VR256:$src1, VR128:$src2, imm:$src3)>;
5440 def : Pat<(int_x86_avx_vextractf128_pd_256 VR256:$src1, imm:$src2),
5441 (VEXTRACTF128rr VR256:$src1, imm:$src2)>;
5442 def : Pat<(int_x86_avx_vextractf128_ps_256 VR256:$src1, imm:$src2),
5443 (VEXTRACTF128rr VR256:$src1, imm:$src2)>;
5444 def : Pat<(int_x86_avx_vextractf128_si_256 VR256:$src1, imm:$src2),
5445 (VEXTRACTF128rr VR256:$src1, imm:$src2)>;
5447 def : Pat<(int_x86_avx_vbroadcastf128_ps_256 addr:$src),
5448 (VBROADCASTF128 addr:$src)>;
5450 def : Pat<(int_x86_avx_vperm2f128_ps_256 VR256:$src1, VR256:$src2, imm:$src3),
5451 (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$src3)>;
5452 def : Pat<(int_x86_avx_vperm2f128_pd_256 VR256:$src1, VR256:$src2, imm:$src3),
5453 (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$src3)>;
5454 def : Pat<(int_x86_avx_vperm2f128_si_256 VR256:$src1, VR256:$src2, imm:$src3),
5455 (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$src3)>;
5457 def : Pat<(int_x86_avx_vperm2f128_ps_256
5458 VR256:$src1, (memopv8f32 addr:$src2), imm:$src3),
5459 (VPERM2F128rm VR256:$src1, addr:$src2, imm:$src3)>;
5460 def : Pat<(int_x86_avx_vperm2f128_pd_256
5461 VR256:$src1, (memopv4f64 addr:$src2), imm:$src3),
5462 (VPERM2F128rm VR256:$src1, addr:$src2, imm:$src3)>;
5463 def : Pat<(int_x86_avx_vperm2f128_si_256
5464 VR256:$src1, (memopv8i32 addr:$src2), imm:$src3),
5465 (VPERM2F128rm VR256:$src1, addr:$src2, imm:$src3)>;
5467 //===----------------------------------------------------------------------===//
5468 // SSE Shuffle pattern fragments
5469 //===----------------------------------------------------------------------===//
5471 // This is part of a "work in progress" refactoring. The idea is that all
5472 // vector shuffles are going to be translated into target specific nodes and
5473 // directly matched by the patterns below (which can be changed along the way)
5474 // The AVX version of some but not all of them are described here, and more
5475 // should come in a near future.
5477 // Shuffle with PSHUFD instruction folding loads. The first two patterns match
5478 // SSE2 loads, which are always promoted to v2i64. The last one should match
5479 // the SSE1 case, where the only legal load is v4f32, but there is no PSHUFD
5480 // in SSE2, how does it ever worked? Anyway, the pattern will remain here until
5481 // we investigate further.
5482 def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv2i64 addr:$src1)),
5484 (VPSHUFDmi addr:$src1, imm:$imm)>, Requires<[HasAVX]>;
5485 def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv2i64 addr:$src1)),
5487 (PSHUFDmi addr:$src1, imm:$imm)>;
5488 def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv4f32 addr:$src1)),
5490 (PSHUFDmi addr:$src1, imm:$imm)>; // FIXME: has this ever worked?
5492 // Shuffle with PSHUFD instruction.
5493 def : Pat<(v4f32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
5494 (VPSHUFDri VR128:$src1, imm:$imm)>, Requires<[HasAVX]>;
5495 def : Pat<(v4f32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
5496 (PSHUFDri VR128:$src1, imm:$imm)>;
5498 def : Pat<(v4i32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
5499 (VPSHUFDri VR128:$src1, imm:$imm)>, Requires<[HasAVX]>;
5500 def : Pat<(v4i32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
5501 (PSHUFDri VR128:$src1, imm:$imm)>;
5503 // Shuffle with SHUFPD instruction.
5504 def : Pat<(v2f64 (X86Shufps VR128:$src1,
5505 (memopv2f64 addr:$src2), (i8 imm:$imm))),
5506 (VSHUFPDrmi VR128:$src1, addr:$src2, imm:$imm)>, Requires<[HasAVX]>;
5507 def : Pat<(v2f64 (X86Shufps VR128:$src1,
5508 (memopv2f64 addr:$src2), (i8 imm:$imm))),
5509 (SHUFPDrmi VR128:$src1, addr:$src2, imm:$imm)>;
5511 def : Pat<(v2i64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5512 (VSHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>, Requires<[HasAVX]>;
5513 def : Pat<(v2i64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5514 (SHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;
5516 def : Pat<(v2f64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5517 (VSHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>, Requires<[HasAVX]>;
5518 def : Pat<(v2f64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5519 (SHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;
5521 // Shuffle with SHUFPS instruction.
5522 def : Pat<(v4f32 (X86Shufps VR128:$src1,
5523 (memopv4f32 addr:$src2), (i8 imm:$imm))),
5524 (VSHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>, Requires<[HasAVX]>;
5525 def : Pat<(v4f32 (X86Shufps VR128:$src1,
5526 (memopv4f32 addr:$src2), (i8 imm:$imm))),
5527 (SHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
5529 def : Pat<(v4f32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5530 (VSHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>, Requires<[HasAVX]>;
5531 def : Pat<(v4f32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5532 (SHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
5534 def : Pat<(v4i32 (X86Shufps VR128:$src1,
5535 (bc_v4i32 (memopv2i64 addr:$src2)), (i8 imm:$imm))),
5536 (VSHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>, Requires<[HasAVX]>;
5537 def : Pat<(v4i32 (X86Shufps VR128:$src1,
5538 (bc_v4i32 (memopv2i64 addr:$src2)), (i8 imm:$imm))),
5539 (SHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
5541 def : Pat<(v4i32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5542 (VSHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>, Requires<[HasAVX]>;
5543 def : Pat<(v4i32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5544 (SHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
5546 // Shuffle with MOVHLPS instruction
5547 def : Pat<(v4f32 (X86Movhlps VR128:$src1, VR128:$src2)),
5548 (MOVHLPSrr VR128:$src1, VR128:$src2)>;
5549 def : Pat<(v4i32 (X86Movhlps VR128:$src1, VR128:$src2)),
5550 (MOVHLPSrr VR128:$src1, VR128:$src2)>;
5552 // Shuffle with MOVDDUP instruction
5553 def : Pat<(X86Movddup (memopv2f64 addr:$src)),
5554 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
5555 def : Pat<(X86Movddup (memopv2f64 addr:$src)),
5556 (MOVDDUPrm addr:$src)>;
5558 def : Pat<(X86Movddup (bc_v2f64 (memopv4f32 addr:$src))),
5559 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
5560 def : Pat<(X86Movddup (bc_v2f64 (memopv4f32 addr:$src))),
5561 (MOVDDUPrm addr:$src)>;
5563 def : Pat<(X86Movddup (bc_v2f64 (memopv2i64 addr:$src))),
5564 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
5565 def : Pat<(X86Movddup (bc_v2f64 (memopv2i64 addr:$src))),
5566 (MOVDDUPrm addr:$src)>;
5568 def : Pat<(X86Movddup (v2f64 (scalar_to_vector (loadf64 addr:$src)))),
5569 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
5570 def : Pat<(X86Movddup (v2f64 (scalar_to_vector (loadf64 addr:$src)))),
5571 (MOVDDUPrm addr:$src)>;
5573 def : Pat<(X86Movddup (bc_v2f64
5574 (v2i64 (scalar_to_vector (loadi64 addr:$src))))),
5575 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
5576 def : Pat<(X86Movddup (bc_v2f64
5577 (v2i64 (scalar_to_vector (loadi64 addr:$src))))),
5578 (MOVDDUPrm addr:$src)>;
5581 // Shuffle with UNPCKLPS
5582 def : Pat<(v4f32 (X86Unpcklps VR128:$src1, (memopv4f32 addr:$src2))),
5583 (VUNPCKLPSrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
5584 def : Pat<(v4f32 (X86Unpcklps VR128:$src1, (memopv4f32 addr:$src2))),
5585 (UNPCKLPSrm VR128:$src1, addr:$src2)>;
5587 def : Pat<(v4f32 (X86Unpcklps VR128:$src1, VR128:$src2)),
5588 (VUNPCKLPSrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>;
5589 def : Pat<(v4f32 (X86Unpcklps VR128:$src1, VR128:$src2)),
5590 (UNPCKLPSrr VR128:$src1, VR128:$src2)>;
5592 // Shuffle with UNPCKHPS
5593 def : Pat<(v4f32 (X86Unpckhps VR128:$src1, (memopv4f32 addr:$src2))),
5594 (VUNPCKHPSrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
5595 def : Pat<(v4f32 (X86Unpckhps VR128:$src1, (memopv4f32 addr:$src2))),
5596 (UNPCKHPSrm VR128:$src1, addr:$src2)>;
5598 def : Pat<(v4f32 (X86Unpckhps VR128:$src1, VR128:$src2)),
5599 (VUNPCKHPSrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>;
5600 def : Pat<(v4f32 (X86Unpckhps VR128:$src1, VR128:$src2)),
5601 (UNPCKHPSrr VR128:$src1, VR128:$src2)>;
5603 // Shuffle with UNPCKLPD
5604 def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, (memopv2f64 addr:$src2))),
5605 (VUNPCKLPSrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
5606 def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, (memopv2f64 addr:$src2))),
5607 (UNPCKLPSrm VR128:$src1, addr:$src2)>;
5609 def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, VR128:$src2)),
5610 (VUNPCKLPDrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>;
5611 def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, VR128:$src2)),
5612 (UNPCKLPDrr VR128:$src1, VR128:$src2)>;
5614 // Shuffle with UNPCKHPD
5615 def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, (memopv2f64 addr:$src2))),
5616 (VUNPCKLPSrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
5617 def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, (memopv2f64 addr:$src2))),
5618 (UNPCKLPSrm VR128:$src1, addr:$src2)>;
5620 def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, VR128:$src2)),
5621 (VUNPCKHPDrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>;
5622 def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, VR128:$src2)),
5623 (UNPCKHPDrr VR128:$src1, VR128:$src2)>;
5625 // Shuffle with PUNPCKLBW
5626 def : Pat<(v16i8 (X86Punpcklbw VR128:$src1,
5627 (bc_v16i8 (memopv2i64 addr:$src2)))),
5628 (PUNPCKLBWrm VR128:$src1, addr:$src2)>;
5629 def : Pat<(v16i8 (X86Punpcklbw VR128:$src1, VR128:$src2)),
5630 (PUNPCKLBWrr VR128:$src1, VR128:$src2)>;
5632 // Shuffle with PUNPCKLWD
5633 def : Pat<(v8i16 (X86Punpcklwd VR128:$src1,
5634 (bc_v8i16 (memopv2i64 addr:$src2)))),
5635 (PUNPCKLWDrm VR128:$src1, addr:$src2)>;
5636 def : Pat<(v8i16 (X86Punpcklwd VR128:$src1, VR128:$src2)),
5637 (PUNPCKLWDrr VR128:$src1, VR128:$src2)>;
5639 // Shuffle with PUNPCKLDQ
5640 def : Pat<(v4i32 (X86Punpckldq VR128:$src1,
5641 (bc_v4i32 (memopv2i64 addr:$src2)))),
5642 (PUNPCKLDQrm VR128:$src1, addr:$src2)>;
5643 def : Pat<(v4i32 (X86Punpckldq VR128:$src1, VR128:$src2)),
5644 (PUNPCKLDQrr VR128:$src1, VR128:$src2)>;
5646 // Shuffle with PUNPCKLQDQ
5647 def : Pat<(v2i64 (X86Punpcklqdq VR128:$src1, (memopv2i64 addr:$src2))),
5648 (PUNPCKLQDQrm VR128:$src1, addr:$src2)>;
5649 def : Pat<(v2i64 (X86Punpcklqdq VR128:$src1, VR128:$src2)),
5650 (PUNPCKLQDQrr VR128:$src1, VR128:$src2)>;
5652 // Shuffle with PUNPCKHBW
5653 def : Pat<(v16i8 (X86Punpckhbw VR128:$src1,
5654 (bc_v16i8 (memopv2i64 addr:$src2)))),
5655 (PUNPCKHBWrm VR128:$src1, addr:$src2)>;
5656 def : Pat<(v16i8 (X86Punpckhbw VR128:$src1, VR128:$src2)),
5657 (PUNPCKHBWrr VR128:$src1, VR128:$src2)>;
5659 // Shuffle with PUNPCKHWD
5660 def : Pat<(v8i16 (X86Punpckhwd VR128:$src1,
5661 (bc_v8i16 (memopv2i64 addr:$src2)))),
5662 (PUNPCKHWDrm VR128:$src1, addr:$src2)>;
5663 def : Pat<(v8i16 (X86Punpckhwd VR128:$src1, VR128:$src2)),
5664 (PUNPCKHWDrr VR128:$src1, VR128:$src2)>;
5666 // Shuffle with PUNPCKHDQ
5667 def : Pat<(v4i32 (X86Punpckhdq VR128:$src1,
5668 (bc_v4i32 (memopv2i64 addr:$src2)))),
5669 (PUNPCKHDQrm VR128:$src1, addr:$src2)>;
5670 def : Pat<(v4i32 (X86Punpckhdq VR128:$src1, VR128:$src2)),
5671 (PUNPCKHDQrr VR128:$src1, VR128:$src2)>;
5673 // Shuffle with PUNPCKHQDQ
5674 def : Pat<(v2i64 (X86Punpckhqdq VR128:$src1, (memopv2i64 addr:$src2))),
5675 (PUNPCKHQDQrm VR128:$src1, addr:$src2)>;
5676 def : Pat<(v2i64 (X86Punpckhqdq VR128:$src1, VR128:$src2)),
5677 (PUNPCKHQDQrr VR128:$src1, VR128:$src2)>;
5679 // Shuffle with MOVLHPS
5680 def : Pat<(X86Movlhps VR128:$src1,
5681 (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
5682 (MOVHPSrm VR128:$src1, addr:$src2)>;
5683 def : Pat<(X86Movlhps VR128:$src1,
5684 (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
5685 (MOVHPSrm VR128:$src1, addr:$src2)>;
5686 def : Pat<(v4f32 (X86Movlhps VR128:$src1, VR128:$src2)),
5687 (MOVLHPSrr VR128:$src1, VR128:$src2)>;
5688 def : Pat<(v4i32 (X86Movlhps VR128:$src1, VR128:$src2)),
5689 (MOVLHPSrr VR128:$src1, VR128:$src2)>;
5690 def : Pat<(v2i64 (X86Movlhps VR128:$src1, VR128:$src2)),
5691 (MOVLHPSrr (v2i64 VR128:$src1), VR128:$src2)>;
5693 // FIXME: Instead of X86Movddup, there should be a X86Unpcklpd here, the problem
5694 // is during lowering, where it's not possible to recognize the load fold cause
5695 // it has two uses through a bitcast. One use disappears at isel time and the
5696 // fold opportunity reappears.
5697 def : Pat<(v2f64 (X86Movddup VR128:$src)),
5698 (UNPCKLPDrr VR128:$src, VR128:$src)>;
5700 // Shuffle with MOVLHPD
5701 def : Pat<(v2f64 (X86Movlhpd VR128:$src1,
5702 (scalar_to_vector (loadf64 addr:$src2)))),
5703 (MOVHPDrm VR128:$src1, addr:$src2)>;
5705 // FIXME: Instead of X86Unpcklpd, there should be a X86Movlhpd here, the problem
5706 // is during lowering, where it's not possible to recognize the load fold cause
5707 // it has two uses through a bitcast. One use disappears at isel time and the
5708 // fold opportunity reappears.
5709 def : Pat<(v2f64 (X86Unpcklpd VR128:$src1,
5710 (scalar_to_vector (loadf64 addr:$src2)))),
5711 (MOVHPDrm VR128:$src1, addr:$src2)>;
5713 // Shuffle with MOVSS
5714 def : Pat<(v4f32 (X86Movss VR128:$src1, (scalar_to_vector FR32:$src2))),
5715 (MOVSSrr VR128:$src1, FR32:$src2)>;
5716 def : Pat<(v4i32 (X86Movss VR128:$src1, VR128:$src2)),
5717 (MOVSSrr (v4i32 VR128:$src1),
5718 (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_ss))>;
5719 def : Pat<(v4f32 (X86Movss VR128:$src1, VR128:$src2)),
5720 (MOVSSrr (v4f32 VR128:$src1),
5721 (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_ss))>;
5722 // FIXME: Instead of a X86Movss there should be a X86Movlps here, the problem
5723 // is during lowering, where it's not possible to recognize the load fold cause
5724 // it has two uses through a bitcast. One use disappears at isel time and the
5725 // fold opportunity reappears.
5726 def : Pat<(X86Movss VR128:$src1,
5727 (bc_v4i32 (v2i64 (load addr:$src2)))),
5728 (MOVLPSrm VR128:$src1, addr:$src2)>;
5730 // Shuffle with MOVSD
5731 def : Pat<(v2f64 (X86Movsd VR128:$src1, (scalar_to_vector FR64:$src2))),
5732 (MOVSDrr VR128:$src1, FR64:$src2)>;
5733 def : Pat<(v2i64 (X86Movsd VR128:$src1, VR128:$src2)),
5734 (MOVSDrr (v2i64 VR128:$src1),
5735 (EXTRACT_SUBREG (v2i64 VR128:$src2), sub_sd))>;
5736 def : Pat<(v2f64 (X86Movsd VR128:$src1, VR128:$src2)),
5737 (MOVSDrr (v2f64 VR128:$src1),
5738 (EXTRACT_SUBREG (v2f64 VR128:$src2), sub_sd))>;
5739 def : Pat<(v4f32 (X86Movsd VR128:$src1, VR128:$src2)),
5740 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_sd))>;
5741 def : Pat<(v4i32 (X86Movsd VR128:$src1, VR128:$src2)),
5742 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_sd))>;
5744 // Shuffle with MOVSHDUP
5745 def : Pat<(v4i32 (X86Movshdup VR128:$src)),
5746 (MOVSHDUPrr VR128:$src)>;
5747 def : Pat<(X86Movshdup (bc_v4i32 (memopv2i64 addr:$src))),
5748 (MOVSHDUPrm addr:$src)>;
5750 def : Pat<(v4f32 (X86Movshdup VR128:$src)),
5751 (MOVSHDUPrr VR128:$src)>;
5752 def : Pat<(X86Movshdup (memopv4f32 addr:$src)),
5753 (MOVSHDUPrm addr:$src)>;
5755 // Shuffle with MOVSLDUP
5756 def : Pat<(v4i32 (X86Movsldup VR128:$src)),
5757 (MOVSLDUPrr VR128:$src)>;
5758 def : Pat<(X86Movsldup (bc_v4i32 (memopv2i64 addr:$src))),
5759 (MOVSLDUPrm addr:$src)>;
5761 def : Pat<(v4f32 (X86Movsldup VR128:$src)),
5762 (MOVSLDUPrr VR128:$src)>;
5763 def : Pat<(X86Movsldup (memopv4f32 addr:$src)),
5764 (MOVSLDUPrm addr:$src)>;
5766 // Shuffle with PSHUFHW
5767 def : Pat<(v8i16 (X86PShufhw VR128:$src, (i8 imm:$imm))),
5768 (PSHUFHWri VR128:$src, imm:$imm)>;
5769 def : Pat<(v8i16 (X86PShufhw (bc_v8i16 (memopv2i64 addr:$src)), (i8 imm:$imm))),
5770 (PSHUFHWmi addr:$src, imm:$imm)>;
5772 // Shuffle with PSHUFLW
5773 def : Pat<(v8i16 (X86PShuflw VR128:$src, (i8 imm:$imm))),
5774 (PSHUFLWri VR128:$src, imm:$imm)>;
5775 def : Pat<(v8i16 (X86PShuflw (bc_v8i16 (memopv2i64 addr:$src)), (i8 imm:$imm))),
5776 (PSHUFLWmi addr:$src, imm:$imm)>;
5778 // Shuffle with PALIGN
5779 def : Pat<(v4i32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5780 (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
5781 def : Pat<(v4f32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5782 (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
5783 def : Pat<(v8i16 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5784 (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
5785 def : Pat<(v16i8 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5786 (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
5788 // Shuffle with MOVLPS
5789 def : Pat<(v4f32 (X86Movlps VR128:$src1, (load addr:$src2))),
5790 (MOVLPSrm VR128:$src1, addr:$src2)>;
5791 def : Pat<(v4i32 (X86Movlps VR128:$src1, (load addr:$src2))),
5792 (MOVLPSrm VR128:$src1, addr:$src2)>;
5793 def : Pat<(X86Movlps VR128:$src1,
5794 (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
5795 (MOVLPSrm VR128:$src1, addr:$src2)>;
5796 // FIXME: Instead of a X86Movlps there should be a X86Movsd here, the problem
5797 // is during lowering, where it's not possible to recognize the load fold cause
5798 // it has two uses through a bitcast. One use disappears at isel time and the
5799 // fold opportunity reappears.
5800 def : Pat<(v4f32 (X86Movlps VR128:$src1, VR128:$src2)),
5801 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_sd))>;
5803 // Shuffle with MOVLPD
5804 def : Pat<(v2f64 (X86Movlpd VR128:$src1, (load addr:$src2))),
5805 (MOVLPDrm VR128:$src1, addr:$src2)>;
5806 def : Pat<(v2i64 (X86Movlpd VR128:$src1, (load addr:$src2))),
5807 (MOVLPDrm VR128:$src1, addr:$src2)>;
5808 def : Pat<(v2f64 (X86Movlpd VR128:$src1,
5809 (scalar_to_vector (loadf64 addr:$src2)))),
5810 (MOVLPDrm VR128:$src1, addr:$src2)>;
5812 // Extra patterns to match stores with MOVHPS/PD and MOVLPS/PD
5813 def : Pat<(store (f64 (vector_extract
5814 (v2f64 (X86Unpckhps VR128:$src, (undef))), (iPTR 0))),addr:$dst),
5815 (MOVHPSmr addr:$dst, VR128:$src)>;
5816 def : Pat<(store (f64 (vector_extract
5817 (v2f64 (X86Unpckhpd VR128:$src, (undef))), (iPTR 0))),addr:$dst),
5818 (MOVHPDmr addr:$dst, VR128:$src)>;
5820 def : Pat<(store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)),addr:$src1),
5821 (MOVLPSmr addr:$src1, VR128:$src2)>;
5822 def : Pat<(store (v4i32 (X86Movlps
5823 (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2)), addr:$src1),
5824 (MOVLPSmr addr:$src1, VR128:$src2)>;
5826 def : Pat<(store (v2f64 (X86Movlpd (load addr:$src1), VR128:$src2)),addr:$src1),
5827 (MOVLPDmr addr:$src1, VR128:$src2)>;
5828 def : Pat<(store (v2i64 (X86Movlpd (load addr:$src1), VR128:$src2)),addr:$src1),
5829 (MOVLPDmr addr:$src1, VR128:$src2)>;