//====- X86InstrMMX.td - Describe the X86 Instruction Set --*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the X86 MMX instruction set, defining the instructions,
// and properties of the instructions which are needed for code generation,
// machine code emission, and analysis.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// MMX Pattern Fragments
//===----------------------------------------------------------------------===//

def load_mmx : PatFrag<(ops node:$ptr), (v1i64 (load node:$ptr))>;

def bc_v8i8  : PatFrag<(ops node:$in), (v8i8  (bitconvert node:$in))>;
def bc_v4i16 : PatFrag<(ops node:$in), (v4i16 (bitconvert node:$in))>;
def bc_v2i32 : PatFrag<(ops node:$in), (v2i32 (bitconvert node:$in))>;
def bc_v1i64 : PatFrag<(ops node:$in), (v1i64 (bitconvert node:$in))>;
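// Note: load_mmx produces the 64-bit MMX value as a v1i64; the bc_* fragments
// wrap it in a bitconvert so the same load can be matched at the other MMX
// vector types (v8i8, v4i16, v2i32) in the patterns below.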
//===----------------------------------------------------------------------===//
// MMX Masks
//===----------------------------------------------------------------------===//

// MMX_SHUFFLE_get_shuf_imm xform function: convert vector_shuffle mask to
// PSHUF*, SHUFP* etc. imm.
def MMX_SHUFFLE_get_shuf_imm : SDNodeXForm<vector_shuffle, [{
  return getI8Imm(X86::getShuffleSHUFImmediate(N));
}]>;

// Patterns for: vector_shuffle v1, v2, <2, 6, 3, 7, ...>
def mmx_unpckh : PatFrag<(ops node:$lhs, node:$rhs),
                         (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isUNPCKHMask(cast<ShuffleVectorSDNode>(N));
}]>;

// Patterns for: vector_shuffle v1, v2, <0, 4, 2, 5, ...>
def mmx_unpckl : PatFrag<(ops node:$lhs, node:$rhs),
                         (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isUNPCKLMask(cast<ShuffleVectorSDNode>(N));
}]>;

// Patterns for: vector_shuffle v1, <undef>, <2, 2, 3, 3, ...>
def mmx_unpckh_undef : PatFrag<(ops node:$lhs, node:$rhs),
                               (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isUNPCKH_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
}]>;

// Patterns for: vector_shuffle v1, <undef>, <0, 0, 1, 1, ...>
def mmx_unpckl_undef : PatFrag<(ops node:$lhs, node:$rhs),
                               (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isUNPCKL_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
}]>;

def mmx_pshufw : PatFrag<(ops node:$lhs, node:$rhs),
                         (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isPSHUFDMask(cast<ShuffleVectorSDNode>(N));
}], MMX_SHUFFLE_get_shuf_imm>;
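// The xform above produces the 2-bits-per-element PSHUFW/SHUFP-style
// immediate, element 0 in the low bits: e.g. the identity mask <0, 1, 2, 3>
// encodes as 0xE4 (0b11100100) and the word-reversal mask <3, 2, 1, 0> as 0x1B.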
//===----------------------------------------------------------------------===//
// MMX Multiclasses
//===----------------------------------------------------------------------===//
let Constraints = "$src1 = $dst" in {
// MMXI_binop_rm - Simple MMX binary operator.
multiclass MMXI_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
                         ValueType OpVT, bit Commutable = 0> {
  def rr : MMXI<opc, MRMSrcReg, (outs VR64:$dst),
                (ins VR64:$src1, VR64:$src2),
                !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                [(set VR64:$dst, (OpVT (OpNode VR64:$src1, VR64:$src2)))]> {
    let isCommutable = Commutable;
  }
  def rm : MMXI<opc, MRMSrcMem, (outs VR64:$dst),
                (ins VR64:$src1, i64mem:$src2),
                !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                [(set VR64:$dst, (OpVT (OpNode VR64:$src1,
                                        (bitconvert
                                         (load_mmx addr:$src2)))))]>;
}
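// For example, "defm MMX_PADDB : MMXI_binop_rm<0xFC, "paddb", add, v8i8, 1>;"
// (used below) expands to the register-register MMX_PADDBrr and the
// register-memory MMX_PADDBrm instructions.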
multiclass MMXI_binop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId,
                             bit Commutable = 0> {
  def rr : MMXI<opc, MRMSrcReg, (outs VR64:$dst),
                (ins VR64:$src1, VR64:$src2),
                !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                [(set VR64:$dst, (IntId VR64:$src1, VR64:$src2))]> {
    let isCommutable = Commutable;
  }
  def rm : MMXI<opc, MRMSrcMem, (outs VR64:$dst),
                (ins VR64:$src1, i64mem:$src2),
                !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                [(set VR64:$dst, (IntId VR64:$src1,
                                  (bitconvert (load_mmx addr:$src2))))]>;
}

// MMXI_binop_rm_v1i64 - Simple MMX binary operator whose type is v1i64.
//
// FIXME: we could eliminate this and use MMXI_binop_rm instead if tblgen knew
// to collapse (bitconvert VT to VT) into its operand.
//
multiclass MMXI_binop_rm_v1i64<bits<8> opc, string OpcodeStr, SDNode OpNode,
                               bit Commutable = 0> {
  def rr : MMXI<opc, MRMSrcReg, (outs VR64:$dst),
                (ins VR64:$src1, VR64:$src2),
                !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                [(set VR64:$dst, (v1i64 (OpNode VR64:$src1, VR64:$src2)))]> {
    let isCommutable = Commutable;
  }
  def rm : MMXI<opc, MRMSrcMem, (outs VR64:$dst),
                (ins VR64:$src1, i64mem:$src2),
                !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                [(set VR64:$dst,
                  (OpNode VR64:$src1, (load_mmx addr:$src2)))]>;
}

multiclass MMXI_binop_rmi_int<bits<8> opc, bits<8> opc2, Format ImmForm,
                              string OpcodeStr, Intrinsic IntId,
                              Intrinsic IntId2> {
  def rr : MMXI<opc, MRMSrcReg, (outs VR64:$dst),
                (ins VR64:$src1, VR64:$src2),
                !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                [(set VR64:$dst, (IntId VR64:$src1, VR64:$src2))]>;
  def rm : MMXI<opc, MRMSrcMem, (outs VR64:$dst),
                (ins VR64:$src1, i64mem:$src2),
                !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                [(set VR64:$dst, (IntId VR64:$src1,
                                  (bitconvert (load_mmx addr:$src2))))]>;
  def ri : MMXIi8<opc2, ImmForm, (outs VR64:$dst),
                  (ins VR64:$src1, i32i8imm:$src2),
                  !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                  [(set VR64:$dst, (IntId2 VR64:$src1, (i32 imm:$src2)))]>;
}
} // Constraints = "$src1 = $dst"
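// MMXI_binop_rmi_int is used by the shift instructions below: the rr/rm forms
// take the shift count from an MMX register or a 64-bit memory operand
// (IntId), while the ri form encodes the count as an 8-bit immediate (IntId2),
// e.g. "psrlw $2, %mm0".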
//===----------------------------------------------------------------------===//
// MMX EMMS & FEMMS Instructions
//===----------------------------------------------------------------------===//

def MMX_EMMS  : MMXI<0x77, RawFrm, (outs), (ins), "emms",
                     [(int_x86_mmx_emms)]>;
def MMX_FEMMS : MMXI<0x0E, RawFrm, (outs), (ins), "femms",
                     [(int_x86_mmx_femms)]>;
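// Both instructions mark the shared x87/MMX register state empty so that
// subsequent x87 FP code sees an empty register stack; femms is the AMD
// 3DNow! variant of emms.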
//===----------------------------------------------------------------------===//
// MMX Scalar Instructions
//===----------------------------------------------------------------------===//

// Data Transfer Instructions
def MMX_MOVD64rr : MMXI<0x6E, MRMSrcReg, (outs VR64:$dst), (ins GR32:$src),
                        "movd\t{$src, $dst|$dst, $src}",
                        [(set VR64:$dst,
                          (v2i32 (scalar_to_vector GR32:$src)))]>;
let canFoldAsLoad = 1, isReMaterializable = 1 in
def MMX_MOVD64rm : MMXI<0x6E, MRMSrcMem, (outs VR64:$dst), (ins i32mem:$src),
                        "movd\t{$src, $dst|$dst, $src}",
                        [(set VR64:$dst,
                          (v2i32 (scalar_to_vector (loadi32 addr:$src))))]>;

def MMX_MOVD64mr : MMXI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR64:$src),
                        "movd\t{$src, $dst|$dst, $src}", []>;
def MMX_MOVD64grr : MMXI<0x7E, MRMDestReg, (outs), (ins GR32:$dst, VR64:$src),
                         "movd\t{$src, $dst|$dst, $src}", []>;
def MMX_MOVQ64gmr : MMXRI<0x7E, MRMDestMem, (outs),
                          (ins i64mem:$dst, VR64:$src),
                          "movq\t{$src, $dst|$dst, $src}", []>;

let neverHasSideEffects = 1 in
def MMX_MOVD64to64rr : MMXRI<0x6E, MRMSrcReg, (outs VR64:$dst), (ins GR64:$src),
                             "movd\t{$src, $dst|$dst, $src}",
                             []>;

let neverHasSideEffects = 1 in
// These are 64 bit moves, but since the OS X assembler doesn't
// recognize a register-register movq, we write them as
// movd.
def MMX_MOVD64from64rr : MMXRI<0x7E, MRMDestReg,
                               (outs GR64:$dst), (ins VR64:$src),
                               "movd\t{$src, $dst|$dst, $src}", []>;
def MMX_MOVD64rrv164 : MMXI<0x6E, MRMSrcReg, (outs VR64:$dst), (ins GR64:$src),
                            "movd\t{$src, $dst|$dst, $src}",
                            [(set VR64:$dst,
                              (v1i64 (scalar_to_vector GR64:$src)))]>;

let neverHasSideEffects = 1 in
def MMX_MOVQ64rr : MMXI<0x6F, MRMSrcReg, (outs VR64:$dst), (ins VR64:$src),
                        "movq\t{$src, $dst|$dst, $src}", []>;
let canFoldAsLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in
def MMX_MOVQ64rm : MMXI<0x6F, MRMSrcMem, (outs VR64:$dst), (ins i64mem:$src),
                        "movq\t{$src, $dst|$dst, $src}",
                        [(set VR64:$dst, (load_mmx addr:$src))]>;
def MMX_MOVQ64mr : MMXI<0x7F, MRMDestMem, (outs), (ins i64mem:$dst, VR64:$src),
                        "movq\t{$src, $dst|$dst, $src}",
                        [(store (v1i64 VR64:$src), addr:$dst)]>;
def MMX_MOVDQ2Qrr : SDIi8<0xD6, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
                          "movdq2q\t{$src, $dst|$dst, $src}",
                          [(set VR64:$dst,
                            (v1i64 (bitconvert
                             (i64 (vector_extract (v2i64 VR128:$src),
                                                  (iPTR 0))))))]>;

def MMX_MOVQ2DQrr : SSDIi8<0xD6, MRMSrcReg, (outs VR128:$dst), (ins VR64:$src),
                           "movq2dq\t{$src, $dst|$dst, $src}",
                           [(set VR128:$dst,
                             (movl immAllZerosV,
                              (v2i64 (scalar_to_vector (i64 (bitconvert VR64:$src))))))]>;
let neverHasSideEffects = 1 in
def MMX_MOVQ2FR64rr: SSDIi8<0xD6, MRMSrcReg, (outs FR64:$dst), (ins VR64:$src),
                            "movq2dq\t{$src, $dst|$dst, $src}", []>;
def MMX_MOVNTQmr : MMXI<0xE7, MRMDestMem, (outs), (ins i64mem:$dst, VR64:$src),
                        "movntq\t{$src, $dst|$dst, $src}",
                        [(int_x86_mmx_movnt_dq addr:$dst, VR64:$src)]>;
let AddedComplexity = 15 in
// movd to MMX register zero-extends
def MMX_MOVZDI2PDIrr : MMXI<0x6E, MRMSrcReg, (outs VR64:$dst), (ins GR32:$src),
                            "movd\t{$src, $dst|$dst, $src}",
                            [(set VR64:$dst,
                              (v2i32 (X86vzmovl (v2i32 (scalar_to_vector GR32:$src)))))]>;
let AddedComplexity = 20 in
def MMX_MOVZDI2PDIrm : MMXI<0x6E, MRMSrcMem, (outs VR64:$dst),
                            (ins i32mem:$src),
                            "movd\t{$src, $dst|$dst, $src}",
                            [(set VR64:$dst,
                              (v2i32 (X86vzmovl (v2i32
                                (scalar_to_vector (loadi32 addr:$src))))))]>;
// Arithmetic Instructions

// -- Addition
defm MMX_PADDB : MMXI_binop_rm<0xFC, "paddb", add, v8i8,  1>;
defm MMX_PADDW : MMXI_binop_rm<0xFD, "paddw", add, v4i16, 1>;
defm MMX_PADDD : MMXI_binop_rm<0xFE, "paddd", add, v2i32, 1>;
defm MMX_PADDQ : MMXI_binop_rm<0xD4, "paddq", add, v1i64, 1>;

defm MMX_PADDSB  : MMXI_binop_rm_int<0xEC, "paddsb" , int_x86_mmx_padds_b, 1>;
defm MMX_PADDSW  : MMXI_binop_rm_int<0xED, "paddsw" , int_x86_mmx_padds_w, 1>;

defm MMX_PADDUSB : MMXI_binop_rm_int<0xDC, "paddusb", int_x86_mmx_paddus_b, 1>;
defm MMX_PADDUSW : MMXI_binop_rm_int<0xDD, "paddusw", int_x86_mmx_paddus_w, 1>;

// -- Subtraction
defm MMX_PSUBB : MMXI_binop_rm<0xF8, "psubb", sub, v8i8>;
defm MMX_PSUBW : MMXI_binop_rm<0xF9, "psubw", sub, v4i16>;
defm MMX_PSUBD : MMXI_binop_rm<0xFA, "psubd", sub, v2i32>;
defm MMX_PSUBQ : MMXI_binop_rm<0xFB, "psubq", sub, v1i64>;

defm MMX_PSUBSB  : MMXI_binop_rm_int<0xE8, "psubsb" , int_x86_mmx_psubs_b>;
defm MMX_PSUBSW  : MMXI_binop_rm_int<0xE9, "psubsw" , int_x86_mmx_psubs_w>;

defm MMX_PSUBUSB : MMXI_binop_rm_int<0xD8, "psubusb", int_x86_mmx_psubus_b>;
defm MMX_PSUBUSW : MMXI_binop_rm_int<0xD9, "psubusw", int_x86_mmx_psubus_w>;

// -- Multiplication
defm MMX_PMULLW  : MMXI_binop_rm<0xD5, "pmullw", mul, v4i16, 1>;

defm MMX_PMULHW  : MMXI_binop_rm_int<0xE5, "pmulhw",  int_x86_mmx_pmulh_w,  1>;
defm MMX_PMULHUW : MMXI_binop_rm_int<0xE4, "pmulhuw", int_x86_mmx_pmulhu_w, 1>;
defm MMX_PMULUDQ : MMXI_binop_rm_int<0xF4, "pmuludq", int_x86_mmx_pmulu_dq, 1>;

// -- Miscellanea
defm MMX_PMADDWD : MMXI_binop_rm_int<0xF5, "pmaddwd", int_x86_mmx_pmadd_wd, 1>;

defm MMX_PAVGB   : MMXI_binop_rm_int<0xE0, "pavgb", int_x86_mmx_pavg_b, 1>;
defm MMX_PAVGW   : MMXI_binop_rm_int<0xE3, "pavgw", int_x86_mmx_pavg_w, 1>;

defm MMX_PMINUB  : MMXI_binop_rm_int<0xDA, "pminub", int_x86_mmx_pminu_b, 1>;
defm MMX_PMINSW  : MMXI_binop_rm_int<0xEA, "pminsw", int_x86_mmx_pmins_w, 1>;

defm MMX_PMAXUB  : MMXI_binop_rm_int<0xDE, "pmaxub", int_x86_mmx_pmaxu_b, 1>;
defm MMX_PMAXSW  : MMXI_binop_rm_int<0xEE, "pmaxsw", int_x86_mmx_pmaxs_w, 1>;

defm MMX_PSADBW  : MMXI_binop_rm_int<0xF6, "psadbw", int_x86_mmx_psad_bw, 1>;
// Logical Instructions
defm MMX_PAND : MMXI_binop_rm_v1i64<0xDB, "pand", and, 1>;
defm MMX_POR  : MMXI_binop_rm_v1i64<0xEB, "por" , or,  1>;
defm MMX_PXOR : MMXI_binop_rm_v1i64<0xEF, "pxor", xor, 1>;
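// PANDN computes (~$src1) & $src2, so unlike pand/por/pxor it is not
// commutable and is matched with an explicit and/vnot pattern below.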
let Constraints = "$src1 = $dst" in {
  def MMX_PANDNrr : MMXI<0xDF, MRMSrcReg,
                         (outs VR64:$dst), (ins VR64:$src1, VR64:$src2),
                         "pandn\t{$src2, $dst|$dst, $src2}",
                         [(set VR64:$dst, (v1i64 (and (vnot VR64:$src1),
                                                      VR64:$src2)))]>;
  def MMX_PANDNrm : MMXI<0xDF, MRMSrcMem,
                         (outs VR64:$dst), (ins VR64:$src1, i64mem:$src2),
                         "pandn\t{$src2, $dst|$dst, $src2}",
                         [(set VR64:$dst, (v1i64 (and (vnot VR64:$src1),
                                                      (load addr:$src2))))]>;
}
// Shift Instructions
defm MMX_PSRLW : MMXI_binop_rmi_int<0xD1, 0x71, MRM2r, "psrlw",
                                    int_x86_mmx_psrl_w, int_x86_mmx_psrli_w>;
defm MMX_PSRLD : MMXI_binop_rmi_int<0xD2, 0x72, MRM2r, "psrld",
                                    int_x86_mmx_psrl_d, int_x86_mmx_psrli_d>;
defm MMX_PSRLQ : MMXI_binop_rmi_int<0xD3, 0x73, MRM2r, "psrlq",
                                    int_x86_mmx_psrl_q, int_x86_mmx_psrli_q>;

defm MMX_PSLLW : MMXI_binop_rmi_int<0xF1, 0x71, MRM6r, "psllw",
                                    int_x86_mmx_psll_w, int_x86_mmx_pslli_w>;
defm MMX_PSLLD : MMXI_binop_rmi_int<0xF2, 0x72, MRM6r, "pslld",
                                    int_x86_mmx_psll_d, int_x86_mmx_pslli_d>;
defm MMX_PSLLQ : MMXI_binop_rmi_int<0xF3, 0x73, MRM6r, "psllq",
                                    int_x86_mmx_psll_q, int_x86_mmx_pslli_q>;

defm MMX_PSRAW : MMXI_binop_rmi_int<0xE1, 0x71, MRM4r, "psraw",
                                    int_x86_mmx_psra_w, int_x86_mmx_psrai_w>;
defm MMX_PSRAD : MMXI_binop_rmi_int<0xE2, 0x72, MRM4r, "psrad",
                                    int_x86_mmx_psra_d, int_x86_mmx_psrai_d>;

// Shift up / down and insert zeros.
def : Pat<(v1i64 (X86vshl VR64:$src, (i8 imm:$amt))),
          (v1i64 (MMX_PSLLQri VR64:$src, imm:$amt))>;
def : Pat<(v1i64 (X86vshr VR64:$src, (i8 imm:$amt))),
          (v1i64 (MMX_PSRLQri VR64:$src, imm:$amt))>;
// Comparison Instructions
defm MMX_PCMPEQB : MMXI_binop_rm_int<0x74, "pcmpeqb", int_x86_mmx_pcmpeq_b>;
defm MMX_PCMPEQW : MMXI_binop_rm_int<0x75, "pcmpeqw", int_x86_mmx_pcmpeq_w>;
defm MMX_PCMPEQD : MMXI_binop_rm_int<0x76, "pcmpeqd", int_x86_mmx_pcmpeq_d>;

defm MMX_PCMPGTB : MMXI_binop_rm_int<0x64, "pcmpgtb", int_x86_mmx_pcmpgt_b>;
defm MMX_PCMPGTW : MMXI_binop_rm_int<0x65, "pcmpgtw", int_x86_mmx_pcmpgt_w>;
defm MMX_PCMPGTD : MMXI_binop_rm_int<0x66, "pcmpgtd", int_x86_mmx_pcmpgt_d>;

// Conversion Instructions

// -- Unpack Instructions
let Constraints = "$src1 = $dst" in {
  // Unpack High Packed Data Instructions
  def MMX_PUNPCKHBWrr : MMXI<0x68, MRMSrcReg,
                             (outs VR64:$dst), (ins VR64:$src1, VR64:$src2),
                             "punpckhbw\t{$src2, $dst|$dst, $src2}",
                             [(set VR64:$dst,
                               (v8i8 (mmx_unpckh VR64:$src1, VR64:$src2)))]>;
  def MMX_PUNPCKHBWrm : MMXI<0x68, MRMSrcMem,
                             (outs VR64:$dst), (ins VR64:$src1, i64mem:$src2),
                             "punpckhbw\t{$src2, $dst|$dst, $src2}",
                             [(set VR64:$dst,
                               (v8i8 (mmx_unpckh VR64:$src1,
                                      (bc_v8i8 (load_mmx addr:$src2)))))]>;

  def MMX_PUNPCKHWDrr : MMXI<0x69, MRMSrcReg,
                             (outs VR64:$dst), (ins VR64:$src1, VR64:$src2),
                             "punpckhwd\t{$src2, $dst|$dst, $src2}",
                             [(set VR64:$dst,
                               (v4i16 (mmx_unpckh VR64:$src1, VR64:$src2)))]>;
  def MMX_PUNPCKHWDrm : MMXI<0x69, MRMSrcMem,
                             (outs VR64:$dst), (ins VR64:$src1, i64mem:$src2),
                             "punpckhwd\t{$src2, $dst|$dst, $src2}",
                             [(set VR64:$dst,
                               (v4i16 (mmx_unpckh VR64:$src1,
                                       (bc_v4i16 (load_mmx addr:$src2)))))]>;

  def MMX_PUNPCKHDQrr : MMXI<0x6A, MRMSrcReg,
                             (outs VR64:$dst), (ins VR64:$src1, VR64:$src2),
                             "punpckhdq\t{$src2, $dst|$dst, $src2}",
                             [(set VR64:$dst,
                               (v2i32 (mmx_unpckh VR64:$src1, VR64:$src2)))]>;
  def MMX_PUNPCKHDQrm : MMXI<0x6A, MRMSrcMem,
                             (outs VR64:$dst), (ins VR64:$src1, i64mem:$src2),
                             "punpckhdq\t{$src2, $dst|$dst, $src2}",
                             [(set VR64:$dst,
                               (v2i32 (mmx_unpckh VR64:$src1,
                                       (bc_v2i32 (load_mmx addr:$src2)))))]>;

  // Unpack Low Packed Data Instructions
  def MMX_PUNPCKLBWrr : MMXI<0x60, MRMSrcReg,
                             (outs VR64:$dst), (ins VR64:$src1, VR64:$src2),
                             "punpcklbw\t{$src2, $dst|$dst, $src2}",
                             [(set VR64:$dst,
                               (v8i8 (mmx_unpckl VR64:$src1, VR64:$src2)))]>;
  def MMX_PUNPCKLBWrm : MMXI<0x60, MRMSrcMem,
                             (outs VR64:$dst), (ins VR64:$src1, i64mem:$src2),
                             "punpcklbw\t{$src2, $dst|$dst, $src2}",
                             [(set VR64:$dst,
                               (v8i8 (mmx_unpckl VR64:$src1,
                                      (bc_v8i8 (load_mmx addr:$src2)))))]>;

  def MMX_PUNPCKLWDrr : MMXI<0x61, MRMSrcReg,
                             (outs VR64:$dst), (ins VR64:$src1, VR64:$src2),
                             "punpcklwd\t{$src2, $dst|$dst, $src2}",
                             [(set VR64:$dst,
                               (v4i16 (mmx_unpckl VR64:$src1, VR64:$src2)))]>;
  def MMX_PUNPCKLWDrm : MMXI<0x61, MRMSrcMem,
                             (outs VR64:$dst), (ins VR64:$src1, i64mem:$src2),
                             "punpcklwd\t{$src2, $dst|$dst, $src2}",
                             [(set VR64:$dst,
                               (v4i16 (mmx_unpckl VR64:$src1,
                                       (bc_v4i16 (load_mmx addr:$src2)))))]>;

  def MMX_PUNPCKLDQrr : MMXI<0x62, MRMSrcReg,
                             (outs VR64:$dst), (ins VR64:$src1, VR64:$src2),
                             "punpckldq\t{$src2, $dst|$dst, $src2}",
                             [(set VR64:$dst,
                               (v2i32 (mmx_unpckl VR64:$src1, VR64:$src2)))]>;
  def MMX_PUNPCKLDQrm : MMXI<0x62, MRMSrcMem,
                             (outs VR64:$dst), (ins VR64:$src1, i64mem:$src2),
                             "punpckldq\t{$src2, $dst|$dst, $src2}",
                             [(set VR64:$dst,
                               (v2i32 (mmx_unpckl VR64:$src1,
                                       (bc_v2i32 (load_mmx addr:$src2)))))]>;
}
// -- Pack Instructions
defm MMX_PACKSSWB : MMXI_binop_rm_int<0x63, "packsswb", int_x86_mmx_packsswb>;
defm MMX_PACKSSDW : MMXI_binop_rm_int<0x6B, "packssdw", int_x86_mmx_packssdw>;
defm MMX_PACKUSWB : MMXI_binop_rm_int<0x67, "packuswb", int_x86_mmx_packuswb>;

// -- Shuffle Instructions
def MMX_PSHUFWri : MMXIi8<0x70, MRMSrcReg,
                          (outs VR64:$dst), (ins VR64:$src1, i8imm:$src2),
                          "pshufw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                          [(set VR64:$dst,
                            (v4i16 (mmx_pshufw:$src2 VR64:$src1, (undef))))]>;
def MMX_PSHUFWmi : MMXIi8<0x70, MRMSrcMem,
                          (outs VR64:$dst), (ins i64mem:$src1, i8imm:$src2),
                          "pshufw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                          [(set VR64:$dst,
                            (mmx_pshufw:$src2 (bc_v4i16 (load_mmx addr:$src1)),
                                              (undef)))]>;
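// Example: "pshufw $0x1b, %mm1, %mm0" selects words 3, 2, 1, 0 of %mm1,
// i.e. it reverses the four 16-bit elements.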
// -- Conversion Instructions
let neverHasSideEffects = 1 in {
def MMX_CVTPD2PIrr  : MMX2I<0x2D, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
                            "cvtpd2pi\t{$src, $dst|$dst, $src}", []>;

def MMX_CVTPD2PIrm  : MMX2I<0x2D, MRMSrcMem, (outs VR64:$dst),
                            (ins f128mem:$src),
                            "cvtpd2pi\t{$src, $dst|$dst, $src}", []>;

def MMX_CVTPI2PDrr  : MMX2I<0x2A, MRMSrcReg, (outs VR128:$dst), (ins VR64:$src),
                            "cvtpi2pd\t{$src, $dst|$dst, $src}", []>;

def MMX_CVTPI2PDrm  : MMX2I<0x2A, MRMSrcMem, (outs VR128:$dst),
                            (ins i64mem:$src),
                            "cvtpi2pd\t{$src, $dst|$dst, $src}", []>;

def MMX_CVTPI2PSrr  : MMXI<0x2A, MRMSrcReg, (outs VR128:$dst), (ins VR64:$src),
                           "cvtpi2ps\t{$src, $dst|$dst, $src}", []>;

def MMX_CVTPI2PSrm  : MMXI<0x2A, MRMSrcMem, (outs VR128:$dst),
                           (ins i64mem:$src),
                           "cvtpi2ps\t{$src, $dst|$dst, $src}", []>;

def MMX_CVTPS2PIrr  : MMXI<0x2D, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
                           "cvtps2pi\t{$src, $dst|$dst, $src}", []>;

def MMX_CVTPS2PIrm  : MMXI<0x2D, MRMSrcMem, (outs VR64:$dst), (ins f64mem:$src),
                           "cvtps2pi\t{$src, $dst|$dst, $src}", []>;

def MMX_CVTTPD2PIrr : MMX2I<0x2C, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
                            "cvttpd2pi\t{$src, $dst|$dst, $src}", []>;

def MMX_CVTTPD2PIrm : MMX2I<0x2C, MRMSrcMem, (outs VR64:$dst),
                            (ins f128mem:$src),
                            "cvttpd2pi\t{$src, $dst|$dst, $src}", []>;

def MMX_CVTTPS2PIrr : MMXI<0x2C, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
                           "cvttps2pi\t{$src, $dst|$dst, $src}", []>;

def MMX_CVTTPS2PIrm : MMXI<0x2C, MRMSrcMem, (outs VR64:$dst), (ins f64mem:$src),
                           "cvttps2pi\t{$src, $dst|$dst, $src}", []>;
} // end neverHasSideEffects
// Extract / Insert
def MMX_X86pextrw : SDNode<"X86ISD::PEXTRW", SDTypeProfile<1, 2, []>, []>;
def MMX_X86pinsrw : SDNode<"X86ISD::PINSRW", SDTypeProfile<1, 3, []>, []>;

def MMX_PEXTRWri  : MMXIi8<0xC5, MRMSrcReg,
                           (outs GR32:$dst), (ins VR64:$src1, i16i8imm:$src2),
                           "pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                           [(set GR32:$dst, (MMX_X86pextrw (v4i16 VR64:$src1),
                                             (iPTR imm:$src2)))]>;
let Constraints = "$src1 = $dst" in {
  def MMX_PINSRWrri : MMXIi8<0xC4, MRMSrcReg,
                             (outs VR64:$dst),
                             (ins VR64:$src1, GR32:$src2, i16i8imm:$src3),
                             "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                             [(set VR64:$dst, (v4i16 (MMX_X86pinsrw (v4i16 VR64:$src1),
                                               GR32:$src2, (iPTR imm:$src3))))]>;

  def MMX_PINSRWrmi : MMXIi8<0xC4, MRMSrcMem,
                             (outs VR64:$dst),
                             (ins VR64:$src1, i16mem:$src2, i16i8imm:$src3),
                             "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                             [(set VR64:$dst,
                               (v4i16 (MMX_X86pinsrw (v4i16 VR64:$src1),
                                       (i32 (anyext (loadi16 addr:$src2))),
                                       (iPTR imm:$src3))))]>;
}
// MMX to XMM for vector types
def MMX_X86movq2dq : SDNode<"X86ISD::MOVQ2DQ", SDTypeProfile<1, 1,
                            [SDTCisVT<0, v2i64>, SDTCisVT<1, v1i64>]>>;

def : Pat<(v2i64 (MMX_X86movq2dq VR64:$src)),
          (v2i64 (MMX_MOVQ2DQrr VR64:$src))>;

def : Pat<(v2i64 (MMX_X86movq2dq (load_mmx addr:$src))),
          (v2i64 (MOVQI2PQIrm addr:$src))>;

def : Pat<(v2i64 (MMX_X86movq2dq (v1i64 (bitconvert
                  (v2i32 (scalar_to_vector (loadi32 addr:$src))))))),
          (v2i64 (MOVDI2PDIrm addr:$src))>;
// Mask creation
def MMX_PMOVMSKBrr : MMXI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR64:$src),
                          "pmovmskb\t{$src, $dst|$dst, $src}",
                          [(set GR32:$dst, (int_x86_mmx_pmovmskb VR64:$src))]>;
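// PMOVMSKB packs the most-significant bit of each of the 8 bytes in $src into
// the low 8 bits of the 32-bit destination register.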
let Uses = [EDI] in
def MMX_MASKMOVQ : MMXI<0xF7, MRMSrcReg, (outs), (ins VR64:$src, VR64:$mask),
                        "maskmovq\t{$mask, $src|$src, $mask}",
                        [(int_x86_mmx_maskmovq VR64:$src, VR64:$mask, EDI)]>;
let Uses = [RDI] in
def MMX_MASKMOVQ64: MMXI64<0xF7, MRMSrcReg, (outs), (ins VR64:$src, VR64:$mask),
                           "maskmovq\t{$mask, $src|$src, $mask}",
                           [(int_x86_mmx_maskmovq VR64:$src, VR64:$mask, RDI)]>;
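// MASKMOVQ performs a byte-masked store of $src to [EDI] (or [RDI] in 64-bit
// mode); only bytes whose corresponding mask byte has its most-significant
// bit set are written, hence the implicit EDI/RDI use above.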
//===----------------------------------------------------------------------===//
// Alias Instructions
//===----------------------------------------------------------------------===//

// Alias instructions that map zero vector to pxor.
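// pxor of a register with itself yields all-zero bits, and pcmpeqd of a
// register with itself yields all-one bits, so neither constant needs a load
// from memory.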
let isReMaterializable = 1, isCodeGenOnly = 1 in {
  def MMX_V_SET0       : MMXI<0xEF, MRMInitReg, (outs VR64:$dst), (ins),
                              "pxor\t$dst, $dst",
                              [(set VR64:$dst, (v2i32 immAllZerosV))]>;
  def MMX_V_SETALLONES : MMXI<0x76, MRMInitReg, (outs VR64:$dst), (ins),
                              "pcmpeqd\t$dst, $dst",
                              [(set VR64:$dst, (v2i32 immAllOnesV))]>;
}
let Predicates = [HasMMX] in {
  def : Pat<(v1i64 immAllZerosV), (MMX_V_SET0)>;
  def : Pat<(v4i16 immAllZerosV), (MMX_V_SET0)>;
  def : Pat<(v8i8  immAllZerosV), (MMX_V_SET0)>;
}
//===----------------------------------------------------------------------===//
// Non-Instruction Patterns
//===----------------------------------------------------------------------===//

// Store 64-bit integer vector values.
def : Pat<(store (v8i8  VR64:$src), addr:$dst),
          (MMX_MOVQ64mr addr:$dst, VR64:$src)>;
def : Pat<(store (v4i16 VR64:$src), addr:$dst),
          (MMX_MOVQ64mr addr:$dst, VR64:$src)>;
def : Pat<(store (v2i32 VR64:$src), addr:$dst),
          (MMX_MOVQ64mr addr:$dst, VR64:$src)>;
def : Pat<(store (v2f32 VR64:$src), addr:$dst),
          (MMX_MOVQ64mr addr:$dst, VR64:$src)>;
def : Pat<(store (v1i64 VR64:$src), addr:$dst),
          (MMX_MOVQ64mr addr:$dst, VR64:$src)>;
def : Pat<(v8i8  (bitconvert (v1i64 VR64:$src))), (v8i8  VR64:$src)>;
def : Pat<(v8i8  (bitconvert (v2i32 VR64:$src))), (v8i8  VR64:$src)>;
def : Pat<(v8i8  (bitconvert (v2f32 VR64:$src))), (v8i8  VR64:$src)>;
def : Pat<(v8i8  (bitconvert (v4i16 VR64:$src))), (v8i8  VR64:$src)>;
def : Pat<(v4i16 (bitconvert (v1i64 VR64:$src))), (v4i16 VR64:$src)>;
def : Pat<(v4i16 (bitconvert (v2i32 VR64:$src))), (v4i16 VR64:$src)>;
def : Pat<(v4i16 (bitconvert (v2f32 VR64:$src))), (v4i16 VR64:$src)>;
def : Pat<(v4i16 (bitconvert (v8i8  VR64:$src))), (v4i16 VR64:$src)>;
def : Pat<(v2i32 (bitconvert (v1i64 VR64:$src))), (v2i32 VR64:$src)>;
def : Pat<(v2i32 (bitconvert (v2f32 VR64:$src))), (v2i32 VR64:$src)>;
def : Pat<(v2i32 (bitconvert (v4i16 VR64:$src))), (v2i32 VR64:$src)>;
def : Pat<(v2i32 (bitconvert (v8i8  VR64:$src))), (v2i32 VR64:$src)>;
def : Pat<(v2f32 (bitconvert (v1i64 VR64:$src))), (v2f32 VR64:$src)>;
def : Pat<(v2f32 (bitconvert (v2i32 VR64:$src))), (v2f32 VR64:$src)>;
def : Pat<(v2f32 (bitconvert (v4i16 VR64:$src))), (v2f32 VR64:$src)>;
def : Pat<(v2f32 (bitconvert (v8i8  VR64:$src))), (v2f32 VR64:$src)>;
def : Pat<(v1i64 (bitconvert (v2i32 VR64:$src))), (v1i64 VR64:$src)>;
def : Pat<(v1i64 (bitconvert (v2f32 VR64:$src))), (v1i64 VR64:$src)>;
def : Pat<(v1i64 (bitconvert (v4i16 VR64:$src))), (v1i64 VR64:$src)>;
def : Pat<(v1i64 (bitconvert (v8i8  VR64:$src))), (v1i64 VR64:$src)>;
// 64-bit bit convert.
def : Pat<(v1i64 (bitconvert (i64 GR64:$src))),
          (MMX_MOVD64to64rr GR64:$src)>;
def : Pat<(v2i32 (bitconvert (i64 GR64:$src))),
          (MMX_MOVD64to64rr GR64:$src)>;
def : Pat<(v2f32 (bitconvert (i64 GR64:$src))),
          (MMX_MOVD64to64rr GR64:$src)>;
def : Pat<(v4i16 (bitconvert (i64 GR64:$src))),
          (MMX_MOVD64to64rr GR64:$src)>;
def : Pat<(v8i8  (bitconvert (i64 GR64:$src))),
          (MMX_MOVD64to64rr GR64:$src)>;
def : Pat<(i64 (bitconvert (v1i64 VR64:$src))),
          (MMX_MOVD64from64rr VR64:$src)>;
def : Pat<(i64 (bitconvert (v2i32 VR64:$src))),
          (MMX_MOVD64from64rr VR64:$src)>;
def : Pat<(i64 (bitconvert (v2f32 VR64:$src))),
          (MMX_MOVD64from64rr VR64:$src)>;
def : Pat<(i64 (bitconvert (v4i16 VR64:$src))),
          (MMX_MOVD64from64rr VR64:$src)>;
def : Pat<(i64 (bitconvert (v8i8 VR64:$src))),
          (MMX_MOVD64from64rr VR64:$src)>;
def : Pat<(f64 (bitconvert (v1i64 VR64:$src))),
          (MMX_MOVQ2FR64rr VR64:$src)>;
def : Pat<(f64 (bitconvert (v2i32 VR64:$src))),
          (MMX_MOVQ2FR64rr VR64:$src)>;
def : Pat<(f64 (bitconvert (v4i16 VR64:$src))),
          (MMX_MOVQ2FR64rr VR64:$src)>;
def : Pat<(f64 (bitconvert (v8i8 VR64:$src))),
          (MMX_MOVQ2FR64rr VR64:$src)>;
let AddedComplexity = 20 in {
  def : Pat<(v2i32 (X86vzmovl (bc_v2i32 (load_mmx addr:$src)))),
            (MMX_MOVZDI2PDIrm addr:$src)>;
}

let AddedComplexity = 15 in {
  def : Pat<(v2i32 (X86vzmovl VR64:$src)),
            (MMX_PUNPCKLDQrr VR64:$src, (MMX_V_SET0))>;
}
// Patterns to perform canonical versions of vector shuffling.
let AddedComplexity = 10 in {
  def : Pat<(v8i8  (mmx_unpckl_undef VR64:$src, (undef))),
            (MMX_PUNPCKLBWrr VR64:$src, VR64:$src)>;
  def : Pat<(v4i16 (mmx_unpckl_undef VR64:$src, (undef))),
            (MMX_PUNPCKLWDrr VR64:$src, VR64:$src)>;
  def : Pat<(v2i32 (mmx_unpckl_undef VR64:$src, (undef))),
            (MMX_PUNPCKLDQrr VR64:$src, VR64:$src)>;
}

let AddedComplexity = 10 in {
  def : Pat<(v8i8  (mmx_unpckh_undef VR64:$src, (undef))),
            (MMX_PUNPCKHBWrr VR64:$src, VR64:$src)>;
  def : Pat<(v4i16 (mmx_unpckh_undef VR64:$src, (undef))),
            (MMX_PUNPCKHWDrr VR64:$src, VR64:$src)>;
  def : Pat<(v2i32 (mmx_unpckh_undef VR64:$src, (undef))),
            (MMX_PUNPCKHDQrr VR64:$src, VR64:$src)>;
}
// Patterns to perform vector shuffling with a zeroed out vector.
let AddedComplexity = 20 in {
  def : Pat<(bc_v2i32 (mmx_unpckl immAllZerosV,
                       (v2i32 (scalar_to_vector (load_mmx addr:$src))))),
            (MMX_PUNPCKLDQrm VR64:$src, VR64:$src)>;
}
// Some special case PANDN patterns.
// FIXME: Get rid of these.
def : Pat<(v1i64 (and (xor VR64:$src1, (bc_v1i64 (v2i32 immAllOnesV))),
                  VR64:$src2)),
          (MMX_PANDNrr VR64:$src1, VR64:$src2)>;
def : Pat<(v1i64 (and (xor VR64:$src1, (bc_v1i64 (v4i16 immAllOnesV_bc))),
                  VR64:$src2)),
          (MMX_PANDNrr VR64:$src1, VR64:$src2)>;
def : Pat<(v1i64 (and (xor VR64:$src1, (bc_v1i64 (v8i8 immAllOnesV_bc))),
                  VR64:$src2)),
          (MMX_PANDNrr VR64:$src1, VR64:$src2)>;

def : Pat<(v1i64 (and (xor VR64:$src1, (bc_v1i64 (v2i32 immAllOnesV))),
                  (load addr:$src2))),
          (MMX_PANDNrm VR64:$src1, addr:$src2)>;
def : Pat<(v1i64 (and (xor VR64:$src1, (bc_v1i64 (v4i16 immAllOnesV_bc))),
                  (load addr:$src2))),
          (MMX_PANDNrm VR64:$src1, addr:$src2)>;
def : Pat<(v1i64 (and (xor VR64:$src1, (bc_v1i64 (v8i8 immAllOnesV_bc))),
                  (load addr:$src2))),
          (MMX_PANDNrm VR64:$src1, addr:$src2)>;
// Move MMX to lower 64-bit of XMM
def : Pat<(v2i64 (scalar_to_vector (i64 (bitconvert (v8i8 VR64:$src))))),
          (v2i64 (MMX_MOVQ2DQrr VR64:$src))>;
def : Pat<(v2i64 (scalar_to_vector (i64 (bitconvert (v4i16 VR64:$src))))),
          (v2i64 (MMX_MOVQ2DQrr VR64:$src))>;
def : Pat<(v2i64 (scalar_to_vector (i64 (bitconvert (v2i32 VR64:$src))))),
          (v2i64 (MMX_MOVQ2DQrr VR64:$src))>;
def : Pat<(v2i64 (scalar_to_vector (i64 (bitconvert (v1i64 VR64:$src))))),
          (v2i64 (MMX_MOVQ2DQrr VR64:$src))>;
// Move lower 64-bit of XMM to MMX.
def : Pat<(v2i32 (bitconvert (i64 (vector_extract (v2i64 VR128:$src),
                                                  (iPTR 0))))),
          (v2i32 (MMX_MOVDQ2Qrr VR128:$src))>;
def : Pat<(v4i16 (bitconvert (i64 (vector_extract (v2i64 VR128:$src),
                                                  (iPTR 0))))),
          (v4i16 (MMX_MOVDQ2Qrr VR128:$src))>;
def : Pat<(v8i8 (bitconvert (i64 (vector_extract (v2i64 VR128:$src),
                                                 (iPTR 0))))),
          (v8i8 (MMX_MOVDQ2Qrr VR128:$src))>;
// Patterns for vector comparisons
def : Pat<(v8i8 (X86pcmpeqb VR64:$src1, VR64:$src2)),
          (MMX_PCMPEQBrr VR64:$src1, VR64:$src2)>;
def : Pat<(v8i8 (X86pcmpeqb VR64:$src1, (bitconvert (load_mmx addr:$src2)))),
          (MMX_PCMPEQBrm VR64:$src1, addr:$src2)>;
def : Pat<(v4i16 (X86pcmpeqw VR64:$src1, VR64:$src2)),
          (MMX_PCMPEQWrr VR64:$src1, VR64:$src2)>;
def : Pat<(v4i16 (X86pcmpeqw VR64:$src1, (bitconvert (load_mmx addr:$src2)))),
          (MMX_PCMPEQWrm VR64:$src1, addr:$src2)>;
def : Pat<(v2i32 (X86pcmpeqd VR64:$src1, VR64:$src2)),
          (MMX_PCMPEQDrr VR64:$src1, VR64:$src2)>;
def : Pat<(v2i32 (X86pcmpeqd VR64:$src1, (bitconvert (load_mmx addr:$src2)))),
          (MMX_PCMPEQDrm VR64:$src1, addr:$src2)>;

def : Pat<(v8i8 (X86pcmpgtb VR64:$src1, VR64:$src2)),
          (MMX_PCMPGTBrr VR64:$src1, VR64:$src2)>;
def : Pat<(v8i8 (X86pcmpgtb VR64:$src1, (bitconvert (load_mmx addr:$src2)))),
          (MMX_PCMPGTBrm VR64:$src1, addr:$src2)>;
def : Pat<(v4i16 (X86pcmpgtw VR64:$src1, VR64:$src2)),
          (MMX_PCMPGTWrr VR64:$src1, VR64:$src2)>;
def : Pat<(v4i16 (X86pcmpgtw VR64:$src1, (bitconvert (load_mmx addr:$src2)))),
          (MMX_PCMPGTWrm VR64:$src1, addr:$src2)>;
def : Pat<(v2i32 (X86pcmpgtd VR64:$src1, VR64:$src2)),
          (MMX_PCMPGTDrr VR64:$src1, VR64:$src2)>;
def : Pat<(v2i32 (X86pcmpgtd VR64:$src1, (bitconvert (load_mmx addr:$src2)))),
          (MMX_PCMPGTDrm VR64:$src1, addr:$src2)>;
// CMOV* - Used to implement the SELECT DAG operation. Expanded after
// instruction selection into a branch sequence.
let Uses = [EFLAGS], usesCustomInserter = 1 in {
  def CMOV_V1I64 : I<0, Pseudo,
                     (outs VR64:$dst), (ins VR64:$t, VR64:$f, i8imm:$cond),
                     "#CMOV_V1I64 PSEUDO!",
                     [(set VR64:$dst,
                       (v1i64 (X86cmov VR64:$t, VR64:$f, imm:$cond,
                                       EFLAGS)))]>;
}