//===-- X86InstrFragmentsSIMD.td - x86 SIMD ISA ------------*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file provides pattern fragments useful for SIMD instructions.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// MMX specific DAG Nodes.
//===----------------------------------------------------------------------===//

// Low word of MMX to GPR.
def MMX_X86movd2w : SDNode<"X86ISD::MMX_MOVD2W", SDTypeProfile<1, 1,
                            [SDTCisVT<0, i32>, SDTCisVT<1, x86mmx>]>>;
// GPR to low word of MMX.
def MMX_X86movw2d : SDNode<"X86ISD::MMX_MOVW2D", SDTypeProfile<1, 1,
                            [SDTCisVT<0, x86mmx>, SDTCisVT<1, i32>]>>;

//===----------------------------------------------------------------------===//
// MMX Pattern Fragments
//===----------------------------------------------------------------------===//

def load_mmx : PatFrag<(ops node:$ptr), (x86mmx (load node:$ptr))>;
def load_mvmmx : PatFrag<(ops node:$ptr),
                         (x86mmx (MMX_X86movw2d (load node:$ptr)))>;
def bc_mmx : PatFrag<(ops node:$in), (x86mmx (bitconvert node:$in))>;
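
// Illustrative sketch (not part of the original file): these fragments are
// meant to appear as the memory operand of MMX load patterns, e.g. something
// of the shape
//   [(set VR64:$dst, (load_mmx addr:$src))]
// while bc_mmx wraps a bitconvert so other vector types can feed
// x86mmx-typed operands.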

//===----------------------------------------------------------------------===//
// SSE specific DAG Nodes.
//===----------------------------------------------------------------------===//

def SDTX86FPShiftOp : SDTypeProfile<1, 2, [ SDTCisSameAs<0, 1>,
                                            SDTCisFP<0>, SDTCisInt<2> ]>;
def SDTX86VFCMP : SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisSameAs<1, 2>,
                                       SDTCisFP<1>, SDTCisVT<3, i8>,
                                       SDTCisVec<1>]>;

def X86umin : SDNode<"X86ISD::UMIN", SDTIntBinOp>;
def X86umax : SDNode<"X86ISD::UMAX", SDTIntBinOp>;
def X86smin : SDNode<"X86ISD::SMIN", SDTIntBinOp>;
def X86smax : SDNode<"X86ISD::SMAX", SDTIntBinOp>;

def X86fmin : SDNode<"X86ISD::FMIN", SDTFPBinOp>;
def X86fmax : SDNode<"X86ISD::FMAX", SDTFPBinOp>;

// Commutative and Associative FMIN and FMAX.
def X86fminc : SDNode<"X86ISD::FMINC", SDTFPBinOp,
                      [SDNPCommutative, SDNPAssociative]>;
def X86fmaxc : SDNode<"X86ISD::FMAXC", SDTFPBinOp,
                      [SDNPCommutative, SDNPAssociative]>;

def X86fand : SDNode<"X86ISD::FAND", SDTFPBinOp,
                     [SDNPCommutative, SDNPAssociative]>;
def X86for : SDNode<"X86ISD::FOR", SDTFPBinOp,
                    [SDNPCommutative, SDNPAssociative]>;
def X86fxor : SDNode<"X86ISD::FXOR", SDTFPBinOp,
                     [SDNPCommutative, SDNPAssociative]>;
def X86fandn : SDNode<"X86ISD::FANDN", SDTFPBinOp,
                      [SDNPCommutative, SDNPAssociative]>;
def X86frsqrt : SDNode<"X86ISD::FRSQRT", SDTFPUnaryOp>;
def X86frcp : SDNode<"X86ISD::FRCP", SDTFPUnaryOp>;
def X86fsrl : SDNode<"X86ISD::FSRL", SDTX86FPShiftOp>;
def X86fgetsign : SDNode<"X86ISD::FGETSIGNx86", SDTFPToIntOp>;
def X86fhadd : SDNode<"X86ISD::FHADD", SDTFPBinOp>;
def X86fhsub : SDNode<"X86ISD::FHSUB", SDTFPBinOp>;
def X86hadd : SDNode<"X86ISD::HADD", SDTIntBinOp>;
def X86hsub : SDNode<"X86ISD::HSUB", SDTIntBinOp>;
def X86comi : SDNode<"X86ISD::COMI", SDTX86CmpTest>;
def X86ucomi : SDNode<"X86ISD::UCOMI", SDTX86CmpTest>;
def X86cmps : SDNode<"X86ISD::FSETCC", SDTX86Cmps>;
//def X86cmpsd : SDNode<"X86ISD::FSETCCsd", SDTX86Cmpsd>;
def X86pshufb : SDNode<"X86ISD::PSHUFB",
                 SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                      SDTCisSameAs<0,2>]>>;
def X86psadbw : SDNode<"X86ISD::PSADBW",
                 SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                      SDTCisSameAs<0,2>]>>;
def X86andnp : SDNode<"X86ISD::ANDNP",
                 SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                      SDTCisSameAs<0,2>]>>;
def X86psign : SDNode<"X86ISD::PSIGN",
                 SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                      SDTCisSameAs<0,2>]>>;
def X86pextrb : SDNode<"X86ISD::PEXTRB",
                 SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<2>]>>;
def X86pextrw : SDNode<"X86ISD::PEXTRW",
                 SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<2>]>>;
def X86pinsrb : SDNode<"X86ISD::PINSRB",
                 SDTypeProfile<1, 3, [SDTCisVT<0, v16i8>, SDTCisSameAs<0,1>,
                                      SDTCisVT<2, i32>, SDTCisPtrTy<3>]>>;
def X86pinsrw : SDNode<"X86ISD::PINSRW",
                 SDTypeProfile<1, 3, [SDTCisVT<0, v8i16>, SDTCisSameAs<0,1>,
                                      SDTCisVT<2, i32>, SDTCisPtrTy<3>]>>;
def X86insertps : SDNode<"X86ISD::INSERTPS",
                 SDTypeProfile<1, 3, [SDTCisVT<0, v4f32>, SDTCisSameAs<0,1>,
                                      SDTCisVT<2, v4f32>, SDTCisVT<3, i8>]>>;
def X86vzmovl : SDNode<"X86ISD::VZEXT_MOVL",
                 SDTypeProfile<1, 1, [SDTCisSameAs<0,1>]>>;

def X86vzload : SDNode<"X86ISD::VZEXT_LOAD", SDTLoad,
                       [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;

def X86vzext : SDNode<"X86ISD::VZEXT",
                      SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>,
                                           SDTCisInt<0>, SDTCisInt<1>,
                                           SDTCisOpSmallerThanOp<1, 0>]>>;

def X86vsext : SDNode<"X86ISD::VSEXT",
                      SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>,
                                           SDTCisInt<0>, SDTCisInt<1>,
                                           SDTCisOpSmallerThanOp<1, 0>]>>;

def X86vtrunc : SDNode<"X86ISD::VTRUNC",
                       SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>,
                                            SDTCisInt<0>, SDTCisInt<1>,
                                            SDTCisOpSmallerThanOp<0, 1>]>>;
def X86trunc : SDNode<"X86ISD::TRUNC",
                      SDTypeProfile<1, 1, [SDTCisInt<0>, SDTCisInt<1>,
                                           SDTCisOpSmallerThanOp<0, 1>]>>;

def X86vtruncm : SDNode<"X86ISD::VTRUNCM",
                        SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>,
                                             SDTCisInt<0>, SDTCisInt<1>,
                                             SDTCisVec<2>, SDTCisInt<2>,
                                             SDTCisOpSmallerThanOp<0, 2>]>>;
def X86vfpext : SDNode<"X86ISD::VFPEXT",
                       SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>,
                                            SDTCisFP<0>, SDTCisFP<1>,
                                            SDTCisOpSmallerThanOp<1, 0>]>>;
def X86vfpround : SDNode<"X86ISD::VFPROUND",
                         SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>,
                                              SDTCisFP<0>, SDTCisFP<1>,
                                              SDTCisOpSmallerThanOp<0, 1>]>>;
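
// Illustrative note (not in the original file): these profiles only pin down
// the integer/FP vector relationship between result and source, e.g. a use
// such as (v8i32 (X86vzext (v8i16 VR128:$src))) satisfies VZEXT's constraint
// that the source element type is smaller than the result element type.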

def X86vshldq : SDNode<"X86ISD::VSHLDQ", SDTIntShiftOp>;
def X86vshrdq : SDNode<"X86ISD::VSRLDQ", SDTIntShiftOp>;
def X86cmpp : SDNode<"X86ISD::CMPP", SDTX86VFCMP>;
def X86pcmpeq : SDNode<"X86ISD::PCMPEQ", SDTIntBinOp, [SDNPCommutative]>;
def X86pcmpgt : SDNode<"X86ISD::PCMPGT", SDTIntBinOp>;

def X86IntCmpMask : SDTypeProfile<1, 2,
    [SDTCisVec<0>, SDTCisSameAs<1, 2>, SDTCisInt<1>]>;
def X86pcmpeqm : SDNode<"X86ISD::PCMPEQM", X86IntCmpMask, [SDNPCommutative]>;
def X86pcmpgtm : SDNode<"X86ISD::PCMPGTM", X86IntCmpMask>;

def X86CmpMaskCC :
      SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCVecEltisVT<0, i1>,
                           SDTCisVec<1>, SDTCisSameAs<2, 1>,
                           SDTCisSameNumEltsAs<0, 1>, SDTCisVT<3, i8>]>;
def X86CmpMaskCCRound :
      SDTypeProfile<1, 4, [SDTCisVec<0>, SDTCVecEltisVT<0, i1>,
                           SDTCisVec<1>, SDTCisSameAs<2, 1>,
                           SDTCisSameNumEltsAs<0, 1>, SDTCisVT<3, i8>,
                           SDTCisInt<4>]>;
def X86CmpMaskCCScalar :
      SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisSameAs<1, 2>, SDTCisVT<3, i8>]>;

def X86cmpm : SDNode<"X86ISD::CMPM", X86CmpMaskCC>;
def X86cmpmRnd : SDNode<"X86ISD::CMPM_RND", X86CmpMaskCCRound>;
def X86cmpmu : SDNode<"X86ISD::CMPMU", X86CmpMaskCC>;
def X86cmpms : SDNode<"X86ISD::FSETCC", X86CmpMaskCCScalar>;

def X86vshl : SDNode<"X86ISD::VSHL",
                 SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                      SDTCisVec<2>]>>;
def X86vsrl : SDNode<"X86ISD::VSRL",
                 SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                      SDTCisVec<2>]>>;
def X86vsra : SDNode<"X86ISD::VSRA",
                 SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                      SDTCisVec<2>]>>;

def X86vshli : SDNode<"X86ISD::VSHLI", SDTIntShiftOp>;
def X86vsrli : SDNode<"X86ISD::VSRLI", SDTIntShiftOp>;
def X86vsrai : SDNode<"X86ISD::VSRAI", SDTIntShiftOp>;

def SDTX86CmpPTest : SDTypeProfile<1, 2, [SDTCisVT<0, i32>,
                                          SDTCisVec<1>,
                                          SDTCisSameAs<2, 1>]>;
def X86addus : SDNode<"X86ISD::ADDUS", SDTIntBinOp>;
def X86subus : SDNode<"X86ISD::SUBUS", SDTIntBinOp>;
def X86adds : SDNode<"X86ISD::ADDS", SDTIntBinOp>;
def X86subs : SDNode<"X86ISD::SUBS", SDTIntBinOp>;
def X86ptest : SDNode<"X86ISD::PTEST", SDTX86CmpPTest>;
def X86testp : SDNode<"X86ISD::TESTP", SDTX86CmpPTest>;
def X86kortest : SDNode<"X86ISD::KORTEST", SDTX86CmpPTest>;
def X86testm : SDNode<"X86ISD::TESTM", SDTypeProfile<1, 2, [SDTCisVec<0>,
                                          SDTCisVec<1>, SDTCisSameAs<2, 1>,
                                          SDTCVecEltisVT<0, i1>,
                                          SDTCisSameNumEltsAs<0, 1>]>>;
def X86testnm : SDNode<"X86ISD::TESTNM", SDTypeProfile<1, 2, [SDTCisVec<0>,
                                          SDTCisVec<1>, SDTCisSameAs<2, 1>,
                                          SDTCVecEltisVT<0, i1>,
                                          SDTCisSameNumEltsAs<0, 1>]>>;
def X86select : SDNode<"X86ISD::SELECT", SDTSelect>;

def X86pmuludq : SDNode<"X86ISD::PMULUDQ",
                        SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>,
                                             SDTCisSameAs<1,2>]>>;
def X86pmuldq : SDNode<"X86ISD::PMULDQ",
                       SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>,
                                            SDTCisSameAs<1,2>]>>;

// Specific shuffle nodes - At some point ISD::VECTOR_SHUFFLE will always get
// translated into one of the target nodes below during lowering.
// Note: this is a work in progress...
def SDTShuff1Op : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisSameAs<0,1>]>;
def SDTShuff2Op : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                       SDTCisSameAs<0,2>]>;
def SDTShuff3Op : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                       SDTCisSameAs<0,2>, SDTCisSameAs<0,3>]>;

def SDTShuff2OpM : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                        SDTCisVec<2>]>;
def SDTShuff2OpI : SDTypeProfile<1, 2, [SDTCisVec<0>,
                                        SDTCisSameAs<0,1>, SDTCisInt<2>]>;
def SDTShuff3OpI : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                        SDTCisSameAs<0,2>, SDTCisInt<3>]>;

def SDTVBroadcast : SDTypeProfile<1, 1, [SDTCisVec<0>]>;
def SDTVBroadcastm : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>]>;

def SDTBlend : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                    SDTCisSameAs<1,2>, SDTCisVT<3, i8>]>;

def SDTFPBinOpRound : SDTypeProfile<1, 3, [  // fadd_round, fmul_round, etc.
  SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisFP<0>, SDTCisInt<3>]>;

def SDTFma : SDTypeProfile<1, 3, [SDTCisSameAs<0,1>,
                                  SDTCisSameAs<1,2>, SDTCisSameAs<1,3>]>;
def SDTFmaRound : SDTypeProfile<1, 4, [SDTCisSameAs<0,1>,
                                       SDTCisSameAs<1,2>, SDTCisSameAs<1,3>,
                                       SDTCisInt<4>]>;
def STDFp1SrcRm : SDTypeProfile<1, 2, [SDTCisSameAs<0,1>,
                                       SDTCisVec<0>, SDTCisInt<2>]>;
def STDFp2SrcRm : SDTypeProfile<1, 3, [SDTCisSameAs<0,1>,
                                       SDTCisVec<0>, SDTCisInt<3>]>;
def STDFp3SrcRm : SDTypeProfile<1, 4, [SDTCisSameAs<0,1>,
                                       SDTCisVec<0>, SDTCisInt<3>, SDTCisInt<4>]>;

def X86PAlignr : SDNode<"X86ISD::PALIGNR", SDTShuff3OpI>;
def X86VAlign : SDNode<"X86ISD::VALIGN", SDTShuff3OpI>;

def X86PShufd : SDNode<"X86ISD::PSHUFD", SDTShuff2OpI>;
def X86PShufhw : SDNode<"X86ISD::PSHUFHW", SDTShuff2OpI>;
def X86PShuflw : SDNode<"X86ISD::PSHUFLW", SDTShuff2OpI>;

def X86Shufp : SDNode<"X86ISD::SHUFP", SDTShuff3OpI>;

def X86Movddup : SDNode<"X86ISD::MOVDDUP", SDTShuff1Op>;
def X86Movshdup : SDNode<"X86ISD::MOVSHDUP", SDTShuff1Op>;
def X86Movsldup : SDNode<"X86ISD::MOVSLDUP", SDTShuff1Op>;

def X86Movsd : SDNode<"X86ISD::MOVSD", SDTShuff2Op>;
def X86Movss : SDNode<"X86ISD::MOVSS", SDTShuff2Op>;

def X86Movlhps : SDNode<"X86ISD::MOVLHPS", SDTShuff2Op>;
def X86Movlhpd : SDNode<"X86ISD::MOVLHPD", SDTShuff2Op>;
def X86Movhlps : SDNode<"X86ISD::MOVHLPS", SDTShuff2Op>;

def X86Movlps : SDNode<"X86ISD::MOVLPS", SDTShuff2Op>;
def X86Movlpd : SDNode<"X86ISD::MOVLPD", SDTShuff2Op>;

def SDTPack : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>,
                                   SDTCisSameAs<2, 1>]>;
def X86Packss : SDNode<"X86ISD::PACKSS", SDTPack>;
def X86Packus : SDNode<"X86ISD::PACKUS", SDTPack>;

def X86Unpckl : SDNode<"X86ISD::UNPCKL", SDTShuff2Op>;
def X86Unpckh : SDNode<"X86ISD::UNPCKH", SDTShuff2Op>;

def X86VPermilpv : SDNode<"X86ISD::VPERMILPV", SDTShuff2OpM>;
def X86VPermilpi : SDNode<"X86ISD::VPERMILPI", SDTShuff2OpI>;
def X86VPermv : SDNode<"X86ISD::VPERMV", SDTShuff2Op>;
def X86VPermi : SDNode<"X86ISD::VPERMI", SDTShuff2OpI>;
def X86VPermv3 : SDNode<"X86ISD::VPERMV3", SDTShuff3Op>;
def X86VPermiv3 : SDNode<"X86ISD::VPERMIV3", SDTShuff3Op>;

def X86VPerm2x128 : SDNode<"X86ISD::VPERM2X128", SDTShuff3OpI>;

def X86SubVBroadcast : SDNode<"X86ISD::SUBV_BROADCAST",
                              SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>,
                                                   SDTCisSubVecOfVec<1, 0>]>, []>;
def X86VBroadcast : SDNode<"X86ISD::VBROADCAST", SDTVBroadcast>;
def X86Vinsert : SDNode<"X86ISD::VINSERT", SDTypeProfile<1, 3,
                        [SDTCisSameAs<0, 1>, SDTCisPtrTy<3>]>, []>;
def X86Vextract : SDNode<"X86ISD::VEXTRACT", SDTypeProfile<1, 2,
                         [SDTCisVec<1>, SDTCisPtrTy<2>]>, []>;

def X86Blendi : SDNode<"X86ISD::BLENDI", SDTBlend>;

def X86Addsub : SDNode<"X86ISD::ADDSUB", SDTFPBinOp>;

def X86faddRnd : SDNode<"X86ISD::FADD_RND", SDTFPBinOpRound>;
def X86fsubRnd : SDNode<"X86ISD::FSUB_RND", SDTFPBinOpRound>;
def X86fmulRnd : SDNode<"X86ISD::FMUL_RND", SDTFPBinOpRound>;
def X86fdivRnd : SDNode<"X86ISD::FDIV_RND", SDTFPBinOpRound>;
def X86fmaxRnd : SDNode<"X86ISD::FMAX_RND", SDTFPBinOpRound>;
def X86fminRnd : SDNode<"X86ISD::FMIN_RND", SDTFPBinOpRound>;

def X86Fmadd : SDNode<"X86ISD::FMADD", SDTFma>;
def X86Fnmadd : SDNode<"X86ISD::FNMADD", SDTFma>;
def X86Fmsub : SDNode<"X86ISD::FMSUB", SDTFma>;
def X86Fnmsub : SDNode<"X86ISD::FNMSUB", SDTFma>;
def X86Fmaddsub : SDNode<"X86ISD::FMADDSUB", SDTFma>;
def X86Fmsubadd : SDNode<"X86ISD::FMSUBADD", SDTFma>;

def X86FmaddRnd : SDNode<"X86ISD::FMADD_RND", SDTFmaRound>;
def X86FnmaddRnd : SDNode<"X86ISD::FNMADD_RND", SDTFmaRound>;
def X86FmsubRnd : SDNode<"X86ISD::FMSUB_RND", SDTFmaRound>;
def X86FnmsubRnd : SDNode<"X86ISD::FNMSUB_RND", SDTFmaRound>;
def X86FmaddsubRnd : SDNode<"X86ISD::FMADDSUB_RND", SDTFmaRound>;
def X86FmsubaddRnd : SDNode<"X86ISD::FMSUBADD_RND", SDTFmaRound>;

def X86rsqrt28 : SDNode<"X86ISD::RSQRT28", STDFp1SrcRm>;
def X86rcp28 : SDNode<"X86ISD::RCP28", STDFp1SrcRm>;
def X86exp2 : SDNode<"X86ISD::EXP2", STDFp1SrcRm>;

def X86rsqrt28s : SDNode<"X86ISD::RSQRT28", STDFp2SrcRm>;
def X86rcp28s : SDNode<"X86ISD::RCP28", STDFp2SrcRm>;
def X86RndScale : SDNode<"X86ISD::RNDSCALE", STDFp3SrcRm>;

def SDT_PCMPISTRI : SDTypeProfile<2, 3, [SDTCisVT<0, i32>, SDTCisVT<1, i32>,
                                         SDTCisVT<2, v16i8>, SDTCisVT<3, v16i8>,
                                         SDTCisVT<4, i8>]>;
def SDT_PCMPESTRI : SDTypeProfile<2, 5, [SDTCisVT<0, i32>, SDTCisVT<1, i32>,
                                         SDTCisVT<2, v16i8>, SDTCisVT<3, i32>,
                                         SDTCisVT<4, v16i8>, SDTCisVT<5, i32>,
                                         SDTCisVT<6, i8>]>;

def X86pcmpistri : SDNode<"X86ISD::PCMPISTRI", SDT_PCMPISTRI>;
def X86pcmpestri : SDNode<"X86ISD::PCMPESTRI", SDT_PCMPESTRI>;

def X86compress : SDNode<"X86ISD::COMPRESS", SDTypeProfile<1, 3,
                              [SDTCisSameAs<0, 2>, SDTCisSameAs<0, 3>,
                               SDTCisVec<3>, SDTCisVec<1>, SDTCisInt<1>]>, []>;
def X86expand : SDNode<"X86ISD::EXPAND", SDTypeProfile<1, 3,
                              [SDTCisSameAs<0, 2>, SDTCisSameAs<0, 3>,
                               SDTCisVec<3>, SDTCisVec<1>, SDTCisInt<1>]>, []>;

//===----------------------------------------------------------------------===//
// SSE Complex Patterns
//===----------------------------------------------------------------------===//

// These are 'extloads' from a scalar to the low element of a vector, zeroing
// the top elements. These are used for the SSE 'ss' and 'sd' instruction
// fragments.
def sse_load_f32 : ComplexPattern<v4f32, 5, "SelectScalarSSELoad", [],
                                  [SDNPHasChain, SDNPMayLoad, SDNPMemOperand,
                                   SDNPWantRoot]>;
def sse_load_f64 : ComplexPattern<v2f64, 5, "SelectScalarSSELoad", [],
                                  [SDNPHasChain, SDNPMayLoad, SDNPMemOperand,
                                   SDNPWantRoot]>;
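
// Illustrative sketch (assumed usage, not from this file): together with the
// ssmem/sdmem operands below, these complex patterns form the memory operand
// of scalar intrinsic patterns, e.g. roughly
//   [(set VR128:$dst, (int_x86_sse_min_ss VR128:$src1, sse_load_f32:$src2))]
// folding either a scalar load or a whole 128-bit load whose upper elements
// are unused.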

def ssmem : Operand<v4f32> {
  let PrintMethod = "printf32mem";
  let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, i8imm);
  let ParserMatchClass = X86Mem32AsmOperand;
  let OperandType = "OPERAND_MEMORY";
}
def sdmem : Operand<v2f64> {
  let PrintMethod = "printf64mem";
  let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, i8imm);
  let ParserMatchClass = X86Mem64AsmOperand;
  let OperandType = "OPERAND_MEMORY";
}

//===----------------------------------------------------------------------===//
// SSE pattern fragments
//===----------------------------------------------------------------------===//

// 128-bit load pattern fragments
// NOTE: all 128-bit integer vector loads are promoted to v2i64
def loadv4f32 : PatFrag<(ops node:$ptr), (v4f32 (load node:$ptr))>;
def loadv2f64 : PatFrag<(ops node:$ptr), (v2f64 (load node:$ptr))>;
def loadv2i64 : PatFrag<(ops node:$ptr), (v2i64 (load node:$ptr))>;

// 256-bit load pattern fragments
// NOTE: all 256-bit integer vector loads are promoted to v4i64
def loadv8f32 : PatFrag<(ops node:$ptr), (v8f32 (load node:$ptr))>;
def loadv4f64 : PatFrag<(ops node:$ptr), (v4f64 (load node:$ptr))>;
def loadv4i64 : PatFrag<(ops node:$ptr), (v4i64 (load node:$ptr))>;

// 512-bit load pattern fragments
def loadv16f32 : PatFrag<(ops node:$ptr), (v16f32 (load node:$ptr))>;
def loadv8f64 : PatFrag<(ops node:$ptr), (v8f64 (load node:$ptr))>;
def loadv64i8 : PatFrag<(ops node:$ptr), (v64i8 (load node:$ptr))>;
def loadv32i16 : PatFrag<(ops node:$ptr), (v32i16 (load node:$ptr))>;
def loadv16i32 : PatFrag<(ops node:$ptr), (v16i32 (load node:$ptr))>;
def loadv8i64 : PatFrag<(ops node:$ptr), (v8i64 (load node:$ptr))>;

// 128-/256-/512-bit extload pattern fragments
def extloadv2f32 : PatFrag<(ops node:$ptr), (v2f64 (extloadvf32 node:$ptr))>;
def extloadv4f32 : PatFrag<(ops node:$ptr), (v4f64 (extloadvf32 node:$ptr))>;
def extloadv8f32 : PatFrag<(ops node:$ptr), (v8f64 (extloadvf32 node:$ptr))>;

// These are needed to match a scalar load that is used in a vector-only
// math instruction such as the FP logical ops: andps, andnps, orps, xorps.
// The memory operand is required to be a 128-bit load, so it must be converted
// from a vector to a scalar.
def loadf32_128 : PatFrag<(ops node:$ptr),
  (f32 (vector_extract (loadv4f32 node:$ptr), (iPTR 0)))>;
def loadf64_128 : PatFrag<(ops node:$ptr),
  (f64 (vector_extract (loadv2f64 node:$ptr), (iPTR 0)))>;
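
// Illustrative sketch (assumed usage): loadf32_128 lets a scalar pattern
// consume a full 16-byte load, e.g. roughly
//   [(set FR32:$dst, (X86fand FR32:$src1, (loadf32_128 addr:$src2)))]
// so the 128-bit memory operand of an ANDPS-style instruction can still be
// folded when only the low scalar element matters.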

// Like 'store', but always requires 128-bit vector alignment.
def alignedstore : PatFrag<(ops node:$val, node:$ptr),
                           (store node:$val, node:$ptr), [{
  return cast<StoreSDNode>(N)->getAlignment() >= 16;
}]>;

// Like 'store', but always requires 256-bit vector alignment.
def alignedstore256 : PatFrag<(ops node:$val, node:$ptr),
                              (store node:$val, node:$ptr), [{
  return cast<StoreSDNode>(N)->getAlignment() >= 32;
}]>;

// Like 'store', but always requires 512-bit vector alignment.
def alignedstore512 : PatFrag<(ops node:$val, node:$ptr),
                              (store node:$val, node:$ptr), [{
  return cast<StoreSDNode>(N)->getAlignment() >= 64;
}]>;

// Like 'load', but always requires 128-bit vector alignment.
def alignedload : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return cast<LoadSDNode>(N)->getAlignment() >= 16;
}]>;

// Like 'X86vzload', but always requires 128-bit vector alignment.
def alignedX86vzload : PatFrag<(ops node:$ptr), (X86vzload node:$ptr), [{
  return cast<MemSDNode>(N)->getAlignment() >= 16;
}]>;

// Like 'load', but always requires 256-bit vector alignment.
def alignedload256 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return cast<LoadSDNode>(N)->getAlignment() >= 32;
}]>;

// Like 'load', but always requires 512-bit vector alignment.
def alignedload512 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return cast<LoadSDNode>(N)->getAlignment() >= 64;
}]>;

def alignedloadfsf32 : PatFrag<(ops node:$ptr),
                               (f32 (alignedload node:$ptr))>;
def alignedloadfsf64 : PatFrag<(ops node:$ptr),
                               (f64 (alignedload node:$ptr))>;

// 128-bit aligned load pattern fragments
// NOTE: all 128-bit integer vector loads are promoted to v2i64
def alignedloadv4f32 : PatFrag<(ops node:$ptr),
                               (v4f32 (alignedload node:$ptr))>;
def alignedloadv2f64 : PatFrag<(ops node:$ptr),
                               (v2f64 (alignedload node:$ptr))>;
def alignedloadv2i64 : PatFrag<(ops node:$ptr),
                               (v2i64 (alignedload node:$ptr))>;

// 256-bit aligned load pattern fragments
// NOTE: all 256-bit integer vector loads are promoted to v4i64
def alignedloadv8f32 : PatFrag<(ops node:$ptr),
                               (v8f32 (alignedload256 node:$ptr))>;
def alignedloadv4f64 : PatFrag<(ops node:$ptr),
                               (v4f64 (alignedload256 node:$ptr))>;
def alignedloadv4i64 : PatFrag<(ops node:$ptr),
                               (v4i64 (alignedload256 node:$ptr))>;

// 512-bit aligned load pattern fragments
def alignedloadv16f32 : PatFrag<(ops node:$ptr),
                                (v16f32 (alignedload512 node:$ptr))>;
def alignedloadv16i32 : PatFrag<(ops node:$ptr),
                                (v16i32 (alignedload512 node:$ptr))>;
def alignedloadv8f64 : PatFrag<(ops node:$ptr),
                               (v8f64 (alignedload512 node:$ptr))>;
def alignedloadv8i64 : PatFrag<(ops node:$ptr),
                               (v8i64 (alignedload512 node:$ptr))>;

// Like 'load', but uses special alignment checks suitable for use in
// memory operands in most SSE instructions, which are required to
// be naturally aligned on some targets but not on others. If the subtarget
// allows unaligned accesses, match any load, though this may require
// setting a feature bit in the processor (on startup, for example).
// Opteron 10h and later implement such a feature.
def memop : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return Subtarget->hasSSEUnalignedMem()
      || cast<LoadSDNode>(N)->getAlignment() >= 16;
}]>;
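
// Illustrative sketch (assumed usage): legacy-SSE patterns fold their memory
// operand through memop* fragments, e.g. roughly
//   [(set VR128:$dst, (fmul VR128:$src1, (memopv4f32 addr:$src2)))]
// so the fold only fires when the load is 16-byte aligned or the subtarget
// reports fast unaligned SSE memory; AVX forms use the plain loadv* fragments
// above instead, since VEX-encoded memory operands need not be aligned.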

def memopfsf32 : PatFrag<(ops node:$ptr), (f32 (memop node:$ptr))>;
def memopfsf64 : PatFrag<(ops node:$ptr), (f64 (memop node:$ptr))>;

// 128-bit memop pattern fragments
// NOTE: all 128-bit integer vector loads are promoted to v2i64
def memopv4f32 : PatFrag<(ops node:$ptr), (v4f32 (memop node:$ptr))>;
def memopv2f64 : PatFrag<(ops node:$ptr), (v2f64 (memop node:$ptr))>;
def memopv2i64 : PatFrag<(ops node:$ptr), (v2i64 (memop node:$ptr))>;

// These are needed to match a scalar memop that is used in a vector-only
// math instruction such as the FP logical ops: andps, andnps, orps, xorps.
// The memory operand is required to be a 128-bit load, so it must be converted
// from a vector to a scalar.
def memopfsf32_128 : PatFrag<(ops node:$ptr),
  (f32 (vector_extract (memopv4f32 node:$ptr), (iPTR 0)))>;
def memopfsf64_128 : PatFrag<(ops node:$ptr),
  (f64 (vector_extract (memopv2f64 node:$ptr), (iPTR 0)))>;

// SSSE3 uses MMX registers for some instructions. They aren't aligned on a
// 16-byte boundary.
// FIXME: 8 byte alignment for mmx reads is not required
def memop64 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return cast<LoadSDNode>(N)->getAlignment() >= 8;
}]>;

def memopmmx : PatFrag<(ops node:$ptr), (x86mmx (memop64 node:$ptr))>;

// Like 'store', but requires the non-temporal bit to be set.
def nontemporalstore : PatFrag<(ops node:$val, node:$ptr),
                               (st node:$val, node:$ptr), [{
  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
    return ST->isNonTemporal();
  return false;
}]>;

def alignednontemporalstore : PatFrag<(ops node:$val, node:$ptr),
                                      (st node:$val, node:$ptr), [{
  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
    return ST->isNonTemporal() && !ST->isTruncatingStore() &&
           ST->getAddressingMode() == ISD::UNINDEXED &&
           ST->getAlignment() >= 16;
  return false;
}]>;

def unalignednontemporalstore : PatFrag<(ops node:$val, node:$ptr),
                                        (st node:$val, node:$ptr), [{
  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
    return ST->isNonTemporal() &&
           ST->getAlignment() < 16;
  return false;
}]>;

def mgatherv8i32 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                           (masked_gather node:$src1, node:$src2, node:$src3), [{
  if (MaskedGatherSDNode *Mgt = dyn_cast<MaskedGatherSDNode>(N))
    return (Mgt->getIndex().getValueType() == MVT::v8i32 ||
            Mgt->getBasePtr().getValueType() == MVT::v8i32);
  return false;
}]>;

def mgatherv8i64 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                           (masked_gather node:$src1, node:$src2, node:$src3), [{
  if (MaskedGatherSDNode *Mgt = dyn_cast<MaskedGatherSDNode>(N))
    return (Mgt->getIndex().getValueType() == MVT::v8i64 ||
            Mgt->getBasePtr().getValueType() == MVT::v8i64);
  return false;
}]>;

def mgatherv16i32 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                            (masked_gather node:$src1, node:$src2, node:$src3), [{
  if (MaskedGatherSDNode *Mgt = dyn_cast<MaskedGatherSDNode>(N))
    return (Mgt->getIndex().getValueType() == MVT::v16i32 ||
            Mgt->getBasePtr().getValueType() == MVT::v16i32);
  return false;
}]>;

def mscatterv8i32 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                            (masked_scatter node:$src1, node:$src2, node:$src3), [{
  if (MaskedScatterSDNode *Sc = dyn_cast<MaskedScatterSDNode>(N))
    return (Sc->getIndex().getValueType() == MVT::v8i32 ||
            Sc->getBasePtr().getValueType() == MVT::v8i32);
  return false;
}]>;

def mscatterv8i64 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                            (masked_scatter node:$src1, node:$src2, node:$src3), [{
  if (MaskedScatterSDNode *Sc = dyn_cast<MaskedScatterSDNode>(N))
    return (Sc->getIndex().getValueType() == MVT::v8i64 ||
            Sc->getBasePtr().getValueType() == MVT::v8i64);
  return false;
}]>;

def mscatterv16i32 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                             (masked_scatter node:$src1, node:$src2, node:$src3), [{
  if (MaskedScatterSDNode *Sc = dyn_cast<MaskedScatterSDNode>(N))
    return (Sc->getIndex().getValueType() == MVT::v16i32 ||
            Sc->getBasePtr().getValueType() == MVT::v16i32);
  return false;
}]>;

// 128-bit bitconvert pattern fragments
def bc_v4f32 : PatFrag<(ops node:$in), (v4f32 (bitconvert node:$in))>;
def bc_v2f64 : PatFrag<(ops node:$in), (v2f64 (bitconvert node:$in))>;
def bc_v16i8 : PatFrag<(ops node:$in), (v16i8 (bitconvert node:$in))>;
def bc_v8i16 : PatFrag<(ops node:$in), (v8i16 (bitconvert node:$in))>;
def bc_v4i32 : PatFrag<(ops node:$in), (v4i32 (bitconvert node:$in))>;
def bc_v2i64 : PatFrag<(ops node:$in), (v2i64 (bitconvert node:$in))>;
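
// Illustrative note (assumed usage): since 128-bit integer vector loads are
// promoted to v2i64, patterns typically re-view the loaded value through one
// of these fragments, e.g. (bc_v4i32 (loadv2i64 addr:$src)).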

// 256-bit bitconvert pattern fragments
def bc_v32i8 : PatFrag<(ops node:$in), (v32i8 (bitconvert node:$in))>;
def bc_v16i16 : PatFrag<(ops node:$in), (v16i16 (bitconvert node:$in))>;
def bc_v8i32 : PatFrag<(ops node:$in), (v8i32 (bitconvert node:$in))>;
def bc_v4i64 : PatFrag<(ops node:$in), (v4i64 (bitconvert node:$in))>;
def bc_v8f32 : PatFrag<(ops node:$in), (v8f32 (bitconvert node:$in))>;

// 512-bit bitconvert pattern fragments
def bc_v16i32 : PatFrag<(ops node:$in), (v16i32 (bitconvert node:$in))>;
def bc_v8i64 : PatFrag<(ops node:$in), (v8i64 (bitconvert node:$in))>;
def bc_v8f64 : PatFrag<(ops node:$in), (v8f64 (bitconvert node:$in))>;
def bc_v16f32 : PatFrag<(ops node:$in), (v16f32 (bitconvert node:$in))>;

def vzmovl_v2i64 : PatFrag<(ops node:$src),
                           (bitconvert (v2i64 (X86vzmovl
                             (v2i64 (scalar_to_vector (loadi64 node:$src))))))>;
def vzmovl_v4i32 : PatFrag<(ops node:$src),
                           (bitconvert (v4i32 (X86vzmovl
                             (v4i32 (scalar_to_vector (loadi32 node:$src))))))>;

def vzload_v2i64 : PatFrag<(ops node:$src),
                           (bitconvert (v2i64 (X86vzload node:$src)))>;

def fp32imm0 : PatLeaf<(f32 fpimm), [{
  return N->isExactlyValue(+0.0);
}]>;

def I8Imm : SDNodeXForm<imm, [{
  // Transformation function: get the low 8 bits.
  return getI8Imm((uint8_t)N->getZExtValue(), SDLoc(N));
}]>;

def FROUND_NO_EXC : ImmLeaf<i32, [{ return Imm == 8; }]>;
def FROUND_CURRENT : ImmLeaf<i32, [{
  return Imm == X86::STATIC_ROUNDING::CUR_DIRECTION;
}]>;

// BYTE_imm - Transform bit immediates into byte immediates.
def BYTE_imm : SDNodeXForm<imm, [{
  // Transformation function: imm >> 3
  return getI32Imm(N->getZExtValue() >> 3, SDLoc(N));
}]>;
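
// For example, a bit-shift immediate of 16 becomes the byte-shift immediate 2
// (16 >> 3); patterns that map bit-count intrinsics onto byte-shift
// instructions (PSLLDQ/PSRLDQ style) are the intended users of this transform.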

// EXTRACT_get_vextract128_imm xform function: convert extract_subvector index
// to VEXTRACTF128/VEXTRACTI128 imm.
def EXTRACT_get_vextract128_imm : SDNodeXForm<extract_subvector, [{
  return getI8Imm(X86::getExtractVEXTRACT128Immediate(N), SDLoc(N));
}]>;

// INSERT_get_vinsert128_imm xform function: convert insert_subvector index to
// VINSERTF128/VINSERTI128 imm.
def INSERT_get_vinsert128_imm : SDNodeXForm<insert_subvector, [{
  return getI8Imm(X86::getInsertVINSERT128Immediate(N), SDLoc(N));
}]>;

// EXTRACT_get_vextract256_imm xform function: convert extract_subvector index
// to VEXTRACTF64x4 imm.
def EXTRACT_get_vextract256_imm : SDNodeXForm<extract_subvector, [{
  return getI8Imm(X86::getExtractVEXTRACT256Immediate(N), SDLoc(N));
}]>;

// INSERT_get_vinsert256_imm xform function: convert insert_subvector index to
// VINSERTF64x4 imm.
def INSERT_get_vinsert256_imm : SDNodeXForm<insert_subvector, [{
  return getI8Imm(X86::getInsertVINSERT256Immediate(N), SDLoc(N));
}]>;

def vextract128_extract : PatFrag<(ops node:$bigvec, node:$index),
                                  (extract_subvector node:$bigvec,
                                                     (iPTR imm)), [{
  return X86::isVEXTRACT128Index(N);
}], EXTRACT_get_vextract128_imm>;

def vinsert128_insert : PatFrag<(ops node:$bigvec, node:$smallvec,
                                     node:$index),
                                (insert_subvector node:$bigvec, node:$smallvec,
                                                  (iPTR imm)), [{
  return X86::isVINSERT128Index(N);
}], INSERT_get_vinsert128_imm>;

def vextract256_extract : PatFrag<(ops node:$bigvec, node:$index),
                                  (extract_subvector node:$bigvec,
                                                     (iPTR imm)), [{
  return X86::isVEXTRACT256Index(N);
}], EXTRACT_get_vextract256_imm>;

def vinsert256_insert : PatFrag<(ops node:$bigvec, node:$smallvec,
                                     node:$index),
                                (insert_subvector node:$bigvec, node:$smallvec,
                                                  (iPTR imm)), [{
  return X86::isVINSERT256Index(N);
}], INSERT_get_vinsert256_imm>;
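
// Illustrative note (assumed behavior): the xforms above convert the
// insert/extract element index into a 128-bit lane number, e.g. inserting a
// v4f32 subvector at element index 4 of a v8f32 corresponds to VINSERTF128
// immediate 1 (the upper lane).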

def masked_load_aligned128 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                         (masked_load node:$src1, node:$src2, node:$src3), [{
  if (auto *Load = dyn_cast<MaskedLoadSDNode>(N))
    return Load->getAlignment() >= 16;
  return false;
}]>;

def masked_load_aligned256 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                         (masked_load node:$src1, node:$src2, node:$src3), [{
  if (auto *Load = dyn_cast<MaskedLoadSDNode>(N))
    return Load->getAlignment() >= 32;
  return false;
}]>;

def masked_load_aligned512 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                         (masked_load node:$src1, node:$src2, node:$src3), [{
  if (auto *Load = dyn_cast<MaskedLoadSDNode>(N))
    return Load->getAlignment() >= 64;
  return false;
}]>;

def masked_load_unaligned : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                         (masked_load node:$src1, node:$src2, node:$src3), [{
  return isa<MaskedLoadSDNode>(N);
}]>;

def masked_store_aligned128 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                         (masked_store node:$src1, node:$src2, node:$src3), [{
  if (auto *Store = dyn_cast<MaskedStoreSDNode>(N))
    return Store->getAlignment() >= 16;
  return false;
}]>;

def masked_store_aligned256 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                         (masked_store node:$src1, node:$src2, node:$src3), [{
  if (auto *Store = dyn_cast<MaskedStoreSDNode>(N))
    return Store->getAlignment() >= 32;
  return false;
}]>;

def masked_store_aligned512 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                         (masked_store node:$src1, node:$src2, node:$src3), [{
  if (auto *Store = dyn_cast<MaskedStoreSDNode>(N))
    return Store->getAlignment() >= 64;
  return false;
}]>;

def masked_store_unaligned : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                         (masked_store node:$src1, node:$src2, node:$src3), [{
  return isa<MaskedStoreSDNode>(N);
}]>;