//===-- X86InstrFragmentsSIMD.td - x86 SIMD ISA ------------*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file provides pattern fragments useful for SIMD instructions.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// MMX Pattern Fragments
//===----------------------------------------------------------------------===//

def load_mmx : PatFrag<(ops node:$ptr), (x86mmx (load node:$ptr))>;
def bc_mmx   : PatFrag<(ops node:$in), (x86mmx (bitconvert node:$in))>;
//===----------------------------------------------------------------------===//
// SSE specific DAG Nodes.
//===----------------------------------------------------------------------===//

def SDTX86FPShiftOp : SDTypeProfile<1, 2, [ SDTCisSameAs<0, 1>,
                                            SDTCisFP<0>, SDTCisInt<2> ]>;
def SDTX86VFCMP : SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisSameAs<1, 2>,
                                       SDTCisFP<1>, SDTCisVT<3, i8>,
                                       SDTCisVec<1>]>;
def X86umin    : SDNode<"X86ISD::UMIN",      SDTIntBinOp>;
def X86umax    : SDNode<"X86ISD::UMAX",      SDTIntBinOp>;
def X86smin    : SDNode<"X86ISD::SMIN",      SDTIntBinOp>;
def X86smax    : SDNode<"X86ISD::SMAX",      SDTIntBinOp>;

def X86fmin    : SDNode<"X86ISD::FMIN",      SDTFPBinOp>;
def X86fmax    : SDNode<"X86ISD::FMAX",      SDTFPBinOp>;

// Commutative and Associative FMIN and FMAX.
def X86fminc   : SDNode<"X86ISD::FMINC", SDTFPBinOp,
                        [SDNPCommutative, SDNPAssociative]>;
def X86fmaxc   : SDNode<"X86ISD::FMAXC", SDTFPBinOp,
                        [SDNPCommutative, SDNPAssociative]>;
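
// Note: FMIN and FMAX above carry no such flags because the SSE min/max
// instructions return the second source operand for unordered (NaN) or
// both-zero inputs, so operand order is significant for the non-'C' nodes.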
def X86fand    : SDNode<"X86ISD::FAND",      SDTFPBinOp,
                        [SDNPCommutative, SDNPAssociative]>;
def X86for     : SDNode<"X86ISD::FOR",       SDTFPBinOp,
                        [SDNPCommutative, SDNPAssociative]>;
def X86fxor    : SDNode<"X86ISD::FXOR",      SDTFPBinOp,
                        [SDNPCommutative, SDNPAssociative]>;
def X86fandn   : SDNode<"X86ISD::FANDN",     SDTFPBinOp,
                        [SDNPCommutative, SDNPAssociative]>;
def X86frsqrt  : SDNode<"X86ISD::FRSQRT",    SDTFPUnaryOp>;
def X86frcp    : SDNode<"X86ISD::FRCP",      SDTFPUnaryOp>;
def X86fsrl    : SDNode<"X86ISD::FSRL",      SDTX86FPShiftOp>;
def X86fgetsign: SDNode<"X86ISD::FGETSIGNx86", SDTFPToIntOp>;
def X86fhadd   : SDNode<"X86ISD::FHADD",     SDTFPBinOp>;
def X86fhsub   : SDNode<"X86ISD::FHSUB",     SDTFPBinOp>;
def X86hadd    : SDNode<"X86ISD::HADD",      SDTIntBinOp>;
def X86hsub    : SDNode<"X86ISD::HSUB",      SDTIntBinOp>;
def X86comi    : SDNode<"X86ISD::COMI",      SDTX86CmpTest>;
def X86ucomi   : SDNode<"X86ISD::UCOMI",     SDTX86CmpTest>;
def X86cmps    : SDNode<"X86ISD::FSETCC",    SDTX86Cmps>;
//def X86cmpsd   : SDNode<"X86ISD::FSETCCsd", SDTX86Cmpsd>;
def X86pshufb  : SDNode<"X86ISD::PSHUFB",
                 SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                      SDTCisSameAs<0,2>]>>;
def X86andnp   : SDNode<"X86ISD::ANDNP",
                 SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                      SDTCisSameAs<0,2>]>>;
def X86psign   : SDNode<"X86ISD::PSIGN",
                 SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                      SDTCisSameAs<0,2>]>>;
def X86pextrb  : SDNode<"X86ISD::PEXTRB",
                 SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<2>]>>;
def X86pextrw  : SDNode<"X86ISD::PEXTRW",
                 SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<2>]>>;
def X86pinsrb  : SDNode<"X86ISD::PINSRB",
                 SDTypeProfile<1, 3, [SDTCisVT<0, v16i8>, SDTCisSameAs<0,1>,
                                      SDTCisVT<2, i32>, SDTCisPtrTy<3>]>>;
def X86pinsrw  : SDNode<"X86ISD::PINSRW",
                 SDTypeProfile<1, 3, [SDTCisVT<0, v8i16>, SDTCisSameAs<0,1>,
                                      SDTCisVT<2, i32>, SDTCisPtrTy<3>]>>;
def X86insertps : SDNode<"X86ISD::INSERTPS",
                 SDTypeProfile<1, 3, [SDTCisVT<0, v4f32>, SDTCisSameAs<0,1>,
                                      SDTCisVT<2, v4f32>, SDTCisVT<3, i8>]>>;
def X86vzmovl  : SDNode<"X86ISD::VZEXT_MOVL",
                 SDTypeProfile<1, 1, [SDTCisSameAs<0,1>]>>;

def X86vzload  : SDNode<"X86ISD::VZEXT_LOAD", SDTLoad,
                        [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;

def X86vzext   : SDNode<"X86ISD::VZEXT",
                        SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>,
                                             SDTCisInt<0>, SDTCisInt<1>,
                                             SDTCisOpSmallerThanOp<1, 0>]>>;

def X86vsext   : SDNode<"X86ISD::VSEXT",
                        SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>,
                                             SDTCisInt<0>, SDTCisInt<1>,
                                             SDTCisOpSmallerThanOp<1, 0>]>>;

def X86vtrunc  : SDNode<"X86ISD::VTRUNC",
                        SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>,
                                             SDTCisInt<0>, SDTCisInt<1>,
                                             SDTCisOpSmallerThanOp<0, 1>]>>;
def X86trunc   : SDNode<"X86ISD::TRUNC",
                        SDTypeProfile<1, 1, [SDTCisInt<0>, SDTCisInt<1>,
                                             SDTCisOpSmallerThanOp<0, 1>]>>;
def X86vtruncm : SDNode<"X86ISD::VTRUNCM",
                        SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>,
                                             SDTCisInt<0>, SDTCisInt<1>,
                                             SDTCisVec<2>, SDTCisInt<2>,
                                             SDTCisOpSmallerThanOp<0, 2>]>>;
def X86vfpext  : SDNode<"X86ISD::VFPEXT",
                        SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>,
                                             SDTCisFP<0>, SDTCisFP<1>,
                                             SDTCisOpSmallerThanOp<1, 0>]>>;
def X86vfpround: SDNode<"X86ISD::VFPROUND",
                        SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>,
                                             SDTCisFP<0>, SDTCisFP<1>,
                                             SDTCisOpSmallerThanOp<0, 1>]>>;
def X86vshldq  : SDNode<"X86ISD::VSHLDQ",    SDTIntShiftOp>;
def X86vshrdq  : SDNode<"X86ISD::VSRLDQ",    SDTIntShiftOp>;
def X86cmpp    : SDNode<"X86ISD::CMPP",      SDTX86VFCMP>;
def X86pcmpeq  : SDNode<"X86ISD::PCMPEQ", SDTIntBinOp, [SDNPCommutative]>;
def X86pcmpgt  : SDNode<"X86ISD::PCMPGT", SDTIntBinOp>;

def X86IntCmpMask : SDTypeProfile<1, 2,
    [SDTCisVec<0>, SDTCisSameAs<1, 2>, SDTCisInt<1>]>;
def X86pcmpeqm : SDNode<"X86ISD::PCMPEQM", X86IntCmpMask, [SDNPCommutative]>;
def X86pcmpgtm : SDNode<"X86ISD::PCMPGTM", X86IntCmpMask>;
def X86CmpMaskCC :
      SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisInt<0>, SDTCisVec<1>,
                           SDTCisSameAs<1, 2>, SDTCisVT<3, i8>]>;
def X86CmpMaskCCScalar :
      SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisSameAs<1, 2>, SDTCisVT<3, i8>]>;

def X86cmpm    : SDNode<"X86ISD::CMPM",   X86CmpMaskCC>;
def X86cmpmu   : SDNode<"X86ISD::CMPMU",  X86CmpMaskCC>;
def X86cmpms   : SDNode<"X86ISD::FSETCC", X86CmpMaskCCScalar>;
def X86vshl    : SDNode<"X86ISD::VSHL",
                 SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                      SDTCisVec<2>]>>;
def X86vsrl    : SDNode<"X86ISD::VSRL",
                 SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                      SDTCisVec<2>]>>;
def X86vsra    : SDNode<"X86ISD::VSRA",
                 SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                      SDTCisVec<2>]>>;

def X86vshli   : SDNode<"X86ISD::VSHLI", SDTIntShiftOp>;
def X86vsrli   : SDNode<"X86ISD::VSRLI", SDTIntShiftOp>;
def X86vsrai   : SDNode<"X86ISD::VSRAI", SDTIntShiftOp>;
def SDTX86CmpPTest : SDTypeProfile<1, 2, [SDTCisVT<0, i32>,
                                          SDTCisVec<1>,
                                          SDTCisSameAs<2, 1>]>;
def X86subus   : SDNode<"X86ISD::SUBUS", SDTIntBinOp>;
def X86ptest   : SDNode<"X86ISD::PTEST", SDTX86CmpPTest>;
def X86testp   : SDNode<"X86ISD::TESTP", SDTX86CmpPTest>;
def X86kortest : SDNode<"X86ISD::KORTEST", SDTX86CmpPTest>;
def X86testm   : SDNode<"X86ISD::TESTM", SDTypeProfile<1, 2, [SDTCisVec<0>,
                                          SDTCisVec<1>,
                                          SDTCisSameAs<2, 1>]>>;
def X86testnm  : SDNode<"X86ISD::TESTNM", SDTypeProfile<1, 2, [SDTCisVec<0>,
                                           SDTCisVec<1>,
                                           SDTCisSameAs<2, 1>]>>;
def X86select  : SDNode<"X86ISD::SELECT", SDTSelect>;
def X86pmuludq : SDNode<"X86ISD::PMULUDQ",
                        SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>,
                                             SDTCisSameAs<1,2>]>>;
def X86pmuldq  : SDNode<"X86ISD::PMULDQ",
                        SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>,
                                             SDTCisSameAs<1,2>]>>;
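
// PMULUDQ/PMULDQ multiply the even-numbered 32-bit elements and produce
// 64-bit products, so the result vector type is wider than the source type;
// hence only SDTCisVec<0> is required for the result above.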
// Specific shuffle nodes - At some point ISD::VECTOR_SHUFFLE will always get
// translated into one of the target nodes below during lowering.
// Note: this is a work in progress...
def SDTShuff1Op : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisSameAs<0,1>]>;
def SDTShuff2Op : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                SDTCisSameAs<0,2>]>;
def SDTShuff3Op : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                SDTCisSameAs<0,2>, SDTCisSameAs<0,3>]>;

def SDTShuff2OpM : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                        SDTCisVec<2>]>;
def SDTShuff2OpI : SDTypeProfile<1, 2, [SDTCisVec<0>,
                                 SDTCisSameAs<0,1>, SDTCisInt<2>]>;
def SDTShuff3OpI : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                 SDTCisSameAs<0,2>, SDTCisInt<3>]>;

def SDTVBroadcast  : SDTypeProfile<1, 1, [SDTCisVec<0>]>;
def SDTVBroadcastm : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>]>;

def SDTBlend : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                             SDTCisSameAs<1,2>, SDTCisVT<3, i8>]>;

def SDTFma : SDTypeProfile<1, 3, [SDTCisSameAs<0,1>,
                           SDTCisSameAs<1,2>, SDTCisSameAs<1,3>]>;
def SDTFmaRound : SDTypeProfile<1, 4, [SDTCisSameAs<0,1>,
                           SDTCisSameAs<1,2>, SDTCisSameAs<1,3>, SDTCisInt<4>]>;
def STDFp1SrcRm : SDTypeProfile<1, 2, [SDTCisSameAs<0,1>,
                           SDTCisVec<0>, SDTCisInt<2>]>;
def STDFp2SrcRm : SDTypeProfile<1, 3, [SDTCisSameAs<0,1>,
                           SDTCisVec<0>, SDTCisInt<3>]>;
def X86PAlignr : SDNode<"X86ISD::PALIGNR", SDTShuff3OpI>;
def X86VAlign  : SDNode<"X86ISD::VALIGN",  SDTShuff3OpI>;

def X86PShufd  : SDNode<"X86ISD::PSHUFD",  SDTShuff2OpI>;
def X86PShufhw : SDNode<"X86ISD::PSHUFHW", SDTShuff2OpI>;
def X86PShuflw : SDNode<"X86ISD::PSHUFLW", SDTShuff2OpI>;

def X86Shufp   : SDNode<"X86ISD::SHUFP", SDTShuff3OpI>;

def X86Movddup  : SDNode<"X86ISD::MOVDDUP",  SDTShuff1Op>;
def X86Movshdup : SDNode<"X86ISD::MOVSHDUP", SDTShuff1Op>;
def X86Movsldup : SDNode<"X86ISD::MOVSLDUP", SDTShuff1Op>;

def X86Movsd : SDNode<"X86ISD::MOVSD", SDTShuff2Op>;
def X86Movss : SDNode<"X86ISD::MOVSS", SDTShuff2Op>;

def X86Movlhps : SDNode<"X86ISD::MOVLHPS", SDTShuff2Op>;
def X86Movlhpd : SDNode<"X86ISD::MOVLHPD", SDTShuff2Op>;
def X86Movhlps : SDNode<"X86ISD::MOVHLPS", SDTShuff2Op>;

def X86Movlps : SDNode<"X86ISD::MOVLPS", SDTShuff2Op>;
def X86Movlpd : SDNode<"X86ISD::MOVLPD", SDTShuff2Op>;

def SDTPack : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>,
                                   SDTCisSameAs<2, 1>]>;
def X86Packss : SDNode<"X86ISD::PACKSS", SDTPack>;
def X86Packus : SDNode<"X86ISD::PACKUS", SDTPack>;

def X86Unpckl : SDNode<"X86ISD::UNPCKL", SDTShuff2Op>;
def X86Unpckh : SDNode<"X86ISD::UNPCKH", SDTShuff2Op>;

def X86VPermilpv  : SDNode<"X86ISD::VPERMILPV", SDTShuff2OpM>;
def X86VPermilpi  : SDNode<"X86ISD::VPERMILPI", SDTShuff2OpI>;
def X86VPermv     : SDNode<"X86ISD::VPERMV",    SDTShuff2Op>;
def X86VPermi     : SDNode<"X86ISD::VPERMI",    SDTShuff2OpI>;
def X86VPermv3    : SDNode<"X86ISD::VPERMV3",   SDTShuff3Op>;
def X86VPermiv3   : SDNode<"X86ISD::VPERMIV3",  SDTShuff3Op>;

def X86VPerm2x128 : SDNode<"X86ISD::VPERM2X128", SDTShuff3OpI>;

def X86VBroadcast  : SDNode<"X86ISD::VBROADCAST",  SDTVBroadcast>;
def X86VBroadcastm : SDNode<"X86ISD::VBROADCASTM", SDTVBroadcastm>;
def X86Vinsert   : SDNode<"X86ISD::VINSERT",  SDTypeProfile<1, 3,
                              [SDTCisSameAs<0, 1>, SDTCisPtrTy<3>]>, []>;
def X86Vextract  : SDNode<"X86ISD::VEXTRACT", SDTypeProfile<1, 2,
                              [SDTCisVec<1>, SDTCisPtrTy<2>]>, []>;
def X86Blendi    : SDNode<"X86ISD::BLENDI", SDTBlend>;

def X86Addsub    : SDNode<"X86ISD::ADDSUB", SDTFPBinOp>;

def X86Fmadd     : SDNode<"X86ISD::FMADD",     SDTFma>;
def X86Fnmadd    : SDNode<"X86ISD::FNMADD",    SDTFma>;
def X86Fmsub     : SDNode<"X86ISD::FMSUB",     SDTFma>;
def X86Fnmsub    : SDNode<"X86ISD::FNMSUB",    SDTFma>;
def X86Fmaddsub  : SDNode<"X86ISD::FMADDSUB",  SDTFma>;
def X86Fmsubadd  : SDNode<"X86ISD::FMSUBADD",  SDTFma>;

def X86FmaddRnd    : SDNode<"X86ISD::FMADD_RND",    SDTFmaRound>;
def X86FnmaddRnd   : SDNode<"X86ISD::FNMADD_RND",   SDTFmaRound>;
def X86FmsubRnd    : SDNode<"X86ISD::FMSUB_RND",    SDTFmaRound>;
def X86FnmsubRnd   : SDNode<"X86ISD::FNMSUB_RND",   SDTFmaRound>;
def X86FmaddsubRnd : SDNode<"X86ISD::FMADDSUB_RND", SDTFmaRound>;
def X86FmsubaddRnd : SDNode<"X86ISD::FMSUBADD_RND", SDTFmaRound>;

def X86rsqrt28   : SDNode<"X86ISD::RSQRT28", STDFp1SrcRm>;
def X86rcp28     : SDNode<"X86ISD::RCP28",   STDFp1SrcRm>;
def X86exp2      : SDNode<"X86ISD::EXP2",    STDFp1SrcRm>;

def X86rsqrt28s  : SDNode<"X86ISD::RSQRT28", STDFp2SrcRm>;
def X86rcp28s    : SDNode<"X86ISD::RCP28",   STDFp2SrcRm>;
def SDT_PCMPISTRI : SDTypeProfile<2, 3, [SDTCisVT<0, i32>, SDTCisVT<1, i32>,
                                         SDTCisVT<2, v16i8>, SDTCisVT<3, v16i8>,
                                         SDTCisVT<4, i8>]>;
def SDT_PCMPESTRI : SDTypeProfile<2, 5, [SDTCisVT<0, i32>, SDTCisVT<1, i32>,
                                         SDTCisVT<2, v16i8>, SDTCisVT<3, i32>,
                                         SDTCisVT<4, v16i8>, SDTCisVT<5, i32>,
                                         SDTCisVT<6, i8>]>;

def X86pcmpistri : SDNode<"X86ISD::PCMPISTRI", SDT_PCMPISTRI>;
def X86pcmpestri : SDNode<"X86ISD::PCMPESTRI", SDT_PCMPESTRI>;
def X86compress: SDNode<"X86ISD::COMPRESS", SDTypeProfile<1, 3,
                              [SDTCisSameAs<0, 2>, SDTCisSameAs<0, 3>,
                               SDTCisVec<3>, SDTCisVec<1>, SDTCisInt<1>]>, []>;
def X86expand  : SDNode<"X86ISD::EXPAND", SDTypeProfile<1, 3,
                              [SDTCisSameAs<0, 2>, SDTCisSameAs<0, 3>,
                               SDTCisVec<3>, SDTCisVec<1>, SDTCisInt<1>]>, []>;
//===----------------------------------------------------------------------===//
// SSE Complex Patterns
//===----------------------------------------------------------------------===//

// These are 'extloads' from a scalar to the low element of a vector, zeroing
// the top elements. These are used for the SSE 'ss' and 'sd' instruction
// forms.
def sse_load_f32 : ComplexPattern<v4f32, 5, "SelectScalarSSELoad", [],
                                  [SDNPHasChain, SDNPMayLoad, SDNPMemOperand,
                                   SDNPWantRoot]>;
def sse_load_f64 : ComplexPattern<v2f64, 5, "SelectScalarSSELoad", [],
                                  [SDNPHasChain, SDNPMayLoad, SDNPMemOperand,
                                   SDNPWantRoot]>;
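
// Illustrative sketch (not part of the original file): a scalar intrinsic
// pattern can fold its memory operand through one of these complex patterns,
// along the lines of
//   [(set GR32:$dst, (int_x86_sse_cvtss2si sse_load_f32:$src))]
// where $src is printed and parsed via the ssmem operand defined below.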
def ssmem : Operand<v4f32> {
  let PrintMethod = "printf32mem";
  let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, i8imm);
  let ParserMatchClass = X86Mem32AsmOperand;
  let OperandType = "OPERAND_MEMORY";
}
def sdmem : Operand<v2f64> {
  let PrintMethod = "printf64mem";
  let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, i8imm);
  let ParserMatchClass = X86Mem64AsmOperand;
  let OperandType = "OPERAND_MEMORY";
}
//===----------------------------------------------------------------------===//
// SSE pattern fragments
//===----------------------------------------------------------------------===//

// 128-bit load pattern fragments
// NOTE: all 128-bit integer vector loads are promoted to v2i64
def loadv4f32  : PatFrag<(ops node:$ptr), (v4f32 (load node:$ptr))>;
def loadv2f64  : PatFrag<(ops node:$ptr), (v2f64 (load node:$ptr))>;
def loadv2i64  : PatFrag<(ops node:$ptr), (v2i64 (load node:$ptr))>;

// 256-bit load pattern fragments
// NOTE: all 256-bit integer vector loads are promoted to v4i64
def loadv8f32  : PatFrag<(ops node:$ptr), (v8f32 (load node:$ptr))>;
def loadv4f64  : PatFrag<(ops node:$ptr), (v4f64 (load node:$ptr))>;
def loadv4i64  : PatFrag<(ops node:$ptr), (v4i64 (load node:$ptr))>;

// 512-bit load pattern fragments
def loadv16f32 : PatFrag<(ops node:$ptr), (v16f32 (load node:$ptr))>;
def loadv8f64  : PatFrag<(ops node:$ptr), (v8f64  (load node:$ptr))>;
def loadv64i8  : PatFrag<(ops node:$ptr), (v64i8  (load node:$ptr))>;
def loadv32i16 : PatFrag<(ops node:$ptr), (v32i16 (load node:$ptr))>;
def loadv16i32 : PatFrag<(ops node:$ptr), (v16i32 (load node:$ptr))>;
def loadv8i64  : PatFrag<(ops node:$ptr), (v8i64  (load node:$ptr))>;

// 128-/256-/512-bit extload pattern fragments
def extloadv2f32 : PatFrag<(ops node:$ptr), (v2f64 (extloadvf32 node:$ptr))>;
def extloadv4f32 : PatFrag<(ops node:$ptr), (v4f64 (extloadvf32 node:$ptr))>;
def extloadv8f32 : PatFrag<(ops node:$ptr), (v8f64 (extloadvf32 node:$ptr))>;
// Like 'store', but always requires 128-bit vector alignment.
def alignedstore : PatFrag<(ops node:$val, node:$ptr),
                           (store node:$val, node:$ptr), [{
  return cast<StoreSDNode>(N)->getAlignment() >= 16;
}]>;

// Like 'store', but always requires 256-bit vector alignment.
def alignedstore256 : PatFrag<(ops node:$val, node:$ptr),
                              (store node:$val, node:$ptr), [{
  return cast<StoreSDNode>(N)->getAlignment() >= 32;
}]>;

// Like 'store', but always requires 512-bit vector alignment.
def alignedstore512 : PatFrag<(ops node:$val, node:$ptr),
                              (store node:$val, node:$ptr), [{
  return cast<StoreSDNode>(N)->getAlignment() >= 64;
}]>;

// Like 'load', but always requires 128-bit vector alignment.
def alignedload : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return cast<LoadSDNode>(N)->getAlignment() >= 16;
}]>;

// Like 'X86vzload', but always requires 128-bit vector alignment.
def alignedX86vzload : PatFrag<(ops node:$ptr), (X86vzload node:$ptr), [{
  return cast<MemSDNode>(N)->getAlignment() >= 16;
}]>;

// Like 'load', but always requires 256-bit vector alignment.
def alignedload256 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return cast<LoadSDNode>(N)->getAlignment() >= 32;
}]>;

// Like 'load', but always requires 512-bit vector alignment.
def alignedload512 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return cast<LoadSDNode>(N)->getAlignment() >= 64;
}]>;

def alignedloadfsf32 : PatFrag<(ops node:$ptr),
                               (f32 (alignedload node:$ptr))>;
def alignedloadfsf64 : PatFrag<(ops node:$ptr),
                               (f64 (alignedload node:$ptr))>;
// 128-bit aligned load pattern fragments
// NOTE: all 128-bit integer vector loads are promoted to v2i64
def alignedloadv4f32 : PatFrag<(ops node:$ptr),
                               (v4f32 (alignedload node:$ptr))>;
def alignedloadv2f64 : PatFrag<(ops node:$ptr),
                               (v2f64 (alignedload node:$ptr))>;
def alignedloadv2i64 : PatFrag<(ops node:$ptr),
                               (v2i64 (alignedload node:$ptr))>;

// 256-bit aligned load pattern fragments
// NOTE: all 256-bit integer vector loads are promoted to v4i64
def alignedloadv8f32 : PatFrag<(ops node:$ptr),
                               (v8f32 (alignedload256 node:$ptr))>;
def alignedloadv4f64 : PatFrag<(ops node:$ptr),
                               (v4f64 (alignedload256 node:$ptr))>;
def alignedloadv4i64 : PatFrag<(ops node:$ptr),
                               (v4i64 (alignedload256 node:$ptr))>;

// 512-bit aligned load pattern fragments
def alignedloadv16f32 : PatFrag<(ops node:$ptr),
                                (v16f32 (alignedload512 node:$ptr))>;
def alignedloadv16i32 : PatFrag<(ops node:$ptr),
                                (v16i32 (alignedload512 node:$ptr))>;
def alignedloadv8f64  : PatFrag<(ops node:$ptr),
                                (v8f64  (alignedload512 node:$ptr))>;
def alignedloadv8i64  : PatFrag<(ops node:$ptr),
                                (v8i64  (alignedload512 node:$ptr))>;
// Like 'load', but uses special alignment checks suitable for use in
// memory operands in most SSE instructions, which are required to
// be naturally aligned on some targets but not on others. If the subtarget
// allows unaligned accesses, match any load, though this may require
// setting a feature bit in the processor (on startup, for example).
// Opteron 10h and later implement such a feature.
def memop : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return Subtarget->hasVectorUAMem()
      || cast<LoadSDNode>(N)->getAlignment() >= 16;
}]>;

def memopfsf32 : PatFrag<(ops node:$ptr), (f32 (memop node:$ptr))>;
def memopfsf64 : PatFrag<(ops node:$ptr), (f64 (memop node:$ptr))>;
// 128-bit memop pattern fragments
// NOTE: all 128-bit integer vector loads are promoted to v2i64
def memopv4f32 : PatFrag<(ops node:$ptr), (v4f32 (memop node:$ptr))>;
def memopv2f64 : PatFrag<(ops node:$ptr), (v2f64 (memop node:$ptr))>;
def memopv2i64 : PatFrag<(ops node:$ptr), (v2i64 (memop node:$ptr))>;

// 256-bit memop pattern fragments
// NOTE: all 256-bit integer vector loads are promoted to v4i64
def memopv8f32 : PatFrag<(ops node:$ptr), (v8f32 (memop node:$ptr))>;
def memopv4f64 : PatFrag<(ops node:$ptr), (v4f64 (memop node:$ptr))>;
def memopv4i64 : PatFrag<(ops node:$ptr), (v4i64 (memop node:$ptr))>;

// 512-bit memop pattern fragments
def memopv16f32 : PatFrag<(ops node:$ptr), (v16f32 (memop node:$ptr))>;
def memopv8f64  : PatFrag<(ops node:$ptr), (v8f64  (memop node:$ptr))>;
def memopv16i32 : PatFrag<(ops node:$ptr), (v16i32 (memop node:$ptr))>;
def memopv8i64  : PatFrag<(ops node:$ptr), (v8i64  (memop node:$ptr))>;

// SSSE3 uses MMX registers for some instructions. They aren't aligned on a
// 16-byte boundary.
// FIXME: 8 byte alignment for mmx reads is not required
def memop64 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return cast<LoadSDNode>(N)->getAlignment() >= 8;
}]>;

def memopmmx : PatFrag<(ops node:$ptr), (x86mmx (memop64 node:$ptr))>;
// Like 'store', but requires the non-temporal bit to be set.
def nontemporalstore : PatFrag<(ops node:$val, node:$ptr),
                               (st node:$val, node:$ptr), [{
  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
    return ST->isNonTemporal();
  return false;
}]>;

def alignednontemporalstore : PatFrag<(ops node:$val, node:$ptr),
                                      (st node:$val, node:$ptr), [{
  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
    return ST->isNonTemporal() && !ST->isTruncatingStore() &&
           ST->getAddressingMode() == ISD::UNINDEXED &&
           ST->getAlignment() >= 16;
  return false;
}]>;

def unalignednontemporalstore : PatFrag<(ops node:$val, node:$ptr),
                                        (st node:$val, node:$ptr), [{
  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
    return ST->isNonTemporal() &&
           ST->getAlignment() < 16;
  return false;
}]>;
// 128-bit bitconvert pattern fragments
def bc_v4f32 : PatFrag<(ops node:$in), (v4f32 (bitconvert node:$in))>;
def bc_v2f64 : PatFrag<(ops node:$in), (v2f64 (bitconvert node:$in))>;
def bc_v16i8 : PatFrag<(ops node:$in), (v16i8 (bitconvert node:$in))>;
def bc_v8i16 : PatFrag<(ops node:$in), (v8i16 (bitconvert node:$in))>;
def bc_v4i32 : PatFrag<(ops node:$in), (v4i32 (bitconvert node:$in))>;
def bc_v2i64 : PatFrag<(ops node:$in), (v2i64 (bitconvert node:$in))>;

// 256-bit bitconvert pattern fragments
def bc_v32i8  : PatFrag<(ops node:$in), (v32i8  (bitconvert node:$in))>;
def bc_v16i16 : PatFrag<(ops node:$in), (v16i16 (bitconvert node:$in))>;
def bc_v8i32  : PatFrag<(ops node:$in), (v8i32  (bitconvert node:$in))>;
def bc_v4i64  : PatFrag<(ops node:$in), (v4i64  (bitconvert node:$in))>;
def bc_v8f32  : PatFrag<(ops node:$in), (v8f32  (bitconvert node:$in))>;

// 512-bit bitconvert pattern fragments
def bc_v16i32 : PatFrag<(ops node:$in), (v16i32 (bitconvert node:$in))>;
def bc_v8i64  : PatFrag<(ops node:$in), (v8i64  (bitconvert node:$in))>;
def bc_v8f64  : PatFrag<(ops node:$in), (v8f64  (bitconvert node:$in))>;
def bc_v16f32 : PatFrag<(ops node:$in), (v16f32 (bitconvert node:$in))>;

def vzmovl_v2i64 : PatFrag<(ops node:$src),
                           (bitconvert (v2i64 (X86vzmovl
                             (v2i64 (scalar_to_vector (loadi64 node:$src))))))>;
def vzmovl_v4i32 : PatFrag<(ops node:$src),
                           (bitconvert (v4i32 (X86vzmovl
                             (v4i32 (scalar_to_vector (loadi32 node:$src))))))>;

def vzload_v2i64 : PatFrag<(ops node:$src),
                           (bitconvert (v2i64 (X86vzload node:$src)))>;
def fp32imm0 : PatLeaf<(f32 fpimm), [{
  return N->isExactlyValue(+0.0);
}]>;

def I8Imm : SDNodeXForm<imm, [{
  // Transformation function: get the low 8 bits.
  return getI8Imm((uint8_t)N->getZExtValue());
}]>;

def FROUND_NO_EXC  : ImmLeaf<i32, [{ return Imm == 8; }]>;
def FROUND_CURRENT : ImmLeaf<i32, [{
  return Imm == X86::STATIC_ROUNDING::CUR_DIRECTION;
}]>;
// BYTE_imm - Transform bit immediates into byte immediates.
def BYTE_imm : SDNodeXForm<imm, [{
  // Transformation function: imm >> 3
  return getI32Imm(N->getZExtValue() >> 3);
}]>;
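
// For example, a bit offset of 32 becomes the byte offset 32 >> 3 == 4.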
// EXTRACT_get_vextract128_imm xform function: convert extract_subvector index
// to VEXTRACTF128/VEXTRACTI128 imm.
def EXTRACT_get_vextract128_imm : SDNodeXForm<extract_subvector, [{
  return getI8Imm(X86::getExtractVEXTRACT128Immediate(N));
}]>;
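
// For instance, extracting the upper 128-bit half (elements 4-7) of a v8f32
// yields immediate 1; the lower half (elements 0-3) yields immediate 0.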
// INSERT_get_vinsert128_imm xform function: convert insert_subvector index to
// VINSERTF128/VINSERTI128 imm.
def INSERT_get_vinsert128_imm : SDNodeXForm<insert_subvector, [{
  return getI8Imm(X86::getInsertVINSERT128Immediate(N));
}]>;

// EXTRACT_get_vextract256_imm xform function: convert extract_subvector index
// to VEXTRACTF64x4 imm.
def EXTRACT_get_vextract256_imm : SDNodeXForm<extract_subvector, [{
  return getI8Imm(X86::getExtractVEXTRACT256Immediate(N));
}]>;

// INSERT_get_vinsert256_imm xform function: convert insert_subvector index to
// VINSERTF64x4 imm.
def INSERT_get_vinsert256_imm : SDNodeXForm<insert_subvector, [{
  return getI8Imm(X86::getInsertVINSERT256Immediate(N));
}]>;
def vextract128_extract : PatFrag<(ops node:$bigvec, node:$index),
                                   (extract_subvector node:$bigvec,
                                                      node:$index), [{
  return X86::isVEXTRACT128Index(N);
}], EXTRACT_get_vextract128_imm>;

def vinsert128_insert : PatFrag<(ops node:$bigvec, node:$smallvec,
                                      node:$index),
                                 (insert_subvector node:$bigvec, node:$smallvec,
                                                   node:$index), [{
  return X86::isVINSERT128Index(N);
}], INSERT_get_vinsert128_imm>;

def vextract256_extract : PatFrag<(ops node:$bigvec, node:$index),
                                   (extract_subvector node:$bigvec,
                                                      node:$index), [{
  return X86::isVEXTRACT256Index(N);
}], EXTRACT_get_vextract256_imm>;

def vinsert256_insert : PatFrag<(ops node:$bigvec, node:$smallvec,
                                      node:$index),
                                 (insert_subvector node:$bigvec, node:$smallvec,
                                                   node:$index), [{
  return X86::isVINSERT256Index(N);
}], INSERT_get_vinsert256_imm>;