//======- X86InstrFragmentsSIMD.td - x86 ISA -------------*- tablegen -*-=====//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file provides pattern fragments useful for SIMD instructions.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// MMX Pattern Fragments
//===----------------------------------------------------------------------===//

def load_mmx : PatFrag<(ops node:$ptr), (x86mmx (load node:$ptr))>;
def bc_mmx   : PatFrag<(ops node:$in), (x86mmx (bitconvert node:$in))>;
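
// Illustrative use (a sketch, not part of this file): an MMX load
// instruction defined elsewhere (e.g. MMX_MOVQ64rm in X86InstrMMX.td) can
// fold the fragment directly in its selection pattern:
//   [(set VR64:$dst, (load_mmx addr:$src))]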

//===----------------------------------------------------------------------===//
// SSE specific DAG Nodes.
//===----------------------------------------------------------------------===//

def SDTX86FPShiftOp : SDTypeProfile<1, 2, [ SDTCisSameAs<0, 1>,
                                            SDTCisFP<0>, SDTCisInt<2> ]>;
def SDTX86VFCMP : SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisSameAs<1, 2>,
                                       SDTCisFP<1>, SDTCisVT<3, i8>]>;

def X86fmin    : SDNode<"X86ISD::FMIN",      SDTFPBinOp>;
def X86fmax    : SDNode<"X86ISD::FMAX",      SDTFPBinOp>;
def X86fand    : SDNode<"X86ISD::FAND",      SDTFPBinOp,
                        [SDNPCommutative, SDNPAssociative]>;
def X86for     : SDNode<"X86ISD::FOR",       SDTFPBinOp,
                        [SDNPCommutative, SDNPAssociative]>;
def X86fxor    : SDNode<"X86ISD::FXOR",      SDTFPBinOp,
                        [SDNPCommutative, SDNPAssociative]>;
def X86frsqrt  : SDNode<"X86ISD::FRSQRT",    SDTFPUnaryOp>;
def X86frcp    : SDNode<"X86ISD::FRCP",      SDTFPUnaryOp>;
def X86fsrl    : SDNode<"X86ISD::FSRL",      SDTX86FPShiftOp>;
def X86comi    : SDNode<"X86ISD::COMI",      SDTX86CmpTest>;
def X86ucomi   : SDNode<"X86ISD::UCOMI",     SDTX86CmpTest>;
def X86pshufb  : SDNode<"X86ISD::PSHUFB",
                 SDTypeProfile<1, 2, [SDTCisVT<0, v16i8>, SDTCisSameAs<0,1>,
                                      SDTCisSameAs<0,2>]>>;
def X86pandn   : SDNode<"X86ISD::PANDN",
                 SDTypeProfile<1, 2, [SDTCisVT<0, v2i64>, SDTCisSameAs<0,1>,
                                      SDTCisSameAs<0,2>]>>;
def X86psignb  : SDNode<"X86ISD::PSIGNB",
                 SDTypeProfile<1, 2, [SDTCisVT<0, v16i8>, SDTCisSameAs<0,1>,
                                      SDTCisSameAs<0,2>]>>;
def X86psignw  : SDNode<"X86ISD::PSIGNW",
                 SDTypeProfile<1, 2, [SDTCisVT<0, v8i16>, SDTCisSameAs<0,1>,
                                      SDTCisSameAs<0,2>]>>;
def X86psignd  : SDNode<"X86ISD::PSIGND",
                 SDTypeProfile<1, 2, [SDTCisVT<0, v4i32>, SDTCisSameAs<0,1>,
                                      SDTCisSameAs<0,2>]>>;
def X86pextrb  : SDNode<"X86ISD::PEXTRB",
                 SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<2>]>>;
def X86pextrw  : SDNode<"X86ISD::PEXTRW",
                 SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<2>]>>;
def X86pinsrb  : SDNode<"X86ISD::PINSRB",
                 SDTypeProfile<1, 3, [SDTCisVT<0, v16i8>, SDTCisSameAs<0,1>,
                                      SDTCisVT<2, i32>, SDTCisPtrTy<3>]>>;
def X86pinsrw  : SDNode<"X86ISD::PINSRW",
                 SDTypeProfile<1, 3, [SDTCisVT<0, v8i16>, SDTCisSameAs<0,1>,
                                      SDTCisVT<2, i32>, SDTCisPtrTy<3>]>>;
def X86insrtps : SDNode<"X86ISD::INSERTPS",
                 SDTypeProfile<1, 3, [SDTCisVT<0, v4f32>, SDTCisSameAs<0,1>,
                                      SDTCisVT<2, v4f32>, SDTCisPtrTy<3>]>>;
def X86vzmovl  : SDNode<"X86ISD::VZEXT_MOVL",
                 SDTypeProfile<1, 1, [SDTCisSameAs<0,1>]>>;
def X86vzload  : SDNode<"X86ISD::VZEXT_LOAD", SDTLoad,
                        [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
def X86vshl    : SDNode<"X86ISD::VSHL",      SDTIntShiftOp>;
def X86vshr    : SDNode<"X86ISD::VSRL",      SDTIntShiftOp>;
def X86cmpps   : SDNode<"X86ISD::CMPPS",     SDTX86VFCMP>;
def X86cmppd   : SDNode<"X86ISD::CMPPD",     SDTX86VFCMP>;
def X86pcmpeqb : SDNode<"X86ISD::PCMPEQB", SDTIntBinOp, [SDNPCommutative]>;
def X86pcmpeqw : SDNode<"X86ISD::PCMPEQW", SDTIntBinOp, [SDNPCommutative]>;
def X86pcmpeqd : SDNode<"X86ISD::PCMPEQD", SDTIntBinOp, [SDNPCommutative]>;
def X86pcmpeqq : SDNode<"X86ISD::PCMPEQQ", SDTIntBinOp, [SDNPCommutative]>;
def X86pcmpgtb : SDNode<"X86ISD::PCMPGTB", SDTIntBinOp>;
def X86pcmpgtw : SDNode<"X86ISD::PCMPGTW", SDTIntBinOp>;
def X86pcmpgtd : SDNode<"X86ISD::PCMPGTD", SDTIntBinOp>;
def X86pcmpgtq : SDNode<"X86ISD::PCMPGTQ", SDTIntBinOp>;

def SDTX86CmpPTest : SDTypeProfile<1, 2, [SDTCisVT<0, i32>,
                                          SDTCisVec<1>,
                                          SDTCisSameAs<2, 1>]>;
def X86ptest   : SDNode<"X86ISD::PTEST", SDTX86CmpPTest>;
def X86testp   : SDNode<"X86ISD::TESTP", SDTX86CmpPTest>;

// Specific shuffle nodes - At some point ISD::VECTOR_SHUFFLE will always get
// translated into one of the target nodes below during lowering.
// Note: this is a work in progress...
def SDTShuff1Op : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisSameAs<0,1>]>;
def SDTShuff2Op : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                SDTCisSameAs<0,2>]>;

def SDTShuff2OpI : SDTypeProfile<1, 2, [SDTCisVec<0>,
                                 SDTCisSameAs<0,1>, SDTCisInt<2>]>;
def SDTShuff3OpI : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                 SDTCisSameAs<0,2>, SDTCisInt<3>]>;

def X86PAlign : SDNode<"X86ISD::PALIGN", SDTShuff3OpI>;

def X86PShufd  : SDNode<"X86ISD::PSHUFD", SDTShuff2OpI>;
def X86PShufhw : SDNode<"X86ISD::PSHUFHW", SDTShuff2OpI>;
def X86PShuflw : SDNode<"X86ISD::PSHUFLW", SDTShuff2OpI>;

def X86Shufpd : SDNode<"X86ISD::SHUFPD", SDTShuff3OpI>;
def X86Shufps : SDNode<"X86ISD::SHUFPS", SDTShuff3OpI>;

def X86Movddup  : SDNode<"X86ISD::MOVDDUP", SDTShuff1Op>;
def X86Movshdup : SDNode<"X86ISD::MOVSHDUP", SDTShuff1Op>;
def X86Movsldup : SDNode<"X86ISD::MOVSLDUP", SDTShuff1Op>;

def X86Movsd : SDNode<"X86ISD::MOVSD", SDTShuff2Op>;
def X86Movss : SDNode<"X86ISD::MOVSS", SDTShuff2Op>;

def X86Movlhps : SDNode<"X86ISD::MOVLHPS", SDTShuff2Op>;
def X86Movlhpd : SDNode<"X86ISD::MOVLHPD", SDTShuff2Op>;
def X86Movhlps : SDNode<"X86ISD::MOVHLPS", SDTShuff2Op>;
def X86Movhlpd : SDNode<"X86ISD::MOVHLPD", SDTShuff2Op>;

def X86Movlps : SDNode<"X86ISD::MOVLPS", SDTShuff2Op>;
def X86Movlpd : SDNode<"X86ISD::MOVLPD", SDTShuff2Op>;

def X86Unpcklps : SDNode<"X86ISD::UNPCKLPS", SDTShuff2Op>;
def X86Unpcklpd : SDNode<"X86ISD::UNPCKLPD", SDTShuff2Op>;
def X86Unpckhps : SDNode<"X86ISD::UNPCKHPS", SDTShuff2Op>;
def X86Unpckhpd : SDNode<"X86ISD::UNPCKHPD", SDTShuff2Op>;

def X86Punpcklbw  : SDNode<"X86ISD::PUNPCKLBW", SDTShuff2Op>;
def X86Punpcklwd  : SDNode<"X86ISD::PUNPCKLWD", SDTShuff2Op>;
def X86Punpckldq  : SDNode<"X86ISD::PUNPCKLDQ", SDTShuff2Op>;
def X86Punpcklqdq : SDNode<"X86ISD::PUNPCKLQDQ", SDTShuff2Op>;

def X86Punpckhbw  : SDNode<"X86ISD::PUNPCKHBW", SDTShuff2Op>;
def X86Punpckhwd  : SDNode<"X86ISD::PUNPCKHWD", SDTShuff2Op>;
def X86Punpckhdq  : SDNode<"X86ISD::PUNPCKHDQ", SDTShuff2Op>;
def X86Punpckhqdq : SDNode<"X86ISD::PUNPCKHQDQ", SDTShuff2Op>;

//===----------------------------------------------------------------------===//
// SSE Complex Patterns
//===----------------------------------------------------------------------===//

// These are 'extloads' from a scalar to the low element of a vector, zeroing
// the top elements.  These are used for the SSE 'ss' and 'sd' instruction
// forms.
def sse_load_f32 : ComplexPattern<v4f32, 5, "SelectScalarSSELoad", [],
                                  [SDNPHasChain, SDNPMayLoad, SDNPMemOperand,
                                   SDNPWantRoot]>;
def sse_load_f64 : ComplexPattern<v2f64, 5, "SelectScalarSSELoad", [],
                                  [SDNPHasChain, SDNPMayLoad, SDNPMemOperand,
                                   SDNPWantRoot]>;
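
// Illustrative use (a sketch; the instruction definitions live in
// X86InstrSSE.td, not here): intrinsic scalar forms fold the load through
// this complex pattern, e.g. for cvtss2si:
//   [(set GR32:$dst, (int_x86_sse_cvtss2si sse_load_f32:$src))]
// The five matched operands are the usual x86 addressing-mode pieces.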

def ssmem : Operand<v4f32> {
  let PrintMethod = "printf32mem";
  let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, i8imm);
  let ParserMatchClass = X86MemAsmOperand;
}
def sdmem : Operand<v2f64> {
  let PrintMethod = "printf64mem";
  let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, i8imm);
  let ParserMatchClass = X86MemAsmOperand;
}

//===----------------------------------------------------------------------===//
// SSE pattern fragments
//===----------------------------------------------------------------------===//

// 128-bit load pattern fragments
def loadv4f32 : PatFrag<(ops node:$ptr), (v4f32 (load node:$ptr))>;
def loadv2f64 : PatFrag<(ops node:$ptr), (v2f64 (load node:$ptr))>;
def loadv4i32 : PatFrag<(ops node:$ptr), (v4i32 (load node:$ptr))>;
def loadv2i64 : PatFrag<(ops node:$ptr), (v2i64 (load node:$ptr))>;

// 256-bit load pattern fragments
def loadv8f32 : PatFrag<(ops node:$ptr), (v8f32 (load node:$ptr))>;
def loadv4f64 : PatFrag<(ops node:$ptr), (v4f64 (load node:$ptr))>;
def loadv8i32 : PatFrag<(ops node:$ptr), (v8i32 (load node:$ptr))>;
def loadv4i64 : PatFrag<(ops node:$ptr), (v4i64 (load node:$ptr))>;

// Like 'store', but always requires vector alignment.
def alignedstore : PatFrag<(ops node:$val, node:$ptr),
                           (store node:$val, node:$ptr), [{
  return cast<StoreSDNode>(N)->getAlignment() >= 16;
}]>;

// Like 'load', but always requires vector alignment.
def alignedload : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return cast<LoadSDNode>(N)->getAlignment() >= 16;
}]>;

def alignedloadfsf32 : PatFrag<(ops node:$ptr),
                               (f32 (alignedload node:$ptr))>;
def alignedloadfsf64 : PatFrag<(ops node:$ptr),
                               (f64 (alignedload node:$ptr))>;

// 128-bit aligned load pattern fragments
def alignedloadv4f32 : PatFrag<(ops node:$ptr),
                               (v4f32 (alignedload node:$ptr))>;
def alignedloadv2f64 : PatFrag<(ops node:$ptr),
                               (v2f64 (alignedload node:$ptr))>;
def alignedloadv4i32 : PatFrag<(ops node:$ptr),
                               (v4i32 (alignedload node:$ptr))>;
def alignedloadv2i64 : PatFrag<(ops node:$ptr),
                               (v2i64 (alignedload node:$ptr))>;

// 256-bit aligned load pattern fragments
def alignedloadv8f32 : PatFrag<(ops node:$ptr),
                               (v8f32 (alignedload node:$ptr))>;
def alignedloadv4f64 : PatFrag<(ops node:$ptr),
                               (v4f64 (alignedload node:$ptr))>;
def alignedloadv8i32 : PatFrag<(ops node:$ptr),
                               (v8i32 (alignedload node:$ptr))>;
def alignedloadv4i64 : PatFrag<(ops node:$ptr),
                               (v4i64 (alignedload node:$ptr))>;

// Like 'load', but uses special alignment checks suitable for use in
// memory operands in most SSE instructions, which are required to
// be naturally aligned on some targets but not on others.  If the subtarget
// allows unaligned accesses, match any load, though this may require
// setting a feature bit in the processor (on startup, for example).
// Opteron 10h and later implement such a feature.
def memop : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return Subtarget->hasVectorUAMem()
      || cast<LoadSDNode>(N)->getAlignment() >= 16;
}]>;
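
// Illustrative use (a sketch; ANDPSrm and friends are defined in
// X86InstrSSE.td): packed instructions fold loads through memop rather than
// plain 'load', so an unaligned access is only matched when the subtarget
// permits it:
//   [(set VR128:$dst, (X86fand VR128:$src1, (memopv4f32 addr:$src2)))]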

def memopfsf32 : PatFrag<(ops node:$ptr), (f32 (memop node:$ptr))>;
def memopfsf64 : PatFrag<(ops node:$ptr), (f64 (memop node:$ptr))>;

// 128-bit memop pattern fragments
def memopv4f32 : PatFrag<(ops node:$ptr), (v4f32 (memop node:$ptr))>;
def memopv2f64 : PatFrag<(ops node:$ptr), (v2f64 (memop node:$ptr))>;
def memopv4i32 : PatFrag<(ops node:$ptr), (v4i32 (memop node:$ptr))>;
def memopv2i64 : PatFrag<(ops node:$ptr), (v2i64 (memop node:$ptr))>;
def memopv8i16 : PatFrag<(ops node:$ptr), (v8i16 (memop node:$ptr))>;
def memopv16i8 : PatFrag<(ops node:$ptr), (v16i8 (memop node:$ptr))>;

// 256-bit memop pattern fragments
def memopv32i8 : PatFrag<(ops node:$ptr), (v32i8 (memop node:$ptr))>;
def memopv8f32 : PatFrag<(ops node:$ptr), (v8f32 (memop node:$ptr))>;
def memopv4f64 : PatFrag<(ops node:$ptr), (v4f64 (memop node:$ptr))>;
def memopv4i64 : PatFrag<(ops node:$ptr), (v4i64 (memop node:$ptr))>;
def memopv8i32 : PatFrag<(ops node:$ptr), (v8i32 (memop node:$ptr))>;

// SSSE3 uses MMX registers for some instructions. They aren't aligned on a
// 16-byte boundary.
// FIXME: 8 byte alignment for mmx reads is not required
def memop64 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return cast<LoadSDNode>(N)->getAlignment() >= 8;
}]>;

def memopmmx : PatFrag<(ops node:$ptr), (x86mmx (memop64 node:$ptr))>;

// Like 'store', but requires the non-temporal bit to be set.
def nontemporalstore : PatFrag<(ops node:$val, node:$ptr),
                               (st node:$val, node:$ptr), [{
  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
    return ST->isNonTemporal();
  return false;
}]>;

def alignednontemporalstore : PatFrag<(ops node:$val, node:$ptr),
                                      (st node:$val, node:$ptr), [{
  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
    return ST->isNonTemporal() && !ST->isTruncatingStore() &&
           ST->getAddressingMode() == ISD::UNINDEXED &&
           ST->getAlignment() >= 16;
  return false;
}]>;

def unalignednontemporalstore : PatFrag<(ops node:$val, node:$ptr),
                                        (st node:$val, node:$ptr), [{
  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
    return ST->isNonTemporal() &&
           ST->getAlignment() < 16;
  return false;
}]>;
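
// Illustrative use (a sketch; MOVNTPSmr is defined in X86InstrSSE.td): the
// MOVNTPS store pattern should match only aligned non-temporal stores:
//   [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)]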

// 128-bit bitconvert pattern fragments
def bc_v4f32 : PatFrag<(ops node:$in), (v4f32 (bitconvert node:$in))>;
def bc_v2f64 : PatFrag<(ops node:$in), (v2f64 (bitconvert node:$in))>;
def bc_v16i8 : PatFrag<(ops node:$in), (v16i8 (bitconvert node:$in))>;
def bc_v8i16 : PatFrag<(ops node:$in), (v8i16 (bitconvert node:$in))>;
def bc_v4i32 : PatFrag<(ops node:$in), (v4i32 (bitconvert node:$in))>;
def bc_v2i64 : PatFrag<(ops node:$in), (v2i64 (bitconvert node:$in))>;

// 256-bit bitconvert pattern fragments
def bc_v8i32 : PatFrag<(ops node:$in), (v8i32 (bitconvert node:$in))>;
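
// Illustrative use (a sketch): integer patterns often reinterpret one
// canonically-typed load through a bitconvert fragment, e.g.
//   (bc_v16i8 (memopv2i64 addr:$src))
// so a single v2i64 load fragment serves every element width.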

def vzmovl_v2i64 : PatFrag<(ops node:$src),
                           (bitconvert (v2i64 (X86vzmovl
                             (v2i64 (scalar_to_vector (loadi64 node:$src))))))>;
def vzmovl_v4i32 : PatFrag<(ops node:$src),
                           (bitconvert (v4i32 (X86vzmovl
                             (v4i32 (scalar_to_vector (loadi32 node:$src))))))>;

def vzload_v2i64 : PatFrag<(ops node:$src),
                           (bitconvert (v2i64 (X86vzload node:$src)))>;
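
// Illustrative use (a sketch; MOVZQI2PQIrm is defined in X86InstrSSE.td):
//   def : Pat<(v2i64 (X86vzload addr:$src)), (MOVZQI2PQIrm addr:$src)>;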

def fp32imm0 : PatLeaf<(f32 fpimm), [{
  return N->isExactlyValue(+0.0);
}]>;

// BYTE_imm - Transform bit immediates into byte immediates.
def BYTE_imm : SDNodeXForm<imm, [{
  // Transformation function: imm >> 3
  return getI32Imm(N->getZExtValue() >> 3);
}]>;
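
// Illustrative use (a sketch; PSLLDQri is defined in X86InstrSSE.td): the
// psll/psrl-dq intrinsics take a shift count in bits, while the instruction
// immediate is in bytes, so a pattern can convert with this xform:
//   def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
//             (PSLLDQri VR128:$src1, (BYTE_imm imm:$src2))>;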

// SHUFFLE_get_shuf_imm xform function: convert vector_shuffle mask to PSHUF*,
// SHUFP* etc. imm.
def SHUFFLE_get_shuf_imm : SDNodeXForm<vector_shuffle, [{
  return getI8Imm(X86::getShuffleSHUFImmediate(N));
}]>;

// SHUFFLE_get_pshufhw_imm xform function: convert vector_shuffle mask to
// PSHUFHW imm.
def SHUFFLE_get_pshufhw_imm : SDNodeXForm<vector_shuffle, [{
  return getI8Imm(X86::getShufflePSHUFHWImmediate(N));
}]>;

// SHUFFLE_get_pshuflw_imm xform function: convert vector_shuffle mask to
// PSHUFLW imm.
def SHUFFLE_get_pshuflw_imm : SDNodeXForm<vector_shuffle, [{
  return getI8Imm(X86::getShufflePSHUFLWImmediate(N));
}]>;

// SHUFFLE_get_palign_imm xform function: convert vector_shuffle mask to
// a PALIGNR imm.
def SHUFFLE_get_palign_imm : SDNodeXForm<vector_shuffle, [{
  return getI8Imm(X86::getShufflePALIGNRImmediate(N));
}]>;

def splat_lo : PatFrag<(ops node:$lhs, node:$rhs),
                       (vector_shuffle node:$lhs, node:$rhs), [{
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
  return SVOp->isSplat() && SVOp->getSplatIndex() == 0;
}]>;

def movddup : PatFrag<(ops node:$lhs, node:$rhs),
                      (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVDDUPMask(cast<ShuffleVectorSDNode>(N));
}]>;

def movhlps : PatFrag<(ops node:$lhs, node:$rhs),
                      (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVHLPSMask(cast<ShuffleVectorSDNode>(N));
}]>;

def movhlps_undef : PatFrag<(ops node:$lhs, node:$rhs),
                            (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVHLPS_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
}]>;

def movlhps : PatFrag<(ops node:$lhs, node:$rhs),
                      (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVLHPSMask(cast<ShuffleVectorSDNode>(N));
}]>;

def movlp : PatFrag<(ops node:$lhs, node:$rhs),
                    (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVLPMask(cast<ShuffleVectorSDNode>(N));
}]>;

def movl : PatFrag<(ops node:$lhs, node:$rhs),
                   (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVLMask(cast<ShuffleVectorSDNode>(N));
}]>;

def movshdup : PatFrag<(ops node:$lhs, node:$rhs),
                       (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVSHDUPMask(cast<ShuffleVectorSDNode>(N));
}]>;

def movsldup : PatFrag<(ops node:$lhs, node:$rhs),
                       (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVSLDUPMask(cast<ShuffleVectorSDNode>(N));
}]>;

def unpckl : PatFrag<(ops node:$lhs, node:$rhs),
                     (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isUNPCKLMask(cast<ShuffleVectorSDNode>(N));
}]>;

def unpckh : PatFrag<(ops node:$lhs, node:$rhs),
                     (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isUNPCKHMask(cast<ShuffleVectorSDNode>(N));
}]>;

def unpckl_undef : PatFrag<(ops node:$lhs, node:$rhs),
                           (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isUNPCKL_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
}]>;

def unpckh_undef : PatFrag<(ops node:$lhs, node:$rhs),
                           (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isUNPCKH_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
}]>;

def pshufd : PatFrag<(ops node:$lhs, node:$rhs),
                     (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isPSHUFDMask(cast<ShuffleVectorSDNode>(N));
}], SHUFFLE_get_shuf_imm>;
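
// Illustrative use (a sketch; PSHUFDri is defined in X86InstrSSE.td):
// tagging the fragment binds the shuffle node, and the attached xform
// (SHUFFLE_get_shuf_imm) emits its mask as the $src2 immediate:
//   [(set VR128:$dst, (v4i32 (pshufd:$src2 VR128:$src1, (undef))))]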

def shufp : PatFrag<(ops node:$lhs, node:$rhs),
                    (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isSHUFPMask(cast<ShuffleVectorSDNode>(N));
}], SHUFFLE_get_shuf_imm>;

def pshufhw : PatFrag<(ops node:$lhs, node:$rhs),
                      (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isPSHUFHWMask(cast<ShuffleVectorSDNode>(N));
}], SHUFFLE_get_pshufhw_imm>;

def pshuflw : PatFrag<(ops node:$lhs, node:$rhs),
                      (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isPSHUFLWMask(cast<ShuffleVectorSDNode>(N));
}], SHUFFLE_get_pshuflw_imm>;

def palign : PatFrag<(ops node:$lhs, node:$rhs),
                     (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isPALIGNRMask(cast<ShuffleVectorSDNode>(N));
}], SHUFFLE_get_palign_imm>;