//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "X86ISelLowering.h"
#include "Utils/X86ShuffleDecode.h"
#include "X86CallingConv.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86TargetMachine.h"
#include "X86TargetObjectFile.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/VariadicFunction.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
#include "X86IntrinsicsInfo.h"

using namespace llvm;

#define DEBUG_TYPE "x86-isel"

STATISTIC(NumTailCalls, "Number of tail calls");

static cl::opt<bool> ExperimentalVectorWideningLegalization(
    "x86-experimental-vector-widening-legalization", cl::init(false),
    cl::desc("Enable an experimental vector type legalization through widening "
             "rather than promotion."),
    cl::Hidden);

static cl::opt<bool> ExperimentalVectorShuffleLowering(
    "x86-experimental-vector-shuffle-lowering", cl::init(true),
    cl::desc("Enable an experimental vector shuffle lowering code path."),
    cl::Hidden);

static cl::opt<bool> ExperimentalVectorShuffleLegality(
    "x86-experimental-vector-shuffle-legality", cl::init(false),
    cl::desc("Enable experimental shuffle legality based on the experimental "
             "shuffle lowering. Should only be used with the experimental "
             "shuffle lowering."),
    cl::Hidden);

static cl::opt<int> ReciprocalEstimateRefinementSteps(
    "x86-recip-refinement-steps", cl::init(1),
    cl::desc("Specify the number of Newton-Raphson iterations applied to the "
             "result of the hardware reciprocal estimate instruction."),
    cl::NotHidden);

// Forward declarations.
static SDValue getMOVL(SelectionDAG &DAG, SDLoc dl, EVT VT, SDValue V1,
                       SDValue V2);

static SDValue ExtractSubVector(SDValue Vec, unsigned IdxVal,
                                SelectionDAG &DAG, SDLoc dl,
                                unsigned vectorWidth) {
  assert((vectorWidth == 128 || vectorWidth == 256) &&
         "Unsupported vector width");
  EVT VT = Vec.getValueType();
  EVT ElVT = VT.getVectorElementType();
  unsigned Factor = VT.getSizeInBits() / vectorWidth;
  EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT,
                                  VT.getVectorNumElements() / Factor);

  // Extract from UNDEF is UNDEF.
  if (Vec.getOpcode() == ISD::UNDEF)
    return DAG.getUNDEF(ResultVT);

  // Extract the relevant vectorWidth bits. Generate an EXTRACT_SUBVECTOR.
  unsigned ElemsPerChunk = vectorWidth / ElVT.getSizeInBits();

  // This is the index of the first element of the vectorWidth-bit chunk
  // we want.
  unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits()) / vectorWidth)
                               * ElemsPerChunk);

  // If the input is a buildvector just emit a smaller one.
  if (Vec.getOpcode() == ISD::BUILD_VECTOR)
    return DAG.getNode(ISD::BUILD_VECTOR, dl, ResultVT,
                       makeArrayRef(Vec->op_begin() + NormalizedIdxVal,
                                    ElemsPerChunk));

  SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal);
  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec, VecIdx);
}
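
// For example, extracting 128 bits from a v8i32 source with IdxVal == 5 gives
// ElemsPerChunk == 4 and NormalizedIdxVal == 4, i.e. the extract is aligned
// down to the 128-bit chunk that contains element 5.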

/// Generate a DAG to grab 128-bits from a vector > 128 bits. This
/// sets things up to match to an AVX VEXTRACTF128 / VEXTRACTI128
/// or AVX-512 VEXTRACTF32x4 / VEXTRACTI32x4
/// instructions or a simple subregister reference. Idx is an index in the
/// 128 bits we want. It need not be aligned to a 128-bit boundary. That makes
/// lowering EXTRACT_VECTOR_ELT operations easier.
static SDValue Extract128BitVector(SDValue Vec, unsigned IdxVal,
                                   SelectionDAG &DAG, SDLoc dl) {
  assert((Vec.getValueType().is256BitVector() ||
          Vec.getValueType().is512BitVector()) && "Unexpected vector size!");
  return ExtractSubVector(Vec, IdxVal, DAG, dl, 128);
}

/// Generate a DAG to grab 256-bits from a 512-bit vector.
static SDValue Extract256BitVector(SDValue Vec, unsigned IdxVal,
                                   SelectionDAG &DAG, SDLoc dl) {
  assert(Vec.getValueType().is512BitVector() && "Unexpected vector size!");
  return ExtractSubVector(Vec, IdxVal, DAG, dl, 256);
}

static SDValue InsertSubVector(SDValue Result, SDValue Vec,
                               unsigned IdxVal, SelectionDAG &DAG,
                               SDLoc dl, unsigned vectorWidth) {
  assert((vectorWidth == 128 || vectorWidth == 256) &&
         "Unsupported vector width");
  // Inserting UNDEF is Result.
  if (Vec.getOpcode() == ISD::UNDEF)
    return Result;
  EVT VT = Vec.getValueType();
  EVT ElVT = VT.getVectorElementType();
  EVT ResultVT = Result.getValueType();

  // Insert the relevant vectorWidth bits.
  unsigned ElemsPerChunk = vectorWidth / ElVT.getSizeInBits();

  // This is the index of the first element of the vectorWidth-bit chunk
  // we want.
  unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits()) / vectorWidth)
                               * ElemsPerChunk);

  SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal);
  return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec, VecIdx);
}

/// Generate a DAG to put 128-bits into a vector > 128 bits. This
/// sets things up to match to an AVX VINSERTF128/VINSERTI128 or
/// AVX-512 VINSERTF32x4/VINSERTI32x4 instructions or a
/// simple superregister reference. Idx is an index in the 128 bits
/// we want. It need not be aligned to a 128-bit boundary. That makes
/// lowering INSERT_VECTOR_ELT operations easier.
static SDValue Insert128BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
                                  SelectionDAG &DAG, SDLoc dl) {
  assert(Vec.getValueType().is128BitVector() && "Unexpected vector size!");
  return InsertSubVector(Result, Vec, IdxVal, DAG, dl, 128);
}

static SDValue Insert256BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
                                  SelectionDAG &DAG, SDLoc dl) {
  assert(Vec.getValueType().is256BitVector() && "Unexpected vector size!");
  return InsertSubVector(Result, Vec, IdxVal, DAG, dl, 256);
}

/// Concat two 128-bit vectors into a 256-bit vector using VINSERTF128
/// instructions. This is used because creating CONCAT_VECTOR nodes of
/// BUILD_VECTORS returns a larger BUILD_VECTOR while we're trying to lower
/// large BUILD_VECTORS.
static SDValue Concat128BitVectors(SDValue V1, SDValue V2, EVT VT,
                                   unsigned NumElems, SelectionDAG &DAG,
                                   SDLoc dl) {
  SDValue V = Insert128BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl);
  return Insert128BitVector(V, V2, NumElems / 2, DAG, dl);
}

static SDValue Concat256BitVectors(SDValue V1, SDValue V2, EVT VT,
                                   unsigned NumElems, SelectionDAG &DAG,
                                   SDLoc dl) {
  SDValue V = Insert256BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl);
  return Insert256BitVector(V, V2, NumElems / 2, DAG, dl);
}
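
// For example, Concat128BitVectors(V1, V2, MVT::v8i32, 8, DAG, dl) builds a
// v8i32 value by inserting the two v4i32 halves V1 and V2 at element indices
// 0 and 4 of an UNDEF v8i32.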

// FIXME: This should stop caching the target machine as soon as
// we can remove resetOperationActions et al.
X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM)
    : TargetLowering(TM) {
  Subtarget = &TM.getSubtarget<X86Subtarget>();
  X86ScalarSSEf64 = Subtarget->hasSSE2();
  X86ScalarSSEf32 = Subtarget->hasSSE1();
  TD = getDataLayout();

  resetOperationActions();
}

void X86TargetLowering::resetOperationActions() {
  const TargetMachine &TM = getTargetMachine();
  static bool FirstTimeThrough = true;

  // If none of the target options have changed, then we don't need to reset
  // the operation actions.
  if (!FirstTimeThrough && TO == TM.Options) return;

  if (!FirstTimeThrough) {
    // Reinitialize the actions.
    initActions();
    FirstTimeThrough = false;
  }

  TO = TM.Options;

  // Set up the TargetLowering object.
  static const MVT IntVTs[] = { MVT::i8, MVT::i16, MVT::i32, MVT::i64 };

  // X86 is weird. It always uses i8 for shift amounts and setcc results.
  setBooleanContents(ZeroOrOneBooleanContent);
  // X86-SSE is even stranger. It uses -1 or 0 for vector masks.
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  // For 64-bit, since we have so many registers, use the ILP scheduler.
  // For 32-bit, use the register pressure specific scheduling.
  // For Atom, always use ILP scheduling.
  if (Subtarget->isAtom())
    setSchedulingPreference(Sched::ILP);
  else if (Subtarget->is64Bit())
    setSchedulingPreference(Sched::ILP);
  else
    setSchedulingPreference(Sched::RegPressure);
  const X86RegisterInfo *RegInfo =
      TM.getSubtarget<X86Subtarget>().getRegisterInfo();
  setStackPointerRegisterToSaveRestore(RegInfo->getStackRegister());

  // Bypass expensive divides on Atom when compiling with O2.
  if (TM.getOptLevel() >= CodeGenOpt::Default) {
    if (Subtarget->hasSlowDivide32())
      addBypassSlowDiv(32, 8);
    if (Subtarget->hasSlowDivide64() && Subtarget->is64Bit())
      addBypassSlowDiv(64, 16);
  }

  if (Subtarget->isTargetKnownWindowsMSVC()) {
    // Setup Windows compiler runtime calls.
    setLibcallName(RTLIB::SDIV_I64, "_alldiv");
    setLibcallName(RTLIB::UDIV_I64, "_aulldiv");
    setLibcallName(RTLIB::SREM_I64, "_allrem");
    setLibcallName(RTLIB::UREM_I64, "_aullrem");
    setLibcallName(RTLIB::MUL_I64, "_allmul");
    setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::SREM_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::UREM_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::X86_StdCall);

    // The _ftol2 runtime function has an unusual calling conv, which
    // is modeled by a special pseudo-instruction.
    setLibcallName(RTLIB::FPTOUINT_F64_I64, nullptr);
    setLibcallName(RTLIB::FPTOUINT_F32_I64, nullptr);
    setLibcallName(RTLIB::FPTOUINT_F64_I32, nullptr);
    setLibcallName(RTLIB::FPTOUINT_F32_I32, nullptr);
  }

  if (Subtarget->isTargetDarwin()) {
    // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
    setUseUnderscoreSetJmp(false);
    setUseUnderscoreLongJmp(false);
  } else if (Subtarget->isTargetWindowsGNU()) {
    // The MS runtime is weird: it exports _setjmp, but plain longjmp!
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(false);
  } else {
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(true);
  }

  // Set up the register classes.
  addRegisterClass(MVT::i8, &X86::GR8RegClass);
  addRegisterClass(MVT::i16, &X86::GR16RegClass);
  addRegisterClass(MVT::i32, &X86::GR32RegClass);
  if (Subtarget->is64Bit())
    addRegisterClass(MVT::i64, &X86::GR64RegClass);

  for (MVT VT : MVT::integer_valuetypes())
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);

  // We don't accept any truncstore of integer registers.
  setTruncStoreAction(MVT::i64, MVT::i32, Expand);
  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8, Expand);
  setTruncStoreAction(MVT::i32, MVT::i16, Expand);
  setTruncStoreAction(MVT::i32, MVT::i8, Expand);
  setTruncStoreAction(MVT::i16, MVT::i8, Expand);

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // SETOEQ and SETUNE require checking two conditions.
  setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOEQ, MVT::f80, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f80, Expand);

  // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
  // operation.
  setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
  } else if (!TM.Options.UseSoftFloat) {
    // We have an algorithm for SSE2->double, and we turn this into a
    // 64-bit FILD followed by conditional FADD for other targets.
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
    // We have an algorithm for SSE2, and we turn this into a 64-bit
    // FILD for other targets.
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  }

  // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
  setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote);

  if (!TM.Options.UseSoftFloat) {
    // SSE has no i16 to fp conversion, only i32.
    if (X86ScalarSSEf32) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
      // f32 and f64 cases are Legal, f80 case is not.
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    } else {
      setOperationAction(ISD::SINT_TO_FP, MVT::i16, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    }
  } else {
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Promote);
  }

  // In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64
  // are Legal, f80 is custom lowered.
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);

  // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINTs, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote);
  setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);

  if (X86ScalarSSEf32) {
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
    // f32 and f64 cases are Legal, f80 case is not.
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  } else {
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  }

  // Handle FP_TO_UINT by promoting the destination to a larger signed
  // conversion.
  setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);
  } else if (!TM.Options.UseSoftFloat) {
    // Since AVX is a superset of SSE3, only check for SSE here.
    if (Subtarget->hasSSE1() && !Subtarget->hasSSE3())
      // Expand FP_TO_UINT into a select.
      // FIXME: We would like to use a Custom expander here eventually to do
      // the optimal thing for SSE vs. the default expansion in the legalizer.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
    else
      // With SSE3 we can use fisttpll to convert to a signed i64; without
      // SSE, we're stuck with a fistpll.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
  }

  if (isTargetFTOL()) {
    // Use the _ftol2 runtime function, which has a pseudo-instruction
    // to handle its weird calling convention.
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
  }

  // TODO: when we have SSE, these could be more efficient, by using movd/movq.
  if (!X86ScalarSSEf64) {
    setOperationAction(ISD::BITCAST, MVT::f32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i32, Expand);
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::BITCAST, MVT::f64, Expand);
      // Without SSE, i64->f64 goes through memory.
      setOperationAction(ISD::BITCAST, MVT::i64, Expand);
    }
  }

  // Scalar integer divide and remainder are lowered to use operations that
  // produce two results, to match the available instructions. This exposes
  // the two-result form to trivial CSE, which is able to combine x/y and x%y
  // into a single instruction.
  //
  // Scalar integer multiply-high is also lowered to use two-result
  // operations, to match the available instructions. However, plain multiply
  // (low) operations are left as Legal, as there are single-result
  // instructions for this in x86. Using the two-result multiply instructions
  // when both high and low results are needed must be arranged by dagcombine.
  for (unsigned i = 0; i != array_lengthof(IntVTs); ++i) {
    MVT VT = IntVTs[i];
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);

    // Add/Sub overflow ops with MVT::Glues are lowered to EFLAGS dependences.
    setOperationAction(ISD::ADDC, VT, Custom);
    setOperationAction(ISD::ADDE, VT, Custom);
    setOperationAction(ISD::SUBC, VT, Custom);
    setOperationAction(ISD::SUBE, VT, Custom);
  }
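
  // For example, IR such as:
  //   %q = sdiv i32 %x, %y
  //   %r = srem i32 %x, %y
  // is legalized into two ISD::SDIVREM nodes that CSE merges, so a single
  // IDIV supplies both the quotient and the remainder.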

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::BR_CC, MVT::f32, Expand);
  setOperationAction(ISD::BR_CC, MVT::f64, Expand);
  setOperationAction(ISD::BR_CC, MVT::f80, Expand);
  setOperationAction(ISD::BR_CC, MVT::i8, Expand);
  setOperationAction(ISD::BR_CC, MVT::i16, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Expand);
  setOperationAction(ISD::BR_CC, MVT::i64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f80, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i8, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i16, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  setOperationAction(ISD::FP_ROUND_INREG, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f80, Expand);
  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  // Promote the i8 variants and force them on up to i32, which has a shorter
  // encoding.
  setOperationAction(ISD::CTTZ, MVT::i8, Promote);
  AddPromotedToType(ISD::CTTZ, MVT::i8, MVT::i32);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i8, Promote);
  AddPromotedToType(ISD::CTTZ_ZERO_UNDEF, MVT::i8, MVT::i32);
  if (Subtarget->hasBMI()) {
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Expand);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
  } else {
    setOperationAction(ISD::CTTZ, MVT::i16, Custom);
    setOperationAction(ISD::CTTZ, MVT::i32, Custom);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTTZ, MVT::i64, Custom);
  }

  if (Subtarget->hasLZCNT()) {
    // When promoting the i8 variants, force them to i32 for a shorter
    // encoding.
    setOperationAction(ISD::CTLZ, MVT::i8, Promote);
    AddPromotedToType(ISD::CTLZ, MVT::i8, MVT::i32);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8, Promote);
    AddPromotedToType(ISD::CTLZ_ZERO_UNDEF, MVT::i8, MVT::i32);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Expand);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);
  } else {
    setOperationAction(ISD::CTLZ, MVT::i8, Custom);
    setOperationAction(ISD::CTLZ, MVT::i16, Custom);
    setOperationAction(ISD::CTLZ, MVT::i32, Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8, Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::CTLZ, MVT::i64, Custom);
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom);
    }
  }

  // Special handling for half-precision floating point conversions.
  // If we don't have F16C support, then lower half float conversions
  // into library calls.
  if (TM.Options.UseSoftFloat || !Subtarget->hasF16C()) {
    setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
    setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
  }

  // There's never any support for operations beyond MVT::f32.
  setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
  setOperationAction(ISD::FP16_TO_FP, MVT::f80, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f80, Expand);

  setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f80, MVT::f16, Expand);
  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  setTruncStoreAction(MVT::f80, MVT::f16, Expand);
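
  // Without F16C, Expand here means the f16<->f32 conversions become runtime
  // library calls (typically compiler-rt's __gnu_h2f_ieee / __gnu_f2h_ieee
  // helpers); with F16C they can use VCVTPH2PS / VCVTPS2PH instead.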

  if (Subtarget->hasPOPCNT()) {
    setOperationAction(ISD::CTPOP, MVT::i8, Promote);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i8, Expand);
    setOperationAction(ISD::CTPOP, MVT::i16, Expand);
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  }

  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);

  if (!Subtarget->hasMOVBE())
    setOperationAction(ISD::BSWAP, MVT::i16, Expand);

  // These should be promoted to a larger select which is supported.
  setOperationAction(ISD::SELECT, MVT::i1, Promote);
  // X86 wants to expand cmov itself.
  setOperationAction(ISD::SELECT, MVT::i8, Custom);
  setOperationAction(ISD::SELECT, MVT::i16, Custom);
  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Custom);
  setOperationAction(ISD::SELECT, MVT::f80, Custom);
  setOperationAction(ISD::SETCC, MVT::i8, Custom);
  setOperationAction(ISD::SETCC, MVT::i16, Custom);
  setOperationAction(ISD::SETCC, MVT::i32, Custom);
  setOperationAction(ISD::SETCC, MVT::f32, Custom);
  setOperationAction(ISD::SETCC, MVT::f64, Custom);
  setOperationAction(ISD::SETCC, MVT::f80, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SELECT, MVT::i64, Custom);
    setOperationAction(ISD::SETCC, MVT::i64, Custom);
  }
  setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);
  // NOTE: EH_SJLJ_SETJMP/_LONGJMP as supported here is NOT intended to support
  // SjLj exception handling, but rather a light-weight setjmp/longjmp
  // replacement to support continuation, user-level threading, etc. As a
  // result, no other SjLj exception interfaces are implemented; please don't
  // build your own exception handling based on them.
  // LLVM/Clang supports zero-cost DWARF exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);

  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::ExternalSymbol, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
    setOperationAction(ISD::JumpTable, MVT::i64, Custom);
    setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
    setOperationAction(ISD::ExternalSymbol, MVT::i64, Custom);
    setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
  }
  // 64-bit add, sub, shl, sra, srl (iff 32-bit x86).
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  }

  if (Subtarget->hasSSE1())
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);

  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);

  // Expand certain atomics.
  for (unsigned i = 0; i != array_lengthof(IntVTs); ++i) {
    MVT VT = IntVTs[i];
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);
    setOperationAction(ISD::ATOMIC_STORE, VT, Custom);
  }

  if (Subtarget->hasCmpxchg16b()) {
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i128, Custom);
  }

  // FIXME - use subtarget debug flags.
  if (!Subtarget->isTargetDarwin() && !Subtarget->isTargetELF() &&
      !Subtarget->isTargetCygMing() && !Subtarget->isTargetWin64()) {
    setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
  }

  if (Subtarget->is64Bit()) {
    setExceptionPointerRegister(X86::RAX);
    setExceptionSelectorRegister(X86::RDX);
  } else {
    setExceptionPointerRegister(X86::EAX);
    setExceptionSelectorRegister(X86::EDX);
  }
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i64, Custom);

  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  if (Subtarget->is64Bit() && !Subtarget->isTargetWin64()) {
    // TargetInfo::X86_64ABIBuiltinVaList
    setOperationAction(ISD::VAARG, MVT::Other, Custom);
    setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  } else {
    // TargetInfo::CharPtrBuiltinVaList
    setOperationAction(ISD::VAARG, MVT::Other, Expand);
    setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  }

  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  setOperationAction(ISD::DYNAMIC_STACKALLOC, getPointerTy(), Custom);

  if (!TM.Options.UseSoftFloat && X86ScalarSSEf64) {
    // f32 and f64 use SSE.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, &X86::FR32RegClass);
    addRegisterClass(MVT::f64, &X86::FR64RegClass);

    // Use ANDPD to simulate FABS.
    setOperationAction(ISD::FABS, MVT::f64, Custom);
    setOperationAction(ISD::FABS, MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG, MVT::f64, Custom);
    setOperationAction(ISD::FNEG, MVT::f32, Custom);

    // Use ANDPD and ORPD to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // Lower this to FGETSIGNx86 plus an AND.
    setOperationAction(ISD::FGETSIGN, MVT::i64, Custom);
    setOperationAction(ISD::FGETSIGN, MVT::i32, Custom);

    // We don't support sin/cos/fmod.
    setOperationAction(ISD::FSIN, MVT::f64, Expand);
    setOperationAction(ISD::FCOS, MVT::f64, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
    setOperationAction(ISD::FSIN, MVT::f32, Expand);
    setOperationAction(ISD::FCOS, MVT::f32, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f32, Expand);

    // Expand FP immediates into loads from the stack, except for the special
    // cases we handle.
    addLegalFPImmediate(APFloat(+0.0));  // xorpd
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
  } else if (!TM.Options.UseSoftFloat && X86ScalarSSEf32) {
    // Use SSE for f32, x87 for f64.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, &X86::FR32RegClass);
    addRegisterClass(MVT::f64, &X86::RFP64RegClass);

    // Use ANDPS to simulate FABS.
    setOperationAction(ISD::FABS, MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG, MVT::f32, Custom);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);

    // Use ANDPS and ORPS to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod.
    setOperationAction(ISD::FSIN, MVT::f32, Expand);
    setOperationAction(ISD::FCOS, MVT::f32, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f32, Expand);

    // Special cases we handle for FP constants.
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
    addLegalFPImmediate(APFloat(+0.0));  // FLD0
    addLegalFPImmediate(APFloat(+1.0));  // FLD1
    addLegalFPImmediate(APFloat(-0.0));  // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0));  // FLD1/FCHS

    if (!TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
    }
  } else if (!TM.Options.UseSoftFloat) {
    // f32 and f64 in x87.
    // Set up the FP register classes.
    addRegisterClass(MVT::f64, &X86::RFP64RegClass);
    addRegisterClass(MVT::f32, &X86::RFP32RegClass);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);
    setOperationAction(ISD::UNDEF, MVT::f32, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

    if (!TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FSIN, MVT::f32, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f32, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
    }
    addLegalFPImmediate(APFloat(+0.0));  // FLD0
    addLegalFPImmediate(APFloat(+1.0));  // FLD1
    addLegalFPImmediate(APFloat(-0.0));  // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0));  // FLD1/FCHS
    addLegalFPImmediate(APFloat(+0.0f)); // FLD0
    addLegalFPImmediate(APFloat(+1.0f)); // FLD1
    addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
  }

  // We don't support FMA.
  setOperationAction(ISD::FMA, MVT::f64, Expand);
  setOperationAction(ISD::FMA, MVT::f32, Expand);

  // Long double always uses X87.
  if (!TM.Options.UseSoftFloat) {
    addRegisterClass(MVT::f80, &X86::RFP80RegClass);
    setOperationAction(ISD::UNDEF, MVT::f80, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
    {
      APFloat TmpFlt = APFloat::getZero(APFloat::x87DoubleExtended);
      addLegalFPImmediate(TmpFlt); // FLD0
      TmpFlt.changeSign();
      addLegalFPImmediate(TmpFlt); // FLD0/FCHS

      bool ignored;
      APFloat TmpFlt2(+1.0);
      TmpFlt2.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven,
                      &ignored);
      addLegalFPImmediate(TmpFlt2); // FLD1
      TmpFlt2.changeSign();
      addLegalFPImmediate(TmpFlt2); // FLD1/FCHS
    }

    if (!TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f80, Expand);
      setOperationAction(ISD::FCOS, MVT::f80, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f80, Expand);
    }

    setOperationAction(ISD::FFLOOR, MVT::f80, Expand);
    setOperationAction(ISD::FCEIL, MVT::f80, Expand);
    setOperationAction(ISD::FTRUNC, MVT::f80, Expand);
    setOperationAction(ISD::FRINT, MVT::f80, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::f80, Expand);
    setOperationAction(ISD::FMA, MVT::f80, Expand);
  }

  // Always use a library call for pow.
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f80, Expand);

  setOperationAction(ISD::FLOG, MVT::f80, Expand);
  setOperationAction(ISD::FLOG2, MVT::f80, Expand);
  setOperationAction(ISD::FLOG10, MVT::f80, Expand);
  setOperationAction(ISD::FEXP, MVT::f80, Expand);
  setOperationAction(ISD::FEXP2, MVT::f80, Expand);
  setOperationAction(ISD::FMINNUM, MVT::f80, Expand);
  setOperationAction(ISD::FMAXNUM, MVT::f80, Expand);

  // First set operation action for all vector types to either promote
  // (for widening) or expand (for scalarization). Then we will selectively
  // turn on ones that can be effectively codegen'd.
  for (MVT VT : MVT::vector_valuetypes()) {
    setOperationAction(ISD::ADD, VT, Expand);
    setOperationAction(ISD::SUB, VT, Expand);
    setOperationAction(ISD::FADD, VT, Expand);
    setOperationAction(ISD::FNEG, VT, Expand);
    setOperationAction(ISD::FSUB, VT, Expand);
    setOperationAction(ISD::MUL, VT, Expand);
    setOperationAction(ISD::FMUL, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::FDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::LOAD, VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Expand);
    setOperationAction(ISD::INSERT_SUBVECTOR, VT, Expand);
    setOperationAction(ISD::FABS, VT, Expand);
    setOperationAction(ISD::FSIN, VT, Expand);
    setOperationAction(ISD::FSINCOS, VT, Expand);
    setOperationAction(ISD::FCOS, VT, Expand);
    setOperationAction(ISD::FSINCOS, VT, Expand);
    setOperationAction(ISD::FREM, VT, Expand);
    setOperationAction(ISD::FMA, VT, Expand);
    setOperationAction(ISD::FPOWI, VT, Expand);
    setOperationAction(ISD::FSQRT, VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, VT, Expand);
    setOperationAction(ISD::FFLOOR, VT, Expand);
    setOperationAction(ISD::FCEIL, VT, Expand);
    setOperationAction(ISD::FTRUNC, VT, Expand);
    setOperationAction(ISD::FRINT, VT, Expand);
    setOperationAction(ISD::FNEARBYINT, VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::SDIVREM, VT, Expand);
    setOperationAction(ISD::UDIVREM, VT, Expand);
    setOperationAction(ISD::FPOW, VT, Expand);
    setOperationAction(ISD::CTPOP, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
    setOperationAction(ISD::SHL, VT, Expand);
    setOperationAction(ISD::SRA, VT, Expand);
    setOperationAction(ISD::SRL, VT, Expand);
    setOperationAction(ISD::ROTL, VT, Expand);
    setOperationAction(ISD::ROTR, VT, Expand);
    setOperationAction(ISD::BSWAP, VT, Expand);
    setOperationAction(ISD::SETCC, VT, Expand);
    setOperationAction(ISD::FLOG, VT, Expand);
    setOperationAction(ISD::FLOG2, VT, Expand);
    setOperationAction(ISD::FLOG10, VT, Expand);
    setOperationAction(ISD::FEXP, VT, Expand);
    setOperationAction(ISD::FEXP2, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
    setOperationAction(ISD::TRUNCATE, VT, Expand);
    setOperationAction(ISD::SIGN_EXTEND, VT, Expand);
    setOperationAction(ISD::ZERO_EXTEND, VT, Expand);
    setOperationAction(ISD::ANY_EXTEND, VT, Expand);
    setOperationAction(ISD::VSELECT, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
    for (MVT InnerVT : MVT::vector_valuetypes()) {
      setTruncStoreAction(InnerVT, VT, Expand);

      setLoadExtAction(ISD::SEXTLOAD, InnerVT, VT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, InnerVT, VT, Expand);

      // N.b. ISD::EXTLOAD legality is basically ignored except for i1-like
      // types, we have to deal with them whether we ask for Expansion or not.
      // Setting Expand causes its own optimisation problems though, so leave
      // them alone for now.
      if (VT.getVectorElementType() == MVT::i1)
        setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);
    }
  }

  // FIXME: In order to prevent SSE instructions being expanded to MMX ones
  // with -msoft-float, disable use of MMX as well.
  if (!TM.Options.UseSoftFloat && Subtarget->hasMMX()) {
    addRegisterClass(MVT::x86mmx, &X86::VR64RegClass);
    // No operations on x86mmx supported, everything uses intrinsics.
  }

  // MMX-sized vectors (other than x86mmx) are expected to be expanded
  // into smaller operations.
  setOperationAction(ISD::MULHS, MVT::v8i8, Expand);
  setOperationAction(ISD::MULHS, MVT::v4i16, Expand);
  setOperationAction(ISD::MULHS, MVT::v2i32, Expand);
  setOperationAction(ISD::MULHS, MVT::v1i64, Expand);
  setOperationAction(ISD::AND, MVT::v8i8, Expand);
  setOperationAction(ISD::AND, MVT::v4i16, Expand);
  setOperationAction(ISD::AND, MVT::v2i32, Expand);
  setOperationAction(ISD::AND, MVT::v1i64, Expand);
  setOperationAction(ISD::OR, MVT::v8i8, Expand);
  setOperationAction(ISD::OR, MVT::v4i16, Expand);
  setOperationAction(ISD::OR, MVT::v2i32, Expand);
  setOperationAction(ISD::OR, MVT::v1i64, Expand);
  setOperationAction(ISD::XOR, MVT::v8i8, Expand);
  setOperationAction(ISD::XOR, MVT::v4i16, Expand);
  setOperationAction(ISD::XOR, MVT::v2i32, Expand);
  setOperationAction(ISD::XOR, MVT::v1i64, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i8, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i16, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i32, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v1i64, Expand);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v1i64, Expand);
  setOperationAction(ISD::SELECT, MVT::v8i8, Expand);
  setOperationAction(ISD::SELECT, MVT::v4i16, Expand);
  setOperationAction(ISD::SELECT, MVT::v2i32, Expand);
  setOperationAction(ISD::SELECT, MVT::v1i64, Expand);
  setOperationAction(ISD::BITCAST, MVT::v8i8, Expand);
  setOperationAction(ISD::BITCAST, MVT::v4i16, Expand);
  setOperationAction(ISD::BITCAST, MVT::v2i32, Expand);
  setOperationAction(ISD::BITCAST, MVT::v1i64, Expand);

  if (!TM.Options.UseSoftFloat && Subtarget->hasSSE1()) {
    addRegisterClass(MVT::v4f32, &X86::VR128RegClass);

    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEG, MVT::v4f32, Custom);
    setOperationAction(ISD::FABS, MVT::v4f32, Custom);
    setOperationAction(ISD::LOAD, MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::SELECT, MVT::v4f32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Custom);
  }

  if (!TM.Options.UseSoftFloat && Subtarget->hasSSE2()) {
    addRegisterClass(MVT::v2f64, &X86::VR128RegClass);

    // FIXME: Unfortunately, -soft-float and -no-implicit-float mean XMM
    // registers cannot be used even for integer operations.
    addRegisterClass(MVT::v16i8, &X86::VR128RegClass);
    addRegisterClass(MVT::v8i16, &X86::VR128RegClass);
    addRegisterClass(MVT::v4i32, &X86::VR128RegClass);
    addRegisterClass(MVT::v2i64, &X86::VR128RegClass);

    setOperationAction(ISD::ADD, MVT::v16i8, Legal);
    setOperationAction(ISD::ADD, MVT::v8i16, Legal);
    setOperationAction(ISD::ADD, MVT::v4i32, Legal);
    setOperationAction(ISD::ADD, MVT::v2i64, Legal);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v2i64, Custom);
    setOperationAction(ISD::UMUL_LOHI, MVT::v4i32, Custom);
    setOperationAction(ISD::SMUL_LOHI, MVT::v4i32, Custom);
    setOperationAction(ISD::MULHU, MVT::v8i16, Legal);
    setOperationAction(ISD::MULHS, MVT::v8i16, Legal);
    setOperationAction(ISD::SUB, MVT::v16i8, Legal);
    setOperationAction(ISD::SUB, MVT::v8i16, Legal);
    setOperationAction(ISD::SUB, MVT::v4i32, Legal);
    setOperationAction(ISD::SUB, MVT::v2i64, Legal);
    setOperationAction(ISD::MUL, MVT::v8i16, Legal);
    setOperationAction(ISD::FADD, MVT::v2f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v2f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v2f64, Custom);
    setOperationAction(ISD::FABS, MVT::v2f64, Custom);

    setOperationAction(ISD::SETCC, MVT::v2i64, Custom);
    setOperationAction(ISD::SETCC, MVT::v16i8, Custom);
    setOperationAction(ISD::SETCC, MVT::v8i16, Custom);
    setOperationAction(ISD::SETCC, MVT::v4i32, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    // Only provide customized ctpop vector bit twiddling for vector types we
    // know to perform better than using the popcnt instructions on each vector
    // element. If popcnt isn't supported, always provide the custom version.
    if (!Subtarget->hasPOPCNT()) {
      setOperationAction(ISD::CTPOP, MVT::v4i32, Custom);
      setOperationAction(ISD::CTPOP, MVT::v2i64, Custom);
    }

    // Custom lower build_vector, vector_shuffle, and extract_vector_elt.
    for (int i = MVT::v16i8; i != MVT::v2i64; ++i) {
      MVT VT = (MVT::SimpleValueType)i;
      // Do not attempt to custom lower non-power-of-2 vectors.
      if (!isPowerOf2_32(VT.getVectorNumElements()))
        continue;
      // Do not attempt to custom lower non-128-bit vectors.
      if (!VT.is128BitVector())
        continue;
      setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    }

    // We support custom legalizing of sext and anyext loads for specific
    // memory vector types which we can load as a scalar (or sequence of
    // scalars) and extend in-register to a legal 128-bit vector type. For sext
    // loads these must work with a single scalar load.
    for (MVT VT : MVT::integer_vector_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i8, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i16, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v8i8, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i8, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i16, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i32, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i8, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i16, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v8i8, Custom);
    }

    setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);

    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
    }

    // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64.
    for (int i = MVT::v16i8; i != MVT::v2i64; ++i) {
      MVT VT = (MVT::SimpleValueType)i;

      // Do not attempt to promote non-128-bit vectors.
      if (!VT.is128BitVector())
        continue;

      setOperationAction(ISD::AND, VT, Promote);
      AddPromotedToType(ISD::AND, VT, MVT::v2i64);
      setOperationAction(ISD::OR, VT, Promote);
      AddPromotedToType(ISD::OR, VT, MVT::v2i64);
      setOperationAction(ISD::XOR, VT, Promote);
      AddPromotedToType(ISD::XOR, VT, MVT::v2i64);
      setOperationAction(ISD::LOAD, VT, Promote);
      AddPromotedToType(ISD::LOAD, VT, MVT::v2i64);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType(ISD::SELECT, VT, MVT::v2i64);
    }

    // Custom lower v2i64 and v2f64 selects.
    setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
    setOperationAction(ISD::LOAD, MVT::v2i64, Legal);
    setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v2i64, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);

    setOperationAction(ISD::UINT_TO_FP, MVT::v4i8, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
    // As there is no 64-bit GPR available, we need to build a special custom
    // sequence to convert from v2i32 to v2f32.
    if (!Subtarget->is64Bit())
      setOperationAction(ISD::UINT_TO_FP, MVT::v2f32, Custom);

    setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
    setOperationAction(ISD::FP_ROUND, MVT::v2f32, Custom);

    for (MVT VT : MVT::fp_vector_valuetypes())
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2f32, Legal);

    setOperationAction(ISD::BITCAST, MVT::v2i32, Custom);
    setOperationAction(ISD::BITCAST, MVT::v4i16, Custom);
    setOperationAction(ISD::BITCAST, MVT::v8i8, Custom);
  }

  if (!TM.Options.UseSoftFloat && Subtarget->hasSSE41()) {
    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::FRINT, MVT::f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FRINT, MVT::f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FRINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
    setOperationAction(ISD::FRINT, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);

    // FIXME: Do we need to handle scalar-to-vector here?
    setOperationAction(ISD::MUL, MVT::v4i32, Legal);

    setOperationAction(ISD::VSELECT, MVT::v2f64, Custom);
    setOperationAction(ISD::VSELECT, MVT::v2i64, Custom);
    setOperationAction(ISD::VSELECT, MVT::v4i32, Custom);
    setOperationAction(ISD::VSELECT, MVT::v4f32, Custom);
    setOperationAction(ISD::VSELECT, MVT::v8i16, Custom);
    // There is no BLENDI for byte vectors. We don't need to custom lower
    // some vselects for now.
    setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);

    // SSE41 brings specific instructions for doing vector sign extend even in
    // cases where we don't have SRA.
    for (MVT VT : MVT::integer_vector_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i8, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i16, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i32, Custom);
    }

    // SSE41 also has vector sign/zero extending loads, PMOV[SZ]X.
    setLoadExtAction(ISD::SEXTLOAD, MVT::v8i16, MVT::v8i8, Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v4i32, MVT::v4i8, Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v2i64, MVT::v2i8, Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v4i32, MVT::v4i16, Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v2i64, MVT::v2i16, Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v2i64, MVT::v2i32, Legal);

    setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i16, MVT::v8i8, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i32, MVT::v4i8, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i8, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i32, MVT::v4i16, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i16, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i32, Legal);

    // i8 and i16 vectors are custom because the source register and source
    // memory operand types are not the same width. f32 vectors are
    // custom since the immediate controlling the insert encodes additional
    // information.
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);

    // FIXME: these should be Legal, but that's only for the case where
    // the index is constant. For now custom expand to deal with that.
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
    }
  }

  if (Subtarget->hasSSE2()) {
    setOperationAction(ISD::SRL, MVT::v8i16, Custom);
    setOperationAction(ISD::SRL, MVT::v16i8, Custom);

    setOperationAction(ISD::SHL, MVT::v8i16, Custom);
    setOperationAction(ISD::SHL, MVT::v16i8, Custom);

    setOperationAction(ISD::SRA, MVT::v8i16, Custom);
    setOperationAction(ISD::SRA, MVT::v16i8, Custom);

    // In the customized shift lowering, the legal cases in AVX2 will be
    // recognized.
    setOperationAction(ISD::SRL, MVT::v2i64, Custom);
    setOperationAction(ISD::SRL, MVT::v4i32, Custom);

    setOperationAction(ISD::SHL, MVT::v2i64, Custom);
    setOperationAction(ISD::SHL, MVT::v4i32, Custom);

    setOperationAction(ISD::SRA, MVT::v4i32, Custom);
  }

  if (!TM.Options.UseSoftFloat && Subtarget->hasFp256()) {
    addRegisterClass(MVT::v32i8, &X86::VR256RegClass);
    addRegisterClass(MVT::v16i16, &X86::VR256RegClass);
    addRegisterClass(MVT::v8i32, &X86::VR256RegClass);
    addRegisterClass(MVT::v8f32, &X86::VR256RegClass);
    addRegisterClass(MVT::v4i64, &X86::VR256RegClass);
    addRegisterClass(MVT::v4f64, &X86::VR256RegClass);

    setOperationAction(ISD::LOAD, MVT::v8f32, Legal);
    setOperationAction(ISD::LOAD, MVT::v4f64, Legal);
    setOperationAction(ISD::LOAD, MVT::v4i64, Legal);

    setOperationAction(ISD::FADD, MVT::v8f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v8f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v8f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v8f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v8f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v8f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v8f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v8f32, Legal);
    setOperationAction(ISD::FRINT, MVT::v8f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v8f32, Legal);
    setOperationAction(ISD::FNEG, MVT::v8f32, Custom);
    setOperationAction(ISD::FABS, MVT::v8f32, Custom);

    setOperationAction(ISD::FADD, MVT::v4f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v4f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v4f64, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f64, Legal);
    setOperationAction(ISD::FRINT, MVT::v4f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v4f64, Custom);
    setOperationAction(ISD::FABS, MVT::v4f64, Custom);

    // (fp_to_int:v8i16 (v8f32 ..)) requires the result type to be promoted
    // even though v8i16 is a legal type.
    setOperationAction(ISD::FP_TO_SINT, MVT::v8i16, Promote);
    setOperationAction(ISD::FP_TO_UINT, MVT::v8i16, Promote);
    setOperationAction(ISD::FP_TO_SINT, MVT::v8i32, Legal);

    setOperationAction(ISD::SINT_TO_FP, MVT::v8i16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::v8i32, Legal);
    setOperationAction(ISD::FP_ROUND, MVT::v4f32, Legal);

    setOperationAction(ISD::UINT_TO_FP, MVT::v8i8, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v8i16, Custom);

    for (MVT VT : MVT::fp_vector_valuetypes())
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4f32, Legal);

    setOperationAction(ISD::SRL, MVT::v16i16, Custom);
    setOperationAction(ISD::SRL, MVT::v32i8, Custom);

    setOperationAction(ISD::SHL, MVT::v16i16, Custom);
    setOperationAction(ISD::SHL, MVT::v32i8, Custom);

    setOperationAction(ISD::SRA, MVT::v16i16, Custom);
    setOperationAction(ISD::SRA, MVT::v32i8, Custom);

    setOperationAction(ISD::SETCC, MVT::v32i8, Custom);
    setOperationAction(ISD::SETCC, MVT::v16i16, Custom);
    setOperationAction(ISD::SETCC, MVT::v8i32, Custom);
    setOperationAction(ISD::SETCC, MVT::v4i64, Custom);

    setOperationAction(ISD::SELECT, MVT::v4f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v4i64, Custom);
    setOperationAction(ISD::SELECT, MVT::v8f32, Custom);

    setOperationAction(ISD::VSELECT, MVT::v4f64, Custom);
    setOperationAction(ISD::VSELECT, MVT::v4i64, Custom);
    setOperationAction(ISD::VSELECT, MVT::v8i32, Custom);
    setOperationAction(ISD::VSELECT, MVT::v8f32, Custom);

    setOperationAction(ISD::SIGN_EXTEND, MVT::v4i64, Custom);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v8i32, Custom);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v16i16, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v4i64, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v8i32, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v16i16, Custom);
    setOperationAction(ISD::ANY_EXTEND, MVT::v4i64, Custom);
    setOperationAction(ISD::ANY_EXTEND, MVT::v8i32, Custom);
    setOperationAction(ISD::ANY_EXTEND, MVT::v16i16, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v8i16, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i32, Custom);

    if (Subtarget->hasFMA() || Subtarget->hasFMA4()) {
      setOperationAction(ISD::FMA, MVT::v8f32, Legal);
      setOperationAction(ISD::FMA, MVT::v4f64, Legal);
      setOperationAction(ISD::FMA, MVT::v4f32, Legal);
      setOperationAction(ISD::FMA, MVT::v2f64, Legal);
      setOperationAction(ISD::FMA, MVT::f32, Legal);
      setOperationAction(ISD::FMA, MVT::f64, Legal);
    }
1296 if (Subtarget->hasInt256()) {
1297 setOperationAction(ISD::ADD, MVT::v4i64, Legal);
1298 setOperationAction(ISD::ADD, MVT::v8i32, Legal);
1299 setOperationAction(ISD::ADD, MVT::v16i16, Legal);
1300 setOperationAction(ISD::ADD, MVT::v32i8, Legal);
1302 setOperationAction(ISD::SUB, MVT::v4i64, Legal);
1303 setOperationAction(ISD::SUB, MVT::v8i32, Legal);
1304 setOperationAction(ISD::SUB, MVT::v16i16, Legal);
1305 setOperationAction(ISD::SUB, MVT::v32i8, Legal);
1307 setOperationAction(ISD::MUL, MVT::v4i64, Custom);
1308 setOperationAction(ISD::MUL, MVT::v8i32, Legal);
1309 setOperationAction(ISD::MUL, MVT::v16i16, Legal);
1310 // Don't lower v32i8 because there is no 128-bit byte mul
1312 setOperationAction(ISD::UMUL_LOHI, MVT::v8i32, Custom);
1313 setOperationAction(ISD::SMUL_LOHI, MVT::v8i32, Custom);
1314 setOperationAction(ISD::MULHU, MVT::v16i16, Legal);
1315 setOperationAction(ISD::MULHS, MVT::v16i16, Legal);
1317 setOperationAction(ISD::VSELECT, MVT::v16i16, Custom);
1318 setOperationAction(ISD::VSELECT, MVT::v32i8, Legal);
1320 // The custom lowering for UINT_TO_FP for v8i32 becomes interesting
1321 // when we have a 256-bit-wide blend with immediate.
1322 setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Custom);
1324 // Only provide customized ctpop vector bit twiddling for vector types we
1325 // know to perform better than using the popcnt instructions on each
1326 // vector element. If popcnt isn't supported, always provide the custom version.
1328 if (!Subtarget->hasPOPCNT())
1329 setOperationAction(ISD::CTPOP, MVT::v4i64, Custom);
1331 // Custom CTPOP always performs better on natively supported v8i32
1332 setOperationAction(ISD::CTPOP, MVT::v8i32, Custom);
1334 // AVX2 also has wider vector sign/zero extending loads, VPMOV[SZ]X
1335 setLoadExtAction(ISD::SEXTLOAD, MVT::v16i16, MVT::v16i8, Legal);
1336 setLoadExtAction(ISD::SEXTLOAD, MVT::v8i32, MVT::v8i8, Legal);
1337 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64, MVT::v4i8, Legal);
1338 setLoadExtAction(ISD::SEXTLOAD, MVT::v8i32, MVT::v8i16, Legal);
1339 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64, MVT::v4i16, Legal);
1340 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64, MVT::v4i32, Legal);
1342 setLoadExtAction(ISD::ZEXTLOAD, MVT::v16i16, MVT::v16i8, Legal);
1343 setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i32, MVT::v8i8, Legal);
1344 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64, MVT::v4i8, Legal);
1345 setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i32, MVT::v8i16, Legal);
1346 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64, MVT::v4i16, Legal);
1347 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64, MVT::v4i32, Legal);
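// For example, an extending load of <8 x i8> to <8 x i32> can be selected
// directly to the memory form of VPMOVSXBD / VPMOVZXBD instead of a plain
// load followed by a separate extend (illustrative; the actual matching is
// done by the isel patterns).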
1349 setOperationAction(ISD::ADD, MVT::v4i64, Custom);
1350 setOperationAction(ISD::ADD, MVT::v8i32, Custom);
1351 setOperationAction(ISD::ADD, MVT::v16i16, Custom);
1352 setOperationAction(ISD::ADD, MVT::v32i8, Custom);
1354 setOperationAction(ISD::SUB, MVT::v4i64, Custom);
1355 setOperationAction(ISD::SUB, MVT::v8i32, Custom);
1356 setOperationAction(ISD::SUB, MVT::v16i16, Custom);
1357 setOperationAction(ISD::SUB, MVT::v32i8, Custom);
1359 setOperationAction(ISD::MUL, MVT::v4i64, Custom);
1360 setOperationAction(ISD::MUL, MVT::v8i32, Custom);
1361 setOperationAction(ISD::MUL, MVT::v16i16, Custom);
1362 // Don't lower v32i8 because there is no 128-bit byte mul
1365 // In the customized shift lowering, the legal cases in AVX2 will be recognized.
1367 setOperationAction(ISD::SRL, MVT::v4i64, Custom);
1368 setOperationAction(ISD::SRL, MVT::v8i32, Custom);
1370 setOperationAction(ISD::SHL, MVT::v4i64, Custom);
1371 setOperationAction(ISD::SHL, MVT::v8i32, Custom);
1373 setOperationAction(ISD::SRA, MVT::v8i32, Custom);
1375 // Custom lower several nodes for 256-bit types.
1376 for (MVT VT : MVT::vector_valuetypes()) {
1377 if (VT.getScalarSizeInBits() >= 32) {
1378 setOperationAction(ISD::MLOAD, VT, Legal);
1379 setOperationAction(ISD::MSTORE, VT, Legal);
1381 // Extract subvector is special because the value type
1382 // (result) is 128-bit but the source is 256-bit wide.
1383 if (VT.is128BitVector()) {
1384 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1386 // Do not attempt to custom lower other non-256-bit vectors
1387 if (!VT.is256BitVector())
1390 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1391 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1392 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1393 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1394 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
1395 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1396 setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
1399 // Promote v32i8, v16i16, v8i32 select, and, or, xor to v4i64.
1400 for (int i = MVT::v32i8; i != MVT::v4i64; ++i) {
1401 MVT VT = (MVT::SimpleValueType)i;
1403 // Do not attempt to promote non-256-bit vectors
1404 if (!VT.is256BitVector())
1407 setOperationAction(ISD::AND, VT, Promote);
1408 AddPromotedToType (ISD::AND, VT, MVT::v4i64);
1409 setOperationAction(ISD::OR, VT, Promote);
1410 AddPromotedToType (ISD::OR, VT, MVT::v4i64);
1411 setOperationAction(ISD::XOR, VT, Promote);
1412 AddPromotedToType (ISD::XOR, VT, MVT::v4i64);
1413 setOperationAction(ISD::LOAD, VT, Promote);
1414 AddPromotedToType (ISD::LOAD, VT, MVT::v4i64);
1415 setOperationAction(ISD::SELECT, VT, Promote);
1416 AddPromotedToType (ISD::SELECT, VT, MVT::v4i64);
1420 if (!TM.Options.UseSoftFloat && Subtarget->hasAVX512()) {
1421 addRegisterClass(MVT::v16i32, &X86::VR512RegClass);
1422 addRegisterClass(MVT::v16f32, &X86::VR512RegClass);
1423 addRegisterClass(MVT::v8i64, &X86::VR512RegClass);
1424 addRegisterClass(MVT::v8f64, &X86::VR512RegClass);
1426 addRegisterClass(MVT::i1, &X86::VK1RegClass);
1427 addRegisterClass(MVT::v8i1, &X86::VK8RegClass);
1428 addRegisterClass(MVT::v16i1, &X86::VK16RegClass);
1430 for (MVT VT : MVT::fp_vector_valuetypes())
1431 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v8f32, Legal);
1433 setOperationAction(ISD::BR_CC, MVT::i1, Expand);
1434 setOperationAction(ISD::SETCC, MVT::i1, Custom);
1435 setOperationAction(ISD::XOR, MVT::i1, Legal);
1436 setOperationAction(ISD::OR, MVT::i1, Legal);
1437 setOperationAction(ISD::AND, MVT::i1, Legal);
1438 setOperationAction(ISD::LOAD, MVT::v16f32, Legal);
1439 setOperationAction(ISD::LOAD, MVT::v8f64, Legal);
1440 setOperationAction(ISD::LOAD, MVT::v8i64, Legal);
1441 setOperationAction(ISD::LOAD, MVT::v16i32, Legal);
1442 setOperationAction(ISD::LOAD, MVT::v16i1, Legal);
1444 setOperationAction(ISD::FADD, MVT::v16f32, Legal);
1445 setOperationAction(ISD::FSUB, MVT::v16f32, Legal);
1446 setOperationAction(ISD::FMUL, MVT::v16f32, Legal);
1447 setOperationAction(ISD::FDIV, MVT::v16f32, Legal);
1448 setOperationAction(ISD::FSQRT, MVT::v16f32, Legal);
1449 setOperationAction(ISD::FNEG, MVT::v16f32, Custom);
1451 setOperationAction(ISD::FADD, MVT::v8f64, Legal);
1452 setOperationAction(ISD::FSUB, MVT::v8f64, Legal);
1453 setOperationAction(ISD::FMUL, MVT::v8f64, Legal);
1454 setOperationAction(ISD::FDIV, MVT::v8f64, Legal);
1455 setOperationAction(ISD::FSQRT, MVT::v8f64, Legal);
1456 setOperationAction(ISD::FNEG, MVT::v8f64, Custom);
1457 setOperationAction(ISD::FMA, MVT::v8f64, Legal);
1458 setOperationAction(ISD::FMA, MVT::v16f32, Legal);
1460 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal);
1461 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
1462 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal);
1463 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal);
1464 if (Subtarget->is64Bit()) {
1465 setOperationAction(ISD::FP_TO_UINT, MVT::i64, Legal);
1466 setOperationAction(ISD::FP_TO_SINT, MVT::i64, Legal);
1467 setOperationAction(ISD::SINT_TO_FP, MVT::i64, Legal);
1468 setOperationAction(ISD::UINT_TO_FP, MVT::i64, Legal);
1470 setOperationAction(ISD::FP_TO_SINT, MVT::v16i32, Legal);
1471 setOperationAction(ISD::FP_TO_UINT, MVT::v16i32, Legal);
1472 setOperationAction(ISD::FP_TO_UINT, MVT::v8i32, Legal);
1473 setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
1474 setOperationAction(ISD::SINT_TO_FP, MVT::v16i32, Legal);
1475 setOperationAction(ISD::SINT_TO_FP, MVT::v8i1, Custom);
1476 setOperationAction(ISD::SINT_TO_FP, MVT::v16i1, Custom);
1477 setOperationAction(ISD::SINT_TO_FP, MVT::v16i8, Promote);
1478 setOperationAction(ISD::SINT_TO_FP, MVT::v16i16, Promote);
1479 setOperationAction(ISD::UINT_TO_FP, MVT::v16i32, Legal);
1480 setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Legal);
1481 setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
1482 setOperationAction(ISD::FP_ROUND, MVT::v8f32, Legal);
1483 setOperationAction(ISD::FP_EXTEND, MVT::v8f32, Legal);
1485 setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);
1486 setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom);
1487 setOperationAction(ISD::TRUNCATE, MVT::v8i32, Custom);
1488 setOperationAction(ISD::TRUNCATE, MVT::v8i1, Custom);
1489 setOperationAction(ISD::TRUNCATE, MVT::v16i1, Custom);
1490 setOperationAction(ISD::TRUNCATE, MVT::v16i16, Custom);
1491 setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom);
1492 setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64, Custom);
1493 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom);
1494 setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64, Custom);
1495 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i8, Custom);
1496 setOperationAction(ISD::SIGN_EXTEND, MVT::v8i16, Custom);
1497 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i16, Custom);
1499 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f64, Custom);
1500 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i64, Custom);
1501 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16f32, Custom);
1502 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i32, Custom);
1503 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i1, Custom);
1504 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i1, Legal);
1506 setOperationAction(ISD::SETCC, MVT::v16i1, Custom);
1507 setOperationAction(ISD::SETCC, MVT::v8i1, Custom);
1509 setOperationAction(ISD::MUL, MVT::v8i64, Custom);
1511 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i1, Custom);
1512 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i1, Custom);
1513 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i1, Custom);
1514 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i1, Custom);
1515 setOperationAction(ISD::BUILD_VECTOR, MVT::v8i1, Custom);
1516 setOperationAction(ISD::BUILD_VECTOR, MVT::v16i1, Custom);
1517 setOperationAction(ISD::SELECT, MVT::v8f64, Custom);
1518 setOperationAction(ISD::SELECT, MVT::v8i64, Custom);
1519 setOperationAction(ISD::SELECT, MVT::v16f32, Custom);
1521 setOperationAction(ISD::ADD, MVT::v8i64, Legal);
1522 setOperationAction(ISD::ADD, MVT::v16i32, Legal);
1524 setOperationAction(ISD::SUB, MVT::v8i64, Legal);
1525 setOperationAction(ISD::SUB, MVT::v16i32, Legal);
1527 setOperationAction(ISD::MUL, MVT::v16i32, Legal);
1529 setOperationAction(ISD::SRL, MVT::v8i64, Custom);
1530 setOperationAction(ISD::SRL, MVT::v16i32, Custom);
1532 setOperationAction(ISD::SHL, MVT::v8i64, Custom);
1533 setOperationAction(ISD::SHL, MVT::v16i32, Custom);
1535 setOperationAction(ISD::SRA, MVT::v8i64, Custom);
1536 setOperationAction(ISD::SRA, MVT::v16i32, Custom);
1538 setOperationAction(ISD::AND, MVT::v8i64, Legal);
1539 setOperationAction(ISD::OR, MVT::v8i64, Legal);
1540 setOperationAction(ISD::XOR, MVT::v8i64, Legal);
1541 setOperationAction(ISD::AND, MVT::v16i32, Legal);
1542 setOperationAction(ISD::OR, MVT::v16i32, Legal);
1543 setOperationAction(ISD::XOR, MVT::v16i32, Legal);
1545 if (Subtarget->hasCDI()) {
1546 setOperationAction(ISD::CTLZ, MVT::v8i64, Legal);
1547 setOperationAction(ISD::CTLZ, MVT::v16i32, Legal);
1550 // Custom lower several nodes.
1551 for (MVT VT : MVT::vector_valuetypes()) {
1552 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
1553 // Extract subvector is special because the value type
1554 // (result) is 256/128-bit but the source is 512-bit wide.
1555 if (VT.is128BitVector() || VT.is256BitVector()) {
1556 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1558 if (VT.getVectorElementType() == MVT::i1)
1559 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
1561 // Do not attempt to custom lower other non-512-bit vectors
1562 if (!VT.is512BitVector())
1565 if (EltSize >= 32) {
1566 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1567 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1568 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1569 setOperationAction(ISD::VSELECT, VT, Legal);
1570 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1571 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
1572 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1573 setOperationAction(ISD::MLOAD, VT, Legal);
1574 setOperationAction(ISD::MSTORE, VT, Legal);
1577 for (int i = MVT::v32i8; i != MVT::v8i64; ++i) {
1578 MVT VT = (MVT::SimpleValueType)i;
1580 // Do not attempt to promote non-512-bit vectors.
1581 if (!VT.is512BitVector())
1584 setOperationAction(ISD::SELECT, VT, Promote);
1585 AddPromotedToType (ISD::SELECT, VT, MVT::v8i64);
1589 if (!TM.Options.UseSoftFloat && Subtarget->hasBWI()) {
1590 addRegisterClass(MVT::v32i16, &X86::VR512RegClass);
1591 addRegisterClass(MVT::v64i8, &X86::VR512RegClass);
1593 addRegisterClass(MVT::v32i1, &X86::VK32RegClass);
1594 addRegisterClass(MVT::v64i1, &X86::VK64RegClass);
1596 setOperationAction(ISD::LOAD, MVT::v32i16, Legal);
1597 setOperationAction(ISD::LOAD, MVT::v64i8, Legal);
1598 setOperationAction(ISD::SETCC, MVT::v32i1, Custom);
1599 setOperationAction(ISD::SETCC, MVT::v64i1, Custom);
1600 setOperationAction(ISD::ADD, MVT::v32i16, Legal);
1601 setOperationAction(ISD::ADD, MVT::v64i8, Legal);
1602 setOperationAction(ISD::SUB, MVT::v32i16, Legal);
1603 setOperationAction(ISD::SUB, MVT::v64i8, Legal);
1604 setOperationAction(ISD::MUL, MVT::v32i16, Legal);
1606 for (int i = MVT::v32i8; i != MVT::v8i64; ++i) {
1607 const MVT VT = (MVT::SimpleValueType)i;
1609 const unsigned EltSize = VT.getVectorElementType().getSizeInBits();
1611 // Do not attempt to promote non-512-bit vectors.
1612 if (!VT.is512BitVector())
1616 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1617 setOperationAction(ISD::VSELECT, VT, Legal);
1622 if (!TM.Options.UseSoftFloat && Subtarget->hasVLX()) {
1623 addRegisterClass(MVT::v4i1, &X86::VK4RegClass);
1624 addRegisterClass(MVT::v2i1, &X86::VK2RegClass);
1626 setOperationAction(ISD::SETCC, MVT::v4i1, Custom);
1627 setOperationAction(ISD::SETCC, MVT::v2i1, Custom);
1628 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8i1, Legal);
1630 setOperationAction(ISD::AND, MVT::v8i32, Legal);
1631 setOperationAction(ISD::OR, MVT::v8i32, Legal);
1632 setOperationAction(ISD::XOR, MVT::v8i32, Legal);
1633 setOperationAction(ISD::AND, MVT::v4i32, Legal);
1634 setOperationAction(ISD::OR, MVT::v4i32, Legal);
1635 setOperationAction(ISD::XOR, MVT::v4i32, Legal);
1638 // SIGN_EXTEND_INREGs are evaluated by the extend type. Handle the expansion
1639 // of this type with custom code.
1640 for (MVT VT : MVT::vector_valuetypes())
1641 setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Custom);
1643 // We want to custom lower some of our intrinsics.
1644 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
1645 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
1646 setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
1647 if (!Subtarget->is64Bit())
1648 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
1650 // Only custom-lower 64-bit SADDO and friends on 64-bit because we don't
1651 // handle type legalization for these operations here.
1653 // FIXME: We really should do custom legalization for addition and
1654 // subtraction on x86-32 once PR3203 is fixed. We really can't do much better
1655 // than generic legalization for 64-bit multiplication-with-overflow, though.
1656 for (unsigned i = 0, e = 3+Subtarget->is64Bit(); i != e; ++i) {
1657 // Add/Sub/Mul with overflow operations are custom lowered.
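// Illustrative sketch: an @llvm.sadd.with.overflow.i32 is turned into an
// X86ISD::ADD node that also produces EFLAGS, and the overflow result is read
// back with a SETCC of the overflow flag (roughly a SETO). The precise node
// construction lives in the XALUO custom lowering code.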
1658 MVT VT = IntVTs[i];
1659 setOperationAction(ISD::SADDO, VT, Custom);
1660 setOperationAction(ISD::UADDO, VT, Custom);
1661 setOperationAction(ISD::SSUBO, VT, Custom);
1662 setOperationAction(ISD::USUBO, VT, Custom);
1663 setOperationAction(ISD::SMULO, VT, Custom);
1664 setOperationAction(ISD::UMULO, VT, Custom);
1668 if (!Subtarget->is64Bit()) {
1669 // These libcalls are not available in 32-bit.
1670 setLibcallName(RTLIB::SHL_I128, nullptr);
1671 setLibcallName(RTLIB::SRL_I128, nullptr);
1672 setLibcallName(RTLIB::SRA_I128, nullptr);
1675 // Combine sin / cos into one node or libcall if possible.
1676 if (Subtarget->hasSinCos()) {
1677 setLibcallName(RTLIB::SINCOS_F32, "sincosf");
1678 setLibcallName(RTLIB::SINCOS_F64, "sincos");
1679 if (Subtarget->isTargetDarwin()) {
1680 // For MacOSX, we don't want the normal expansion of a libcall to sincos.
1681 // We want to issue a libcall to __sincos_stret to avoid memory traffic.
1682 setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
1683 setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
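// Roughly, the custom lowering emits one call to __sincos_stret, which
// returns both the sine and the cosine (in registers or via an sret slot,
// depending on the type), instead of two separate sin/cos libcalls.
// (A high-level summary of the FSINCOS lowering.)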
1687 if (Subtarget->isTargetWin64()) {
1688 setOperationAction(ISD::SDIV, MVT::i128, Custom);
1689 setOperationAction(ISD::UDIV, MVT::i128, Custom);
1690 setOperationAction(ISD::SREM, MVT::i128, Custom);
1691 setOperationAction(ISD::UREM, MVT::i128, Custom);
1692 setOperationAction(ISD::SDIVREM, MVT::i128, Custom);
1693 setOperationAction(ISD::UDIVREM, MVT::i128, Custom);
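// Win64 has no native 128-bit divide and cannot pass i128 in registers, so
// the custom lowering turns these into libcalls (e.g. __divti3) with the
// i128 operands passed by pointer. (A brief summary; see the Win64 i128
// lowering helper for the details.)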
1696 // We have target-specific dag combine patterns for the following nodes:
1697 setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
1698 setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
1699 setTargetDAGCombine(ISD::VSELECT);
1700 setTargetDAGCombine(ISD::SELECT);
1701 setTargetDAGCombine(ISD::SHL);
1702 setTargetDAGCombine(ISD::SRA);
1703 setTargetDAGCombine(ISD::SRL);
1704 setTargetDAGCombine(ISD::OR);
1705 setTargetDAGCombine(ISD::AND);
1706 setTargetDAGCombine(ISD::ADD);
1707 setTargetDAGCombine(ISD::FADD);
1708 setTargetDAGCombine(ISD::FSUB);
1709 setTargetDAGCombine(ISD::FMA);
1710 setTargetDAGCombine(ISD::SUB);
1711 setTargetDAGCombine(ISD::LOAD);
1712 setTargetDAGCombine(ISD::MLOAD);
1713 setTargetDAGCombine(ISD::STORE);
1714 setTargetDAGCombine(ISD::MSTORE);
1715 setTargetDAGCombine(ISD::ZERO_EXTEND);
1716 setTargetDAGCombine(ISD::ANY_EXTEND);
1717 setTargetDAGCombine(ISD::SIGN_EXTEND);
1718 setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
1719 setTargetDAGCombine(ISD::TRUNCATE);
1720 setTargetDAGCombine(ISD::SINT_TO_FP);
1721 setTargetDAGCombine(ISD::SETCC);
1722 setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
1723 setTargetDAGCombine(ISD::BUILD_VECTOR);
1724 if (Subtarget->is64Bit())
1725 setTargetDAGCombine(ISD::MUL);
1726 setTargetDAGCombine(ISD::XOR);
1728 computeRegisterProperties();
1730 // On Darwin, -Os means optimize for size without hurting performance, so
1731 // do not reduce the limit.
1732 MaxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores
1733 MaxStoresPerMemsetOptSize = Subtarget->isTargetDarwin() ? 16 : 8;
1734 MaxStoresPerMemcpy = 8; // For @llvm.memcpy -> sequence of stores
1735 MaxStoresPerMemcpyOptSize = Subtarget->isTargetDarwin() ? 8 : 4;
1736 MaxStoresPerMemmove = 8; // For @llvm.memmove -> sequence of stores
1737 MaxStoresPerMemmoveOptSize = Subtarget->isTargetDarwin() ? 8 : 4;
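// These limits bound how many stores an @llvm.memset/memcpy/memmove may be
// expanded into before falling back to a library call. For example, with
// MaxStoresPerMemset = 16 a small constant-size memset can still be emitted
// inline as a handful of 16-byte stores. (Illustrative; the expansion also
// depends on size, alignment and the available vector width.)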
1738 setPrefLoopAlignment(4); // 2^4 bytes.
1740 // A predictable cmov doesn't hurt on Atom because it's in-order.
1741 PredictableSelectIsExpensive = !Subtarget->isAtom();
1742 EnableExtLdPromotion = true;
1743 setPrefFunctionAlignment(4); // 2^4 bytes.
1745 verifyIntrinsicTables();
1748 // This has so far only been implemented for 64-bit MachO.
1749 bool X86TargetLowering::useLoadStackGuardNode() const {
1750 return Subtarget->isTargetMachO() && Subtarget->is64Bit();
1753 TargetLoweringBase::LegalizeTypeAction
1754 X86TargetLowering::getPreferredVectorAction(EVT VT) const {
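// Under the experimental widening legalization, e.g. a v2i32 value is widened
// to v4i32 rather than having its elements promoted to i64 (a brief
// illustration of widening vs. promotion); i1 vectors and single-element
// vectors keep the default behavior below.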
1755 if (ExperimentalVectorWideningLegalization &&
1756 VT.getVectorNumElements() != 1 &&
1757 VT.getVectorElementType().getSimpleVT() != MVT::i1)
1758 return TypeWidenVector;
1760 return TargetLoweringBase::getPreferredVectorAction(VT);
1763 EVT X86TargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
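// With AVX-512 a vector compare can produce a mask register type such as
// v16i1; otherwise the result is a vector of same-width integer lanes, e.g. a
// v4f32 compare yields a v4i32 mask of all-ones/all-zeros elements.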
1765 return Subtarget->hasAVX512() ? MVT::i1 : MVT::i8;
1767 const unsigned NumElts = VT.getVectorNumElements();
1768 const EVT EltVT = VT.getVectorElementType();
1769 if (VT.is512BitVector()) {
1770 if (Subtarget->hasAVX512())
1771 if (EltVT == MVT::i32 || EltVT == MVT::i64 ||
1772 EltVT == MVT::f32 || EltVT == MVT::f64)
1774 case 8: return MVT::v8i1;
1775 case 16: return MVT::v16i1;
1777 if (Subtarget->hasBWI())
1778 if (EltVT == MVT::i8 || EltVT == MVT::i16)
1780 case 32: return MVT::v32i1;
1781 case 64: return MVT::v64i1;
1785 if (VT.is256BitVector() || VT.is128BitVector()) {
1786 if (Subtarget->hasVLX())
1787 if (EltVT == MVT::i32 || EltVT == MVT::i64 ||
1788 EltVT == MVT::f32 || EltVT == MVT::f64)
1790 case 2: return MVT::v2i1;
1791 case 4: return MVT::v4i1;
1792 case 8: return MVT::v8i1;
1794 if (Subtarget->hasBWI() && Subtarget->hasVLX())
1795 if (EltVT == MVT::i8 || EltVT == MVT::i16)
1797 case 8: return MVT::v8i1;
1798 case 16: return MVT::v16i1;
1799 case 32: return MVT::v32i1;
1803 return VT.changeVectorElementTypeToInteger();
1806 /// Helper for getByValTypeAlignment to determine
1807 /// the desired ByVal argument alignment.
1808 static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign) {
1811 if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
1812 if (VTy->getBitWidth() == 128)
1814 } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
1815 unsigned EltAlign = 0;
1816 getMaxByValAlign(ATy->getElementType(), EltAlign);
1817 if (EltAlign > MaxAlign)
1818 MaxAlign = EltAlign;
1819 } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
1820 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1821 unsigned EltAlign = 0;
1822 getMaxByValAlign(STy->getElementType(i), EltAlign);
1823 if (EltAlign > MaxAlign)
1824 MaxAlign = EltAlign;
1831 /// Return the desired alignment for ByVal aggregate
1832 /// function arguments in the caller parameter area. For X86, aggregates
1833 /// that contain SSE vectors are placed at 16-byte boundaries while the rest
1834 /// are at 4-byte boundaries.
1835 unsigned X86TargetLowering::getByValTypeAlignment(Type *Ty) const {
1836 if (Subtarget->is64Bit()) {
1837 // Max of 8 and alignment of type.
1838 unsigned TyAlign = TD->getABITypeAlignment(Ty);
1845 if (Subtarget->hasSSE1())
1846 getMaxByValAlign(Ty, Align);
1850 /// Returns the target specific optimal type for load
1851 /// and store operations as a result of memset, memcpy, and memmove
1852 /// lowering. If DstAlign is zero, it is safe to assume that the destination
1853 /// alignment can satisfy any constraint. Similarly, if SrcAlign is zero it
1854 /// means there is no need to check it against the alignment requirement,
1855 /// probably because the source does not need to be loaded. If 'IsMemset' is
1856 /// true, that means it's expanding a memset. If 'ZeroMemset' is true, that
1857 /// means it's a memset of zero. 'MemcpyStrSrc' indicates whether the memcpy
1858 /// source is constant so it does not need to be loaded.
1859 /// It returns EVT::Other if the type should be determined using generic
1860 /// target-independent logic.
1862 X86TargetLowering::getOptimalMemOpType(uint64_t Size,
1863 unsigned DstAlign, unsigned SrcAlign,
1864 bool IsMemset, bool ZeroMemset,
1866 MachineFunction &MF) const {
1867 const Function *F = MF.getFunction();
1868 if ((!IsMemset || ZeroMemset) &&
1869 !F->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
1870 Attribute::NoImplicitFloat)) {
1872 (Subtarget->isUnalignedMemAccessFast() ||
1873 ((DstAlign == 0 || DstAlign >= 16) &&
1874 (SrcAlign == 0 || SrcAlign >= 16)))) {
1876 if (Subtarget->hasInt256())
1878 if (Subtarget->hasFp256())
1881 if (Subtarget->hasSSE2())
1883 if (Subtarget->hasSSE1())
1885 } else if (!MemcpyStrSrc && Size >= 8 &&
1886 !Subtarget->is64Bit() &&
1887 Subtarget->hasSSE2()) {
1888 // Do not use f64 to lower memcpy if the source is a string constant. It's
1889 // better to use i32 to avoid the loads.
1893 if (Subtarget->is64Bit() && Size >= 8)
1898 bool X86TargetLowering::isSafeMemOpType(MVT VT) const {
1900 return X86ScalarSSEf32;
1901 else if (VT == MVT::f64)
1902 return X86ScalarSSEf64;
1907 X86TargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
1912 *Fast = Subtarget->isUnalignedMemAccessFast();
1916 /// Return the entry encoding for a jump table in the
1917 /// current function. The returned value is a member of the
1918 /// MachineJumpTableInfo::JTEntryKind enum.
1919 unsigned X86TargetLowering::getJumpTableEncoding() const {
1920 // In GOT pic mode, each entry in the jump table is emitted as a @GOTOFF symbol.
1922 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
1923 Subtarget->isPICStyleGOT())
1924 return MachineJumpTableInfo::EK_Custom32;
1926 // Otherwise, use the normal jump table encoding heuristics.
1927 return TargetLowering::getJumpTableEncoding();
1931 X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
1932 const MachineBasicBlock *MBB,
1933 unsigned uid, MCContext &Ctx) const {
1934 assert(MBB->getParent()->getTarget().getRelocationModel() == Reloc::PIC_ &&
1935 Subtarget->isPICStyleGOT());
1936 // In 32-bit ELF systems, our jump table entries are formed with @GOTOFF entries.
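// Concretely, each entry is emitted as something like ".long .LBB0_3@GOTOFF"
// rather than as an absolute address (illustrative assembly syntax).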
1938 return MCSymbolRefExpr::Create(MBB->getSymbol(),
1939 MCSymbolRefExpr::VK_GOTOFF, Ctx);
1942 /// Returns relocation base for the given PIC jumptable.
1943 SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table,
1944 SelectionDAG &DAG) const {
1945 if (!Subtarget->is64Bit())
1946 // This doesn't have SDLoc associated with it, but is not really the
1947 // same as a Register.
1948 return DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), getPointerTy());
1952 /// This returns the relocation base for the given PIC jumptable,
1953 /// the same as getPICJumpTableRelocBase, but as an MCExpr.
1954 const MCExpr *X86TargetLowering::
1955 getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI,
1956 MCContext &Ctx) const {
1957 // X86-64 uses RIP relative addressing based on the jump table label.
1958 if (Subtarget->isPICStyleRIPRel())
1959 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
1961 // Otherwise, the reference is relative to the PIC base.
1962 return MCSymbolRefExpr::Create(MF->getPICBaseSymbol(), Ctx);
1965 // FIXME: Why is this routine here? Move it to RegInfo!
1966 std::pair<const TargetRegisterClass*, uint8_t>
1967 X86TargetLowering::findRepresentativeClass(MVT VT) const{
1968 const TargetRegisterClass *RRC = nullptr;
1970 switch (VT.SimpleTy) {
1972 return TargetLowering::findRepresentativeClass(VT);
1973 case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64:
1974 RRC = Subtarget->is64Bit() ? &X86::GR64RegClass : &X86::GR32RegClass;
1977 RRC = &X86::VR64RegClass;
1979 case MVT::f32: case MVT::f64:
1980 case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
1981 case MVT::v4f32: case MVT::v2f64:
1982 case MVT::v32i8: case MVT::v8i32: case MVT::v4i64: case MVT::v8f32:
1984 RRC = &X86::VR128RegClass;
1987 return std::make_pair(RRC, Cost);
1990 bool X86TargetLowering::getStackCookieLocation(unsigned &AddressSpace,
1991 unsigned &Offset) const {
1992 if (!Subtarget->isTargetLinux())
1995 if (Subtarget->is64Bit()) {
1996 // %fs:0x28, unless we're using a Kernel code model, in which case it's %gs:
1998 if (getTargetMachine().getCodeModel() == CodeModel::Kernel)
2010 bool X86TargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
2011 unsigned DestAS) const {
2012 assert(SrcAS != DestAS && "Expected different address spaces!");
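// Address spaces below 256 all alias the ordinary flat x86 address space, so
// casts between them are no-ops; the higher numbers are reserved for
// segment-relative addressing (e.g. the %gs/%fs based address spaces), where
// a cast changes the meaning of the pointer.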
2014 return SrcAS < 256 && DestAS < 256;
2017 //===----------------------------------------------------------------------===//
2018 // Return Value Calling Convention Implementation
2019 //===----------------------------------------------------------------------===//
2021 #include "X86GenCallingConv.inc"
2024 X86TargetLowering::CanLowerReturn(CallingConv::ID CallConv,
2025 MachineFunction &MF, bool isVarArg,
2026 const SmallVectorImpl<ISD::OutputArg> &Outs,
2027 LLVMContext &Context) const {
2028 SmallVector<CCValAssign, 16> RVLocs;
2029 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
2030 return CCInfo.CheckReturn(Outs, RetCC_X86);
2033 const MCPhysReg *X86TargetLowering::getScratchRegisters(CallingConv::ID) const {
2034 static const MCPhysReg ScratchRegs[] = { X86::R11, 0 };
2039 X86TargetLowering::LowerReturn(SDValue Chain,
2040 CallingConv::ID CallConv, bool isVarArg,
2041 const SmallVectorImpl<ISD::OutputArg> &Outs,
2042 const SmallVectorImpl<SDValue> &OutVals,
2043 SDLoc dl, SelectionDAG &DAG) const {
2044 MachineFunction &MF = DAG.getMachineFunction();
2045 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
2047 SmallVector<CCValAssign, 16> RVLocs;
2048 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, *DAG.getContext());
2049 CCInfo.AnalyzeReturn(Outs, RetCC_X86);
2052 SmallVector<SDValue, 6> RetOps;
2053 RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
2054 // Operand #1 = Bytes To Pop
2055 RetOps.push_back(DAG.getTargetConstant(FuncInfo->getBytesToPopOnReturn(),
2058 // Copy the result values into the output registers.
2059 for (unsigned i = 0; i != RVLocs.size(); ++i) {
2060 CCValAssign &VA = RVLocs[i];
2061 assert(VA.isRegLoc() && "Can only return in registers!");
2062 SDValue ValToCopy = OutVals[i];
2063 EVT ValVT = ValToCopy.getValueType();
2065 // Promote values to the appropriate types.
2066 if (VA.getLocInfo() == CCValAssign::SExt)
2067 ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), ValToCopy);
2068 else if (VA.getLocInfo() == CCValAssign::ZExt)
2069 ValToCopy = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), ValToCopy);
2070 else if (VA.getLocInfo() == CCValAssign::AExt)
2071 ValToCopy = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), ValToCopy);
2072 else if (VA.getLocInfo() == CCValAssign::BCvt)
2073 ValToCopy = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), ValToCopy);
2075 assert(VA.getLocInfo() != CCValAssign::FPExt &&
2076 "Unexpected FP-extend for return value.");
2078 // If this is x86-64, and we disabled SSE, we can't return FP values,
2079 // or SSE or MMX vectors.
2080 if ((ValVT == MVT::f32 || ValVT == MVT::f64 ||
2081 VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) &&
2082 (Subtarget->is64Bit() && !Subtarget->hasSSE1())) {
2083 report_fatal_error("SSE register return with SSE disabled");
2085 // Likewise we can't return F64 values with SSE1 only. gcc does so, but
2086 // llvm-gcc has never done it right and no one has noticed, so this
2087 // should be OK for now.
2088 if (ValVT == MVT::f64 &&
2089 (Subtarget->is64Bit() && !Subtarget->hasSSE2()))
2090 report_fatal_error("SSE2 register return with SSE2 disabled");
2092 // Returns in ST0/ST1 are handled specially: these are pushed as operands to
2093 // the RET instruction and handled by the FP Stackifier.
2094 if (VA.getLocReg() == X86::FP0 ||
2095 VA.getLocReg() == X86::FP1) {
2096 // If this is a copy from an xmm register to ST(0), use an FPExtend to
2097 // change the value to the FP stack register class.
2098 if (isScalarFPTypeInSSEReg(VA.getValVT()))
2099 ValToCopy = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f80, ValToCopy);
2100 RetOps.push_back(ValToCopy);
2101 // Don't emit a copytoreg.
2105 // 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for v1i64
2106 // which is returned in RAX / RDX.
2107 if (Subtarget->is64Bit()) {
2108 if (ValVT == MVT::x86mmx) {
2109 if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) {
2110 ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ValToCopy);
2111 ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
2113 // If we don't have SSE2 available, convert to v4f32 so the generated
2114 // register is legal.
2115 if (!Subtarget->hasSSE2())
2116 ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32,ValToCopy);
2121 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), ValToCopy, Flag);
2122 Flag = Chain.getValue(1);
2123 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
2126 // The x86-64 ABIs require that for returning structs by value we copy
2127 // the sret argument into %rax/%eax (depending on ABI) for the return.
2128 // Win32 requires us to put the sret argument to %eax as well.
2129 // We saved the argument into a virtual register in the entry block,
2130 // so now we copy the value out and into %rax/%eax.
2131 if (DAG.getMachineFunction().getFunction()->hasStructRetAttr() &&
2132 (Subtarget->is64Bit() || Subtarget->isTargetKnownWindowsMSVC())) {
2133 MachineFunction &MF = DAG.getMachineFunction();
2134 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
2135 unsigned Reg = FuncInfo->getSRetReturnReg();
2137 "SRetReturnReg should have been set in LowerFormalArguments().");
2138 SDValue Val = DAG.getCopyFromReg(Chain, dl, Reg, getPointerTy());
2141 = (Subtarget->is64Bit() && !Subtarget->isTarget64BitILP32()) ?
2142 X86::RAX : X86::EAX;
2143 Chain = DAG.getCopyToReg(Chain, dl, RetValReg, Val, Flag);
2144 Flag = Chain.getValue(1);
2146 // RAX/EAX now acts like a return value.
2147 RetOps.push_back(DAG.getRegister(RetValReg, getPointerTy()));
2150 RetOps[0] = Chain; // Update chain.
2152 // Add the flag if we have it.
2154 RetOps.push_back(Flag);
2156 return DAG.getNode(X86ISD::RET_FLAG, dl, MVT::Other, RetOps);
2159 bool X86TargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
2160 if (N->getNumValues() != 1)
2162 if (!N->hasNUsesOfValue(1, 0))
2165 SDValue TCChain = Chain;
2166 SDNode *Copy = *N->use_begin();
2167 if (Copy->getOpcode() == ISD::CopyToReg) {
2168 // If the copy has a glue operand, we conservatively assume it isn't safe to
2169 // perform a tail call.
2170 if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
2172 TCChain = Copy->getOperand(0);
2173 } else if (Copy->getOpcode() != ISD::FP_EXTEND)
2176 bool HasRet = false;
2177 for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end();
2179 if (UI->getOpcode() != X86ISD::RET_FLAG)
2181 // If we are returning more than one value, we can definitely
2182 // not make a tail call; see PR19530.
2183 if (UI->getNumOperands() > 4)
2185 if (UI->getNumOperands() == 4 &&
2186 UI->getOperand(UI->getNumOperands()-1).getValueType() != MVT::Glue)
2199 X86TargetLowering::getTypeForExtArgOrReturn(LLVMContext &Context, EVT VT,
2200 ISD::NodeType ExtendKind) const {
2202 // TODO: Is this also valid on 32-bit?
2203 if (Subtarget->is64Bit() && VT == MVT::i1 && ExtendKind == ISD::ZERO_EXTEND)
2204 ReturnMVT = MVT::i8;
2206 ReturnMVT = MVT::i32;
2208 EVT MinVT = getRegisterType(Context, ReturnMVT);
2209 return VT.bitsLT(MinVT) ? MinVT : VT;
2212 /// Lower the result values of a call into the
2213 /// appropriate copies out of appropriate physical registers.
2216 X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
2217 CallingConv::ID CallConv, bool isVarArg,
2218 const SmallVectorImpl<ISD::InputArg> &Ins,
2219 SDLoc dl, SelectionDAG &DAG,
2220 SmallVectorImpl<SDValue> &InVals) const {
2222 // Assign locations to each value returned by this call.
2223 SmallVector<CCValAssign, 16> RVLocs;
2224 bool Is64Bit = Subtarget->is64Bit();
2225 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
2227 CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
2229 // Copy all of the result registers out of their specified physreg.
2230 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
2231 CCValAssign &VA = RVLocs[i];
2232 EVT CopyVT = VA.getValVT();
2234 // If this is x86-64, and we disabled SSE, we can't return FP values
2235 if ((CopyVT == MVT::f32 || CopyVT == MVT::f64) &&
2236 ((Is64Bit || Ins[i].Flags.isInReg()) && !Subtarget->hasSSE1())) {
2237 report_fatal_error("SSE register return with SSE disabled");
2240 // If we prefer to use the value in xmm registers, copy it out as f80 and
2241 // use a truncate to move it from fp stack reg to xmm reg.
2242 if ((VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1) &&
2243 isScalarFPTypeInSSEReg(VA.getValVT()))
2246 Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(),
2247 CopyVT, InFlag).getValue(1);
2248 SDValue Val = Chain.getValue(0);
2250 if (CopyVT != VA.getValVT())
2251 Val = DAG.getNode(ISD::FP_ROUND, dl, VA.getValVT(), Val,
2252 // This truncation won't change the value.
2253 DAG.getIntPtrConstant(1));
2255 InFlag = Chain.getValue(2);
2256 InVals.push_back(Val);
2262 //===----------------------------------------------------------------------===//
2263 // C & StdCall & Fast Calling Convention implementation
2264 //===----------------------------------------------------------------------===//
2265 // The StdCall calling convention is the standard for many Windows API
2266 // routines. It differs from the C calling convention only slightly: the
2267 // callee cleans up the stack instead of the caller, and symbols are
2268 // decorated differently. It doesn't support any vector arguments.
2269 // For info on the fast calling convention, see the Fast Calling Convention
2270 // (tail call) implementation in LowerX86_32FastCCCallTo.
2272 /// CallIsStructReturn - Determines whether a call uses struct return semantics.
2274 enum StructReturnType {
2279 static StructReturnType
2280 callIsStructReturn(const SmallVectorImpl<ISD::OutputArg> &Outs) {
2282 return NotStructReturn;
2284 const ISD::ArgFlagsTy &Flags = Outs[0].Flags;
2285 if (!Flags.isSRet())
2286 return NotStructReturn;
2287 if (Flags.isInReg())
2288 return RegStructReturn;
2289 return StackStructReturn;
2292 /// Determines whether a function uses struct return semantics.
2293 static StructReturnType
2294 argsAreStructReturn(const SmallVectorImpl<ISD::InputArg> &Ins) {
2296 return NotStructReturn;
2298 const ISD::ArgFlagsTy &Flags = Ins[0].Flags;
2299 if (!Flags.isSRet())
2300 return NotStructReturn;
2301 if (Flags.isInReg())
2302 return RegStructReturn;
2303 return StackStructReturn;
2306 /// Make a copy of an aggregate at address specified by "Src" to address
2307 /// "Dst" with size and alignment information specified by the specific
2308 /// parameter attribute. The copy will be passed as a byval function parameter.
2310 CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
2311 ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
2313 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
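// Note: AlwaysInline=true in the getMemcpy call below forces the copy to be
// expanded into loads and stores rather than a memcpy libcall, which matters
// because this copy is created while a call is already being lowered.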
2315 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
2316 /*isVolatile*/false, /*AlwaysInline=*/true,
2317 MachinePointerInfo(), MachinePointerInfo());
2320 /// Return true if the calling convention is one that
2321 /// supports tail call optimization.
2322 static bool IsTailCallConvention(CallingConv::ID CC) {
2323 return (CC == CallingConv::Fast || CC == CallingConv::GHC ||
2324 CC == CallingConv::HiPE);
2327 /// \brief Return true if the calling convention is a C calling convention.
2328 static bool IsCCallConvention(CallingConv::ID CC) {
2329 return (CC == CallingConv::C || CC == CallingConv::X86_64_Win64 ||
2330 CC == CallingConv::X86_64_SysV);
2333 bool X86TargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
2334 if (!CI->isTailCall() || getTargetMachine().Options.DisableTailCalls)
2338 CallingConv::ID CalleeCC = CS.getCallingConv();
2339 if (!IsTailCallConvention(CalleeCC) && !IsCCallConvention(CalleeCC))
2345 /// Return true if the function is being made into
2346 /// a tailcall target by changing its ABI.
2347 static bool FuncIsMadeTailCallSafe(CallingConv::ID CC,
2348 bool GuaranteedTailCallOpt) {
2349 return GuaranteedTailCallOpt && IsTailCallConvention(CC);
2353 X86TargetLowering::LowerMemArgument(SDValue Chain,
2354 CallingConv::ID CallConv,
2355 const SmallVectorImpl<ISD::InputArg> &Ins,
2356 SDLoc dl, SelectionDAG &DAG,
2357 const CCValAssign &VA,
2358 MachineFrameInfo *MFI,
2360 // Create the nodes corresponding to a load from this parameter slot.
2361 ISD::ArgFlagsTy Flags = Ins[i].Flags;
2362 bool AlwaysUseMutable = FuncIsMadeTailCallSafe(
2363 CallConv, DAG.getTarget().Options.GuaranteedTailCallOpt);
2364 bool isImmutable = !AlwaysUseMutable && !Flags.isByVal();
2367 // If the value is passed by pointer, we have the address passed instead of the value itself.
2369 if (VA.getLocInfo() == CCValAssign::Indirect)
2370 ValVT = VA.getLocVT();
2372 ValVT = VA.getValVT();
2374 // FIXME: For now, all byval parameter objects are marked mutable. This can be
2375 // changed with more analysis.
2376 // In case of tail call optimization, mark all arguments mutable, since they
2377 // could be overwritten by lowering of arguments in case of a tail call.
2378 if (Flags.isByVal()) {
2379 unsigned Bytes = Flags.getByValSize();
2380 if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects.
2381 int FI = MFI->CreateFixedObject(Bytes, VA.getLocMemOffset(), isImmutable);
2382 return DAG.getFrameIndex(FI, getPointerTy());
2384 int FI = MFI->CreateFixedObject(ValVT.getSizeInBits()/8,
2385 VA.getLocMemOffset(), isImmutable);
2386 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
2387 return DAG.getLoad(ValVT, dl, Chain, FIN,
2388 MachinePointerInfo::getFixedStack(FI),
2389 false, false, false, 0);
2393 // FIXME: Get this from tablegen.
2394 static ArrayRef<MCPhysReg> get64BitArgumentGPRs(CallingConv::ID CallConv,
2395 const X86Subtarget *Subtarget) {
2396 assert(Subtarget->is64Bit());
2398 if (Subtarget->isCallingConvWin64(CallConv)) {
2399 static const MCPhysReg GPR64ArgRegsWin64[] = {
2400 X86::RCX, X86::RDX, X86::R8, X86::R9
2402 return makeArrayRef(std::begin(GPR64ArgRegsWin64), std::end(GPR64ArgRegsWin64));
2405 static const MCPhysReg GPR64ArgRegs64Bit[] = {
2406 X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
2408 return makeArrayRef(std::begin(GPR64ArgRegs64Bit), std::end(GPR64ArgRegs64Bit));
2411 // FIXME: Get this from tablegen.
2412 static ArrayRef<MCPhysReg> get64BitArgumentXMMs(MachineFunction &MF,
2413 CallingConv::ID CallConv,
2414 const X86Subtarget *Subtarget) {
2415 assert(Subtarget->is64Bit());
2416 if (Subtarget->isCallingConvWin64(CallConv)) {
2417 // The XMM registers which might contain var arg parameters are shadowed
2418 // in their paired GPRs, so we only need to save the GPRs to their home slots.
2420 // TODO: __vectorcall will change this.
2424 const Function *Fn = MF.getFunction();
2425 bool NoImplicitFloatOps = Fn->getAttributes().
2426 hasAttribute(AttributeSet::FunctionIndex, Attribute::NoImplicitFloat);
2427 assert(!(MF.getTarget().Options.UseSoftFloat && NoImplicitFloatOps) &&
2428 "SSE register cannot be used when SSE is disabled!");
2429 if (MF.getTarget().Options.UseSoftFloat || NoImplicitFloatOps ||
2430 !Subtarget->hasSSE1())
2431 // Kernel mode asks for SSE to be disabled, so there are no XMM argument registers.
2435 static const MCPhysReg XMMArgRegs64Bit[] = {
2436 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
2437 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
2439 return makeArrayRef(std::begin(XMMArgRegs64Bit), std::end(XMMArgRegs64Bit));
2443 X86TargetLowering::LowerFormalArguments(SDValue Chain,
2444 CallingConv::ID CallConv,
2446 const SmallVectorImpl<ISD::InputArg> &Ins,
2449 SmallVectorImpl<SDValue> &InVals)
2451 MachineFunction &MF = DAG.getMachineFunction();
2452 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
2454 const Function* Fn = MF.getFunction();
2455 if (Fn->hasExternalLinkage() &&
2456 Subtarget->isTargetCygMing() &&
2457 Fn->getName() == "main")
2458 FuncInfo->setForceFramePointer(true);
2460 MachineFrameInfo *MFI = MF.getFrameInfo();
2461 bool Is64Bit = Subtarget->is64Bit();
2462 bool IsWin64 = Subtarget->isCallingConvWin64(CallConv);
2464 assert(!(isVarArg && IsTailCallConvention(CallConv)) &&
2465 "Var args not supported with calling convention fastcc, ghc or hipe");
2467 // Assign locations to all of the incoming arguments.
2468 SmallVector<CCValAssign, 16> ArgLocs;
2469 CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
2471 // Allocate shadow area for Win64
2473 CCInfo.AllocateStack(32, 8);
2475 CCInfo.AnalyzeFormalArguments(Ins, CC_X86);
2477 unsigned LastVal = ~0U;
2479 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2480 CCValAssign &VA = ArgLocs[i];
2481 // TODO: If an arg is passed in two places (e.g. reg and stack), skip later occurrences.
2483 assert(VA.getValNo() != LastVal &&
2484 "Don't support value assigned to multiple locs yet");
2486 LastVal = VA.getValNo();
2488 if (VA.isRegLoc()) {
2489 EVT RegVT = VA.getLocVT();
2490 const TargetRegisterClass *RC;
2491 if (RegVT == MVT::i32)
2492 RC = &X86::GR32RegClass;
2493 else if (Is64Bit && RegVT == MVT::i64)
2494 RC = &X86::GR64RegClass;
2495 else if (RegVT == MVT::f32)
2496 RC = &X86::FR32RegClass;
2497 else if (RegVT == MVT::f64)
2498 RC = &X86::FR64RegClass;
2499 else if (RegVT.is512BitVector())
2500 RC = &X86::VR512RegClass;
2501 else if (RegVT.is256BitVector())
2502 RC = &X86::VR256RegClass;
2503 else if (RegVT.is128BitVector())
2504 RC = &X86::VR128RegClass;
2505 else if (RegVT == MVT::x86mmx)
2506 RC = &X86::VR64RegClass;
2507 else if (RegVT == MVT::i1)
2508 RC = &X86::VK1RegClass;
2509 else if (RegVT == MVT::v8i1)
2510 RC = &X86::VK8RegClass;
2511 else if (RegVT == MVT::v16i1)
2512 RC = &X86::VK16RegClass;
2513 else if (RegVT == MVT::v32i1)
2514 RC = &X86::VK32RegClass;
2515 else if (RegVT == MVT::v64i1)
2516 RC = &X86::VK64RegClass;
2518 llvm_unreachable("Unknown argument type!");
2520 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
2521 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
2523 // If this is an 8 or 16-bit value, it is really passed promoted to 32
2524 // bits. Insert an assert[sz]ext to capture this, then truncate to the right size.
2526 if (VA.getLocInfo() == CCValAssign::SExt)
2527 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
2528 DAG.getValueType(VA.getValVT()));
2529 else if (VA.getLocInfo() == CCValAssign::ZExt)
2530 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
2531 DAG.getValueType(VA.getValVT()));
2532 else if (VA.getLocInfo() == CCValAssign::BCvt)
2533 ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
2535 if (VA.isExtInLoc()) {
2536 // Handle MMX values passed in XMM regs.
2537 if (RegVT.isVector())
2538 ArgValue = DAG.getNode(X86ISD::MOVDQ2Q, dl, VA.getValVT(), ArgValue);
2540 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
2543 assert(VA.isMemLoc());
2544 ArgValue = LowerMemArgument(Chain, CallConv, Ins, dl, DAG, VA, MFI, i);
2547 // If the value is passed via a pointer, do a load.
2548 if (VA.getLocInfo() == CCValAssign::Indirect)
2549 ArgValue = DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue,
2550 MachinePointerInfo(), false, false, false, 0);
2552 InVals.push_back(ArgValue);
2555 if (Subtarget->is64Bit() || Subtarget->isTargetKnownWindowsMSVC()) {
2556 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2557 // The x86-64 ABIs require that for returning structs by value we copy
2558 // the sret argument into %rax/%eax (depending on ABI) for the return.
2559 // Win32 requires us to put the sret argument to %eax as well.
2560 // Save the argument into a virtual register so that we can access it
2561 // from the return points.
2562 if (Ins[i].Flags.isSRet()) {
2563 unsigned Reg = FuncInfo->getSRetReturnReg();
2565 MVT PtrTy = getPointerTy();
2566 Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrTy));
2567 FuncInfo->setSRetReturnReg(Reg);
2569 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[i]);
2570 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
2576 unsigned StackSize = CCInfo.getNextStackOffset();
2577 // Align stack specially for tail calls.
2578 if (FuncIsMadeTailCallSafe(CallConv,
2579 MF.getTarget().Options.GuaranteedTailCallOpt))
2580 StackSize = GetAlignedArgumentStackSize(StackSize, DAG);
2582 // If the function takes variable number of arguments, make a frame index for
2583 // the start of the first vararg value... for expansion of llvm.va_start. We
2584 // can skip this if there are no va_start calls.
2585 if (MFI->hasVAStart() &&
2586 (Is64Bit || (CallConv != CallingConv::X86_FastCall &&
2587 CallConv != CallingConv::X86_ThisCall))) {
2588 FuncInfo->setVarArgsFrameIndex(
2589 MFI->CreateFixedObject(1, StackSize, true));
2592 // Figure out if XMM registers are in use.
2593 assert(!(MF.getTarget().Options.UseSoftFloat &&
2594 Fn->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
2595 Attribute::NoImplicitFloat)) &&
2596 "SSE register cannot be used when SSE is disabled!");
2598 // 64-bit calling conventions support varargs and register parameters, so we
2599 // have to do extra work to spill them in the prologue.
2600 if (Is64Bit && isVarArg && MFI->hasVAStart()) {
2601 // Find the first unallocated argument registers.
2602 ArrayRef<MCPhysReg> ArgGPRs = get64BitArgumentGPRs(CallConv, Subtarget);
2603 ArrayRef<MCPhysReg> ArgXMMs = get64BitArgumentXMMs(MF, CallConv, Subtarget);
2604 unsigned NumIntRegs =
2605 CCInfo.getFirstUnallocated(ArgGPRs.data(), ArgGPRs.size());
2606 unsigned NumXMMRegs =
2607 CCInfo.getFirstUnallocated(ArgXMMs.data(), ArgXMMs.size());
2608 assert(!(NumXMMRegs && !Subtarget->hasSSE1()) &&
2609 "SSE register cannot be used when SSE is disabled!");
2611 // Gather all the live in physical registers.
2612 SmallVector<SDValue, 6> LiveGPRs;
2613 SmallVector<SDValue, 8> LiveXMMRegs;
2615 for (MCPhysReg Reg : ArgGPRs.slice(NumIntRegs)) {
2616 unsigned GPR = MF.addLiveIn(Reg, &X86::GR64RegClass);
2618 DAG.getCopyFromReg(Chain, dl, GPR, MVT::i64));
2620 if (!ArgXMMs.empty()) {
2621 unsigned AL = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
2622 ALVal = DAG.getCopyFromReg(Chain, dl, AL, MVT::i8);
2623 for (MCPhysReg Reg : ArgXMMs.slice(NumXMMRegs)) {
2624 unsigned XMMReg = MF.addLiveIn(Reg, &X86::VR128RegClass);
2625 LiveXMMRegs.push_back(
2626 DAG.getCopyFromReg(Chain, dl, XMMReg, MVT::v4f32));
2631 const TargetFrameLowering &TFI = *MF.getSubtarget().getFrameLowering();
2632 // Get to the caller-allocated home save location. Add 8 to account
2633 // for the return address.
2634 int HomeOffset = TFI.getOffsetOfLocalArea() + 8;
2635 FuncInfo->setRegSaveFrameIndex(
2636 MFI->CreateFixedObject(1, NumIntRegs * 8 + HomeOffset, false));
2637 // Fixup to set vararg frame on shadow area (4 x i64).
2639 FuncInfo->setVarArgsFrameIndex(FuncInfo->getRegSaveFrameIndex());
2641 // For X86-64, if there are vararg parameters that are passed via
2642 // registers, then we must store them to their spots on the stack so
2643 // they may be loaded by dereferencing the result of va_next.
2644 FuncInfo->setVarArgsGPOffset(NumIntRegs * 8);
2645 FuncInfo->setVarArgsFPOffset(ArgGPRs.size() * 8 + NumXMMRegs * 16);
2646 FuncInfo->setRegSaveFrameIndex(MFI->CreateStackObject(
2647 ArgGPRs.size() * 8 + ArgXMMs.size() * 16, 16, false));
2650 // Store the integer parameter registers.
2651 SmallVector<SDValue, 8> MemOps;
2652 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
2654 unsigned Offset = FuncInfo->getVarArgsGPOffset();
2655 for (SDValue Val : LiveGPRs) {
2656 SDValue FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), RSFIN,
2657 DAG.getIntPtrConstant(Offset));
2659 DAG.getStore(Val.getValue(1), dl, Val, FIN,
2660 MachinePointerInfo::getFixedStack(
2661 FuncInfo->getRegSaveFrameIndex(), Offset),
2663 MemOps.push_back(Store);
2667 if (!ArgXMMs.empty() && NumXMMRegs != ArgXMMs.size()) {
2668 // Now store the XMM (fp + vector) parameter registers.
2669 SmallVector<SDValue, 12> SaveXMMOps;
2670 SaveXMMOps.push_back(Chain);
2671 SaveXMMOps.push_back(ALVal);
2672 SaveXMMOps.push_back(DAG.getIntPtrConstant(
2673 FuncInfo->getRegSaveFrameIndex()));
2674 SaveXMMOps.push_back(DAG.getIntPtrConstant(
2675 FuncInfo->getVarArgsFPOffset()));
2676 SaveXMMOps.insert(SaveXMMOps.end(), LiveXMMRegs.begin(),
2678 MemOps.push_back(DAG.getNode(X86ISD::VASTART_SAVE_XMM_REGS, dl,
2679 MVT::Other, SaveXMMOps));
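// This pseudo is expanded after isel (via EmitInstrWithCustomInserter) into a
// test of AL and a guarded block of XMM stores, so the spills only execute
// when the caller actually passed vector arguments. (A high-level summary of
// the expansion.)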
2682 if (!MemOps.empty())
2683 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
2686 if (isVarArg && MFI->hasMustTailInVarArgFunc()) {
2687 // Find the largest legal vector type.
2688 MVT VecVT = MVT::Other;
2689 // FIXME: Only some x86_32 calling conventions support AVX512.
2690 if (Subtarget->hasAVX512() &&
2691 (Is64Bit || (CallConv == CallingConv::X86_VectorCall ||
2692 CallConv == CallingConv::Intel_OCL_BI)))
2693 VecVT = MVT::v16f32;
2694 else if (Subtarget->hasAVX())
2696 else if (Subtarget->hasSSE2())
2699 // We forward some GPRs and some vector types.
2700 SmallVector<MVT, 2> RegParmTypes;
2701 MVT IntVT = Is64Bit ? MVT::i64 : MVT::i32;
2702 RegParmTypes.push_back(IntVT);
2703 if (VecVT != MVT::Other)
2704 RegParmTypes.push_back(VecVT);
2706 // Compute the set of forwarded registers. The rest are scratch.
2707 SmallVectorImpl<ForwardedRegister> &Forwards =
2708 FuncInfo->getForwardedMustTailRegParms();
2709 CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, CC_X86);
2711 // Conservatively forward AL on x86_64, since it might be used for varargs.
2712 if (Is64Bit && !CCInfo.isAllocated(X86::AL)) {
2713 unsigned ALVReg = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
2714 Forwards.push_back(ForwardedRegister(ALVReg, X86::AL, MVT::i8));
2717 // Copy all forwards from physical to virtual registers.
2718 for (ForwardedRegister &F : Forwards) {
2719 // FIXME: Can we use a less constrained schedule?
2720 SDValue RegVal = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT);
2721 F.VReg = MF.getRegInfo().createVirtualRegister(getRegClassFor(F.VT));
2722 Chain = DAG.getCopyToReg(Chain, dl, F.VReg, RegVal);
2726 // Some CCs need callee pop.
2727 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
2728 MF.getTarget().Options.GuaranteedTailCallOpt)) {
2729 FuncInfo->setBytesToPopOnReturn(StackSize); // Callee pops everything.
2731 FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing.
2732 // If this is an sret function, the return should pop the hidden pointer.
2733 if (!Is64Bit && !IsTailCallConvention(CallConv) &&
2734 !Subtarget->getTargetTriple().isOSMSVCRT() &&
2735 argsAreStructReturn(Ins) == StackStructReturn)
2736 FuncInfo->setBytesToPopOnReturn(4);
2740 // RegSaveFrameIndex is X86-64 only.
2741 FuncInfo->setRegSaveFrameIndex(0xAAAAAAA);
2742 if (CallConv == CallingConv::X86_FastCall ||
2743 CallConv == CallingConv::X86_ThisCall)
2744 // fastcc functions can't have varargs.
2745 FuncInfo->setVarArgsFrameIndex(0xAAAAAAA);
2748 FuncInfo->setArgumentStackSize(StackSize);
2754 X86TargetLowering::LowerMemOpCallTo(SDValue Chain,
2755 SDValue StackPtr, SDValue Arg,
2756 SDLoc dl, SelectionDAG &DAG,
2757 const CCValAssign &VA,
2758 ISD::ArgFlagsTy Flags) const {
2759 unsigned LocMemOffset = VA.getLocMemOffset();
2760 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
2761 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
2762 if (Flags.isByVal())
2763 return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl);
2765 return DAG.getStore(Chain, dl, Arg, PtrOff,
2766 MachinePointerInfo::getStack(LocMemOffset),
2770 /// Emit a load of the return address if tail call
2771 /// optimization is performed and it is required.
2773 X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG,
2774 SDValue &OutRetAddr, SDValue Chain,
2775 bool IsTailCall, bool Is64Bit,
2776 int FPDiff, SDLoc dl) const {
2777 // Adjust the Return address stack slot.
2778 EVT VT = getPointerTy();
2779 OutRetAddr = getReturnAddressFrameIndex(DAG);
2781 // Load the "old" Return address.
2782 OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, MachinePointerInfo(),
2783 false, false, false, 0);
2784 return SDValue(OutRetAddr.getNode(), 1);
2787 /// Emit a store of the return address if tail call
2788 /// optimization is performed and it is required (FPDiff!=0).
2789 static SDValue EmitTailCallStoreRetAddr(SelectionDAG &DAG, MachineFunction &MF,
2790 SDValue Chain, SDValue RetAddrFrIdx,
2791 EVT PtrVT, unsigned SlotSize,
2792 int FPDiff, SDLoc dl) {
2793 // Store the return address to the appropriate stack slot.
2794 if (!FPDiff) return Chain;
2795 // Calculate the new stack slot for the return address.
2796 int NewReturnAddrFI =
2797 MF.getFrameInfo()->CreateFixedObject(SlotSize, (int64_t)FPDiff - SlotSize,
2799 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, PtrVT);
2800 Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx,
2801 MachinePointerInfo::getFixedStack(NewReturnAddrFI),
2807 X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
2808 SmallVectorImpl<SDValue> &InVals) const {
2809 SelectionDAG &DAG = CLI.DAG;
2811 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
2812 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
2813 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
2814 SDValue Chain = CLI.Chain;
2815 SDValue Callee = CLI.Callee;
2816 CallingConv::ID CallConv = CLI.CallConv;
2817 bool &isTailCall = CLI.IsTailCall;
2818 bool isVarArg = CLI.IsVarArg;
2820 MachineFunction &MF = DAG.getMachineFunction();
2821 bool Is64Bit = Subtarget->is64Bit();
2822 bool IsWin64 = Subtarget->isCallingConvWin64(CallConv);
2823 StructReturnType SR = callIsStructReturn(Outs);
2824 bool IsSibcall = false;
2825 X86MachineFunctionInfo *X86Info = MF.getInfo<X86MachineFunctionInfo>();
2827 if (MF.getTarget().Options.DisableTailCalls)
2830 bool IsMustTail = CLI.CS && CLI.CS->isMustTailCall();
2832 // Force this to be a tail call. The verifier rules are enough to ensure
2833 // that we can lower this successfully without moving the return address around.
2836 } else if (isTailCall) {
2837 // Check if it's really possible to do a tail call.
2838 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
2839 isVarArg, SR != NotStructReturn,
2840 MF.getFunction()->hasStructRetAttr(), CLI.RetTy,
2841 Outs, OutVals, Ins, DAG);
2843 // Sibcalls are automatically detected tailcalls which do not require ABI changes.
2845 if (!MF.getTarget().Options.GuaranteedTailCallOpt && isTailCall)
2852 assert(!(isVarArg && IsTailCallConvention(CallConv)) &&
2853 "Var args not supported with calling convention fastcc, ghc or hipe");
2855 // Analyze operands of the call, assigning locations to each operand.
2856 SmallVector<CCValAssign, 16> ArgLocs;
2857 CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
2859 // Allocate shadow area for Win64
2861 CCInfo.AllocateStack(32, 8);
2863 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
2865 // Get a count of how many bytes are to be pushed on the stack.
2866 unsigned NumBytes = CCInfo.getNextStackOffset();
2868 // This is a sibcall. The memory operands are already available in the
2869 // caller's own incoming argument area (its caller's stack).
2871 else if (MF.getTarget().Options.GuaranteedTailCallOpt &&
2872 IsTailCallConvention(CallConv))
2873 NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
2876 if (isTailCall && !IsSibcall && !IsMustTail) {
2877 // Lower arguments at fp - stackoffset + fpdiff.
2878 unsigned NumBytesCallerPushed = X86Info->getBytesToPopOnReturn();
2880 FPDiff = NumBytesCallerPushed - NumBytes;
2882 // Set the delta of movement of the returnaddr stackslot.
2883 // But only set if delta is greater than previous delta.
2884 if (FPDiff < X86Info->getTCReturnAddrDelta())
2885 X86Info->setTCReturnAddrDelta(FPDiff);
2888 unsigned NumBytesToPush = NumBytes;
2889 unsigned NumBytesToPop = NumBytes;
2891 // If we have an inalloca argument, all stack space has already been allocated
2892 // for us and will be right at the top of the stack. We don't support multiple
2893 // arguments passed in memory when using inalloca.
2894 if (!Outs.empty() && Outs.back().Flags.isInAlloca()) {
2896 if (!ArgLocs.back().isMemLoc())
2897 report_fatal_error("cannot use inalloca attribute on a register "
2899 if (ArgLocs.back().getLocMemOffset() != 0)
2900 report_fatal_error("any parameter with the inalloca attribute must be "
2901 "the only memory argument");
2905 Chain = DAG.getCALLSEQ_START(
2906 Chain, DAG.getIntPtrConstant(NumBytesToPush, true), dl);
2908 SDValue RetAddrFrIdx;
2909 // Load return address for tail calls.
2910 if (isTailCall && FPDiff)
2911 Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, isTailCall,
2912 Is64Bit, FPDiff, dl);
2914 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
2915 SmallVector<SDValue, 8> MemOpChains;
2918 // Walk the register/memloc assignments, inserting copies/loads. In the case
2919 // of tail call optimization, arguments are handled later.
2920 const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
2921 DAG.getSubtarget().getRegisterInfo());
2922 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2923 // Skip inalloca arguments, they have already been written.
2924 ISD::ArgFlagsTy Flags = Outs[i].Flags;
2925 if (Flags.isInAlloca())
2928 CCValAssign &VA = ArgLocs[i];
2929 EVT RegVT = VA.getLocVT();
2930 SDValue Arg = OutVals[i];
2931 bool isByVal = Flags.isByVal();
2933 // Promote the value if needed.
2934 switch (VA.getLocInfo()) {
2935 default: llvm_unreachable("Unknown loc info!");
2936 case CCValAssign::Full: break;
2937 case CCValAssign::SExt:
2938 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, RegVT, Arg);
2940 case CCValAssign::ZExt:
2941 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, RegVT, Arg);
2943 case CCValAssign::AExt:
2944 if (RegVT.is128BitVector()) {
2945 // Special case: passing MMX values in XMM registers.
2946 Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);
2947 Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg);
2948 Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg);
2950 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, RegVT, Arg);
2952 case CCValAssign::BCvt:
2953 Arg = DAG.getNode(ISD::BITCAST, dl, RegVT, Arg);
2955 case CCValAssign::Indirect: {
2956 // Store the argument.
2957 SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
2958 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
2959 Chain = DAG.getStore(Chain, dl, Arg, SpillSlot,
2960 MachinePointerInfo::getFixedStack(FI),
2967 if (VA.isRegLoc()) {
2968 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
2969 if (isVarArg && IsWin64) {
2970 // The Win64 ABI requires an argument passed in an XMM reg to be copied to
2971 // the corresponding shadow reg if the callee is a varargs function.
2972 unsigned ShadowReg = 0;
2973 switch (VA.getLocReg()) {
2974 case X86::XMM0: ShadowReg = X86::RCX; break;
2975 case X86::XMM1: ShadowReg = X86::RDX; break;
2976 case X86::XMM2: ShadowReg = X86::R8; break;
2977 case X86::XMM3: ShadowReg = X86::R9; break;
2980 RegsToPass.push_back(std::make_pair(ShadowReg, Arg));
2982 } else if (!IsSibcall && (!isTailCall || isByVal)) {
2983 assert(VA.isMemLoc());
2984 if (!StackPtr.getNode())
2985 StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
2987 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
2988 dl, DAG, VA, Flags));
2992 if (!MemOpChains.empty())
2993 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
2995 if (Subtarget->isPICStyleGOT()) {
2996 // ELF / PIC requires GOT in the EBX register before function calls via PLT
2999 RegsToPass.push_back(std::make_pair(unsigned(X86::EBX),
3000 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), getPointerTy())));
3002 // If we are tail calling and generating PIC/GOT style code load the
3003 // address of the callee into ECX. The value in ecx is used as target of
3004 // the tail jump. This is done to circumvent the ebx/callee-saved problem
3005 // for tail calls on PIC/GOT architectures. Normally we would just put the
3006 // address of GOT into ebx and then call target@PLT. But for tail calls
3007 // ebx would be restored (since ebx is callee saved) before jumping to the callee.
3010 // Note: The actual moving to ECX is done further down.
3011 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
3012 if (G && !G->getGlobal()->hasHiddenVisibility() &&
3013 !G->getGlobal()->hasProtectedVisibility())
3014 Callee = LowerGlobalAddress(Callee, DAG);
3015 else if (isa<ExternalSymbolSDNode>(Callee))
3016 Callee = LowerExternalSymbol(Callee, DAG);
3020 if (Is64Bit && isVarArg && !IsWin64 && !IsMustTail) {
3021 // From AMD64 ABI document:
3022 // For calls that may call functions that use varargs or stdargs
3023 // (prototype-less calls or calls to functions containing ellipsis (...) in
3024 // the declaration) %al is used as hidden argument to specify the number
3025 // of SSE registers used. The contents of %al do not need to match exactly
3026 // the number of registers, but must be an upper bound on the number of SSE
3027 // registers used and is in the range 0 - 8 inclusive.
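// For example, a variadic call that passes a single double in %xmm0 would
// typically set %al to 1 before the call, while a call passing only integer
// arguments may set it to 0; any upper bound up to 8 is equally valid.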
3029 // Count the number of XMM registers allocated.
3030 static const MCPhysReg XMMArgRegs[] = {
3031 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
3032 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
3034 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);
3035 assert((Subtarget->hasSSE1() || !NumXMMRegs)
3036 && "SSE registers cannot be used when SSE is disabled");
3038 RegsToPass.push_back(std::make_pair(unsigned(X86::AL),
3039 DAG.getConstant(NumXMMRegs, MVT::i8)));
3042 if (isVarArg && IsMustTail) {
3043 const auto &Forwards = X86Info->getForwardedMustTailRegParms();
3044 for (const auto &F : Forwards) {
3045 SDValue Val = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT);
3046 RegsToPass.push_back(std::make_pair(unsigned(F.PReg), Val));
3050 // For tail calls lower the arguments to the 'real' stack slots. Sibcalls
3051 // don't need this because the eligibility check rejects calls that require
3052 // shuffling arguments passed in memory.
3053 if (!IsSibcall && isTailCall) {
3054 // Force all the incoming stack arguments to be loaded from the stack
3055 // before any new outgoing arguments are stored to the stack, because the
3056 // outgoing stack slots may alias the incoming argument stack slots, and
3057 // the alias isn't otherwise explicit. This is slightly more conservative
3058 // than necessary, because it means that each store effectively depends
3059 // on every argument instead of just those arguments it would clobber.
3060 SDValue ArgChain = DAG.getStackArgumentTokenFactor(Chain);
3062 SmallVector<SDValue, 8> MemOpChains2;
3065 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3066 CCValAssign &VA = ArgLocs[i];
3069 assert(VA.isMemLoc());
3070 SDValue Arg = OutVals[i];
3071 ISD::ArgFlagsTy Flags = Outs[i].Flags;
3072 // Skip inalloca arguments. They don't require any work.
3073 if (Flags.isInAlloca())
3075 // Create frame index.
3076 int32_t Offset = VA.getLocMemOffset()+FPDiff;
3077 uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8;
3078 FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true);
3079 FIN = DAG.getFrameIndex(FI, getPointerTy());
3081 if (Flags.isByVal()) {
3082 // Copy relative to framepointer.
3083 SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset());
3084 if (!StackPtr.getNode())
3085 StackPtr = DAG.getCopyFromReg(Chain, dl,
3086 RegInfo->getStackRegister(),
3088 Source = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, Source);
3090 MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN,
3094 // Store relative to framepointer.
3095 MemOpChains2.push_back(
3096 DAG.getStore(ArgChain, dl, Arg, FIN,
3097 MachinePointerInfo::getFixedStack(FI),
3102 if (!MemOpChains2.empty())
3103 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);
3105 // Store the return address to the appropriate stack slot.
3106 Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx,
3107 getPointerTy(), RegInfo->getSlotSize(),
3111 // Build a sequence of copy-to-reg nodes chained together with token chain
3112 // and flag operands which copy the outgoing args into registers.
3114 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
3115 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
3116 RegsToPass[i].second, InFlag);
3117 InFlag = Chain.getValue(1);
3120 if (DAG.getTarget().getCodeModel() == CodeModel::Large) {
3121 assert(Is64Bit && "Large code model is only legal in 64-bit mode.");
3122 // In the 64-bit large code model, we have to make all calls
3123 // through a register, since the call instruction's 32-bit
3124 // pc-relative offset may not be large enough to hold the whole address.
3126 } else if (Callee->getOpcode() == ISD::GlobalAddress) {
3127 // If the callee is a GlobalAddress node (quite common, every direct call
3128 // is) turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
3130 GlobalAddressSDNode* G = cast<GlobalAddressSDNode>(Callee);
3132 // We should use extra load for direct calls to dllimported functions in
3134 const GlobalValue *GV = G->getGlobal();
3135 if (!GV->hasDLLImportStorageClass()) {
3136 unsigned char OpFlags = 0;
3137 bool ExtraLoad = false;
3138 unsigned WrapperKind = ISD::DELETED_NODE;
3140 // On ELF targets, in both X86-64 and X86-32 mode, direct calls to
3141 // external symbols must go through the PLT in PIC mode. If the symbol
3142 // has hidden or protected visibility, or if it is static or local, then
3143 // we don't need to use the PLT - we can directly call it.
3144 if (Subtarget->isTargetELF() &&
3145 DAG.getTarget().getRelocationModel() == Reloc::PIC_ &&
3146 GV->hasDefaultVisibility() && !GV->hasLocalLinkage()) {
3147 OpFlags = X86II::MO_PLT;
3148 } else if (Subtarget->isPICStyleStubAny() &&
3149 (GV->isDeclaration() || GV->isWeakForLinker()) &&
3150 (!Subtarget->getTargetTriple().isMacOSX() ||
3151 Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) {
3152 // PC-relative references to external symbols should go through $stub,
3153 // unless we're building with the leopard linker or later, which
3154 // automatically synthesizes these stubs.
3155 OpFlags = X86II::MO_DARWIN_STUB;
3156 } else if (Subtarget->isPICStyleRIPRel() &&
3157 isa<Function>(GV) &&
3158 cast<Function>(GV)->getAttributes().
3159 hasAttribute(AttributeSet::FunctionIndex,
3160 Attribute::NonLazyBind)) {
3161 // If the function is marked as non-lazy, generate an indirect call
3162 // which loads from the GOT directly. This avoids runtime overhead
3163 // at the cost of eager binding (and one extra byte of encoding).
3164 OpFlags = X86II::MO_GOTPCREL;
3165 WrapperKind = X86ISD::WrapperRIP;
3169 Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(),
3170 G->getOffset(), OpFlags);
3172 // Add a wrapper if needed.
3173 if (WrapperKind != ISD::DELETED_NODE)
3174 Callee = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Callee);
3175 // Add extra indirection if needed.
3177 Callee = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Callee,
3178 MachinePointerInfo::getGOT(),
3179 false, false, false, 0);
3181 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
3182 unsigned char OpFlags = 0;
3184 // On ELF targets, in either X86-64 or X86-32 mode, direct calls to
3185 // external symbols should go through the PLT.
3186 if (Subtarget->isTargetELF() &&
3187 DAG.getTarget().getRelocationModel() == Reloc::PIC_) {
3188 OpFlags = X86II::MO_PLT;
3189 } else if (Subtarget->isPICStyleStubAny() &&
3190 (!Subtarget->getTargetTriple().isMacOSX() ||
3191 Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) {
3192 // PC-relative references to external symbols should go through $stub,
3193 // unless we're building with the leopard linker or later, which
3194 // automatically synthesizes these stubs.
3195 OpFlags = X86II::MO_DARWIN_STUB;
3198 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy(),
3200 } else if (Subtarget->isTarget64BitILP32() && Callee->getValueType(0) == MVT::i32) {
3201 // Zero-extend the 32-bit Callee address into a 64-bit one, per the x32 ABI.
3202 Callee = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Callee);
3205 // Returns a chain & a flag for retval copy to use.
3206 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
3207 SmallVector<SDValue, 8> Ops;
3209 if (!IsSibcall && isTailCall) {
3210 Chain = DAG.getCALLSEQ_END(Chain,
3211 DAG.getIntPtrConstant(NumBytesToPop, true),
3212 DAG.getIntPtrConstant(0, true), InFlag, dl);
3213 InFlag = Chain.getValue(1);
3216 Ops.push_back(Chain);
3217 Ops.push_back(Callee);
3220 Ops.push_back(DAG.getConstant(FPDiff, MVT::i32));
3222 // Add argument registers to the end of the list so that they are known live into the call.
3224 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
3225 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
3226 RegsToPass[i].second.getValueType()));
3228 // Add a register mask operand representing the call-preserved registers.
3229 const TargetRegisterInfo *TRI = DAG.getSubtarget().getRegisterInfo();
3230 const uint32_t *Mask = TRI->getCallPreservedMask(CallConv);
3231 assert(Mask && "Missing call preserved mask for calling convention");
3232 Ops.push_back(DAG.getRegisterMask(Mask));
3234 if (InFlag.getNode())
3235 Ops.push_back(InFlag);
3239 //// If this is the first return lowered for this function, add the regs
3240 //// to the liveout set for the function.
3241 // This isn't right, although it's probably harmless on x86; liveouts
3242 // should be computed from returns not tail calls. Consider a void
3243 // function making a tail call to a function returning int.
3244 return DAG.getNode(X86ISD::TC_RETURN, dl, NodeTys, Ops);
3247 Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, Ops);
3248 InFlag = Chain.getValue(1);
3250 // Create the CALLSEQ_END node.
3251 unsigned NumBytesForCalleeToPop;
3252 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
3253 DAG.getTarget().Options.GuaranteedTailCallOpt))
3254 NumBytesForCalleeToPop = NumBytes; // Callee pops everything
3255 else if (!Is64Bit && !IsTailCallConvention(CallConv) &&
3256 !Subtarget->getTargetTriple().isOSMSVCRT() &&
3257 SR == StackStructReturn)
3258 // If this is a call to a struct-return function, the callee
3259 // pops the hidden struct pointer, so we have to push it back.
3260 // This is common for Darwin/X86, Linux & Mingw32 targets.
3261 // For MSVC Win32 targets, the caller pops the hidden struct pointer.
3262 NumBytesForCalleeToPop = 4;
3264 NumBytesForCalleeToPop = 0; // Callee pops nothing.
3266 // Returns a flag for retval copy to use.
3268 Chain = DAG.getCALLSEQ_END(Chain,
3269 DAG.getIntPtrConstant(NumBytesToPop, true),
3270 DAG.getIntPtrConstant(NumBytesForCalleeToPop,
3273 InFlag = Chain.getValue(1);
3276 // Handle result values, copying them out of physregs into vregs that we
3278 return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
3279 Ins, dl, DAG, InVals);
3282 //===----------------------------------------------------------------------===//
3283 // Fast Calling Convention (tail call) implementation
3284 //===----------------------------------------------------------------------===//
3286 // Like stdcall, the callee cleans up the arguments; the convention differs in
3287 // that ECX is reserved for storing the tail called function address. Only 2
3288 // registers are free for argument passing (inreg). Tail call optimization is performed when:
3290 // * tailcallopt is enabled
3291 // * caller/callee are fastcc
3292 // On X86_64 architecture with GOT-style position independent code only local
3293 // (within module) calls are supported at the moment.
3294 // To keep the stack aligned according to the platform ABI, the function
3295 // GetAlignedArgumentStackSize ensures that the argument delta is always a
3296 // multiple of the stack alignment. (Dynamic linkers need this - darwin's dyld for example)
3297 // If a tail called function callee has more arguments than the caller the
3298 // caller needs to make sure that there is room to move the RETADDR to. This is
3299 // achieved by reserving an area the size of the argument delta right after the
3300 // original RETADDR, but before the saved framepointer or the spilled registers
3301 // e.g. caller(arg1, arg2) calls callee(arg1, arg2,arg3,arg4)
3313 /// GetAlignedArgumentStackSize - Align the stack size, e.g. to 16n + 12 for a
3314 /// 16-byte alignment requirement.
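/// For example (assuming a 16-byte stack alignment and 4-byte return-address
/// slots, as on 32-bit x86), a StackSize of 20 is rounded up to 28 (16*1 + 12)
/// and a StackSize of 30 to 44 (16*2 + 12), so that pushing the return address
/// restores 16-byte alignment.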
3316 X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
3317 SelectionDAG& DAG) const {
3318 MachineFunction &MF = DAG.getMachineFunction();
3319 const TargetMachine &TM = MF.getTarget();
3320 const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
3321 TM.getSubtargetImpl()->getRegisterInfo());
3322 const TargetFrameLowering &TFI = *TM.getSubtargetImpl()->getFrameLowering();
3323 unsigned StackAlignment = TFI.getStackAlignment();
3324 uint64_t AlignMask = StackAlignment - 1;
3325 int64_t Offset = StackSize;
3326 unsigned SlotSize = RegInfo->getSlotSize();
3327 if ( (Offset & AlignMask) <= (StackAlignment - SlotSize) ) {
3328 // The misalignment is at most StackAlignment - SlotSize (e.g. 12), so just add the difference.
3329 Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask));
3331 // Mask out lower bits, add stackalignment once plus the 12 bytes.
3332 Offset = ((~AlignMask) & Offset) + StackAlignment +
3333 (StackAlignment-SlotSize);
3338 /// MatchingStackOffset - Return true if the given stack call argument is
3339 /// already available in the same position (relatively) of the caller's
3340 /// incoming argument stack.
3342 bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
3343 MachineFrameInfo *MFI, const MachineRegisterInfo *MRI,
3344 const X86InstrInfo *TII) {
3345 unsigned Bytes = Arg.getValueType().getSizeInBits() / 8;
3347 if (Arg.getOpcode() == ISD::CopyFromReg) {
3348 unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
3349 if (!TargetRegisterInfo::isVirtualRegister(VR))
3351 MachineInstr *Def = MRI->getVRegDef(VR);
3354 if (!Flags.isByVal()) {
3355 if (!TII->isLoadFromStackSlot(Def, FI))
3358 unsigned Opcode = Def->getOpcode();
3359 if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r) &&
3360 Def->getOperand(1).isFI()) {
3361 FI = Def->getOperand(1).getIndex();
3362 Bytes = Flags.getByValSize();
3366 } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
3367 if (Flags.isByVal())
3368 // ByVal argument is passed in as a pointer but it's now being
3369 // dereferenced. e.g.
3370 // define @foo(%struct.X* %A) {
3371 // tail call @bar(%struct.X* byval %A)
3374 SDValue Ptr = Ld->getBasePtr();
3375 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
3378 FI = FINode->getIndex();
3379 } else if (Arg.getOpcode() == ISD::FrameIndex && Flags.isByVal()) {
3380 FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Arg);
3381 FI = FINode->getIndex();
3382 Bytes = Flags.getByValSize();
3386 assert(FI != INT_MAX);
3387 if (!MFI->isFixedObjectIndex(FI))
3389 return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI);
3392 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
3393 /// for tail call optimization. Targets which want to do tail call
3394 /// optimization should implement this function.
3396 X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
3397 CallingConv::ID CalleeCC,
3399 bool isCalleeStructRet,
3400 bool isCallerStructRet,
3402 const SmallVectorImpl<ISD::OutputArg> &Outs,
3403 const SmallVectorImpl<SDValue> &OutVals,
3404 const SmallVectorImpl<ISD::InputArg> &Ins,
3405 SelectionDAG &DAG) const {
3406 if (!IsTailCallConvention(CalleeCC) && !IsCCallConvention(CalleeCC))
3409 // If -tailcallopt is specified, make fastcc functions tail-callable.
3410 const MachineFunction &MF = DAG.getMachineFunction();
3411 const Function *CallerF = MF.getFunction();
3413 // If the function return type is x86_fp80 and the callee return type is not,
3414 // then the FP_EXTEND of the call result is not a nop. It's not safe to
3415 // perform a tailcall optimization here.
3416 if (CallerF->getReturnType()->isX86_FP80Ty() && !RetTy->isX86_FP80Ty())
3419 CallingConv::ID CallerCC = CallerF->getCallingConv();
3420 bool CCMatch = CallerCC == CalleeCC;
3421 bool IsCalleeWin64 = Subtarget->isCallingConvWin64(CalleeCC);
3422 bool IsCallerWin64 = Subtarget->isCallingConvWin64(CallerCC);
3424 if (DAG.getTarget().Options.GuaranteedTailCallOpt) {
3425 if (IsTailCallConvention(CalleeCC) && CCMatch)
3430 // Look for obvious safe cases to perform tail call optimization that do not
3431 // require ABI changes. This is what gcc calls sibcall.
3433 // Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to
3434 // emit a special epilogue.
3435 const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
3436 DAG.getSubtarget().getRegisterInfo());
3437 if (RegInfo->needsStackRealignment(MF))
3440 // Also avoid sibcall optimization if either caller or callee uses struct
3441 // return semantics.
3442 if (isCalleeStructRet || isCallerStructRet)
3445 // A stdcall/thiscall caller is expected to clean up its arguments; the
3446 // callee isn't going to do that.
3447 // FIXME: this is more restrictive than needed. We could produce a tailcall
3448 // when the stack adjustment matches. For example, with a thiscall that takes
3449 // only one argument.
3450 if (!CCMatch && (CallerCC == CallingConv::X86_StdCall ||
3451 CallerCC == CallingConv::X86_ThisCall))
3454 // Do not sibcall optimize vararg calls unless all arguments are passed via
3456 if (isVarArg && !Outs.empty()) {
3458 // Optimizing for varargs on Win64 is unlikely to be safe without
3459 // additional testing.
3460 if (IsCalleeWin64 || IsCallerWin64)
3463 SmallVector<CCValAssign, 16> ArgLocs;
3464 CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), ArgLocs,
3467 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
3468 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
3469 if (!ArgLocs[i].isRegLoc())
3473 // If the call result is in ST0 / ST1, it needs to be popped off the x87
3474 // stack. Therefore, if it's not used by the call it is not safe to optimize
3475 // this into a sibcall.
3476 bool Unused = false;
3477 for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
3484 SmallVector<CCValAssign, 16> RVLocs;
3485 CCState CCInfo(CalleeCC, false, DAG.getMachineFunction(), RVLocs,
3487 CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
3488 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
3489 CCValAssign &VA = RVLocs[i];
3490 if (VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1)
3495 // If the calling conventions do not match, then we'd better make sure the
3496 // results are returned in the same way as what the caller expects.
3498 SmallVector<CCValAssign, 16> RVLocs1;
3499 CCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(), RVLocs1,
3501 CCInfo1.AnalyzeCallResult(Ins, RetCC_X86);
3503 SmallVector<CCValAssign, 16> RVLocs2;
3504 CCState CCInfo2(CallerCC, false, DAG.getMachineFunction(), RVLocs2,
3506 CCInfo2.AnalyzeCallResult(Ins, RetCC_X86);
3508 if (RVLocs1.size() != RVLocs2.size())
3510 for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) {
3511 if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc())
3513 if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo())
3515 if (RVLocs1[i].isRegLoc()) {
3516 if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg())
3519 if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset())
3525 // If the callee takes no arguments then go on to check the results of the call.
3527 if (!Outs.empty()) {
3528 // Check if stack adjustment is needed. For now, do not do this if any
3529 // argument is passed on the stack.
3530 SmallVector<CCValAssign, 16> ArgLocs;
3531 CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), ArgLocs,
3534 // Allocate shadow area for Win64
3536 CCInfo.AllocateStack(32, 8);
3538 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
3539 if (CCInfo.getNextStackOffset()) {
3540 MachineFunction &MF = DAG.getMachineFunction();
3541 if (MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn())
3544 // Check if the arguments are already laid out in the right way as
3545 // the caller's fixed stack objects.
3546 MachineFrameInfo *MFI = MF.getFrameInfo();
3547 const MachineRegisterInfo *MRI = &MF.getRegInfo();
3548 const X86InstrInfo *TII =
3549 static_cast<const X86InstrInfo *>(DAG.getSubtarget().getInstrInfo());
3550 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3551 CCValAssign &VA = ArgLocs[i];
3552 SDValue Arg = OutVals[i];
3553 ISD::ArgFlagsTy Flags = Outs[i].Flags;
3554 if (VA.getLocInfo() == CCValAssign::Indirect)
3556 if (!VA.isRegLoc()) {
3557 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
3564 // If the tailcall address may be in a register, then make sure it's
3565 // possible to register allocate for it. In 32-bit, the call address can
3566 // only target EAX, EDX, or ECX since the tail call must be scheduled after
3567 // callee-saved registers are restored. These happen to be the same
3568 // registers used to pass 'inreg' arguments so watch out for those.
3569 if (!Subtarget->is64Bit() &&
3570 ((!isa<GlobalAddressSDNode>(Callee) &&
3571 !isa<ExternalSymbolSDNode>(Callee)) ||
3572 DAG.getTarget().getRelocationModel() == Reloc::PIC_)) {
3573 unsigned NumInRegs = 0;
3574 // In PIC we need an extra register to formulate the address computation
3576 unsigned MaxInRegs =
3577 (DAG.getTarget().getRelocationModel() == Reloc::PIC_) ? 2 : 3;
3579 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3580 CCValAssign &VA = ArgLocs[i];
3583 unsigned Reg = VA.getLocReg();
3586 case X86::EAX: case X86::EDX: case X86::ECX:
3587 if (++NumInRegs == MaxInRegs)
3599 X86TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
3600 const TargetLibraryInfo *libInfo) const {
3601 return X86::createFastISel(funcInfo, libInfo);
3604 //===----------------------------------------------------------------------===//
3605 // Other Lowering Hooks
3606 //===----------------------------------------------------------------------===//
3608 static bool MayFoldLoad(SDValue Op) {
3609 return Op.hasOneUse() && ISD::isNormalLoad(Op.getNode());
3612 static bool MayFoldIntoStore(SDValue Op) {
3613 return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->use_begin());
3616 static bool isTargetShuffle(unsigned Opcode) {
3618 default: return false;
3619 case X86ISD::BLENDI:
3620 case X86ISD::PSHUFB:
3621 case X86ISD::PSHUFD:
3622 case X86ISD::PSHUFHW:
3623 case X86ISD::PSHUFLW:
3625 case X86ISD::PALIGNR:
3626 case X86ISD::MOVLHPS:
3627 case X86ISD::MOVLHPD:
3628 case X86ISD::MOVHLPS:
3629 case X86ISD::MOVLPS:
3630 case X86ISD::MOVLPD:
3631 case X86ISD::MOVSHDUP:
3632 case X86ISD::MOVSLDUP:
3633 case X86ISD::MOVDDUP:
3636 case X86ISD::UNPCKL:
3637 case X86ISD::UNPCKH:
3638 case X86ISD::VPERMILPI:
3639 case X86ISD::VPERM2X128:
3640 case X86ISD::VPERMI:
3645 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3646 SDValue V1, SelectionDAG &DAG) {
3648 default: llvm_unreachable("Unknown x86 shuffle node");
3649 case X86ISD::MOVSHDUP:
3650 case X86ISD::MOVSLDUP:
3651 case X86ISD::MOVDDUP:
3652 return DAG.getNode(Opc, dl, VT, V1);
3656 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3657 SDValue V1, unsigned TargetMask,
3658 SelectionDAG &DAG) {
3660 default: llvm_unreachable("Unknown x86 shuffle node");
3661 case X86ISD::PSHUFD:
3662 case X86ISD::PSHUFHW:
3663 case X86ISD::PSHUFLW:
3664 case X86ISD::VPERMILPI:
3665 case X86ISD::VPERMI:
3666 return DAG.getNode(Opc, dl, VT, V1, DAG.getConstant(TargetMask, MVT::i8));
3670 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3671 SDValue V1, SDValue V2, unsigned TargetMask,
3672 SelectionDAG &DAG) {
3674 default: llvm_unreachable("Unknown x86 shuffle node");
3675 case X86ISD::PALIGNR:
3676 case X86ISD::VALIGN:
3678 case X86ISD::VPERM2X128:
3679 return DAG.getNode(Opc, dl, VT, V1, V2,
3680 DAG.getConstant(TargetMask, MVT::i8));
3684 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3685 SDValue V1, SDValue V2, SelectionDAG &DAG) {
3687 default: llvm_unreachable("Unknown x86 shuffle node");
3688 case X86ISD::MOVLHPS:
3689 case X86ISD::MOVLHPD:
3690 case X86ISD::MOVHLPS:
3691 case X86ISD::MOVLPS:
3692 case X86ISD::MOVLPD:
3695 case X86ISD::UNPCKL:
3696 case X86ISD::UNPCKH:
3697 return DAG.getNode(Opc, dl, VT, V1, V2);
3701 SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
3702 MachineFunction &MF = DAG.getMachineFunction();
3703 const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
3704 DAG.getSubtarget().getRegisterInfo());
3705 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
3706 int ReturnAddrIndex = FuncInfo->getRAIndex();
3708 if (ReturnAddrIndex == 0) {
3709 // Set up a frame object for the return address.
3710 unsigned SlotSize = RegInfo->getSlotSize();
3711 ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(SlotSize,
3714 FuncInfo->setRAIndex(ReturnAddrIndex);
3717 return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy());
3720 bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
3721 bool hasSymbolicDisplacement) {
3722 // Offset should fit into a 32-bit immediate field.
3723 if (!isInt<32>(Offset))
3726 // If we don't have a symbolic displacement - we don't have any extra restrictions.
3728 if (!hasSymbolicDisplacement)
3731 // FIXME: Some tweaks might be needed for medium code model.
3732 if (M != CodeModel::Small && M != CodeModel::Kernel)
3735 // For the small code model we assume that the latest object is 16MB before the
3736 // end of the 31-bit boundary. We may also accept pretty large negative
3737 // constants, knowing that all objects are in the positive half of the address space.
3738 if (M == CodeModel::Small && Offset < 16*1024*1024)
3741 // For the kernel code model we know that all objects reside in the negative
3742 // half of the 32-bit address space. We may not accept negative offsets, since
3743 // they may be just off, but we may accept pretty large positive ones.
3744 if (M == CodeModel::Kernel && Offset >= 0)
3750 /// isCalleePop - Determines whether the callee is required to pop its
3751 /// own arguments. Callee pop is necessary to support tail calls.
3752 bool X86::isCalleePop(CallingConv::ID CallingConv,
3753 bool is64Bit, bool IsVarArg, bool TailCallOpt) {
3754 switch (CallingConv) {
3757 case CallingConv::X86_StdCall:
3758 case CallingConv::X86_FastCall:
3759 case CallingConv::X86_ThisCall:
3761 case CallingConv::Fast:
3762 case CallingConv::GHC:
3763 case CallingConv::HiPE:
3770 /// \brief Return true if the condition is an unsigned comparison operation.
3771 static bool isX86CCUnsigned(unsigned X86CC) {
3773 default: llvm_unreachable("Invalid integer condition!");
3774 case X86::COND_E: return true;
3775 case X86::COND_G: return false;
3776 case X86::COND_GE: return false;
3777 case X86::COND_L: return false;
3778 case X86::COND_LE: return false;
3779 case X86::COND_NE: return true;
3780 case X86::COND_B: return true;
3781 case X86::COND_A: return true;
3782 case X86::COND_BE: return true;
3783 case X86::COND_AE: return true;
3785 llvm_unreachable("covered switch fell through?!");
3788 /// TranslateX86CC - do a one-to-one translation of an ISD::CondCode to the X86
3789 /// specific condition code, returning the condition code and the LHS/RHS of the
3790 /// comparison to make.
3791 static unsigned TranslateX86CC(ISD::CondCode SetCCOpcode, bool isFP,
3792 SDValue &LHS, SDValue &RHS, SelectionDAG &DAG) {
3794 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
3795 if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
3796 // X > -1 -> X == 0, jump !sign.
3797 RHS = DAG.getConstant(0, RHS.getValueType());
3798 return X86::COND_NS;
3800 if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
3801 // X < 0 -> X == 0, jump on sign.
3804 if (SetCCOpcode == ISD::SETLT && RHSC->getZExtValue() == 1) {
3806 RHS = DAG.getConstant(0, RHS.getValueType());
3807 return X86::COND_LE;
3811 switch (SetCCOpcode) {
3812 default: llvm_unreachable("Invalid integer condition!");
3813 case ISD::SETEQ: return X86::COND_E;
3814 case ISD::SETGT: return X86::COND_G;
3815 case ISD::SETGE: return X86::COND_GE;
3816 case ISD::SETLT: return X86::COND_L;
3817 case ISD::SETLE: return X86::COND_LE;
3818 case ISD::SETNE: return X86::COND_NE;
3819 case ISD::SETULT: return X86::COND_B;
3820 case ISD::SETUGT: return X86::COND_A;
3821 case ISD::SETULE: return X86::COND_BE;
3822 case ISD::SETUGE: return X86::COND_AE;
3826 // First determine if it is required or is profitable to flip the operands.
3828 // If LHS is a foldable load, but RHS is not, flip the condition.
3829 if (ISD::isNON_EXTLoad(LHS.getNode()) &&
3830 !ISD::isNON_EXTLoad(RHS.getNode())) {
3831 SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode);
3832 std::swap(LHS, RHS);
3835 switch (SetCCOpcode) {
3841 std::swap(LHS, RHS);
3845 // On a floating point condition, the flags are set as follows:
3846 //   ZF | PF | CF | result
3847 //    0 |  0 |  0 | X > Y
3848 //    0 |  0 |  1 | X < Y
3849 //    1 |  0 |  0 | X == Y
3850 //    1 |  1 |  1 | unordered
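// For example, an ordered X > Y maps directly to COND_A (the "above" condition,
// CF == 0 and ZF == 0 per the table), and an ordered X < Y is first commuted to
// Y > X so that it can use COND_A as well.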
3851 switch (SetCCOpcode) {
3852 default: llvm_unreachable("Condcode should be pre-legalized away");
3854 case ISD::SETEQ: return X86::COND_E;
3855 case ISD::SETOLT: // flipped
3857 case ISD::SETGT: return X86::COND_A;
3858 case ISD::SETOLE: // flipped
3860 case ISD::SETGE: return X86::COND_AE;
3861 case ISD::SETUGT: // flipped
3863 case ISD::SETLT: return X86::COND_B;
3864 case ISD::SETUGE: // flipped
3866 case ISD::SETLE: return X86::COND_BE;
3868 case ISD::SETNE: return X86::COND_NE;
3869 case ISD::SETUO: return X86::COND_P;
3870 case ISD::SETO: return X86::COND_NP;
3872 case ISD::SETUNE: return X86::COND_INVALID;
3876 /// hasFPCMov - is there a floating point cmov for the specific X86 condition
3877 /// code. The current x86 ISA includes the following FP cmov instructions:
3878 /// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
3879 static bool hasFPCMov(unsigned X86CC) {
3895 /// isFPImmLegal - Returns true if the target can instruction select the
3896 /// specified FP immediate natively. If false, the legalizer will
3897 /// materialize the FP immediate as a load from a constant pool.
3898 bool X86TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
3899 for (unsigned i = 0, e = LegalFPImmediates.size(); i != e; ++i) {
3900 if (Imm.bitwiseIsEqual(LegalFPImmediates[i]))
3906 bool X86TargetLowering::shouldReduceLoadWidth(SDNode *Load,
3907 ISD::LoadExtType ExtTy,
3909 // "ELF Handling for Thread-Local Storage" specifies that R_X86_64_GOTTPOFF
3910 // relocation target a movq or addq instruction: don't let the load shrink.
3911 SDValue BasePtr = cast<LoadSDNode>(Load)->getBasePtr();
3912 if (BasePtr.getOpcode() == X86ISD::WrapperRIP)
3913 if (const auto *GA = dyn_cast<GlobalAddressSDNode>(BasePtr.getOperand(0)))
3914 return GA->getTargetFlags() != X86II::MO_GOTTPOFF;
3918 /// \brief Returns true if it is beneficial to convert a load of a constant
3919 /// to just the constant itself.
3920 bool X86TargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
3922 assert(Ty->isIntegerTy());
3924 unsigned BitSize = Ty->getPrimitiveSizeInBits();
3925 if (BitSize == 0 || BitSize > 64)
3930 bool X86TargetLowering::isExtractSubvectorCheap(EVT ResVT,
3931 unsigned Index) const {
3932 if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
3935 return (Index == 0 || Index == ResVT.getVectorNumElements());
3938 bool X86TargetLowering::isCheapToSpeculateCttz() const {
3939 // Speculate cttz only if we can directly use TZCNT.
3940 return Subtarget->hasBMI();
3943 bool X86TargetLowering::isCheapToSpeculateCtlz() const {
3944 // Speculate ctlz only if we can directly use LZCNT.
3945 return Subtarget->hasLZCNT();
3948 /// isUndefOrInRange - Return true if Val is undef or if its value falls within
3949 /// the specified half-open range [Low, Hi).
3950 static bool isUndefOrInRange(int Val, int Low, int Hi) {
3951 return (Val < 0) || (Val >= Low && Val < Hi);
3954 /// isUndefOrEqual - Val is either less than zero (undef) or equal to the
3955 /// specified value.
3956 static bool isUndefOrEqual(int Val, int CmpVal) {
3957 return (Val < 0 || Val == CmpVal);
3960 /// isSequentialOrUndefInRange - Return true if every element in Mask, beginning
3961 /// at position Pos and ending at Pos+Size, falls within the specified
3962 /// sequential range [Low, Low+Size) or is undef.
3963 static bool isSequentialOrUndefInRange(ArrayRef<int> Mask,
3964 unsigned Pos, unsigned Size, int Low) {
3965 for (unsigned i = Pos, e = Pos+Size; i != e; ++i, ++Low)
3966 if (!isUndefOrEqual(Mask[i], Low))
3971 /// isPSHUFDMask - Return true if the node specifies a shuffle of elements that
3972 /// is suitable for input to PSHUFD. That is, it doesn't reference the other
3973 /// operand - by default it will match against the first operand.
3974 static bool isPSHUFDMask(ArrayRef<int> Mask, MVT VT,
3975 bool TestSecondOperand = false) {
3976 if (VT != MVT::v4f32 && VT != MVT::v4i32 &&
3977 VT != MVT::v2f64 && VT != MVT::v2i64)
3980 unsigned NumElems = VT.getVectorNumElements();
3981 unsigned Lo = TestSecondOperand ? NumElems : 0;
3982 unsigned Hi = Lo + NumElems;
3984 for (unsigned i = 0; i < NumElems; ++i)
3985 if (!isUndefOrInRange(Mask[i], (int)Lo, (int)Hi))
3991 /// isPSHUFHWMask - Return true if the node specifies a shuffle of elements that
3992 /// is suitable for input to PSHUFHW.
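/// For example, on v8i16 a mask such as <0, 1, 2, 3, 7, 6, 5, 4> qualifies:
/// the low quadword is copied in order and only the high quadword is permuted.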
3993 static bool isPSHUFHWMask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
3994 if (VT != MVT::v8i16 && (!HasInt256 || VT != MVT::v16i16))
3997 // Lower quadword copied in order or undef.
3998 if (!isSequentialOrUndefInRange(Mask, 0, 4, 0))
4001 // Upper quadword shuffled.
4002 for (unsigned i = 4; i != 8; ++i)
4003 if (!isUndefOrInRange(Mask[i], 4, 8))
4006 if (VT == MVT::v16i16) {
4007 // Lower quadword copied in order or undef.
4008 if (!isSequentialOrUndefInRange(Mask, 8, 4, 8))
4011 // Upper quadword shuffled.
4012 for (unsigned i = 12; i != 16; ++i)
4013 if (!isUndefOrInRange(Mask[i], 12, 16))
4020 /// isPSHUFLWMask - Return true if the node specifies a shuffle of elements that
4021 /// is suitable for input to PSHUFLW.
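/// For example, on v8i16 a mask such as <3, 2, 1, 0, 4, 5, 6, 7> qualifies:
/// the high quadword is copied in order and only the low quadword is permuted.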
4022 static bool isPSHUFLWMask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
4023 if (VT != MVT::v8i16 && (!HasInt256 || VT != MVT::v16i16))
4026 // Upper quadword copied in order.
4027 if (!isSequentialOrUndefInRange(Mask, 4, 4, 4))
4030 // Lower quadword shuffled.
4031 for (unsigned i = 0; i != 4; ++i)
4032 if (!isUndefOrInRange(Mask[i], 0, 4))
4035 if (VT == MVT::v16i16) {
4036 // Upper quadword copied in order.
4037 if (!isSequentialOrUndefInRange(Mask, 12, 4, 12))
4040 // Lower quadword shuffled.
4041 for (unsigned i = 8; i != 12; ++i)
4042 if (!isUndefOrInRange(Mask[i], 8, 12))
4049 /// \brief Return true if the mask specifies a shuffle of elements that is
4050 /// suitable for input to intralane (palignr) or interlane (valign) vector
4052 static bool isAlignrMask(ArrayRef<int> Mask, MVT VT, bool InterLane) {
4053 unsigned NumElts = VT.getVectorNumElements();
4054 unsigned NumLanes = InterLane ? 1: VT.getSizeInBits()/128;
4055 unsigned NumLaneElts = NumElts/NumLanes;
4057 // Do not handle 64-bit element shuffles with palignr.
4058 if (NumLaneElts == 2)
4061 for (unsigned l = 0; l != NumElts; l+=NumLaneElts) {
4063 for (i = 0; i != NumLaneElts; ++i) {
4068 // Lane is all undef, go to next lane
4069 if (i == NumLaneElts)
4072 int Start = Mask[i+l];
4074 // Make sure it's in this lane in one of the sources
4075 if (!isUndefOrInRange(Start, l, l+NumLaneElts) &&
4076 !isUndefOrInRange(Start, l+NumElts, l+NumElts+NumLaneElts))
4079 // If not lane 0, then we must match lane 0
4080 if (l != 0 && Mask[i] >= 0 && !isUndefOrEqual(Start, Mask[i]+l))
4083 // Correct second source to be contiguous with first source
4084 if (Start >= (int)NumElts)
4085 Start -= NumElts - NumLaneElts;
4087 // Make sure we're shifting in the right direction.
4088 if (Start <= (int)(i+l))
4093 // Check the rest of the elements to see if they are consecutive.
4094 for (++i; i != NumLaneElts; ++i) {
4095 int Idx = Mask[i+l];
4097 // Make sure it's in this lane
4098 if (!isUndefOrInRange(Idx, l, l+NumLaneElts) &&
4099 !isUndefOrInRange(Idx, l+NumElts, l+NumElts+NumLaneElts))
4102 // If not lane 0, then we must match lane 0
4103 if (l != 0 && Mask[i] >= 0 && !isUndefOrEqual(Idx, Mask[i]+l))
4106 if (Idx >= (int)NumElts)
4107 Idx -= NumElts - NumLaneElts;
4109 if (!isUndefOrEqual(Idx, Start+i))
4118 /// \brief Return true if the node specifies a shuffle of elements that is
4119 /// suitable for input to PALIGNR.
4120 static bool isPALIGNRMask(ArrayRef<int> Mask, MVT VT,
4121 const X86Subtarget *Subtarget) {
4122 if ((VT.is128BitVector() && !Subtarget->hasSSSE3()) ||
4123 (VT.is256BitVector() && !Subtarget->hasInt256()) ||
4124 VT.is512BitVector())
4125 // FIXME: Add AVX512BW.
4128 return isAlignrMask(Mask, VT, false);
4131 /// \brief Return true if the node specifies a shuffle of elements that is
4132 /// suitable for input to VALIGN.
4133 static bool isVALIGNMask(ArrayRef<int> Mask, MVT VT,
4134 const X86Subtarget *Subtarget) {
4135 // FIXME: Add AVX512VL.
4136 if (!VT.is512BitVector() || !Subtarget->hasAVX512())
4138 return isAlignrMask(Mask, VT, true);
4141 /// CommuteVectorShuffleMask - Change values in a shuffle permute mask assuming
4142 /// the two vector operands have swapped position.
4143 static void CommuteVectorShuffleMask(SmallVectorImpl<int> &Mask,
4144 unsigned NumElems) {
4145 for (unsigned i = 0; i != NumElems; ++i) {
4149 else if (idx < (int)NumElems)
4150 Mask[i] = idx + NumElems;
4152 Mask[i] = idx - NumElems;
4156 /// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand
4157 /// specifies a shuffle of elements that is suitable for input to 128/256-bit
4158 /// SHUFPS and SHUFPD. If Commuted is true, then it checks for sources to be
4159 /// reverse of what x86 shuffles want.
4160 static bool isSHUFPMask(ArrayRef<int> Mask, MVT VT, bool Commuted = false) {
4162 unsigned NumElems = VT.getVectorNumElements();
4163 unsigned NumLanes = VT.getSizeInBits()/128;
4164 unsigned NumLaneElems = NumElems/NumLanes;
4166 if (NumLaneElems != 2 && NumLaneElems != 4)
4169 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4170 bool symetricMaskRequired =
4171 (VT.getSizeInBits() >= 256) && (EltSize == 32);
4173 // VSHUFPSY divides the resulting vector into 4 chunks.
4174 // The sources are also split into 4 chunks, and each destination
4175 // chunk must come from a different source chunk.
4177 // SRC1 => X7 X6 X5 X4 X3 X2 X1 X0
4178 // SRC2 => Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0
4180 // DST => Y7..Y4, Y7..Y4, X7..X4, X7..X4,
4181 // Y3..Y0, Y3..Y0, X3..X0, X3..X0
4183 // VSHUFPDY divides the resulting vector into 4 chunks.
4184 // The sources are also split into 4 chunks, and each destination
4185 // chunk must come from a different source chunk.
4187 // SRC1 => X3 X2 X1 X0
4188 // SRC2 => Y3 Y2 Y1 Y0
4190 // DST => Y3..Y2, X3..X2, Y1..Y0, X1..X0
4192 SmallVector<int, 4> MaskVal(NumLaneElems, -1);
4193 unsigned HalfLaneElems = NumLaneElems/2;
4194 for (unsigned l = 0; l != NumElems; l += NumLaneElems) {
4195 for (unsigned i = 0; i != NumLaneElems; ++i) {
4196 int Idx = Mask[i+l];
4197 unsigned RngStart = l + ((Commuted == (i<HalfLaneElems)) ? NumElems : 0);
4198 if (!isUndefOrInRange(Idx, RngStart, RngStart+NumLaneElems))
4200 // For VSHUFPSY, the mask of the second half must be the same as the
4201 // first but with the appropriate offsets. This works in the same way as
4202 // VPERMILPS works with masks.
4203 if (!symetricMaskRequired || Idx < 0)
4205 if (MaskVal[i] < 0) {
4206 MaskVal[i] = Idx - l;
4209 if ((signed)(Idx - l) != MaskVal[i])
4217 /// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand
4218 /// specifies a shuffle of elements that is suitable for input to MOVHLPS.
4219 static bool isMOVHLPSMask(ArrayRef<int> Mask, MVT VT) {
4220 if (!VT.is128BitVector())
4223 unsigned NumElems = VT.getVectorNumElements();
4228 // Expect bit0 == 6, bit1 == 7, bit2 == 2, bit3 == 3
4229 return isUndefOrEqual(Mask[0], 6) &&
4230 isUndefOrEqual(Mask[1], 7) &&
4231 isUndefOrEqual(Mask[2], 2) &&
4232 isUndefOrEqual(Mask[3], 3);
4235 /// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form
4236 /// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef,
4238 static bool isMOVHLPS_v_undef_Mask(ArrayRef<int> Mask, MVT VT) {
4239 if (!VT.is128BitVector())
4242 unsigned NumElems = VT.getVectorNumElements();
4247 return isUndefOrEqual(Mask[0], 2) &&
4248 isUndefOrEqual(Mask[1], 3) &&
4249 isUndefOrEqual(Mask[2], 2) &&
4250 isUndefOrEqual(Mask[3], 3);
4253 /// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand
4254 /// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}.
4255 static bool isMOVLPMask(ArrayRef<int> Mask, MVT VT) {
4256 if (!VT.is128BitVector())
4259 unsigned NumElems = VT.getVectorNumElements();
4261 if (NumElems != 2 && NumElems != 4)
4264 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
4265 if (!isUndefOrEqual(Mask[i], i + NumElems))
4268 for (unsigned i = NumElems/2, e = NumElems; i != e; ++i)
4269 if (!isUndefOrEqual(Mask[i], i))
4275 /// isMOVLHPSMask - Return true if the specified VECTOR_SHUFFLE operand
4276 /// specifies a shuffle of elements that is suitable for input to MOVLHPS.
4277 static bool isMOVLHPSMask(ArrayRef<int> Mask, MVT VT) {
4278 if (!VT.is128BitVector())
4281 unsigned NumElems = VT.getVectorNumElements();
4283 if (NumElems != 2 && NumElems != 4)
4286 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
4287 if (!isUndefOrEqual(Mask[i], i))
4290 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
4291 if (!isUndefOrEqual(Mask[i + e], i + NumElems))
4297 /// isINSERTPSMask - Return true if the specified VECTOR_SHUFFLE operand
4298 /// specifies a shuffle of elements that is suitable for input to INSERTPS.
4299 /// i.e. if all but one element come from the same vector.
4300 static bool isINSERTPSMask(ArrayRef<int> Mask, MVT VT) {
4301 // TODO: Deal with AVX's VINSERTPS
4302 if (!VT.is128BitVector() || (VT != MVT::v4f32 && VT != MVT::v4i32))
4305 unsigned CorrectPosV1 = 0;
4306 unsigned CorrectPosV2 = 0;
4307 for (int i = 0, e = (int)VT.getVectorNumElements(); i != e; ++i) {
4308 if (Mask[i] == -1) {
4316 else if (Mask[i] == i + 4)
4320 if (CorrectPosV1 == 3 || CorrectPosV2 == 3)
4321 // We have 3 elements (undefs count as elements from any vector) from one
4322 // vector, and one from another.
4329 // Some special combinations that can be optimized.
4332 SDValue Compact8x32ShuffleNode(ShuffleVectorSDNode *SVOp,
4333 SelectionDAG &DAG) {
4334 MVT VT = SVOp->getSimpleValueType(0);
4337 if (VT != MVT::v8i32 && VT != MVT::v8f32)
4340 ArrayRef<int> Mask = SVOp->getMask();
4342 // These are the special masks that may be optimized.
4343 static const int MaskToOptimizeEven[] = {0, 8, 2, 10, 4, 12, 6, 14};
4344 static const int MaskToOptimizeOdd[] = {1, 9, 3, 11, 5, 13, 7, 15};
4345 bool MatchEvenMask = true;
4346 bool MatchOddMask = true;
4347 for (int i=0; i<8; ++i) {
4348 if (!isUndefOrEqual(Mask[i], MaskToOptimizeEven[i]))
4349 MatchEvenMask = false;
4350 if (!isUndefOrEqual(Mask[i], MaskToOptimizeOdd[i]))
4351 MatchOddMask = false;
4354 if (!MatchEvenMask && !MatchOddMask)
4357 SDValue UndefNode = DAG.getNode(ISD::UNDEF, dl, VT);
4359 SDValue Op0 = SVOp->getOperand(0);
4360 SDValue Op1 = SVOp->getOperand(1);
4362 if (MatchEvenMask) {
4363 // Shift the second operand right by 32 bits.
4364 static const int ShiftRightMask[] = {-1, 0, -1, 2, -1, 4, -1, 6 };
4365 Op1 = DAG.getVectorShuffle(VT, dl, Op1, UndefNode, ShiftRightMask);
4367 // Shift the first operand left by 32 bits.
4368 static const int ShiftLeftMask[] = {1, -1, 3, -1, 5, -1, 7, -1 };
4369 Op0 = DAG.getVectorShuffle(VT, dl, Op0, UndefNode, ShiftLeftMask);
4371 static const int BlendMask[] = {0, 9, 2, 11, 4, 13, 6, 15};
4372 return DAG.getVectorShuffle(VT, dl, Op0, Op1, BlendMask);
4375 /// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand
4376 /// specifies a shuffle of elements that is suitable for input to UNPCKL.
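/// For example, on v4i32 the canonical UNPCKL mask is <0, 4, 1, 5>, i.e. the
/// low halves of both sources interleaved.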
4377 static bool isUNPCKLMask(ArrayRef<int> Mask, MVT VT,
4378 bool HasInt256, bool V2IsSplat = false) {
4380 assert(VT.getSizeInBits() >= 128 &&
4381 "Unsupported vector type for unpckl");
4383 unsigned NumElts = VT.getVectorNumElements();
4384 if (VT.is256BitVector() && NumElts != 4 && NumElts != 8 &&
4385 (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4388 assert((!VT.is512BitVector() || VT.getScalarType().getSizeInBits() >= 32) &&
4389 "Unsupported vector type for unpckh");
4391 // AVX defines UNPCK* to operate independently on 128-bit lanes.
4392 unsigned NumLanes = VT.getSizeInBits()/128;
4393 unsigned NumLaneElts = NumElts/NumLanes;
4395 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4396 for (unsigned i = 0, j = l; i != NumLaneElts; i += 2, ++j) {
4397 int BitI = Mask[l+i];
4398 int BitI1 = Mask[l+i+1];
4399 if (!isUndefOrEqual(BitI, j))
4402 if (!isUndefOrEqual(BitI1, NumElts))
4405 if (!isUndefOrEqual(BitI1, j + NumElts))
4414 /// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand
4415 /// specifies a shuffle of elements that is suitable for input to UNPCKH.
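/// For example, on v4i32 the canonical UNPCKH mask is <2, 6, 3, 7>, i.e. the
/// high halves of both sources interleaved.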
4416 static bool isUNPCKHMask(ArrayRef<int> Mask, MVT VT,
4417 bool HasInt256, bool V2IsSplat = false) {
4418 assert(VT.getSizeInBits() >= 128 &&
4419 "Unsupported vector type for unpckh");
4421 unsigned NumElts = VT.getVectorNumElements();
4422 if (VT.is256BitVector() && NumElts != 4 && NumElts != 8 &&
4423 (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4426 assert((!VT.is512BitVector() || VT.getScalarType().getSizeInBits() >= 32) &&
4427 "Unsupported vector type for unpckh");
4429 // AVX defines UNPCK* to operate independently on 128-bit lanes.
4430 unsigned NumLanes = VT.getSizeInBits()/128;
4431 unsigned NumLaneElts = NumElts/NumLanes;
4433 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4434 for (unsigned i = 0, j = l+NumLaneElts/2; i != NumLaneElts; i += 2, ++j) {
4435 int BitI = Mask[l+i];
4436 int BitI1 = Mask[l+i+1];
4437 if (!isUndefOrEqual(BitI, j))
4440 if (isUndefOrEqual(BitI1, NumElts))
4443 if (!isUndefOrEqual(BitI1, j+NumElts))
4451 /// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form
4452 /// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef,
4454 static bool isUNPCKL_v_undef_Mask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
4455 unsigned NumElts = VT.getVectorNumElements();
4456 bool Is256BitVec = VT.is256BitVector();
4458 if (VT.is512BitVector())
4460 assert((VT.is128BitVector() || VT.is256BitVector()) &&
4461 "Unsupported vector type for unpckh");
4463 if (Is256BitVec && NumElts != 4 && NumElts != 8 &&
4464 (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4467 // For 256-bit i64/f64, use MOVDDUPY instead, so reject the matching pattern
4468 // FIXME: Need a better way to get rid of this, there's no latency difference
4469 // between UNPCKLPD and MOVDDUP, the latter should always be checked first and
4470 // the former later. We should also remove the "_undef" special mask.
4471 if (NumElts == 4 && Is256BitVec)
4474 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate
4475 // independently on 128-bit lanes.
4476 unsigned NumLanes = VT.getSizeInBits()/128;
4477 unsigned NumLaneElts = NumElts/NumLanes;
4479 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4480 for (unsigned i = 0, j = l; i != NumLaneElts; i += 2, ++j) {
4481 int BitI = Mask[l+i];
4482 int BitI1 = Mask[l+i+1];
4484 if (!isUndefOrEqual(BitI, j))
4486 if (!isUndefOrEqual(BitI1, j))
4494 /// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form
4495 /// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef,
4497 static bool isUNPCKH_v_undef_Mask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
4498 unsigned NumElts = VT.getVectorNumElements();
4500 if (VT.is512BitVector())
4503 assert((VT.is128BitVector() || VT.is256BitVector()) &&
4504 "Unsupported vector type for unpckh");
4506 if (VT.is256BitVector() && NumElts != 4 && NumElts != 8 &&
4507 (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4510 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate
4511 // independently on 128-bit lanes.
4512 unsigned NumLanes = VT.getSizeInBits()/128;
4513 unsigned NumLaneElts = NumElts/NumLanes;
4515 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4516 for (unsigned i = 0, j = l+NumLaneElts/2; i != NumLaneElts; i += 2, ++j) {
4517 int BitI = Mask[l+i];
4518 int BitI1 = Mask[l+i+1];
4519 if (!isUndefOrEqual(BitI, j))
4521 if (!isUndefOrEqual(BitI1, j))
4528 // Match for INSERTI64x4/INSERTF64x4 instructions (src0[0], src1[0]) or
4529 // (src1[0], src0[1]); these manipulate 256-bit sub-vectors.
4530 static bool isINSERT64x4Mask(ArrayRef<int> Mask, MVT VT, unsigned int *Imm) {
4531 if (!VT.is512BitVector())
4534 unsigned NumElts = VT.getVectorNumElements();
4535 unsigned HalfSize = NumElts/2;
4536 if (isSequentialOrUndefInRange(Mask, 0, HalfSize, 0)) {
4537 if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, NumElts)) {
4542 if (isSequentialOrUndefInRange(Mask, 0, HalfSize, NumElts)) {
4543 if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, HalfSize)) {
4551 /// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand
4552 /// specifies a shuffle of elements that is suitable for input to MOVSS,
4553 /// MOVSD, and MOVD, i.e. setting the lowest element.
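/// For example, on v4f32 the MOVSS mask is <4, 1, 2, 3>: element 0 is taken
/// from the second vector and the remaining elements from the first.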
4554 static bool isMOVLMask(ArrayRef<int> Mask, EVT VT) {
4555 if (VT.getVectorElementType().getSizeInBits() < 32)
4557 if (!VT.is128BitVector())
4560 unsigned NumElts = VT.getVectorNumElements();
4562 if (!isUndefOrEqual(Mask[0], NumElts))
4565 for (unsigned i = 1; i != NumElts; ++i)
4566 if (!isUndefOrEqual(Mask[i], i))
4572 /// isVPERM2X128Mask - Match 256-bit shuffles where the elements are considered
4573 /// as permutations between 128-bit chunks or halves. As an example: this
4575 /// vector_shuffle <4, 5, 6, 7, 12, 13, 14, 15>
4576 /// The first half comes from the second half of V1 and the second half from
4577 /// the second half of V2.
4578 static bool isVPERM2X128Mask(ArrayRef<int> Mask, MVT VT, bool HasFp256) {
4579 if (!HasFp256 || !VT.is256BitVector())
4582 // The shuffle result is divided into half A and half B. In total the two
4583 // sources have 4 halves, namely: C, D, E, F. The final values of A and
4584 // B must come from C, D, E or F.
4585 unsigned HalfSize = VT.getVectorNumElements()/2;
4586 bool MatchA = false, MatchB = false;
4588 // Check if A comes from one of C, D, E, F.
4589 for (unsigned Half = 0; Half != 4; ++Half) {
4590 if (isSequentialOrUndefInRange(Mask, 0, HalfSize, Half*HalfSize)) {
4596 // Check if B comes from one of C, D, E, F.
4597 for (unsigned Half = 0; Half != 4; ++Half) {
4598 if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, Half*HalfSize)) {
4604 return MatchA && MatchB;
4607 /// getShuffleVPERM2X128Immediate - Return the appropriate immediate to shuffle
4608 /// the specified VECTOR_MASK mask with VPERM2F128/VPERM2I128 instructions.
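/// For example, the mask <4, 5, 6, 7, 12, 13, 14, 15> shown above yields the
/// immediate 0x31: the low result half selects half 1 (the upper half of the
/// first source) and the high result half selects half 3 (the upper half of
/// the second source) in the combined 4-half numbering.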
4609 static unsigned getShuffleVPERM2X128Immediate(ShuffleVectorSDNode *SVOp) {
4610 MVT VT = SVOp->getSimpleValueType(0);
4612 unsigned HalfSize = VT.getVectorNumElements()/2;
4614 unsigned FstHalf = 0, SndHalf = 0;
4615 for (unsigned i = 0; i < HalfSize; ++i) {
4616 if (SVOp->getMaskElt(i) > 0) {
4617 FstHalf = SVOp->getMaskElt(i)/HalfSize;
4621 for (unsigned i = HalfSize; i < HalfSize*2; ++i) {
4622 if (SVOp->getMaskElt(i) > 0) {
4623 SndHalf = SVOp->getMaskElt(i)/HalfSize;
4628 return (FstHalf | (SndHalf << 4));
4631 // Symmetric in-lane mask. Each lane has 4 elements (for imm8)
4632 static bool isPermImmMask(ArrayRef<int> Mask, MVT VT, unsigned& Imm8) {
4633 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4637 unsigned NumElts = VT.getVectorNumElements();
4639 if (VT.is128BitVector() || (VT.is256BitVector() && EltSize == 64)) {
4640 for (unsigned i = 0; i != NumElts; ++i) {
4643 Imm8 |= Mask[i] << (i*2);
4648 unsigned LaneSize = 4;
4649 SmallVector<int, 4> MaskVal(LaneSize, -1);
4651 for (unsigned l = 0; l != NumElts; l += LaneSize) {
4652 for (unsigned i = 0; i != LaneSize; ++i) {
4653 if (!isUndefOrInRange(Mask[i+l], l, l+LaneSize))
4657 if (MaskVal[i] < 0) {
4658 MaskVal[i] = Mask[i+l] - l;
4659 Imm8 |= MaskVal[i] << (i*2);
4662 if (Mask[i+l] != (signed)(MaskVal[i]+l))
4669 /// isVPERMILPMask - Return true if the specified VECTOR_SHUFFLE operand
4670 /// specifies a shuffle of elements that is suitable for input to VPERMILPD*.
4671 /// Note that VPERMIL mask matching differs depending on whether the underlying
4672 /// element type is 32 or 64 bits. For VPERMILPS the high half of the mask should
4673 /// repeat the same element pattern as the low half, but refer to the upper half
4674 /// of the source. For VPERMILPD the two lanes can be shuffled independently of
4675 /// each other, with the same restriction that lanes can't be crossed. Also handles PSHUFDY.
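/// For example, the v8f32 mask <1, 0, 3, 2, 5, 4, 7, 6> repeats the same
/// in-lane pattern in both 128-bit lanes and is therefore a valid VPERMILPS
/// mask.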
4676 static bool isVPERMILPMask(ArrayRef<int> Mask, MVT VT) {
4677 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4678 if (VT.getSizeInBits() < 256 || EltSize < 32)
4680 bool symetricMaskRequired = (EltSize == 32);
4681 unsigned NumElts = VT.getVectorNumElements();
4683 unsigned NumLanes = VT.getSizeInBits()/128;
4684 unsigned LaneSize = NumElts/NumLanes;
4685 // 2 or 4 elements in one lane
4687 SmallVector<int, 4> ExpectedMaskVal(LaneSize, -1);
4688 for (unsigned l = 0; l != NumElts; l += LaneSize) {
4689 for (unsigned i = 0; i != LaneSize; ++i) {
4690 if (!isUndefOrInRange(Mask[i+l], l, l+LaneSize))
4692 if (symetricMaskRequired) {
4693 if (ExpectedMaskVal[i] < 0 && Mask[i+l] >= 0) {
4694 ExpectedMaskVal[i] = Mask[i+l] - l;
4697 if (!isUndefOrEqual(Mask[i+l], ExpectedMaskVal[i]+l))
4705 /// isCommutedMOVLMask - Returns true if the shuffle mask is the reverse of what
4706 /// x86 MOVSS/MOVSD want: those instructions require the lowest element to be the
4707 /// lowest element of vector 2 and the other elements to come from vector 1 in order.
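/// For example, the v4i32 mask <0, 5, 6, 7> is the commuted form: element 0
/// comes from V1 and the remaining elements come from V2 in order.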
4708 static bool isCommutedMOVLMask(ArrayRef<int> Mask, MVT VT,
4709 bool V2IsSplat = false, bool V2IsUndef = false) {
4710 if (!VT.is128BitVector())
4713 unsigned NumOps = VT.getVectorNumElements();
4714 if (NumOps != 2 && NumOps != 4 && NumOps != 8 && NumOps != 16)
4717 if (!isUndefOrEqual(Mask[0], 0))
4720 for (unsigned i = 1; i != NumOps; ++i)
4721 if (!(isUndefOrEqual(Mask[i], i+NumOps) ||
4722 (V2IsUndef && isUndefOrInRange(Mask[i], NumOps, NumOps*2)) ||
4723 (V2IsSplat && isUndefOrEqual(Mask[i], NumOps))))
4729 /// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand
4730 /// specifies a shuffle of elements that is suitable for input to MOVSHDUP.
4731 /// Masks to match: <1, 1, 3, 3> or <1, 1, 3, 3, 5, 5, 7, 7>
4732 static bool isMOVSHDUPMask(ArrayRef<int> Mask, MVT VT,
4733 const X86Subtarget *Subtarget) {
4734 if (!Subtarget->hasSSE3())
4737 unsigned NumElems = VT.getVectorNumElements();
4739 if ((VT.is128BitVector() && NumElems != 4) ||
4740 (VT.is256BitVector() && NumElems != 8) ||
4741 (VT.is512BitVector() && NumElems != 16))
4744 // "i+1" is the value the indexed mask element must have
4745 for (unsigned i = 0; i != NumElems; i += 2)
4746 if (!isUndefOrEqual(Mask[i], i+1) ||
4747 !isUndefOrEqual(Mask[i+1], i+1))
4753 /// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand
4754 /// specifies a shuffle of elements that is suitable for input to MOVSLDUP.
4755 /// Masks to match: <0, 0, 2, 2> or <0, 0, 2, 2, 4, 4, 6, 6>
4756 static bool isMOVSLDUPMask(ArrayRef<int> Mask, MVT VT,
4757 const X86Subtarget *Subtarget) {
4758 if (!Subtarget->hasSSE3())
4761 unsigned NumElems = VT.getVectorNumElements();
4763 if ((VT.is128BitVector() && NumElems != 4) ||
4764 (VT.is256BitVector() && NumElems != 8) ||
4765 (VT.is512BitVector() && NumElems != 16))
4768 // "i" is the value the indexed mask element must have
4769 for (unsigned i = 0; i != NumElems; i += 2)
4770 if (!isUndefOrEqual(Mask[i], i) ||
4771 !isUndefOrEqual(Mask[i+1], i))
4777 /// isMOVDDUPYMask - Return true if the specified VECTOR_SHUFFLE operand
4778 /// specifies a shuffle of elements that is suitable for input to the 256-bit
4779 /// version of MOVDDUP.
4780 static bool isMOVDDUPYMask(ArrayRef<int> Mask, MVT VT, bool HasFp256) {
4781 if (!HasFp256 || !VT.is256BitVector())
4784 unsigned NumElts = VT.getVectorNumElements();
4788 for (unsigned i = 0; i != NumElts/2; ++i)
4789 if (!isUndefOrEqual(Mask[i], 0))
4791 for (unsigned i = NumElts/2; i != NumElts; ++i)
4792 if (!isUndefOrEqual(Mask[i], NumElts/2))
4797 /// isMOVDDUPMask - Return true if the specified VECTOR_SHUFFLE operand
4798 /// specifies a shuffle of elements that is suitable for input to the 128-bit
4799 /// version of MOVDDUP.
4800 static bool isMOVDDUPMask(ArrayRef<int> Mask, MVT VT) {
4801 if (!VT.is128BitVector())
4804 unsigned e = VT.getVectorNumElements() / 2;
4805 for (unsigned i = 0; i != e; ++i)
4806 if (!isUndefOrEqual(Mask[i], i))
4808 for (unsigned i = 0; i != e; ++i)
4809 if (!isUndefOrEqual(Mask[e+i], i))
4814 /// isVEXTRACTIndex - Return true if the specified
4815 /// EXTRACT_SUBVECTOR operand specifies a vector extract that is
4816 /// suitable for instructions that extract 128- or 256-bit vectors
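/// For example, extracting four f32 elements starting at element 4 of a v8f32
/// is accepted because the extract starts on a 128-bit boundary (4 * 32 == 128).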
4817 static bool isVEXTRACTIndex(SDNode *N, unsigned vecWidth) {
4818 assert((vecWidth == 128 || vecWidth == 256) && "Unexpected vector width");
4819 if (!isa<ConstantSDNode>(N->getOperand(1).getNode()))
4822 // The index should be aligned on a vecWidth-bit boundary.
4824 cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
4826 MVT VT = N->getSimpleValueType(0);
4827 unsigned ElSize = VT.getVectorElementType().getSizeInBits();
4828 bool Result = (Index * ElSize) % vecWidth == 0;
4833 /// isVINSERTIndex - Return true if the specified INSERT_SUBVECTOR
4834 /// operand specifies a subvector insert that is suitable for input to
4835 /// insertion of 128- or 256-bit subvectors
4836 static bool isVINSERTIndex(SDNode *N, unsigned vecWidth) {
4837 assert((vecWidth == 128 || vecWidth == 256) && "Unexpected vector width");
4838 if (!isa<ConstantSDNode>(N->getOperand(2).getNode()))
4840 // The index should be aligned on a vecWidth-bit boundary.
4842 cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
4844 MVT VT = N->getSimpleValueType(0);
4845 unsigned ElSize = VT.getVectorElementType().getSizeInBits();
4846 bool Result = (Index * ElSize) % vecWidth == 0;
4851 bool X86::isVINSERT128Index(SDNode *N) {
4852 return isVINSERTIndex(N, 128);
4855 bool X86::isVINSERT256Index(SDNode *N) {
4856 return isVINSERTIndex(N, 256);
4859 bool X86::isVEXTRACT128Index(SDNode *N) {
4860 return isVEXTRACTIndex(N, 128);
4863 bool X86::isVEXTRACT256Index(SDNode *N) {
4864 return isVEXTRACTIndex(N, 256);
4867 /// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle
4868 /// the specified VECTOR_SHUFFLE mask with PSHUF* and SHUFP* instructions.
4869 /// Handles 128-bit and 256-bit.
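/// For example, the v4f32 mask <3, 1, 0, 2> produces the SHUFPS immediate 0x87:
/// each mask element is encoded in two bits, with element 0 in the low bits.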
4870 static unsigned getShuffleSHUFImmediate(ShuffleVectorSDNode *N) {
4871 MVT VT = N->getSimpleValueType(0);
4873 assert((VT.getSizeInBits() >= 128) &&
4874 "Unsupported vector type for PSHUF/SHUFP");
4876 // Handle 128 and 256-bit vector lengths. AVX defines PSHUF/SHUFP to operate
4877 // independently on 128-bit lanes.
4878 unsigned NumElts = VT.getVectorNumElements();
4879 unsigned NumLanes = VT.getSizeInBits()/128;
4880 unsigned NumLaneElts = NumElts/NumLanes;
4882 assert((NumLaneElts == 2 || NumLaneElts == 4 || NumLaneElts == 8) &&
4883 "Only supports 2, 4 or 8 elements per lane");
4885 unsigned Shift = (NumLaneElts >= 4) ? 1 : 0;
4887 for (unsigned i = 0; i != NumElts; ++i) {
4888 int Elt = N->getMaskElt(i);
4889 if (Elt < 0) continue;
4890 Elt &= NumLaneElts - 1;
4891 unsigned ShAmt = (i << Shift) % 8;
4892 Mask |= Elt << ShAmt;
4898 /// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle
4899 /// the specified VECTOR_SHUFFLE mask with the PSHUFHW instruction.
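/// For example, the v8i16 mask <0, 1, 2, 3, 7, 6, 5, 4> (reverse the upper four
/// elements) produces the PSHUFHW immediate 0x1B.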
4900 static unsigned getShufflePSHUFHWImmediate(ShuffleVectorSDNode *N) {
4901 MVT VT = N->getSimpleValueType(0);
4903 assert((VT == MVT::v8i16 || VT == MVT::v16i16) &&
4904 "Unsupported vector type for PSHUFHW");
4906 unsigned NumElts = VT.getVectorNumElements();
4909 for (unsigned l = 0; l != NumElts; l += 8) {
4910 // 8 nodes per lane, but we only care about the last 4.
4911 for (unsigned i = 0; i < 4; ++i) {
4912 int Elt = N->getMaskElt(l+i+4);
4913 if (Elt < 0) continue;
4914 Elt &= 0x3; // only 2-bits.
4915 Mask |= Elt << (i * 2);
4922 /// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle
4923 /// the specified VECTOR_SHUFFLE mask with the PSHUFLW instruction.
4924 static unsigned getShufflePSHUFLWImmediate(ShuffleVectorSDNode *N) {
4925 MVT VT = N->getSimpleValueType(0);
4927 assert((VT == MVT::v8i16 || VT == MVT::v16i16) &&
4928 "Unsupported vector type for PSHUFHW");
4930 unsigned NumElts = VT.getVectorNumElements();
4933 for (unsigned l = 0; l != NumElts; l += 8) {
4934 // 8 nodes per lane, but we only care about the first 4.
4935 for (unsigned i = 0; i < 4; ++i) {
4936 int Elt = N->getMaskElt(l+i);
4937 if (Elt < 0) continue;
4938 Elt &= 0x3; // only 2-bits
4939 Mask |= Elt << (i * 2);
4946 /// \brief Return the appropriate immediate to shuffle the specified
4947 /// VECTOR_SHUFFLE mask with the PALIGNR (if InterLane is false) or with
4948 /// VALIGN (if InterLane is true) instructions.
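/// For example, for a v8i16 mask <1, 2, 3, 4, 5, 6, 7, 8> this returns 2, i.e.
/// a PALIGNR shift of two bytes (one 16-bit element).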
4949 static unsigned getShuffleAlignrImmediate(ShuffleVectorSDNode *SVOp,
4951 MVT VT = SVOp->getSimpleValueType(0);
4952 unsigned EltSize = InterLane ? 1 :
4953 VT.getVectorElementType().getSizeInBits() >> 3;
4955 unsigned NumElts = VT.getVectorNumElements();
4956 unsigned NumLanes = VT.is512BitVector() ? 1 : VT.getSizeInBits()/128;
4957 unsigned NumLaneElts = NumElts/NumLanes;
4961 for (i = 0; i != NumElts; ++i) {
4962 Val = SVOp->getMaskElt(i);
4966 if (Val >= (int)NumElts)
4967 Val -= NumElts - NumLaneElts;
4969 assert(Val - i > 0 && "PALIGNR imm should be positive");
4970 return (Val - i) * EltSize;
4973 /// \brief Return the appropriate immediate to shuffle the specified
4974 /// VECTOR_SHUFFLE mask with the PALIGNR instruction.
4975 static unsigned getShufflePALIGNRImmediate(ShuffleVectorSDNode *SVOp) {
4976 return getShuffleAlignrImmediate(SVOp, false);
4979 /// \brief Return the appropriate immediate to shuffle the specified
4980 /// VECTOR_SHUFFLE mask with the VALIGN instruction.
4981 static unsigned getShuffleVALIGNImmediate(ShuffleVectorSDNode *SVOp) {
4982 return getShuffleAlignrImmediate(SVOp, true);
4986 static unsigned getExtractVEXTRACTImmediate(SDNode *N, unsigned vecWidth) {
4987 assert((vecWidth == 128 || vecWidth == 256) && "Unsupported vector width");
4988 if (!isa<ConstantSDNode>(N->getOperand(1).getNode()))
4989 llvm_unreachable("Illegal extract subvector for VEXTRACT");
4992 cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
4994 MVT VecVT = N->getOperand(0).getSimpleValueType();
4995 MVT ElVT = VecVT.getVectorElementType();
4997 unsigned NumElemsPerChunk = vecWidth / ElVT.getSizeInBits();
4998 return Index / NumElemsPerChunk;
5001 static unsigned getInsertVINSERTImmediate(SDNode *N, unsigned vecWidth) {
5002 assert((vecWidth == 128 || vecWidth == 256) && "Unsupported vector width");
5003 if (!isa<ConstantSDNode>(N->getOperand(2).getNode()))
5004 llvm_unreachable("Illegal insert subvector for VINSERT");
5007 cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
5009 MVT VecVT = N->getSimpleValueType(0);
5010 MVT ElVT = VecVT.getVectorElementType();
5012 unsigned NumElemsPerChunk = vecWidth / ElVT.getSizeInBits();
5013 return Index / NumElemsPerChunk;
5016 /// getExtractVEXTRACT128Immediate - Return the appropriate immediate
5017 /// to extract the specified EXTRACT_SUBVECTOR index with VEXTRACTF128
5018 /// and VEXTRACTI128 instructions.
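/// For example, extracting elements [4, 8) of a v8f32 corresponds to
/// immediate 1 (the upper 128-bit half).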
5019 unsigned X86::getExtractVEXTRACT128Immediate(SDNode *N) {
5020 return getExtractVEXTRACTImmediate(N, 128);
5023 /// getExtractVEXTRACT256Immediate - Return the appropriate immediate
5024 /// to extract the specified EXTRACT_SUBVECTOR index with VEXTRACTF64x4
5025 /// and VEXTRACTI64x4 instructions.
5026 unsigned X86::getExtractVEXTRACT256Immediate(SDNode *N) {
5027 return getExtractVEXTRACTImmediate(N, 256);
5030 /// getInsertVINSERT128Immediate - Return the appropriate immediate
5031 /// to insert at the specified INSERT_SUBVECTOR index with VINSERTF128
5032 /// and VINSERTI128 instructions.
5033 unsigned X86::getInsertVINSERT128Immediate(SDNode *N) {
5034 return getInsertVINSERTImmediate(N, 128);
5037 /// getInsertVINSERT256Immediate - Return the appropriate immediate
5038 /// to insert at the specified INSERT_SUBVECTOR index with VINSERTF64x4
5039 /// and VINSERTI64x4 instructions.
5040 unsigned X86::getInsertVINSERT256Immediate(SDNode *N) {
5041 return getInsertVINSERTImmediate(N, 256);
5044 /// isZero - Returns true if Elt is a constant integer zero
5045 static bool isZero(SDValue V) {
5046 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V);
5047 return C && C->isNullValue();
5050 /// isZeroNode - Returns true if Elt is a constant zero or a floating point constant +0.0.
5052 bool X86::isZeroNode(SDValue Elt) {
5055 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Elt))
5056 return CFP->getValueAPF().isPosZero();
5060 /// ShouldXformToMOVHLPS - Return true if the node should be transformed to
5061 /// match movhlps. The lower half elements should come from the upper half of
5062 /// V1 (and in order), and the upper half elements should come from the upper
5063 /// half of V2 (and in order).
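/// For example, the v4f32 mask <2, 3, 6, 7> satisfies these constraints.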
5064 static bool ShouldXformToMOVHLPS(ArrayRef<int> Mask, MVT VT) {
5065 if (!VT.is128BitVector())
5067 if (VT.getVectorNumElements() != 4)
5069 for (unsigned i = 0, e = 2; i != e; ++i)
5070 if (!isUndefOrEqual(Mask[i], i+2))
5072 for (unsigned i = 2; i != 4; ++i)
5073 if (!isUndefOrEqual(Mask[i], i+4))
5078 /// isScalarLoadToVector - Returns true if the node is a scalar load that
5079 /// is promoted to a vector. It also returns the LoadSDNode by reference if required.
5081 static bool isScalarLoadToVector(SDNode *N, LoadSDNode **LD = nullptr) {
5082 if (N->getOpcode() != ISD::SCALAR_TO_VECTOR)
5084 N = N->getOperand(0).getNode();
5085 if (!ISD::isNON_EXTLoad(N))
5088 *LD = cast<LoadSDNode>(N);
5092 // Test whether the given value is a vector value which will be legalized into a load.
5094 static bool WillBeConstantPoolLoad(SDNode *N) {
5095 if (N->getOpcode() != ISD::BUILD_VECTOR)
5098 // Check for any non-constant elements.
5099 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
5100 switch (N->getOperand(i).getNode()->getOpcode()) {
5102 case ISD::ConstantFP:
5109 // Vectors of all-zeros and all-ones are materialized with special
5110 // instructions rather than being loaded.
5111 return !ISD::isBuildVectorAllZeros(N) &&
5112 !ISD::isBuildVectorAllOnes(N);
5115 /// ShouldXformToMOVLP{S|D} - Return true if the node should be transformed to
5116 /// match movlp{s|d}. The lower half elements should come from lower half of
5117 /// V1 (and in order), and the upper half elements should come from the upper
5118 /// half of V2 (and in order). And since V1 will become the source of the
5119 /// MOVLP, it must be either a vector load or a scalar load to vector.
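/// For example, the v4f32 mask <0, 1, 6, 7> satisfies the element constraints.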
5120 static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2,
5121 ArrayRef<int> Mask, MVT VT) {
5122 if (!VT.is128BitVector())
5125 if (!ISD::isNON_EXTLoad(V1) && !isScalarLoadToVector(V1))
5127 // If V2 is a vector load, don't do this transformation. We will try to use
5128 // a load-folding shufps op instead.
5129 if (ISD::isNON_EXTLoad(V2) || WillBeConstantPoolLoad(V2))
5132 unsigned NumElems = VT.getVectorNumElements();
5134 if (NumElems != 2 && NumElems != 4)
5136 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
5137 if (!isUndefOrEqual(Mask[i], i))
5139 for (unsigned i = NumElems/2, e = NumElems; i != e; ++i)
5140 if (!isUndefOrEqual(Mask[i], i+NumElems))
5145 /// isZeroShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved
5146 /// to a zero vector.
5147 /// FIXME: move to dag combiner / method on ShuffleVectorSDNode
5148 static bool isZeroShuffle(ShuffleVectorSDNode *N) {
5149 SDValue V1 = N->getOperand(0);
5150 SDValue V2 = N->getOperand(1);
5151 unsigned NumElems = N->getValueType(0).getVectorNumElements();
5152 for (unsigned i = 0; i != NumElems; ++i) {
5153 int Idx = N->getMaskElt(i);
5154 if (Idx >= (int)NumElems) {
5155 unsigned Opc = V2.getOpcode();
5156 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V2.getNode()))
5158 if (Opc != ISD::BUILD_VECTOR ||
5159 !X86::isZeroNode(V2.getOperand(Idx-NumElems)))
5161 } else if (Idx >= 0) {
5162 unsigned Opc = V1.getOpcode();
5163 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V1.getNode()))
5165 if (Opc != ISD::BUILD_VECTOR ||
5166 !X86::isZeroNode(V1.getOperand(Idx)))
5173 /// getZeroVector - Returns a vector of specified type with all zero elements.
5175 static SDValue getZeroVector(EVT VT, const X86Subtarget *Subtarget,
5176 SelectionDAG &DAG, SDLoc dl) {
5177 assert(VT.isVector() && "Expected a vector type");
5179 // Always build SSE zero vectors as <4 x i32> bitcasted
5180 // to their dest type. This ensures they get CSE'd.
5182 if (VT.is128BitVector()) { // SSE
5183 if (Subtarget->hasSSE2()) { // SSE2
5184 SDValue Cst = DAG.getConstant(0, MVT::i32);
5185 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
5187 SDValue Cst = DAG.getConstantFP(+0.0, MVT::f32);
5188 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f32, Cst, Cst, Cst, Cst);
5190 } else if (VT.is256BitVector()) { // AVX
5191 if (Subtarget->hasInt256()) { // AVX2
5192 SDValue Cst = DAG.getConstant(0, MVT::i32);
5193 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5194 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops);
5196 // 256-bit logic and arithmetic instructions in AVX are all
5197 // floating-point; there is no support for integer ops. Emit fp zeroed vectors.
5198 SDValue Cst = DAG.getConstantFP(+0.0, MVT::f32);
5199 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5200 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8f32, Ops);
5202 } else if (VT.is512BitVector()) { // AVX-512
5203 SDValue Cst = DAG.getConstant(0, MVT::i32);
5204 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst,
5205 Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5206 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i32, Ops);
5207 } else if (VT.getScalarType() == MVT::i1) {
5208 assert(VT.getVectorNumElements() <= 16 && "Unexpected vector type");
5209 SDValue Cst = DAG.getConstant(0, MVT::i1);
5210 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
5211 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
5213 llvm_unreachable("Unexpected vector type");
5215 return DAG.getNode(ISD::BITCAST, dl, VT, Vec);
5218 /// getOnesVector - Returns a vector of specified type with all bits set.
5219 /// Always build ones vectors as <4 x i32> or <8 x i32>. For 256-bit types with
5220 /// no AVX2 support, use two <4 x i32> inserted in a <8 x i32> appropriately.
5221 /// Then bitcast to their original type, ensuring they get CSE'd.
5222 static SDValue getOnesVector(MVT VT, bool HasInt256, SelectionDAG &DAG,
5224 assert(VT.isVector() && "Expected a vector type");
5226 SDValue Cst = DAG.getConstant(~0U, MVT::i32);
5228 if (VT.is256BitVector()) {
5229 if (HasInt256) { // AVX2
5230 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5231 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops);
5233 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
5234 Vec = Concat128BitVectors(Vec, Vec, MVT::v8i32, 8, DAG, dl);
5236 } else if (VT.is128BitVector()) {
5237 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
5239 llvm_unreachable("Unexpected vector type");
5241 return DAG.getNode(ISD::BITCAST, dl, VT, Vec);
5244 /// NormalizeMask - V2 is a splat, modify the mask (if needed) so all elements
5245 /// that point to V2 point to its first element.
5246 static void NormalizeMask(SmallVectorImpl<int> &Mask, unsigned NumElems) {
5247 for (unsigned i = 0; i != NumElems; ++i) {
5248 if (Mask[i] > (int)NumElems) {
5254 /// getMOVLMask - Returns a vector_shuffle mask for an movs{s|d}, movd
5255 /// operation of specified width.
5256 static SDValue getMOVL(SelectionDAG &DAG, SDLoc dl, EVT VT, SDValue V1,
5258 unsigned NumElems = VT.getVectorNumElements();
5259 SmallVector<int, 8> Mask;
5260 Mask.push_back(NumElems);
5261 for (unsigned i = 1; i != NumElems; ++i)
5263 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
5266 /// getUnpackl - Returns a vector_shuffle node for an unpackl operation.
5267 static SDValue getUnpackl(SelectionDAG &DAG, SDLoc dl, MVT VT, SDValue V1,
5269 unsigned NumElems = VT.getVectorNumElements();
5270 SmallVector<int, 8> Mask;
5271 for (unsigned i = 0, e = NumElems/2; i != e; ++i) {
5273 Mask.push_back(i + NumElems);
5275 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
5278 /// getUnpackh - Returns a vector_shuffle node for an unpackh operation.
5279 static SDValue getUnpackh(SelectionDAG &DAG, SDLoc dl, MVT VT, SDValue V1,
5281 unsigned NumElems = VT.getVectorNumElements();
5282 SmallVector<int, 8> Mask;
5283 for (unsigned i = 0, Half = NumElems/2; i != Half; ++i) {
5284 Mask.push_back(i + Half);
5285 Mask.push_back(i + NumElems + Half);
5287 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
5290 // PromoteSplati8i16 - i16 and i8 vector types cannot be used directly by
5291 // a generic shuffle instruction because the target has no such instructions.
5292 // Generate shuffles which repeat i16 and i8 several times until they can be
5293 // represented by v4f32 and then be manipulated by target supported shuffles.
5294 static SDValue PromoteSplati8i16(SDValue V, SelectionDAG &DAG, int &EltNo) {
5295 MVT VT = V.getSimpleValueType();
5296 int NumElems = VT.getVectorNumElements();
5299 while (NumElems > 4) {
5300 if (EltNo < NumElems/2) {
5301 V = getUnpackl(DAG, dl, VT, V, V);
5303 V = getUnpackh(DAG, dl, VT, V, V);
5304 EltNo -= NumElems/2;
5311 /// getLegalSplat - Generate a legal splat with supported x86 shuffles
5312 static SDValue getLegalSplat(SelectionDAG &DAG, SDValue V, int EltNo) {
5313 MVT VT = V.getSimpleValueType();
5316 if (VT.is128BitVector()) {
5317 V = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V);
5318 int SplatMask[4] = { EltNo, EltNo, EltNo, EltNo };
5319 V = DAG.getVectorShuffle(MVT::v4f32, dl, V, DAG.getUNDEF(MVT::v4f32),
5321 } else if (VT.is256BitVector()) {
5322 // To use VPERMILPS to splat scalars, the second half of indices must
5323 // refer to the higher part, which is a duplication of the lower one,
5324 // because VPERMILPS can only handle in-lane permutations.
5325 int SplatMask[8] = { EltNo, EltNo, EltNo, EltNo,
5326 EltNo+4, EltNo+4, EltNo+4, EltNo+4 };
5328 V = DAG.getNode(ISD::BITCAST, dl, MVT::v8f32, V);
5329 V = DAG.getVectorShuffle(MVT::v8f32, dl, V, DAG.getUNDEF(MVT::v8f32),
5332 llvm_unreachable("Vector size not supported");
5334 return DAG.getNode(ISD::BITCAST, dl, VT, V);
5337 /// PromoteSplat - Splat is promoted to target supported vector shuffles.
5338 static SDValue PromoteSplat(ShuffleVectorSDNode *SV, SelectionDAG &DAG) {
5339 MVT SrcVT = SV->getSimpleValueType(0);
5340 SDValue V1 = SV->getOperand(0);
5343 int EltNo = SV->getSplatIndex();
5344 int NumElems = SrcVT.getVectorNumElements();
5345 bool Is256BitVec = SrcVT.is256BitVector();
5347 assert(((SrcVT.is128BitVector() && NumElems > 4) || Is256BitVec) &&
5348 "Unknown how to promote splat for type");
5350 // Extract the 128-bit part containing the splat element and update
5351 // the splat element index when it refers to the higher register.
5353 V1 = Extract128BitVector(V1, EltNo, DAG, dl);
5354 if (EltNo >= NumElems/2)
5355 EltNo -= NumElems/2;
5358 // All i16 and i8 vector types can't be used directly by a generic shuffle
5359 // instruction because the target has no such instruction. Generate shuffles
5360 // which repeat i16 and i8 several times until they fit in i32, and then can
5361 // be manipulated by target supported shuffles.
5362 MVT EltVT = SrcVT.getVectorElementType();
5363 if (EltVT == MVT::i8 || EltVT == MVT::i16)
5364 V1 = PromoteSplati8i16(V1, DAG, EltNo);
5366 // Recreate the 256-bit vector and place the same 128-bit vector
5367 // into the low and high part. This is necessary because we want
5368 // to use VPERM* to shuffle the vectors
5370 V1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, SrcVT, V1, V1);
5373 return getLegalSplat(DAG, V1, EltNo);
5376 /// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified
5377 /// vector with a zero or undef vector. This produces a shuffle where the low
5378 /// element of V2 is swizzled into the zero/undef vector, landing at element
5379 /// Idx. This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3).
5380 static SDValue getShuffleVectorZeroOrUndef(SDValue V2, unsigned Idx,
5382 const X86Subtarget *Subtarget,
5383 SelectionDAG &DAG) {
5384 MVT VT = V2.getSimpleValueType();
5386 ? getZeroVector(VT, Subtarget, DAG, SDLoc(V2)) : DAG.getUNDEF(VT);
5387 unsigned NumElems = VT.getVectorNumElements();
5388 SmallVector<int, 16> MaskVec;
5389 for (unsigned i = 0; i != NumElems; ++i)
5390 // If this is the insertion idx, put the low elt of V2 here.
5391 MaskVec.push_back(i == Idx ? NumElems : i);
5392 return DAG.getVectorShuffle(VT, SDLoc(V2), V1, V2, &MaskVec[0]);
5395 /// getTargetShuffleMask - Calculates the shuffle mask corresponding to the
5396 /// target specific opcode. Returns true if the Mask could be calculated. Sets
5397 /// IsUnary to true if it only uses one source. Note that this will set IsUnary for
5398 /// shuffles which use a single input multiple times, and in those cases it will
5399 /// adjust the mask to only have indices within that single input.
5400 static bool getTargetShuffleMask(SDNode *N, MVT VT,
5401 SmallVectorImpl<int> &Mask, bool &IsUnary) {
5402 unsigned NumElems = VT.getVectorNumElements();
5406 bool IsFakeUnary = false;
5407 switch(N->getOpcode()) {
5408 case X86ISD::BLENDI:
5409 ImmN = N->getOperand(N->getNumOperands()-1);
5410 DecodeBLENDMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5413 ImmN = N->getOperand(N->getNumOperands()-1);
5414 DecodeSHUFPMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5415 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5417 case X86ISD::UNPCKH:
5418 DecodeUNPCKHMask(VT, Mask);
5419 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5421 case X86ISD::UNPCKL:
5422 DecodeUNPCKLMask(VT, Mask);
5423 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5425 case X86ISD::MOVHLPS:
5426 DecodeMOVHLPSMask(NumElems, Mask);
5427 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5429 case X86ISD::MOVLHPS:
5430 DecodeMOVLHPSMask(NumElems, Mask);
5431 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5433 case X86ISD::PALIGNR:
5434 ImmN = N->getOperand(N->getNumOperands()-1);
5435 DecodePALIGNRMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5437 case X86ISD::PSHUFD:
5438 case X86ISD::VPERMILPI:
5439 ImmN = N->getOperand(N->getNumOperands()-1);
5440 DecodePSHUFMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5443 case X86ISD::PSHUFHW:
5444 ImmN = N->getOperand(N->getNumOperands()-1);
5445 DecodePSHUFHWMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5448 case X86ISD::PSHUFLW:
5449 ImmN = N->getOperand(N->getNumOperands()-1);
5450 DecodePSHUFLWMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5453 case X86ISD::PSHUFB: {
5455 SDValue MaskNode = N->getOperand(1);
5456 while (MaskNode->getOpcode() == ISD::BITCAST)
5457 MaskNode = MaskNode->getOperand(0);
5459 if (MaskNode->getOpcode() == ISD::BUILD_VECTOR) {
5460 // If we have a build-vector, then things are easy.
5461 EVT VT = MaskNode.getValueType();
5462 assert(VT.isVector() &&
5463 "Can't produce a non-vector with a build_vector!");
5464 if (!VT.isInteger())
5467 int NumBytesPerElement = VT.getVectorElementType().getSizeInBits() / 8;
5469 SmallVector<uint64_t, 32> RawMask;
5470 for (int i = 0, e = MaskNode->getNumOperands(); i < e; ++i) {
5471 SDValue Op = MaskNode->getOperand(i);
5472 if (Op->getOpcode() == ISD::UNDEF) {
5473 RawMask.push_back((uint64_t)SM_SentinelUndef);
5476 auto *CN = dyn_cast<ConstantSDNode>(Op.getNode());
5479 APInt MaskElement = CN->getAPIntValue();
5481 // We now have to decode the element which could be any integer size and
5482 // extract each byte of it.
5483 for (int j = 0; j < NumBytesPerElement; ++j) {
5484 // Note that this is x86 and so always little endian: the low byte is
5485 // the first byte of the mask.
5486 RawMask.push_back(MaskElement.getLoBits(8).getZExtValue());
5487 MaskElement = MaskElement.lshr(8);
5490 DecodePSHUFBMask(RawMask, Mask);
5494 auto *MaskLoad = dyn_cast<LoadSDNode>(MaskNode);
5498 SDValue Ptr = MaskLoad->getBasePtr();
5499 if (Ptr->getOpcode() == X86ISD::Wrapper)
5500 Ptr = Ptr->getOperand(0);
5502 auto *MaskCP = dyn_cast<ConstantPoolSDNode>(Ptr);
5503 if (!MaskCP || MaskCP->isMachineConstantPoolEntry())
5506 if (auto *C = dyn_cast<Constant>(MaskCP->getConstVal())) {
5507 DecodePSHUFBMask(C, Mask);
5513 case X86ISD::VPERMI:
5514 ImmN = N->getOperand(N->getNumOperands()-1);
5515 DecodeVPERMMask(cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5519 case X86ISD::MOVSD: {
5520 // The index 0 always comes from the first element of the second source,
5521 // which is why MOVSS and MOVSD are used in the first place. The other
5522 // elements come from the other positions of the first source vector
5523 Mask.push_back(NumElems);
5524 for (unsigned i = 1; i != NumElems; ++i) {
5529 case X86ISD::VPERM2X128:
5530 ImmN = N->getOperand(N->getNumOperands()-1);
5531 DecodeVPERM2X128Mask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5532 if (Mask.empty()) return false;
5534 case X86ISD::MOVSLDUP:
5535 DecodeMOVSLDUPMask(VT, Mask);
5538 case X86ISD::MOVSHDUP:
5539 DecodeMOVSHDUPMask(VT, Mask);
5542 case X86ISD::MOVDDUP:
5543 DecodeMOVDDUPMask(VT, Mask);
5546 case X86ISD::MOVLHPD:
5547 case X86ISD::MOVLPD:
5548 case X86ISD::MOVLPS:
5549 // Not yet implemented
5551 default: llvm_unreachable("unknown target shuffle node");
5554 // If we have a fake unary shuffle, the shuffle mask is spread across two
5555 // inputs that are actually the same node. Re-map the mask to always point
5556 // into the first input.
5559 if (M >= (int)Mask.size())
5565 /// getShuffleScalarElt - Returns the scalar element that will make up the ith
5566 /// element of the result of the vector shuffle.
5567 static SDValue getShuffleScalarElt(SDNode *N, unsigned Index, SelectionDAG &DAG,
5570 return SDValue(); // Limit search depth.
5572 SDValue V = SDValue(N, 0);
5573 EVT VT = V.getValueType();
5574 unsigned Opcode = V.getOpcode();
5576 // Recurse into ISD::VECTOR_SHUFFLE node to find scalars.
5577 if (const ShuffleVectorSDNode *SV = dyn_cast<ShuffleVectorSDNode>(N)) {
5578 int Elt = SV->getMaskElt(Index);
5581 return DAG.getUNDEF(VT.getVectorElementType());
5583 unsigned NumElems = VT.getVectorNumElements();
5584 SDValue NewV = (Elt < (int)NumElems) ? SV->getOperand(0)
5585 : SV->getOperand(1);
5586 return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG, Depth+1);
5589 // Recurse into target specific vector shuffles to find scalars.
5590 if (isTargetShuffle(Opcode)) {
5591 MVT ShufVT = V.getSimpleValueType();
5592 unsigned NumElems = ShufVT.getVectorNumElements();
5593 SmallVector<int, 16> ShuffleMask;
5596 if (!getTargetShuffleMask(N, ShufVT, ShuffleMask, IsUnary))
5599 int Elt = ShuffleMask[Index];
5601 return DAG.getUNDEF(ShufVT.getVectorElementType());
5603 SDValue NewV = (Elt < (int)NumElems) ? N->getOperand(0)
5605 return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG,
5609 // Actual nodes that may contain scalar elements
5610 if (Opcode == ISD::BITCAST) {
5611 V = V.getOperand(0);
5612 EVT SrcVT = V.getValueType();
5613 unsigned NumElems = VT.getVectorNumElements();
5615 if (!SrcVT.isVector() || SrcVT.getVectorNumElements() != NumElems)
5619 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR)
5620 return (Index == 0) ? V.getOperand(0)
5621 : DAG.getUNDEF(VT.getVectorElementType());
5623 if (V.getOpcode() == ISD::BUILD_VECTOR)
5624 return V.getOperand(Index);
5629 /// getNumOfConsecutiveZeros - Return the number of elements of a vector
5630 /// shuffle operation which consecutively come from a zero value. The
5631 /// search can start in two different directions, from left or right.
5632 /// We count undefs as zeros until PreferredNum is reached.
5633 static unsigned getNumOfConsecutiveZeros(ShuffleVectorSDNode *SVOp,
5634 unsigned NumElems, bool ZerosFromLeft,
5636 unsigned PreferredNum = -1U) {
5637 unsigned NumZeros = 0;
5638 for (unsigned i = 0; i != NumElems; ++i) {
5639 unsigned Index = ZerosFromLeft ? i : NumElems - i - 1;
5640 SDValue Elt = getShuffleScalarElt(SVOp, Index, DAG, 0);
5644 if (X86::isZeroNode(Elt))
5646 else if (Elt.getOpcode() == ISD::UNDEF) // Undef as zero up to PreferredNum.
5647 NumZeros = std::min(NumZeros + 1, PreferredNum);
5655 /// isShuffleMaskConsecutive - Check if the shuffle mask indices [MaskI, MaskE)
5656 /// correspond consecutively to elements from one of the vector operands,
5657 /// starting from its index OpIdx. Also set OpNum to the matching source vector operand.
5659 bool isShuffleMaskConsecutive(ShuffleVectorSDNode *SVOp,
5660 unsigned MaskI, unsigned MaskE, unsigned OpIdx,
5661 unsigned NumElems, unsigned &OpNum) {
5662 bool SeenV1 = false;
5663 bool SeenV2 = false;
5665 for (unsigned i = MaskI; i != MaskE; ++i, ++OpIdx) {
5666 int Idx = SVOp->getMaskElt(i);
5667 // Ignore undef indices
5671 if (Idx < (int)NumElems)
5676 // Only accept consecutive elements from the same vector
5677 if ((Idx % NumElems != OpIdx) || (SeenV1 && SeenV2))
5681 OpNum = SeenV1 ? 0 : 1;
5685 /// isVectorShiftRight - Returns true if the shuffle can be implemented as a
5686 /// logical right shift of a vector.
5687 static bool isVectorShiftRight(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
5688 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
5690 SVOp->getSimpleValueType(0).getVectorNumElements();
5691 unsigned NumZeros = getNumOfConsecutiveZeros(
5692 SVOp, NumElems, false /* check zeros from right */, DAG,
5693 SVOp->getMaskElt(0));
5699 // Considering the elements in the mask that are not consecutive zeros,
5700 // check if they consecutively come from only one of the source vectors.
5702 // V1 = {X, A, B, C} 0
5704 // vector_shuffle V1, V2 <1, 2, 3, X>
5706 if (!isShuffleMaskConsecutive(SVOp,
5707 0, // Mask Start Index
5708 NumElems-NumZeros, // Mask End Index(exclusive)
5709 NumZeros, // Where to start looking in the src vector
5710 NumElems, // Number of elements in vector
5711 OpSrc)) // Which source operand ?
5716 ShVal = SVOp->getOperand(OpSrc);
5720 /// isVectorShiftLeft - Returns true if the shuffle can be implemented as a
5721 /// logical left shift of a vector.
5722 static bool isVectorShiftLeft(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
5723 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
5725 SVOp->getSimpleValueType(0).getVectorNumElements();
5726 unsigned NumZeros = getNumOfConsecutiveZeros(
5727 SVOp, NumElems, true /* check zeros from left */, DAG,
5728 NumElems - SVOp->getMaskElt(NumElems - 1) - 1);
5734 // Considering the elements in the mask that are not consecutive zeros,
5735 // check if they consecutively come from only one of the source vectors.
5737 // 0 { A, B, X, X } = V2
5739 // vector_shuffle V1, V2 <X, X, 4, 5>
5741 if (!isShuffleMaskConsecutive(SVOp,
5742 NumZeros, // Mask Start Index
5743 NumElems, // Mask End Index(exclusive)
5744 0, // Where to start looking in the src vector
5745 NumElems, // Number of elements in vector
5746 OpSrc)) // Which source operand ?
5751 ShVal = SVOp->getOperand(OpSrc);
5755 /// isVectorShift - Returns true if the shuffle can be implemented as a
5756 /// logical left or right shift of a vector.
5757 static bool isVectorShift(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
5758 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
5759 // Although the logic below supports any bitwidth size, there are no
5760 // shift instructions which handle more than 128-bit vectors.
5761 if (!SVOp->getSimpleValueType(0).is128BitVector())
5764 if (isVectorShiftLeft(SVOp, DAG, isLeft, ShVal, ShAmt) ||
5765 isVectorShiftRight(SVOp, DAG, isLeft, ShVal, ShAmt))
5771 /// LowerBuildVectorv16i8 - Custom lower build_vector of v16i8.
5773 static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
5774 unsigned NumNonZero, unsigned NumZero,
5776 const X86Subtarget* Subtarget,
5777 const TargetLowering &TLI) {
5784 for (unsigned i = 0; i < 16; ++i) {
5785 bool ThisIsNonZero = (NonZeros & (1 << i)) != 0;
5786 if (ThisIsNonZero && First) {
5788 V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
5790 V = DAG.getUNDEF(MVT::v8i16);
5795 SDValue ThisElt, LastElt;
5796 bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0;
5797 if (LastIsNonZero) {
5798 LastElt = DAG.getNode(ISD::ZERO_EXTEND, dl,
5799 MVT::i16, Op.getOperand(i-1));
5801 if (ThisIsNonZero) {
5802 ThisElt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Op.getOperand(i));
5803 ThisElt = DAG.getNode(ISD::SHL, dl, MVT::i16,
5804 ThisElt, DAG.getConstant(8, MVT::i8));
5806 ThisElt = DAG.getNode(ISD::OR, dl, MVT::i16, ThisElt, LastElt);
5810 if (ThisElt.getNode())
5811 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, V, ThisElt,
5812 DAG.getIntPtrConstant(i/2));
5816 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V);
5819 /// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16.
5821 static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros,
5822 unsigned NumNonZero, unsigned NumZero,
5824 const X86Subtarget* Subtarget,
5825 const TargetLowering &TLI) {
5832 for (unsigned i = 0; i < 8; ++i) {
5833 bool isNonZero = (NonZeros & (1 << i)) != 0;
5837 V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
5839 V = DAG.getUNDEF(MVT::v8i16);
5842 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl,
5843 MVT::v8i16, V, Op.getOperand(i),
5844 DAG.getIntPtrConstant(i));
5851 /// LowerBuildVectorv4x32 - Custom lower build_vector of v4i32 or v4f32.
5852 static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG,
5853 const X86Subtarget *Subtarget,
5854 const TargetLowering &TLI) {
5855 // Find all zeroable elements.
5857 for (int i=0; i < 4; ++i) {
5858 SDValue Elt = Op->getOperand(i);
5859 Zeroable[i] = (Elt.getOpcode() == ISD::UNDEF || X86::isZeroNode(Elt));
5861 assert(std::count_if(&Zeroable[0], &Zeroable[4],
5862 [](bool M) { return !M; }) > 1 &&
5863 "We expect at least two non-zero elements!");
5865 // We only know how to deal with build_vector nodes where elements are either
5866 // zeroable or extract_vector_elt with constant index.
5867 SDValue FirstNonZero;
5868 unsigned FirstNonZeroIdx;
5869 for (unsigned i=0; i < 4; ++i) {
5872 SDValue Elt = Op->getOperand(i);
5873 if (Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
5874 !isa<ConstantSDNode>(Elt.getOperand(1)))
5876 // Make sure that this node is extracting from a 128-bit vector.
5877 MVT VT = Elt.getOperand(0).getSimpleValueType();
5878 if (!VT.is128BitVector())
5880 if (!FirstNonZero.getNode()) {
5882 FirstNonZeroIdx = i;
5886 assert(FirstNonZero.getNode() && "Unexpected build vector of all zeros!");
5887 SDValue V1 = FirstNonZero.getOperand(0);
5888 MVT VT = V1.getSimpleValueType();
5890 // See if this build_vector can be lowered as a blend with zero.
5892 unsigned EltMaskIdx, EltIdx;
5894 for (EltIdx = 0; EltIdx < 4; ++EltIdx) {
5895 if (Zeroable[EltIdx]) {
5896 // The zero vector will be on the right hand side.
5897 Mask[EltIdx] = EltIdx+4;
5901 Elt = Op->getOperand(EltIdx);
5902 // By construction, Elt is a EXTRACT_VECTOR_ELT with constant index.
5903 EltMaskIdx = cast<ConstantSDNode>(Elt.getOperand(1))->getZExtValue();
5904 if (Elt.getOperand(0) != V1 || EltMaskIdx != EltIdx)
5906 Mask[EltIdx] = EltIdx;
5910 // Let the shuffle legalizer deal with blend operations.
5911 SDValue VZero = getZeroVector(VT, Subtarget, DAG, SDLoc(Op));
5912 if (V1.getSimpleValueType() != VT)
5913 V1 = DAG.getNode(ISD::BITCAST, SDLoc(V1), VT, V1);
5914 return DAG.getVectorShuffle(VT, SDLoc(V1), V1, VZero, &Mask[0]);
5917 // See if we can lower this build_vector to a INSERTPS.
5918 if (!Subtarget->hasSSE41())
5921 SDValue V2 = Elt.getOperand(0);
5922 if (Elt == FirstNonZero && EltIdx == FirstNonZeroIdx)
5925 bool CanFold = true;
5926 for (unsigned i = EltIdx + 1; i < 4 && CanFold; ++i) {
5930 SDValue Current = Op->getOperand(i);
5931 SDValue SrcVector = Current->getOperand(0);
5934 CanFold = SrcVector == V1 &&
5935 cast<ConstantSDNode>(Current.getOperand(1))->getZExtValue() == i;
5941 assert(V1.getNode() && "Expected at least two non-zero elements!");
5942 if (V1.getSimpleValueType() != MVT::v4f32)
5943 V1 = DAG.getNode(ISD::BITCAST, SDLoc(V1), MVT::v4f32, V1);
5944 if (V2.getSimpleValueType() != MVT::v4f32)
5945 V2 = DAG.getNode(ISD::BITCAST, SDLoc(V2), MVT::v4f32, V2);
5947 // Ok, we can emit an INSERTPS instruction.
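// The INSERTPS immediate encodes the source element in bits [7:6], the
// destination slot in bits [5:4], and the zero mask in bits [3:0].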
5949 for (int i = 0; i < 4; ++i)
5953 unsigned InsertPSMask = EltMaskIdx << 6 | EltIdx << 4 | ZMask;
5954 assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
5955 SDValue Result = DAG.getNode(X86ISD::INSERTPS, SDLoc(Op), MVT::v4f32, V1, V2,
5956 DAG.getIntPtrConstant(InsertPSMask));
5957 return DAG.getNode(ISD::BITCAST, SDLoc(Op), VT, Result);
5960 /// getVShift - Return a vector logical shift node.
5962 static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp,
5963 unsigned NumBits, SelectionDAG &DAG,
5964 const TargetLowering &TLI, SDLoc dl) {
5965 assert(VT.is128BitVector() && "Unknown type for VShift");
5966 EVT ShVT = MVT::v2i64;
5967 unsigned Opc = isLeft ? X86ISD::VSHLDQ : X86ISD::VSRLDQ;
5968 SrcOp = DAG.getNode(ISD::BITCAST, dl, ShVT, SrcOp);
5969 return DAG.getNode(ISD::BITCAST, dl, VT,
5970 DAG.getNode(Opc, dl, ShVT, SrcOp,
5971 DAG.getConstant(NumBits,
5972 TLI.getScalarShiftAmountTy(SrcOp.getValueType()))));
5976 LowerAsSplatVectorLoad(SDValue SrcOp, MVT VT, SDLoc dl, SelectionDAG &DAG) {
5978 // Check if the scalar load can be widened into a vector load. And if
5979 // the address is "base + cst" see if the cst can be "absorbed" into
5980 // the shuffle mask.
5981 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(SrcOp)) {
5982 SDValue Ptr = LD->getBasePtr();
5983 if (!ISD::isNormalLoad(LD) || LD->isVolatile())
5985 EVT PVT = LD->getValueType(0);
5986 if (PVT != MVT::i32 && PVT != MVT::f32)
5991 if (FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr)) {
5992 FI = FINode->getIndex();
5994 } else if (DAG.isBaseWithConstantOffset(Ptr) &&
5995 isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
5996 FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
5997 Offset = Ptr.getConstantOperandVal(1);
5998 Ptr = Ptr.getOperand(0);
6003 // FIXME: 256-bit vector instructions don't require a strict alignment,
6004 // improve this code to support it better.
6005 unsigned RequiredAlign = VT.getSizeInBits()/8;
6006 SDValue Chain = LD->getChain();
6007 // Make sure the stack object alignment is at least 16 or 32.
6008 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
6009 if (DAG.InferPtrAlignment(Ptr) < RequiredAlign) {
6010 if (MFI->isFixedObjectIndex(FI)) {
6011 // Can't change the alignment. FIXME: It's possible to compute
6012 // the exact stack offset and reference FI + adjust offset instead.
6013 // If someone *really* cares about this. That's the way to implement it.
6016 MFI->setObjectAlignment(FI, RequiredAlign);
6020 // (Offset % 16 or 32) must be a multiple of 4. The address is then
6021 // Ptr + (Offset & ~(RequiredAlign - 1)).
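// For example, a 4-byte scalar load at base+20 with a 16-byte requirement is
// widened to a vector load at base+16, and the splat index becomes
// (20 - 16) / 4 == 1.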
6024 if ((Offset % RequiredAlign) & 3)
6026 int64_t StartOffset = Offset & ~(RequiredAlign-1);
6028 Ptr = DAG.getNode(ISD::ADD, SDLoc(Ptr), Ptr.getValueType(),
6029 Ptr,DAG.getConstant(StartOffset, Ptr.getValueType()));
6031 int EltNo = (Offset - StartOffset) >> 2;
6032 unsigned NumElems = VT.getVectorNumElements();
6034 EVT NVT = EVT::getVectorVT(*DAG.getContext(), PVT, NumElems);
6035 SDValue V1 = DAG.getLoad(NVT, dl, Chain, Ptr,
6036 LD->getPointerInfo().getWithOffset(StartOffset),
6037 false, false, false, 0);
6039 SmallVector<int, 8> Mask;
6040 for (unsigned i = 0; i != NumElems; ++i)
6041 Mask.push_back(EltNo);
6043 return DAG.getVectorShuffle(NVT, dl, V1, DAG.getUNDEF(NVT), &Mask[0]);
6049 /// EltsFromConsecutiveLoads - Given the initializing elements 'Elts' of a
6050 /// vector of type 'VT', see if the elements can be replaced by a single large
6051 /// load which has the same value as a build_vector whose operands are 'elts'.
6053 /// Example: <load i32 *a, load i32 *a+4, undef, undef> -> zextload a
6055 /// FIXME: we'd also like to handle the case where the last elements are zero
6056 /// rather than undef via VZEXT_LOAD, but we do not detect that case today.
6057 /// There's even a handy isZeroNode for that purpose.
6058 static SDValue EltsFromConsecutiveLoads(EVT VT, SmallVectorImpl<SDValue> &Elts,
6059 SDLoc &DL, SelectionDAG &DAG,
6060 bool isAfterLegalize) {
6061 EVT EltVT = VT.getVectorElementType();
6062 unsigned NumElems = Elts.size();
6064 LoadSDNode *LDBase = nullptr;
6065 unsigned LastLoadedElt = -1U;
6067 // For each element in the initializer, see if we've found a load or an undef.
6068 // If we don't find an initial load element, or later load elements are
6069 // non-consecutive, bail out.
6070 for (unsigned i = 0; i < NumElems; ++i) {
6071 SDValue Elt = Elts[i];
6073 if (!Elt.getNode() ||
6074 (Elt.getOpcode() != ISD::UNDEF && !ISD::isNON_EXTLoad(Elt.getNode())))
6077 if (Elt.getNode()->getOpcode() == ISD::UNDEF)
6079 LDBase = cast<LoadSDNode>(Elt.getNode());
6083 if (Elt.getOpcode() == ISD::UNDEF)
6086 LoadSDNode *LD = cast<LoadSDNode>(Elt);
6087 if (!DAG.isConsecutiveLoad(LD, LDBase, EltVT.getSizeInBits()/8, i))
6092 // If we have found an entire vector of loads and undefs, then return a large
6093 // load of the entire vector width starting at the base pointer. If we found
6094 // consecutive loads for the low half, generate a vzext_load node.
6095 if (LastLoadedElt == NumElems - 1) {
6097 if (isAfterLegalize &&
6098 !DAG.getTargetLoweringInfo().isOperationLegal(ISD::LOAD, VT))
6101 SDValue NewLd = SDValue();
6103 NewLd = DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(),
6104 LDBase->getPointerInfo(), LDBase->isVolatile(),
6105 LDBase->isNonTemporal(), LDBase->isInvariant(),
6106 LDBase->getAlignment());
6108 if (LDBase->hasAnyUseOfValue(1)) {
6109 SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
6111 SDValue(NewLd.getNode(), 1));
6112 DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewChain);
6113 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(LDBase, 1),
6114 SDValue(NewLd.getNode(), 1));
6120 // TODO: The code below fires only for loading the low v2i32 / v2f32
6121 // of a v4i32 / v4f32. It's probably worth generalizing.
6122 if (NumElems == 4 && LastLoadedElt == 1 && (EltVT.getSizeInBits() == 32) &&
6123 DAG.getTargetLoweringInfo().isTypeLegal(MVT::v2i64)) {
6124 SDVTList Tys = DAG.getVTList(MVT::v2i64, MVT::Other);
6125 SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() };
6127 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, Ops, MVT::i64,
6128 LDBase->getPointerInfo(),
6129 LDBase->getAlignment(),
6130 false/*isVolatile*/, true/*ReadMem*/,
6133 // Make sure the newly-created LOAD is in the same position as LDBase in
6134 // terms of dependency. We create a TokenFactor for LDBase and ResNode, and
6135 // update uses of LDBase's output chain to use the TokenFactor.
6136 if (LDBase->hasAnyUseOfValue(1)) {
6137 SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
6138 SDValue(LDBase, 1), SDValue(ResNode.getNode(), 1));
6139 DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewChain);
6140 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(LDBase, 1),
6141 SDValue(ResNode.getNode(), 1));
6144 return DAG.getNode(ISD::BITCAST, DL, VT, ResNode);
6149 /// LowerVectorBroadcast - Attempt to use the vbroadcast instruction
6150 /// to generate a splat value for the following cases:
6151 /// 1. A splat BUILD_VECTOR which uses a single scalar load, or a constant.
6152 /// 2. A splat shuffle which uses a scalar_to_vector node which comes from
6153 /// a scalar load, or a constant.
6154 /// The VBROADCAST node is returned when a pattern is found,
6155 /// or SDValue() otherwise.
6156 static SDValue LowerVectorBroadcast(SDValue Op, const X86Subtarget* Subtarget,
6157 SelectionDAG &DAG) {
6158 // VBROADCAST requires AVX.
6159 // TODO: Splats could be generated for non-AVX CPUs using SSE
6160 // instructions, but there's less potential gain for only 128-bit vectors.
6161 if (!Subtarget->hasAVX())
6164 MVT VT = Op.getSimpleValueType();
6167 assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
6168 "Unsupported vector type for broadcast.");
6173 switch (Op.getOpcode()) {
6175 // Unknown pattern found.
6178 case ISD::BUILD_VECTOR: {
6179 auto *BVOp = cast<BuildVectorSDNode>(Op.getNode());
6180 BitVector UndefElements;
6181 SDValue Splat = BVOp->getSplatValue(&UndefElements);
6183 // We need a splat of a single value to use broadcast, and it doesn't
6184 // make any sense if the value is only in one element of the vector.
6185 if (!Splat || (VT.getVectorNumElements() - UndefElements.count()) <= 1)
6189 ConstSplatVal = (Ld.getOpcode() == ISD::Constant ||
6190 Ld.getOpcode() == ISD::ConstantFP);
6192 // Make sure that all of the users of a non-constant load are from the
6193 // BUILD_VECTOR node.
6194 if (!ConstSplatVal && !BVOp->isOnlyUserOf(Ld.getNode()))
6199 case ISD::VECTOR_SHUFFLE: {
6200 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
6202 // Shuffles must have a splat mask where the first element is
6204 if ((!SVOp->isSplat()) || SVOp->getMaskElt(0) != 0)
6207 SDValue Sc = Op.getOperand(0);
6208 if (Sc.getOpcode() != ISD::SCALAR_TO_VECTOR &&
6209 Sc.getOpcode() != ISD::BUILD_VECTOR) {
6211 if (!Subtarget->hasInt256())
6214 // Use the register form of the broadcast instruction available on AVX2.
6215 if (VT.getSizeInBits() >= 256)
6216 Sc = Extract128BitVector(Sc, 0, DAG, dl);
6217 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Sc);
6220 Ld = Sc.getOperand(0);
6221 ConstSplatVal = (Ld.getOpcode() == ISD::Constant ||
6222 Ld.getOpcode() == ISD::ConstantFP);
6224 // The scalar_to_vector node and the suspected
6225 // load node must have exactly one user.
6226 // Constants may have multiple users.
6228 // AVX-512 has register version of the broadcast
6229 bool hasRegVer = Subtarget->hasAVX512() && VT.is512BitVector() &&
6230 Ld.getValueType().getSizeInBits() >= 32;
6231 if (!ConstSplatVal && ((!Sc.hasOneUse() || !Ld.hasOneUse()) &&
6238 unsigned ScalarSize = Ld.getValueType().getSizeInBits();
6239 bool IsGE256 = (VT.getSizeInBits() >= 256);
6241 // When optimizing for size, generate up to 5 extra bytes for a broadcast
6242 // instruction to save 8 or more bytes of constant pool data.
6243 // TODO: If multiple splats are generated to load the same constant,
6244 // it may be detrimental to overall size. There needs to be a way to detect
6245 // that condition to know if this is truly a size win.
6246 const Function *F = DAG.getMachineFunction().getFunction();
6247 bool OptForSize = F->getAttributes().
6248 hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
6250 // Handle broadcasting a single constant scalar from the constant pool
6252 // On Sandybridge (no AVX2), it is still better to load a constant vector
6253 // from the constant pool and not to broadcast it from a scalar.
6254 // But override that restriction when optimizing for size.
6255 // TODO: Check if splatting is recommended for other AVX-capable CPUs.
6256 if (ConstSplatVal && (Subtarget->hasAVX2() || OptForSize)) {
6257 EVT CVT = Ld.getValueType();
6258 assert(!CVT.isVector() && "Must not broadcast a vector type");
6260 // Splat f32, i32, v4f64, v4i64 in all cases with AVX2.
6261 // For size optimization, also splat v2f64 and v2i64, and for size opt
6262 // with AVX2, also splat i8 and i16.
6263 // With pattern matching, the VBROADCAST node may become a VMOVDDUP.
6264 if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
6265 (OptForSize && (ScalarSize == 64 || Subtarget->hasAVX2()))) {
6266 const Constant *C = nullptr;
6267 if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Ld))
6268 C = CI->getConstantIntValue();
6269 else if (ConstantFPSDNode *CF = dyn_cast<ConstantFPSDNode>(Ld))
6270 C = CF->getConstantFPValue();
6272 assert(C && "Invalid constant type");
6274 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6275 SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy());
6276 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
6277 Ld = DAG.getLoad(CVT, dl, DAG.getEntryNode(), CP,
6278 MachinePointerInfo::getConstantPool(),
6279 false, false, false, Alignment);
6281 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6285 bool IsLoad = ISD::isNormalLoad(Ld.getNode());
6287 // Handle AVX2 in-register broadcasts.
6288 if (!IsLoad && Subtarget->hasInt256() &&
6289 (ScalarSize == 32 || (IsGE256 && ScalarSize == 64)))
6290 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6292 // The scalar source must be a normal load.
6296 if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
6297 (Subtarget->hasVLX() && ScalarSize == 64))
6298 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6300 // The integer check is needed for the 64-bit into 128-bit case, so that it
6301 // doesn't match double: there is no 128-bit (xmm) form of vbroadcastsd.
6302 if (Subtarget->hasInt256() && Ld.getValueType().isInteger()) {
6303 if (ScalarSize == 8 || ScalarSize == 16 || ScalarSize == 64)
6304 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6307 // Unsupported broadcast.
6311 /// \brief For an EXTRACT_VECTOR_ELT with a constant index return the real
6312 /// underlying vector and index.
6314 /// Modifies \p ExtractedFromVec to the real vector and returns the real
6316 static int getUnderlyingExtractedFromVec(SDValue &ExtractedFromVec,
6318 int Idx = cast<ConstantSDNode>(ExtIdx)->getZExtValue();
6319 if (!isa<ShuffleVectorSDNode>(ExtractedFromVec))
6322 // For 256-bit vectors, LowerEXTRACT_VECTOR_ELT_SSE4 may have already
6324 // (extract_vector_elt (v8f32 %vreg1), Constant<6>)
6326 // (extract_vector_elt (vector_shuffle<2,u,u,u>
6327 // (extract_subvector (v8f32 %vreg0), Constant<4>),
6330 // In this case the vector is the extract_subvector expression and the index
6331 // is 2, as specified by the shuffle.
6332 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(ExtractedFromVec);
6333 SDValue ShuffleVec = SVOp->getOperand(0);
6334 MVT ShuffleVecVT = ShuffleVec.getSimpleValueType();
6335 assert(ShuffleVecVT.getVectorElementType() ==
6336 ExtractedFromVec.getSimpleValueType().getVectorElementType());
6338 int ShuffleIdx = SVOp->getMaskElt(Idx);
6339 if (isUndefOrInRange(ShuffleIdx, 0, ShuffleVecVT.getVectorNumElements())) {
6340 ExtractedFromVec = ShuffleVec;
6346 static SDValue buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) {
6347 MVT VT = Op.getSimpleValueType();
6349 // Skip if insert_vec_elt is not supported.
6350 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6351 if (!TLI.isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT))
6355 unsigned NumElems = Op.getNumOperands();
6359 SmallVector<unsigned, 4> InsertIndices;
6360 SmallVector<int, 8> Mask(NumElems, -1);
6362 for (unsigned i = 0; i != NumElems; ++i) {
6363 unsigned Opc = Op.getOperand(i).getOpcode();
6365 if (Opc == ISD::UNDEF)
6368 if (Opc != ISD::EXTRACT_VECTOR_ELT) {
6369 // Quit if more than 1 element needs inserting.
6370 if (InsertIndices.size() > 1)
6373 InsertIndices.push_back(i);
6377 SDValue ExtractedFromVec = Op.getOperand(i).getOperand(0);
6378 SDValue ExtIdx = Op.getOperand(i).getOperand(1);
6379 // Quit if non-constant index.
6380 if (!isa<ConstantSDNode>(ExtIdx))
6382 int Idx = getUnderlyingExtractedFromVec(ExtractedFromVec, ExtIdx);
6384 // Quit if extracted from vector of different type.
6385 if (ExtractedFromVec.getValueType() != VT)
6388 if (!VecIn1.getNode())
6389 VecIn1 = ExtractedFromVec;
6390 else if (VecIn1 != ExtractedFromVec) {
6391 if (!VecIn2.getNode())
6392 VecIn2 = ExtractedFromVec;
6393 else if (VecIn2 != ExtractedFromVec)
6394 // Quit if more than 2 vectors to shuffle
6398 if (ExtractedFromVec == VecIn1)
6400 else if (ExtractedFromVec == VecIn2)
6401 Mask[i] = Idx + NumElems;
6404 if (!VecIn1.getNode())
6407 VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(VT);
6408 SDValue NV = DAG.getVectorShuffle(VT, DL, VecIn1, VecIn2, &Mask[0]);
6409 for (unsigned i = 0, e = InsertIndices.size(); i != e; ++i) {
6410 unsigned Idx = InsertIndices[i];
6411 NV = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, NV, Op.getOperand(Idx),
6412 DAG.getIntPtrConstant(Idx));
6418 // Lower BUILD_VECTOR operation for v8i1 and v16i1 types.
6420 X86TargetLowering::LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG) const {
6422 MVT VT = Op.getSimpleValueType();
6423 assert((VT.getVectorElementType() == MVT::i1) && (VT.getSizeInBits() <= 16) &&
6424 "Unexpected type in LowerBUILD_VECTORvXi1!");
6427 if (ISD::isBuildVectorAllZeros(Op.getNode())) {
6428 SDValue Cst = DAG.getTargetConstant(0, MVT::i1);
6429 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
6430 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
6433 if (ISD::isBuildVectorAllOnes(Op.getNode())) {
6434 SDValue Cst = DAG.getTargetConstant(1, MVT::i1);
6435 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
6436 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
6439 bool AllContants = true;
6440 uint64_t Immediate = 0;
6441 int NonConstIdx = -1;
6442 bool IsSplat = true;
6443 unsigned NumNonConsts = 0;
6444 unsigned NumConsts = 0;
6445 for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) {
6446 SDValue In = Op.getOperand(idx);
6447 if (In.getOpcode() == ISD::UNDEF)
6449 if (!isa<ConstantSDNode>(In)) {
6450 AllContants = false;
6455 if (cast<ConstantSDNode>(In)->getZExtValue())
6456 Immediate |= (1ULL << idx);
6458 if (In != Op.getOperand(0))
6463 SDValue FullMask = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1,
6464 DAG.getConstant(Immediate, MVT::i16));
6465 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, FullMask,
6466 DAG.getIntPtrConstant(0));
6469 if (NumNonConsts == 1 && NonConstIdx != 0) {
6472 SDValue VecAsImm = DAG.getConstant(Immediate,
6473 MVT::getIntegerVT(VT.getSizeInBits()));
6474 DstVec = DAG.getNode(ISD::BITCAST, dl, VT, VecAsImm);
6477 DstVec = DAG.getUNDEF(VT);
6478 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DstVec,
6479 Op.getOperand(NonConstIdx),
6480 DAG.getIntPtrConstant(NonConstIdx));
6482 if (!IsSplat && (NonConstIdx != 0))
6483 llvm_unreachable("Unsupported BUILD_VECTOR operation");
6484 MVT SelectVT = (VT == MVT::v16i1)? MVT::i16 : MVT::i8;
6487 Select = DAG.getNode(ISD::SELECT, dl, SelectVT, Op.getOperand(0),
6488 DAG.getConstant(-1, SelectVT),
6489 DAG.getConstant(0, SelectVT));
6491 Select = DAG.getNode(ISD::SELECT, dl, SelectVT, Op.getOperand(0),
6492 DAG.getConstant((Immediate | 1), SelectVT),
6493 DAG.getConstant(Immediate, SelectVT));
6494 return DAG.getNode(ISD::BITCAST, dl, VT, Select);
6497 /// \brief Return true if \p N implements a horizontal binop and return the
6498 /// operands for the horizontal binop into V0 and V1.
6500 /// This is a helper function of PerformBUILD_VECTORCombine.
6501 /// This function checks that the build_vector \p N in input implements a
6502 /// horizontal operation. Parameter \p Opcode defines the kind of horizontal
6503 /// operation to match.
6504 /// For example, if \p Opcode is equal to ISD::ADD, then this function
6505 /// checks if \p N implements a horizontal arithmetic add; if instead \p Opcode
6506 /// is equal to ISD::SUB, then this function checks if this is a horizontal
6509 /// This function only analyzes elements of \p N whose indices are
6510 /// in range [BaseIdx, LastIdx).
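///
/// For example, with \p Opcode equal to ISD::FADD, a v4f32 build_vector whose
/// operands are
///   (fadd (extract_vector_elt A, 0), (extract_vector_elt A, 1)),
///   (fadd (extract_vector_elt A, 2), (extract_vector_elt A, 3)),
///   (fadd (extract_vector_elt B, 0), (extract_vector_elt B, 1)),
///   (fadd (extract_vector_elt B, 2), (extract_vector_elt B, 3))
/// is recognized with V0 == A and V1 == B, which the caller can then lower to
/// a single horizontal add (e.g. HADDPS).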
6511 static bool isHorizontalBinOp(const BuildVectorSDNode *N, unsigned Opcode,
6513 unsigned BaseIdx, unsigned LastIdx,
6514 SDValue &V0, SDValue &V1) {
6515 EVT VT = N->getValueType(0);
6517 assert(BaseIdx * 2 <= LastIdx && "Invalid Indices in input!");
6518 assert(VT.isVector() && VT.getVectorNumElements() >= LastIdx &&
6519 "Invalid Vector in input!");
6521 bool IsCommutable = (Opcode == ISD::ADD || Opcode == ISD::FADD);
6522 bool CanFold = true;
6523 unsigned ExpectedVExtractIdx = BaseIdx;
6524 unsigned NumElts = LastIdx - BaseIdx;
6525 V0 = DAG.getUNDEF(VT);
6526 V1 = DAG.getUNDEF(VT);
6528 // Check if N implements a horizontal binop.
6529 for (unsigned i = 0, e = NumElts; i != e && CanFold; ++i) {
6530 SDValue Op = N->getOperand(i + BaseIdx);
6533 if (Op->getOpcode() == ISD::UNDEF) {
6534 // Update the expected vector extract index.
6535 if (i * 2 == NumElts)
6536 ExpectedVExtractIdx = BaseIdx;
6537 ExpectedVExtractIdx += 2;
6541 CanFold = Op->getOpcode() == Opcode && Op->hasOneUse();
6546 SDValue Op0 = Op.getOperand(0);
6547 SDValue Op1 = Op.getOperand(1);
6549 // Try to match the following pattern:
6550 // (BINOP (extract_vector_elt A, I), (extract_vector_elt A, I+1))
6551 CanFold = (Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
6552 Op1.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
6553 Op0.getOperand(0) == Op1.getOperand(0) &&
6554 isa<ConstantSDNode>(Op0.getOperand(1)) &&
6555 isa<ConstantSDNode>(Op1.getOperand(1)));
6559 unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
6560 unsigned I1 = cast<ConstantSDNode>(Op1.getOperand(1))->getZExtValue();
6562 if (i * 2 < NumElts) {
6563 if (V0.getOpcode() == ISD::UNDEF)
6564 V0 = Op0.getOperand(0);
6566 if (V1.getOpcode() == ISD::UNDEF)
6567 V1 = Op0.getOperand(0);
6568 if (i * 2 == NumElts)
6569 ExpectedVExtractIdx = BaseIdx;
6572 SDValue Expected = (i * 2 < NumElts) ? V0 : V1;
6573 if (I0 == ExpectedVExtractIdx)
6574 CanFold = I1 == I0 + 1 && Op0.getOperand(0) == Expected;
6575 else if (IsCommutable && I1 == ExpectedVExtractIdx) {
6576 // Try to match the following dag sequence:
6577 // (BINOP (extract_vector_elt A, I+1), (extract_vector_elt A, I))
6578 CanFold = I0 == I1 + 1 && Op1.getOperand(0) == Expected;
6582 ExpectedVExtractIdx += 2;
6588 /// \brief Emit a sequence of two 128-bit horizontal add/sub followed by
6589 /// a concat_vector.
6591 /// This is a helper function of PerformBUILD_VECTORCombine.
6592 /// This function expects two 256-bit vectors called V0 and V1.
6593 /// At first, each vector is split into two separate 128-bit vectors.
6594 /// Then, the resulting 128-bit vectors are used to implement two
6595 /// horizontal binary operations.
6597 /// The kind of horizontal binary operation is defined by \p X86Opcode.
6599 /// \p Mode specifies how the 128-bit parts of V0 and V1 are passed in input to
6600 /// the two new horizontal binops.
6601 /// When Mode is set, the first horizontal binop dag node would take as input
6602 /// the lower 128-bit of V0 and the upper 128-bit of V0. The second
6603 /// horizontal binop dag node would take as input the lower 128-bit of V1
6604 /// and the upper 128-bit of V1.
6606 /// HADD V0_LO, V0_HI
6607 /// HADD V1_LO, V1_HI
6609 /// Otherwise, the first horizontal binop dag node takes as input the lower
6610 /// 128-bit of V0 and the lower 128-bit of V1, and the second horizontal binop
6611 /// dag node takes the upper 128-bit of V0 and the upper 128-bit of V1.
6613 /// HADD V0_LO, V1_LO
6614 /// HADD V0_HI, V1_HI
6616 /// If \p isUndefLO is set, then the algorithm propagates UNDEF to the lower
6617 /// 128-bits of the result. If \p isUndefHI is set, then UNDEF is propagated to
6618 /// the upper 128-bits of the result.
6619 static SDValue ExpandHorizontalBinOp(const SDValue &V0, const SDValue &V1,
6620 SDLoc DL, SelectionDAG &DAG,
6621 unsigned X86Opcode, bool Mode,
6622 bool isUndefLO, bool isUndefHI) {
6623 EVT VT = V0.getValueType();
6624 assert(VT.is256BitVector() && VT == V1.getValueType() &&
6625 "Invalid nodes in input!");
6627 unsigned NumElts = VT.getVectorNumElements();
6628 SDValue V0_LO = Extract128BitVector(V0, 0, DAG, DL);
6629 SDValue V0_HI = Extract128BitVector(V0, NumElts/2, DAG, DL);
6630 SDValue V1_LO = Extract128BitVector(V1, 0, DAG, DL);
6631 SDValue V1_HI = Extract128BitVector(V1, NumElts/2, DAG, DL);
6632 EVT NewVT = V0_LO.getValueType();
6634 SDValue LO = DAG.getUNDEF(NewVT);
6635 SDValue HI = DAG.getUNDEF(NewVT);
6638 // Don't emit a horizontal binop if the result is expected to be UNDEF.
6639 if (!isUndefLO && V0->getOpcode() != ISD::UNDEF)
6640 LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V0_HI);
6641 if (!isUndefHI && V1->getOpcode() != ISD::UNDEF)
6642 HI = DAG.getNode(X86Opcode, DL, NewVT, V1_LO, V1_HI);
6644 // Don't emit a horizontal binop if the result is expected to be UNDEF.
6645 if (!isUndefLO && (V0_LO->getOpcode() != ISD::UNDEF ||
6646 V1_LO->getOpcode() != ISD::UNDEF))
6647 LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V1_LO);
6649 if (!isUndefHI && (V0_HI->getOpcode() != ISD::UNDEF ||
6650 V1_HI->getOpcode() != ISD::UNDEF))
6651 HI = DAG.getNode(X86Opcode, DL, NewVT, V0_HI, V1_HI);
6654 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LO, HI);
6657 /// \brief Try to fold a build_vector that performs an 'addsub' into the
6658 /// sequence of 'vadd + vsub + blendi'.
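///
/// For example, a v4f32 build_vector whose operands are
///   (fsub (extract_vector_elt A, 0), (extract_vector_elt B, 0)),
///   (fadd (extract_vector_elt A, 1), (extract_vector_elt B, 1)),
///   (fsub (extract_vector_elt A, 2), (extract_vector_elt B, 2)),
///   (fadd (extract_vector_elt A, 3), (extract_vector_elt B, 3))
/// is folded into (X86ISD::ADDSUB A, B), i.e. a single addsubps.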
6659 static SDValue matchAddSub(const BuildVectorSDNode *BV, SelectionDAG &DAG,
6660 const X86Subtarget *Subtarget) {
6662 EVT VT = BV->getValueType(0);
6663 unsigned NumElts = VT.getVectorNumElements();
6664 SDValue InVec0 = DAG.getUNDEF(VT);
6665 SDValue InVec1 = DAG.getUNDEF(VT);
6667 assert((VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v4f32 ||
6668 VT == MVT::v2f64) && "build_vector with an invalid type found!");
6670 // Odd-numbered elements in the input build vector are obtained from
6671 // adding two integer/float elements.
6672 // Even-numbered elements in the input build vector are obtained from
6673 // subtracting two integer/float elements.
6674 unsigned ExpectedOpcode = ISD::FSUB;
6675 unsigned NextExpectedOpcode = ISD::FADD;
6676 bool AddFound = false;
6677 bool SubFound = false;
6679 for (unsigned i = 0, e = NumElts; i != e; i++) {
6680 SDValue Op = BV->getOperand(i);
6682 // Skip 'undef' values.
6683 unsigned Opcode = Op.getOpcode();
6684 if (Opcode == ISD::UNDEF) {
6685 std::swap(ExpectedOpcode, NextExpectedOpcode);
6689 // Early exit if we found an unexpected opcode.
6690 if (Opcode != ExpectedOpcode)
6693 SDValue Op0 = Op.getOperand(0);
6694 SDValue Op1 = Op.getOperand(1);
6696 // Try to match the following pattern:
6697 // (BINOP (extract_vector_elt A, i), (extract_vector_elt B, i))
6698 // Early exit if we cannot match that sequence.
6699 if (Op0.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
6700 Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
6701 !isa<ConstantSDNode>(Op0.getOperand(1)) ||
6702 !isa<ConstantSDNode>(Op1.getOperand(1)) ||
6703 Op0.getOperand(1) != Op1.getOperand(1))
6706 unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
6710 // We found a valid add/sub node. Update the information accordingly.
6716 // Update InVec0 and InVec1.
6717 if (InVec0.getOpcode() == ISD::UNDEF)
6718 InVec0 = Op0.getOperand(0);
6719 if (InVec1.getOpcode() == ISD::UNDEF)
6720 InVec1 = Op1.getOperand(0);
6722 // Make sure that operands in input to each add/sub node always
6723 // come from the same pair of vectors.
6724 if (InVec0 != Op0.getOperand(0)) {
6725 if (ExpectedOpcode == ISD::FSUB)
6728 // FADD is commutable. Try to commute the operands
6729 // and then test again.
6730 std::swap(Op0, Op1);
6731 if (InVec0 != Op0.getOperand(0))
6735 if (InVec1 != Op1.getOperand(0))
6738 // Update the pair of expected opcodes.
6739 std::swap(ExpectedOpcode, NextExpectedOpcode);
6742 // Don't try to fold this build_vector into an ADDSUB if the inputs are undef.
6743 if (AddFound && SubFound && InVec0.getOpcode() != ISD::UNDEF &&
6744 InVec1.getOpcode() != ISD::UNDEF)
6745 return DAG.getNode(X86ISD::ADDSUB, DL, VT, InVec0, InVec1);
6750 static SDValue PerformBUILD_VECTORCombine(SDNode *N, SelectionDAG &DAG,
6751 const X86Subtarget *Subtarget) {
6753 EVT VT = N->getValueType(0);
6754 unsigned NumElts = VT.getVectorNumElements();
6755 BuildVectorSDNode *BV = cast<BuildVectorSDNode>(N);
6756 SDValue InVec0, InVec1;
6758 // Try to match an ADDSUB.
6759 if ((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
6760 (Subtarget->hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))) {
6761 SDValue Value = matchAddSub(BV, DAG, Subtarget);
6762 if (Value.getNode())
6766 // Try to match horizontal ADD/SUB.
6767 unsigned NumUndefsLO = 0;
6768 unsigned NumUndefsHI = 0;
6769 unsigned Half = NumElts/2;
6771 // Count the number of UNDEF operands in the build_vector in input.
6772 for (unsigned i = 0, e = Half; i != e; ++i)
6773 if (BV->getOperand(i)->getOpcode() == ISD::UNDEF)
6776 for (unsigned i = Half, e = NumElts; i != e; ++i)
6777 if (BV->getOperand(i)->getOpcode() == ISD::UNDEF)
6780 // Early exit if this is either a build_vector of all UNDEFs or all the
6781 // operands but one are UNDEF.
6782 if (NumUndefsLO + NumUndefsHI + 1 >= NumElts)
6785 if ((VT == MVT::v4f32 || VT == MVT::v2f64) && Subtarget->hasSSE3()) {
6786 // Try to match an SSE3 float HADD/HSUB.
6787 if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, NumElts, InVec0, InVec1))
6788 return DAG.getNode(X86ISD::FHADD, DL, VT, InVec0, InVec1);
6790 if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, NumElts, InVec0, InVec1))
6791 return DAG.getNode(X86ISD::FHSUB, DL, VT, InVec0, InVec1);
6792 } else if ((VT == MVT::v4i32 || VT == MVT::v8i16) && Subtarget->hasSSSE3()) {
6793 // Try to match an SSSE3 integer HADD/HSUB.
6794 if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
6795 return DAG.getNode(X86ISD::HADD, DL, VT, InVec0, InVec1);
6797 if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, NumElts, InVec0, InVec1))
6798 return DAG.getNode(X86ISD::HSUB, DL, VT, InVec0, InVec1);
6801 if (!Subtarget->hasAVX())
6804 if ((VT == MVT::v8f32 || VT == MVT::v4f64)) {
6805 // Try to match an AVX horizontal add/sub of packed single/double
6806 // precision floating point values from 256-bit vectors.
6807 SDValue InVec2, InVec3;
6808 if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, Half, InVec0, InVec1) &&
6809 isHorizontalBinOp(BV, ISD::FADD, DAG, Half, NumElts, InVec2, InVec3) &&
6810 ((InVec0.getOpcode() == ISD::UNDEF ||
6811 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
6812 ((InVec1.getOpcode() == ISD::UNDEF ||
6813 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
6814 return DAG.getNode(X86ISD::FHADD, DL, VT, InVec0, InVec1);
6816 if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, Half, InVec0, InVec1) &&
6817 isHorizontalBinOp(BV, ISD::FSUB, DAG, Half, NumElts, InVec2, InVec3) &&
6818 ((InVec0.getOpcode() == ISD::UNDEF ||
6819 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
6820 ((InVec1.getOpcode() == ISD::UNDEF ||
6821 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
6822 return DAG.getNode(X86ISD::FHSUB, DL, VT, InVec0, InVec1);
6823 } else if (VT == MVT::v8i32 || VT == MVT::v16i16) {
6824 // Try to match an AVX2 horizontal add/sub of signed integers.
6825 SDValue InVec2, InVec3;
6827 bool CanFold = true;
6829 if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, Half, InVec0, InVec1) &&
6830 isHorizontalBinOp(BV, ISD::ADD, DAG, Half, NumElts, InVec2, InVec3) &&
6831 ((InVec0.getOpcode() == ISD::UNDEF ||
6832 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
6833 ((InVec1.getOpcode() == ISD::UNDEF ||
6834 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
6835 X86Opcode = X86ISD::HADD;
6836 else if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, Half, InVec0, InVec1) &&
6837 isHorizontalBinOp(BV, ISD::SUB, DAG, Half, NumElts, InVec2, InVec3) &&
6838 ((InVec0.getOpcode() == ISD::UNDEF ||
6839 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
6840 ((InVec1.getOpcode() == ISD::UNDEF ||
6841 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
6842 X86Opcode = X86ISD::HSUB;
6847 // Fold this build_vector into a single horizontal add/sub.
6848 // Do this only if the target has AVX2.
6849 if (Subtarget->hasAVX2())
6850 return DAG.getNode(X86Opcode, DL, VT, InVec0, InVec1);
6852 // Do not try to expand this build_vector into a pair of horizontal
6853 // add/sub if we can emit a pair of scalar add/sub.
6854 if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
6857 // Convert this build_vector into a pair of horizontal binop followed by
6859 bool isUndefLO = NumUndefsLO == Half;
6860 bool isUndefHI = NumUndefsHI == Half;
6861 return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, false,
6862 isUndefLO, isUndefHI);
6866 if ((VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v8i32 ||
6867 VT == MVT::v16i16) && Subtarget->hasAVX()) {
6869 if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
6870 X86Opcode = X86ISD::HADD;
6871 else if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, NumElts, InVec0, InVec1))
6872 X86Opcode = X86ISD::HSUB;
6873 else if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, NumElts, InVec0, InVec1))
6874 X86Opcode = X86ISD::FHADD;
6875 else if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, NumElts, InVec0, InVec1))
6876 X86Opcode = X86ISD::FHSUB;
6880 // Don't try to expand this build_vector into a pair of horizontal add/sub
6881 // if we can simply emit a pair of scalar add/sub.
6882 if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
6885 // Convert this build_vector into two horizontal add/sub followed by
6887 bool isUndefLO = NumUndefsLO == Half;
6888 bool isUndefHI = NumUndefsHI == Half;
6889 return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, true,
6890 isUndefLO, isUndefHI);
6897 X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
6900 MVT VT = Op.getSimpleValueType();
6901 MVT ExtVT = VT.getVectorElementType();
6902 unsigned NumElems = Op.getNumOperands();
6904 // Generate vectors for predicate vectors.
6905 if (VT.getScalarType() == MVT::i1 && Subtarget->hasAVX512())
6906 return LowerBUILD_VECTORvXi1(Op, DAG);
6908 // Vectors containing all zeros can be matched by pxor and xorps later
6909 if (ISD::isBuildVectorAllZeros(Op.getNode())) {
6910 // Canonicalize this to <4 x i32> to 1) ensure the zero vectors are CSE'd
6911 // and 2) ensure that i64 scalars are eliminated on x86-32 hosts.
6912 if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32)
6915 return getZeroVector(VT, Subtarget, DAG, dl);
6918 // Vectors containing all ones can be matched by pcmpeqd on 128-bit width
6919 // vectors or broken into v4i32 operations on 256-bit vectors. AVX2 can use
6920 // vpcmpeqd on 256-bit vectors.
6921 if (Subtarget->hasSSE2() && ISD::isBuildVectorAllOnes(Op.getNode())) {
6922 if (VT == MVT::v4i32 || (VT == MVT::v8i32 && Subtarget->hasInt256()))
6925 if (!VT.is512BitVector())
6926 return getOnesVector(VT, Subtarget->hasInt256(), DAG, dl);
6929 SDValue Broadcast = LowerVectorBroadcast(Op, Subtarget, DAG);
6930 if (Broadcast.getNode())
6933 unsigned EVTBits = ExtVT.getSizeInBits();
6935 unsigned NumZero = 0;
6936 unsigned NumNonZero = 0;
6937 unsigned NonZeros = 0;
6938 bool IsAllConstants = true;
6939 SmallSet<SDValue, 8> Values;
6940 for (unsigned i = 0; i < NumElems; ++i) {
6941 SDValue Elt = Op.getOperand(i);
6942 if (Elt.getOpcode() == ISD::UNDEF)
6945 if (Elt.getOpcode() != ISD::Constant &&
6946 Elt.getOpcode() != ISD::ConstantFP)
6947 IsAllConstants = false;
6948 if (X86::isZeroNode(Elt))
6951 NonZeros |= (1 << i);
6956 // All undef vector. Return an UNDEF. All zero vectors were handled above.
6957 if (NumNonZero == 0)
6958 return DAG.getUNDEF(VT);
6960 // Special case for single non-zero, non-undef, element.
6961 if (NumNonZero == 1) {
6962 unsigned Idx = countTrailingZeros(NonZeros);
6963 SDValue Item = Op.getOperand(Idx);
6965 // If this is an insertion of an i64 value on x86-32, and if the top bits of
6966 // the value are obviously zero, truncate the value to i32 and do the
6967 // insertion that way. Only do this if the value is non-constant or if the
6968 // value is a constant being inserted into element 0. It is cheaper to do
6969 // a constant pool load than it is to do a movd + shuffle.
6970 if (ExtVT == MVT::i64 && !Subtarget->is64Bit() &&
6971 (!IsAllConstants || Idx == 0)) {
6972 if (DAG.MaskedValueIsZero(Item, APInt::getBitsSet(64, 32, 64))) {
6974 assert(VT == MVT::v2i64 && "Expected an SSE value type!");
6975 EVT VecVT = MVT::v4i32;
6976 unsigned VecElts = 4;
6978 // Truncate the value (which may itself be a constant) to i32, and
6979 // convert it to a vector with movd (S2V+shuffle to zero extend).
6980 Item = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Item);
6981 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Item);
6983 // If using the new shuffle lowering, just directly insert this.
6984 if (ExperimentalVectorShuffleLowering)
6986 ISD::BITCAST, dl, VT,
6987 getShuffleVectorZeroOrUndef(Item, Idx * 2, true, Subtarget, DAG));
6989 Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
6991 // Now we have our 32-bit value zero extended in the low element of
6992 // a vector. If Idx != 0, swizzle it into place.
6994 SmallVector<int, 4> Mask;
6995 Mask.push_back(Idx);
6996 for (unsigned i = 1; i != VecElts; ++i)
6998 Item = DAG.getVectorShuffle(VecVT, dl, Item, DAG.getUNDEF(VecVT),
7001 return DAG.getNode(ISD::BITCAST, dl, VT, Item);
7005 // If we have a constant or non-constant insertion into the low element of
7006 // a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into
7007 // the rest of the elements. This will be matched as movd/movq/movss/movsd
7008 // depending on what the source datatype is.
7011 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
7013 if (ExtVT == MVT::i32 || ExtVT == MVT::f32 || ExtVT == MVT::f64 ||
7014 (ExtVT == MVT::i64 && Subtarget->is64Bit())) {
7015 if (VT.is256BitVector() || VT.is512BitVector()) {
7016 SDValue ZeroVec = getZeroVector(VT, Subtarget, DAG, dl);
7017 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, ZeroVec,
7018 Item, DAG.getIntPtrConstant(0));
7020 assert(VT.is128BitVector() && "Expected an SSE value type!");
7021 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
7022 // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector.
7023 return getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
7026 if (ExtVT == MVT::i16 || ExtVT == MVT::i8) {
7027 Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item);
7028 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Item);
7029 if (VT.is256BitVector()) {
7030 SDValue ZeroVec = getZeroVector(MVT::v8i32, Subtarget, DAG, dl);
7031 Item = Insert128BitVector(ZeroVec, Item, 0, DAG, dl);
7033 assert(VT.is128BitVector() && "Expected an SSE value type!");
7034 Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
7036 return DAG.getNode(ISD::BITCAST, dl, VT, Item);
7040 // Is it a vector logical left shift?
7041 if (NumElems == 2 && Idx == 1 &&
7042 X86::isZeroNode(Op.getOperand(0)) &&
7043 !X86::isZeroNode(Op.getOperand(1))) {
7044 unsigned NumBits = VT.getSizeInBits();
7045 return getVShift(true, VT,
7046 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
7047 VT, Op.getOperand(1)),
7048 NumBits/2, DAG, *this, dl);
7051 if (IsAllConstants) // Otherwise, it's better to do a constpool load.
7054 // Otherwise, if this is a vector with i32 or f32 elements, and the element
7055 // is a non-constant being inserted into an element other than the low one,
7056 // we can't use a constant pool load. Instead, use SCALAR_TO_VECTOR (aka
7057 // movd/movss) to move this into the low element, then shuffle it into
7059 if (EVTBits == 32) {
7060 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
7062 // If using the new shuffle lowering, just directly insert this.
7063 if (ExperimentalVectorShuffleLowering)
7064 return getShuffleVectorZeroOrUndef(Item, Idx, NumZero > 0, Subtarget, DAG);
7066 // Turn it into a shuffle of zero and zero-extended scalar to vector.
7067 Item = getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0, Subtarget, DAG);
7068 SmallVector<int, 8> MaskVec;
7069 for (unsigned i = 0; i != NumElems; ++i)
7070 MaskVec.push_back(i == Idx ? 0 : 1);
7071 return DAG.getVectorShuffle(VT, dl, Item, DAG.getUNDEF(VT), &MaskVec[0]);
7075 // Splat is obviously ok. Let legalizer expand it to a shuffle.
7076 if (Values.size() == 1) {
7077 if (EVTBits == 32) {
7078 // Instead of a shuffle like this:
7079 // shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0>
7080 // Check if it's possible to issue this instead.
7081 // shuffle (vload ptr)), undef, <1, 1, 1, 1>
7082 unsigned Idx = countTrailingZeros(NonZeros);
7083 SDValue Item = Op.getOperand(Idx);
7084 if (Op.getNode()->isOnlyUserOf(Item.getNode()))
7085 return LowerAsSplatVectorLoad(Item, VT, dl, DAG);
7090 // A vector full of immediates; various special cases are already
7091 // handled, so this is best done with a single constant-pool load.
7095 // For AVX-length vectors, see if we can use a vector load to get all of the
7096 // elements, otherwise build the individual 128-bit pieces and use
7097 // shuffles to put them in place.
7098 if (VT.is256BitVector() || VT.is512BitVector()) {
7099 SmallVector<SDValue, 64> V;
7100 for (unsigned i = 0; i != NumElems; ++i)
7101 V.push_back(Op.getOperand(i));
7103 // Check for a build vector of consecutive loads.
7104 if (SDValue LD = EltsFromConsecutiveLoads(VT, V, dl, DAG, false))
7107 EVT HVT = EVT::getVectorVT(*DAG.getContext(), ExtVT, NumElems/2);
7109 // Build both the lower and upper subvector.
7110 SDValue Lower = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT,
7111 makeArrayRef(&V[0], NumElems/2));
7112 SDValue Upper = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT,
7113 makeArrayRef(&V[NumElems / 2], NumElems/2));
7115 // Recreate the wider vector with the lower and upper part.
7116 if (VT.is256BitVector())
7117 return Concat128BitVectors(Lower, Upper, VT, NumElems, DAG, dl);
7118 return Concat256BitVectors(Lower, Upper, VT, NumElems, DAG, dl);
7121 // Let legalizer expand 2-wide build_vectors.
7122 if (EVTBits == 64) {
7123 if (NumNonZero == 1) {
7124 // One half is zero or undef.
7125 unsigned Idx = countTrailingZeros(NonZeros);
7126 SDValue V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT,
7127 Op.getOperand(Idx));
7128 return getShuffleVectorZeroOrUndef(V2, Idx, true, Subtarget, DAG);
7133 // If element VT is < 32 bits, convert it to inserts into a zero vector.
7134 if (EVTBits == 8 && NumElems == 16) {
7135 SDValue V = LowerBuildVectorv16i8(Op, NonZeros,NumNonZero,NumZero, DAG,
7137 if (V.getNode()) return V;
7140 if (EVTBits == 16 && NumElems == 8) {
7141 SDValue V = LowerBuildVectorv8i16(Op, NonZeros,NumNonZero,NumZero, DAG,
7143 if (V.getNode()) return V;
7146 // If element VT is == 32 bits and has 4 elems, try to generate an INSERTPS
7147 if (EVTBits == 32 && NumElems == 4) {
7148 SDValue V = LowerBuildVectorv4x32(Op, DAG, Subtarget, *this);
7153 // If element VT is == 32 bits, turn it into a number of shuffles.
7154 SmallVector<SDValue, 8> V(NumElems);
7155 if (NumElems == 4 && NumZero > 0) {
7156 for (unsigned i = 0; i < 4; ++i) {
7157 bool isZero = !(NonZeros & (1 << i));
7159 V[i] = getZeroVector(VT, Subtarget, DAG, dl);
7161 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
7164 for (unsigned i = 0; i < 2; ++i) {
7165 switch ((NonZeros & (0x3 << i*2)) >> (i*2)) {
7168 V[i] = V[i*2]; // Must be a zero vector.
7171 V[i] = getMOVL(DAG, dl, VT, V[i*2+1], V[i*2]);
7174 V[i] = getMOVL(DAG, dl, VT, V[i*2], V[i*2+1]);
7177 V[i] = getUnpackl(DAG, dl, VT, V[i*2], V[i*2+1]);
7182 bool Reverse1 = (NonZeros & 0x3) == 2;
7183 bool Reverse2 = ((NonZeros & (0x3 << 2)) >> 2) == 2;
7187 static_cast<int>(Reverse2 ? NumElems+1 : NumElems),
7188 static_cast<int>(Reverse2 ? NumElems : NumElems+1)
7190 return DAG.getVectorShuffle(VT, dl, V[0], V[1], &MaskVec[0]);
7193 if (Values.size() > 1 && VT.is128BitVector()) {
7194 // Check for a build vector of consecutive loads.
7195 for (unsigned i = 0; i < NumElems; ++i)
7196 V[i] = Op.getOperand(i);
7198 // Check for elements which are consecutive loads.
7199 SDValue LD = EltsFromConsecutiveLoads(VT, V, dl, DAG, false);
7203 // Check for a build vector from mostly shuffle plus few inserting.
7204 SDValue Sh = buildFromShuffleMostly(Op, DAG);
7208 // For SSE 4.1, use insertps to put the high elements into the low element.
7209 if (getSubtarget()->hasSSE41()) {
7211 if (Op.getOperand(0).getOpcode() != ISD::UNDEF)
7212 Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0));
7214 Result = DAG.getUNDEF(VT);
7216 for (unsigned i = 1; i < NumElems; ++i) {
7217 if (Op.getOperand(i).getOpcode() == ISD::UNDEF) continue;
7218 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result,
7219 Op.getOperand(i), DAG.getIntPtrConstant(i));
7224 // Otherwise, expand into a number of unpckl*, start by extending each of
7225 // our (non-undef) elements to the full vector width with the element in the
7226 // bottom slot of the vector (which generates no code for SSE).
7227 for (unsigned i = 0; i < NumElems; ++i) {
7228 if (Op.getOperand(i).getOpcode() != ISD::UNDEF)
7229 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
7231 V[i] = DAG.getUNDEF(VT);
7234 // Next, we iteratively mix elements, e.g. for v4f32:
7235 // Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0>
7236 // : unpcklps 1, 3 ==> Y: <?, ?, 3, 1>
7237 // Step 2: unpcklps X, Y ==> <3, 2, 1, 0>
7238 unsigned EltStride = NumElems >> 1;
7239 while (EltStride != 0) {
7240 for (unsigned i = 0; i < EltStride; ++i) {
7241 // If V[i+EltStride] is undef and this is the first round of mixing,
7242 // then it is safe to just drop this shuffle: V[i] is already in the
7243 // right place, the one element (since it's the first round) being
7244 // inserted as undef can be dropped. This isn't safe for successive
7245 // rounds because they will permute elements within both vectors.
7246 if (V[i+EltStride].getOpcode() == ISD::UNDEF &&
7247 EltStride == NumElems/2)
7250 V[i] = getUnpackl(DAG, dl, VT, V[i], V[i + EltStride]);
7259 // LowerAVXCONCAT_VECTORS - 256-bit AVX can use the vinsertf128 instruction
7260 // to create 256-bit vectors from two other 128-bit ones.
7261 static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
7263 MVT ResVT = Op.getSimpleValueType();
7265 assert((ResVT.is256BitVector() ||
7266 ResVT.is512BitVector()) && "Value type must be 256-/512-bit wide");
7268 SDValue V1 = Op.getOperand(0);
7269 SDValue V2 = Op.getOperand(1);
7270 unsigned NumElems = ResVT.getVectorNumElements();
7271 if(ResVT.is256BitVector())
7272 return Concat128BitVectors(V1, V2, ResVT, NumElems, DAG, dl);
7274 if (Op.getNumOperands() == 4) {
7275 MVT HalfVT = MVT::getVectorVT(ResVT.getScalarType(),
7276 ResVT.getVectorNumElements()/2);
7277 SDValue V3 = Op.getOperand(2);
7278 SDValue V4 = Op.getOperand(3);
7279 return Concat256BitVectors(Concat128BitVectors(V1, V2, HalfVT, NumElems/2, DAG, dl),
7280 Concat128BitVectors(V3, V4, HalfVT, NumElems/2, DAG, dl), ResVT, NumElems, DAG, dl);
7282 return Concat256BitVectors(V1, V2, ResVT, NumElems, DAG, dl);
7285 static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
7286 MVT LLVM_ATTRIBUTE_UNUSED VT = Op.getSimpleValueType();
7287 assert((VT.is256BitVector() && Op.getNumOperands() == 2) ||
7288 (VT.is512BitVector() && (Op.getNumOperands() == 2 ||
7289 Op.getNumOperands() == 4)));
7291 // AVX can use the vinsertf128 instruction to create 256-bit vectors
7292 // from two other 128-bit ones.
7294 // 512-bit vector may contain 2 256-bit vectors or 4 128-bit vectors
7295 return LowerAVXCONCAT_VECTORS(Op, DAG);
7299 //===----------------------------------------------------------------------===//
7300 // Vector shuffle lowering
7302 // This is an experimental code path for lowering vector shuffles on x86. It is
7303 // designed to handle arbitrary vector shuffles and blends, gracefully
7304 // degrading performance as necessary. It works hard to recognize idiomatic
7305 // shuffles and lower them to optimal instruction patterns without leaving
7306 // a framework that allows reasonably efficient handling of all vector shuffle
7308 //===----------------------------------------------------------------------===//
7310 /// \brief Tiny helper function to identify a no-op mask.
7312 /// This is a somewhat boring predicate function. It checks whether the mask
7313 /// array input, which is assumed to be a single-input shuffle mask of the kind
7314 /// used by the X86 shuffle instructions (not a fully general
7315 /// ShuffleVectorSDNode mask) requires any shuffles to occur. Both undef and an
7316 /// in-place shuffle are 'no-op's.
7317 static bool isNoopShuffleMask(ArrayRef<int> Mask) {
7318 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7319 if (Mask[i] != -1 && Mask[i] != i)
7324 /// \brief Helper function to classify a mask as a single-input mask.
7326 /// This isn't a generic single-input test because in the vector shuffle
7327 /// lowering we canonicalize single inputs to be the first input operand. This
7328 /// means we can more quickly test for a single input by only checking whether
7329 /// an input from the second operand exists. We also assume that the size of
7330 /// the mask corresponds to the size of the input vectors, which isn't true in
7331 /// the fully general case.
7332 static bool isSingleInputShuffleMask(ArrayRef<int> Mask) {
7334 if (M >= (int)Mask.size())
7339 /// \brief Test whether there are elements crossing 128-bit lanes in this
7342 /// X86 divides up its shuffles into in-lane and cross-lane shuffle operations
7343 /// and we routinely test for these.
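///
/// For example, the v8f32 mask <0, 1, 2, 3, 7, 6, 5, 4> stays within its
/// 128-bit lanes, while <0, 1, 2, 3, 3, 2, 1, 0> makes elements 4-7 read from
/// lane 0 and is therefore lane-crossing.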
7344 static bool is128BitLaneCrossingShuffleMask(MVT VT, ArrayRef<int> Mask) {
7345 int LaneSize = 128 / VT.getScalarSizeInBits();
7346 int Size = Mask.size();
7347 for (int i = 0; i < Size; ++i)
7348 if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
7353 /// \brief Test whether a shuffle mask is equivalent within each 128-bit lane.
7355 /// This checks a shuffle mask to see if it is performing the same
7356 /// 128-bit lane-relative shuffle in each 128-bit lane. This trivially implies
7357 /// that it is also not lane-crossing. It may however involve a blend from the
7358 /// same lane of a second vector.
7360 /// The specific repeated shuffle mask is populated in \p RepeatedMask, as it is
7361 /// non-trivial to compute in the face of undef lanes. The representation is
7362 /// *not* suitable for use with existing 128-bit shuffles as it will contain
7363 /// entries from both V1 and V2 inputs to the wider mask.
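///
/// For example, the v8f32 mask <0, 8, 1, 9, 4, 12, 5, 13> performs the same
/// in-lane shuffle in both 128-bit lanes and yields the repeated mask
/// <0, 8, 1, 9>, where 8 and 9 denote elements 0 and 1 of the matching lane
/// of V2.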
7365 is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
7366 SmallVectorImpl<int> &RepeatedMask) {
7367 int LaneSize = 128 / VT.getScalarSizeInBits();
7368 RepeatedMask.resize(LaneSize, -1);
7369 int Size = Mask.size();
7370 for (int i = 0; i < Size; ++i) {
7373 if ((Mask[i] % Size) / LaneSize != i / LaneSize)
7374 // This entry crosses lanes, so there is no way to model this shuffle.
7377 // Ok, handle the in-lane shuffles by detecting if and when they repeat.
7378 if (RepeatedMask[i % LaneSize] == -1)
7379 // This is the first non-undef entry in this slot of a 128-bit lane.
7380 RepeatedMask[i % LaneSize] =
7381 Mask[i] < Size ? Mask[i] % LaneSize : Mask[i] % LaneSize + Size;
7382 else if (RepeatedMask[i % LaneSize] + (i / LaneSize) * LaneSize != Mask[i])
7383 // Found a mismatch with the repeated mask.
7389 // Hide this symbol with an anonymous namespace instead of 'static' so that MSVC
7390 // 2013 will allow us to use it as a non-type template parameter.
7393 /// \brief Implementation of the \c isShuffleEquivalent variadic functor.
7395 /// See its documentation for details.
7396 bool isShuffleEquivalentImpl(ArrayRef<int> Mask, ArrayRef<const int *> Args) {
7397 if (Mask.size() != Args.size())
7399 for (int i = 0, e = Mask.size(); i < e; ++i) {
7400 assert(*Args[i] >= 0 && "Arguments must be positive integers!");
7401 if (Mask[i] != -1 && Mask[i] != *Args[i])
7409 /// \brief Checks whether a shuffle mask is equivalent to an explicit list of
7412 /// This is a fast way to test a shuffle mask against a fixed pattern:
7414 /// if (isShuffleEquivalent(Mask, 3, 2, 1, 0)) { ... }
7416 /// It returns true if the mask is exactly as wide as the argument list, and
7417 /// each element of the mask is either -1 (signifying undef) or the value given
7418 /// in the argument.
7419 static const VariadicFunction1<
7420 bool, ArrayRef<int>, int, isShuffleEquivalentImpl> isShuffleEquivalent = {};
7422 /// \brief Get a 4-lane 8-bit shuffle immediate for a mask.
7424 /// This helper function produces an 8-bit shuffle immediate corresponding to
7425 /// the ubiquitous shuffle encoding scheme used in x86 instructions for
7426 /// shuffling 4 lanes. It can be used with most of the PSHUF instructions for
7429 /// NB: We rely heavily on "undef" masks preserving the input lane.
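///
/// For example, the mask <3, 2, 1, 0> produces the immediate
/// 3 | (2 << 2) | (1 << 4) | (0 << 6) == 0x1B, the familiar "reverse" pshufd
/// immediate; undef mask elements default to their identity position.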
7430 static SDValue getV4X86ShuffleImm8ForMask(ArrayRef<int> Mask,
7431 SelectionDAG &DAG) {
7432 assert(Mask.size() == 4 && "Only 4-lane shuffle masks");
7433 assert(Mask[0] >= -1 && Mask[0] < 4 && "Out of bound mask element!");
7434 assert(Mask[1] >= -1 && Mask[1] < 4 && "Out of bound mask element!");
7435 assert(Mask[2] >= -1 && Mask[2] < 4 && "Out of bound mask element!");
7436 assert(Mask[3] >= -1 && Mask[3] < 4 && "Out of bound mask element!");
7439 Imm |= (Mask[0] == -1 ? 0 : Mask[0]) << 0;
7440 Imm |= (Mask[1] == -1 ? 1 : Mask[1]) << 2;
7441 Imm |= (Mask[2] == -1 ? 2 : Mask[2]) << 4;
7442 Imm |= (Mask[3] == -1 ? 3 : Mask[3]) << 6;
7443 return DAG.getConstant(Imm, MVT::i8);
7446 /// \brief Try to emit a blend instruction for a shuffle.
7448 /// This doesn't do any checks for the availability of instructions for blending
7449 /// these values. It relies on the availability of the X86ISD::BLENDI pattern to
7450 /// be matched in the backend with the type given. What it does check for is
7451 /// that the shuffle mask is in fact a blend.
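///
/// For example, the v4f32 mask <0, 5, 2, 7> takes elements 1 and 3 from V2,
/// giving a blend mask of 0b1010; with SSE4.1 this can be emitted as a single
/// blendps with immediate 10.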
7452 static SDValue lowerVectorShuffleAsBlend(SDLoc DL, MVT VT, SDValue V1,
7453 SDValue V2, ArrayRef<int> Mask,
7454 const X86Subtarget *Subtarget,
7455 SelectionDAG &DAG) {
7457 unsigned BlendMask = 0;
7458 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7459 if (Mask[i] >= Size) {
7460 if (Mask[i] != i + Size)
7461 return SDValue(); // Shuffled V2 input!
7462 BlendMask |= 1u << i;
7465 if (Mask[i] >= 0 && Mask[i] != i)
7466 return SDValue(); // Shuffled V1 input!
7468 switch (VT.SimpleTy) {
7473 return DAG.getNode(X86ISD::BLENDI, DL, VT, V1, V2,
7474 DAG.getConstant(BlendMask, MVT::i8));
7478 assert(Subtarget->hasAVX2() && "256-bit integer blends require AVX2!");
7482 // If we have AVX2 it is faster to use VPBLENDD when the shuffle fits into
7483 // that instruction.
7484 if (Subtarget->hasAVX2()) {
7485 // Scale the blend by the number of 32-bit dwords per element.
7486 int Scale = VT.getScalarSizeInBits() / 32;
7488 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7489 if (Mask[i] >= Size)
7490 for (int j = 0; j < Scale; ++j)
7491 BlendMask |= 1u << (i * Scale + j);
7493 MVT BlendVT = VT.getSizeInBits() > 128 ? MVT::v8i32 : MVT::v4i32;
7494 V1 = DAG.getNode(ISD::BITCAST, DL, BlendVT, V1);
7495 V2 = DAG.getNode(ISD::BITCAST, DL, BlendVT, V2);
7496 return DAG.getNode(ISD::BITCAST, DL, VT,
7497 DAG.getNode(X86ISD::BLENDI, DL, BlendVT, V1, V2,
7498 DAG.getConstant(BlendMask, MVT::i8)));
7502 // For integer shuffles we need to expand the mask and cast the inputs to
7503 // v8i16s prior to blending.
7504 int Scale = 8 / VT.getVectorNumElements();
7506 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7507 if (Mask[i] >= Size)
7508 for (int j = 0; j < Scale; ++j)
7509 BlendMask |= 1u << (i * Scale + j);
7511 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1);
7512 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V2);
7513 return DAG.getNode(ISD::BITCAST, DL, VT,
7514 DAG.getNode(X86ISD::BLENDI, DL, MVT::v8i16, V1, V2,
7515 DAG.getConstant(BlendMask, MVT::i8)));
7519 assert(Subtarget->hasAVX2() && "256-bit integer blends require AVX2!");
7520 SmallVector<int, 8> RepeatedMask;
7521 if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
7522 // We can lower these with PBLENDW which is mirrored across 128-bit lanes.
7523 assert(RepeatedMask.size() == 8 && "Repeated mask size doesn't match!");
7525 for (int i = 0; i < 8; ++i)
7526 if (RepeatedMask[i] >= 16)
7527 BlendMask |= 1u << i;
7528 return DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
7529 DAG.getConstant(BlendMask, MVT::i8));
7534 assert(Subtarget->hasAVX2() && "256-bit integer blends require AVX2!");
7535 // Scale the blend by the number of bytes per element.
7536 int Scale = VT.getScalarSizeInBits() / 8;
7537 assert(Mask.size() * Scale == 32 && "Not a 256-bit vector!");
7539 // Compute the VSELECT mask. Note that VSELECT is really confusing in the
7540 // mix of LLVM's code generator and the x86 backend. We tell the code
7541 // generator that boolean values in the elements of an x86 vector register
7542 // are -1 for true and 0 for false. We then use the LLVM semantics of 'true'
7543 // mapping a select to operand #1, and 'false' mapping to operand #2. The
7544 // reality in x86 is that vector masks (pre-AVX-512) use only the high bit
7545 // of the element (the remaining are ignored) and 0 in that high bit would
7546 // mean operand #1 while 1 in the high bit would mean operand #2. So while
7547 // the LLVM model for boolean values in vector elements gets the relevant
7548 // bit set, it is set backwards and over constrained relative to x86's
7550 SDValue VSELECTMask[32];
7551 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7552 for (int j = 0; j < Scale; ++j)
7553 VSELECTMask[Scale * i + j] =
7554 Mask[i] < 0 ? DAG.getUNDEF(MVT::i8)
7555 : DAG.getConstant(Mask[i] < Size ? -1 : 0, MVT::i8);
7557 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v32i8, V1);
7558 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v32i8, V2);
7560 ISD::BITCAST, DL, VT,
7561 DAG.getNode(ISD::VSELECT, DL, MVT::v32i8,
7562 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, VSELECTMask),
7567 llvm_unreachable("Not a supported integer vector type!");
7571 /// \brief Generic routine to lower a shuffle and blend as a decomposed set of
7572 /// unblended shuffles followed by an unshuffled blend.
7574 /// This matches the extremely common pattern for handling combined
7575 /// shuffle+blend operations on newer X86 ISAs where we have very fast blend
7577 static SDValue lowerVectorShuffleAsDecomposedShuffleBlend(SDLoc DL, MVT VT,
7581 SelectionDAG &DAG) {
7582 // Shuffle the input elements into the desired positions in V1 and V2 and
7583 // blend them together.
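// For example, the v4i32 mask <6, 0, 5, 3> is decomposed into
//   V1' = shuffle V1, undef, <u, 0, u, 3>
//   V2' = shuffle V2, undef, <2, u, 1, u>
// followed by the blend  shuffle V1', V2', <4, 1, 6, 3>.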
7584 SmallVector<int, 32> V1Mask(Mask.size(), -1);
7585 SmallVector<int, 32> V2Mask(Mask.size(), -1);
7586 SmallVector<int, 32> BlendMask(Mask.size(), -1);
7587 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7588 if (Mask[i] >= 0 && Mask[i] < Size) {
7589 V1Mask[i] = Mask[i];
7591 } else if (Mask[i] >= Size) {
7592 V2Mask[i] = Mask[i] - Size;
7593 BlendMask[i] = i + Size;
7596 V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
7597 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
7598 return DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
7601 /// \brief Try to lower a vector shuffle as a byte rotation.
7603 /// SSSE3 has a generic PALIGNR instruction in x86 that will do an arbitrary
7604 /// byte-rotation of the concatenation of two vectors; pre-SSSE3 can use
7605 /// a PSRLDQ/PSLLDQ/POR pattern to get a similar effect. This routine will
7606 /// try to generically lower a vector shuffle through such a pattern. It
7607 /// does not check for the profitability of lowering either as PALIGNR or
7608 /// PSRLDQ/PSLLDQ/POR, only whether the mask is valid to lower in that form.
7609 /// This matches shuffle vectors that look like:
7611 /// v8i16 [11, 12, 13, 14, 15, 0, 1, 2]
7613 /// Essentially it concatenates V1 and V2, shifts right by some number of
7614 /// elements, and takes the low elements as the result. Note that while this is
7615 /// specified as a *right shift* because x86 is little-endian, it is a *left
7616 /// rotate* of the vector lanes.
7618 /// Note that this only handles 128-bit vector widths currently.
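///
/// For example, the v8i16 mask [11, 12, 13, 14, 15, 0, 1, 2] above is a
/// rotation by 3 elements (6 bytes): with SSSE3 it becomes a single palignr
/// with immediate 6, and on plain SSE2 it is emitted as a pslldq by 10 bytes,
/// a psrldq by 6 bytes, and a por of the two results.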
7619 static SDValue lowerVectorShuffleAsByteRotate(SDLoc DL, MVT VT, SDValue V1,
7622 const X86Subtarget *Subtarget,
7623 SelectionDAG &DAG) {
7624 assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
7626 // We need to detect various ways of spelling a rotation:
7627 // [11, 12, 13, 14, 15, 0, 1, 2]
7628 // [-1, 12, 13, 14, -1, -1, 1, -1]
7629 // [-1, -1, -1, -1, -1, -1, 1, 2]
7630 // [ 3, 4, 5, 6, 7, 8, 9, 10]
7631 // [-1, 4, 5, 6, -1, -1, 9, -1]
7632 // [-1, 4, 5, 6, -1, -1, -1, -1]
7635 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7638 assert(Mask[i] >= 0 && "Only -1 is a valid negative mask element!");
7640 // Based on the mod-Size value of this mask element determine where
7641 // a rotated vector would have started.
7642 int StartIdx = i - (Mask[i] % Size);
7644 // The identity rotation isn't interesting, stop.
7647 // If we found the tail of a vector, the rotation is the size of the missing
7648 // front; if we found the head, it is the distance from its new start to the end.
7649 int CandidateRotation = StartIdx < 0 ? -StartIdx : Size - StartIdx;
7652 Rotation = CandidateRotation;
7653 else if (Rotation != CandidateRotation)
7654 // The rotations don't match, so we can't match this mask.
7657 // Compute which value this mask is pointing at.
7658 SDValue MaskV = Mask[i] < Size ? V1 : V2;
7660 // Compute which of the two target values this index should be assigned to.
7661 // This reflects whether the high elements are remaining or the low elements
7663 SDValue &TargetV = StartIdx < 0 ? Hi : Lo;
7665 // Either set up this value if we've not encountered it before, or check
7666 // that it remains consistent.
7669 else if (TargetV != MaskV)
7670 // This may be a rotation, but it pulls from the inputs in some
7671 // unsupported interleaving.
7675 // Check that we successfully analyzed the mask, and normalize the results.
7676 assert(Rotation != 0 && "Failed to locate a viable rotation!");
7677 assert((Lo || Hi) && "Failed to find a rotated input vector!");
7683 assert(VT.getSizeInBits() == 128 &&
7684 "Rotate-based lowering only supports 128-bit lowering!");
7685 assert(Mask.size() <= 16 &&
7686 "Can shuffle at most 16 bytes in a 128-bit vector!");
7688 // The actual rotate instruction rotates bytes, so we need to scale the
7689 // rotation based on how many bytes are in the vector.
7690 int Scale = 16 / Mask.size();
7692 // SSSE3 targets can use the palignr instruction
7693 if (Subtarget->hasSSSE3()) {
7694 // Cast the inputs to v16i8 to match PALIGNR.
7695 Lo = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Lo);
7696 Hi = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Hi);
7698 return DAG.getNode(ISD::BITCAST, DL, VT,
7699 DAG.getNode(X86ISD::PALIGNR, DL, MVT::v16i8, Hi, Lo,
7700 DAG.getConstant(Rotation * Scale, MVT::i8)));
7703 // Default SSE2 implementation
7704 int LoByteShift = 16 - Rotation * Scale;
7705 int HiByteShift = Rotation * Scale;
7707 // Cast the inputs to v2i64 to match PSLLDQ/PSRLDQ.
7708 Lo = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Lo);
7709 Hi = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Hi);
7711 SDValue LoShift = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v2i64, Lo,
7712 DAG.getConstant(8 * LoByteShift, MVT::i8));
7713 SDValue HiShift = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v2i64, Hi,
7714 DAG.getConstant(8 * HiByteShift, MVT::i8));
7715 return DAG.getNode(ISD::BITCAST, DL, VT,
7716 DAG.getNode(ISD::OR, DL, MVT::v2i64, LoShift, HiShift));
7719 /// \brief Compute whether each element of a shuffle is zeroable.
7721 /// A "zeroable" vector shuffle element is one which can be lowered to zero.
7722 /// Either it is an undef element in the shuffle mask, the element of the input
7723 /// referenced is undef, or the element of the input referenced is known to be
7724 /// zero. Many x86 shuffles can zero lanes cheaply and we often want to handle
7725 /// as many lanes with this technique as possible to simplify the remaining
7727 static SmallBitVector computeZeroableShuffleElements(ArrayRef<int> Mask,
7728 SDValue V1, SDValue V2) {
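// For example, if V2 is (build_vector 0, undef, X, 0) for some non-zero X,
// then a mask element that is undef (-1), that reads any lane of an all-zeros
// input, or that reads lane 0, 1 or 3 of V2 is considered zeroable.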
7729 SmallBitVector Zeroable(Mask.size(), false);
7731 bool V1IsZero = ISD::isBuildVectorAllZeros(V1.getNode());
7732 bool V2IsZero = ISD::isBuildVectorAllZeros(V2.getNode());
7734 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7736 // Handle the easy cases.
7737 if (M < 0 || (M >= 0 && M < Size && V1IsZero) || (M >= Size && V2IsZero)) {
7742 // If this is an index into a build_vector node, dig out the input value and
7744 SDValue V = M < Size ? V1 : V2;
7745 if (V.getOpcode() != ISD::BUILD_VECTOR)
7748 SDValue Input = V.getOperand(M % Size);
7749 // The UNDEF opcode check really should be dead code here, but not quite
7750 // worth asserting on (it isn't invalid, just unexpected).
7751 if (Input.getOpcode() == ISD::UNDEF || X86::isZeroNode(Input))
7758 /// \brief Try to lower a vector shuffle as a byte shift (shifts in zeros).
7760 /// Attempts to match a shuffle mask against the PSRLDQ and PSLLDQ SSE2
7761 /// byte-shift instructions. The mask must consist of a shifted sequential
7762 /// shuffle from one of the input vectors and zeroable elements for the
7763 /// remaining 'shifted in' elements.
7765 /// Note that this only handles 128-bit vector widths currently.
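///
/// For example, the v8i16 mask [2, 3, 4, 5, 6, 7, zz, zz] (zz denoting a
/// zeroable element) is a right shift by 2 elements and becomes a psrldq by
/// 4 bytes, while [zz, zz, 0, 1, 2, 3, 4, 5] becomes a pslldq by 4 bytes.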
7766 static SDValue lowerVectorShuffleAsByteShift(SDLoc DL, MVT VT, SDValue V1,
7767 SDValue V2, ArrayRef<int> Mask,
7768 SelectionDAG &DAG) {
7769 assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
7771 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
7773 int Size = Mask.size();
7774 int Scale = 16 / Size;
7776 for (int Shift = 1; Shift < Size; Shift++) {
7777 int ByteShift = Shift * Scale;
7779 // PSRLDQ : (little-endian) right byte shift
7780 // [ 5, 6, 7, zz, zz, zz, zz, zz]
7781 // [ -1, 5, 6, 7, zz, zz, zz, zz]
7782 // [ 1, 2, -1, -1, -1, -1, zz, zz]
7783 bool ZeroableRight = true;
7784 for (int i = Size - Shift; i < Size; i++) {
7785 ZeroableRight &= Zeroable[i];
7788 if (ZeroableRight) {
7789 bool ValidShiftRight1 =
7790 isSequentialOrUndefInRange(Mask, 0, Size - Shift, Shift);
7791 bool ValidShiftRight2 =
7792 isSequentialOrUndefInRange(Mask, 0, Size - Shift, Size + Shift);
7794 if (ValidShiftRight1 || ValidShiftRight2) {
7795 // Cast the inputs to v2i64 to match PSRLDQ.
7796 SDValue &TargetV = ValidShiftRight1 ? V1 : V2;
7797 SDValue V = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, TargetV);
7798 SDValue Shifted = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v2i64, V,
7799 DAG.getConstant(ByteShift * 8, MVT::i8));
7800 return DAG.getNode(ISD::BITCAST, DL, VT, Shifted);
7804 // PSLLDQ : (little-endian) left byte shift
7805 // [ zz, 0, 1, 2, 3, 4, 5, 6]
7806 // [ zz, zz, -1, -1, 2, 3, 4, -1]
7807 // [ zz, zz, zz, zz, zz, zz, -1, 1]
7808 bool ZeroableLeft = true;
7809 for (int i = 0; i < Shift; i++) {
7810 ZeroableLeft &= Zeroable[i];
7814 bool ValidShiftLeft1 =
7815 isSequentialOrUndefInRange(Mask, Shift, Size - Shift, 0);
7816 bool ValidShiftLeft2 =
7817 isSequentialOrUndefInRange(Mask, Shift, Size - Shift, Size);
7819 if (ValidShiftLeft1 || ValidShiftLeft2) {
7820 // Cast the inputs to v2i64 to match PSLLDQ.
7821 SDValue &TargetV = ValidShiftLeft1 ? V1 : V2;
7822 SDValue V = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, TargetV);
7823 SDValue Shifted = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v2i64, V,
7824 DAG.getConstant(ByteShift * 8, MVT::i8));
7825 return DAG.getNode(ISD::BITCAST, DL, VT, Shifted);
7833 /// \brief Lower a vector shuffle as a zero or any extension.
7835 /// Given a specific number of elements, element bit width, and extension
7836 /// stride, produce either a zero or any extension based on the available
7837 /// features of the subtarget.
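///
/// For example, with NumElements == 4, EltBits == 32 and Scale == 2 this
/// produces a v4i32 -> v2i64 extension of the two low elements: a single
/// pmovzxdq on SSE4.1, a pshufd for the any-extend case otherwise, or a
/// punpckldq against a zero vector for the zero-extend case.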
7838 static SDValue lowerVectorShuffleAsSpecificZeroOrAnyExtend(
7839 SDLoc DL, MVT VT, int NumElements, int Scale, bool AnyExt, SDValue InputV,
7840 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
7841 assert(Scale > 1 && "Need a scale to extend.");
7842 int EltBits = VT.getSizeInBits() / NumElements;
7843 assert((EltBits == 8 || EltBits == 16 || EltBits == 32) &&
7844 "Only 8, 16, and 32 bit elements can be extended.");
7845 assert(Scale * EltBits <= 64 && "Cannot zero extend past 64 bits.");
7847 // Found a valid zext mask! Try various lowering strategies based on the
7848 // input type and available ISA extensions.
7849 if (Subtarget->hasSSE41()) {
7850 MVT InputVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits), NumElements);
7851 MVT ExtVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits * Scale),
7852 NumElements / Scale);
7853 InputV = DAG.getNode(ISD::BITCAST, DL, InputVT, InputV);
7854 return DAG.getNode(ISD::BITCAST, DL, VT,
7855 DAG.getNode(X86ISD::VZEXT, DL, ExtVT, InputV));
7858 // For any extends we can cheat for larger element sizes and use shuffle
7859 // instructions that can fold with a load and/or copy.
7860 if (AnyExt && EltBits == 32) {
7861 int PSHUFDMask[4] = {0, -1, 1, -1};
7863 ISD::BITCAST, DL, VT,
7864 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
7865 DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, InputV),
7866 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
7868 if (AnyExt && EltBits == 16 && Scale > 2) {
7869 int PSHUFDMask[4] = {0, -1, 0, -1};
7870 InputV = DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
7871 DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, InputV),
7872 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG));
7873 int PSHUFHWMask[4] = {1, -1, -1, -1};
7875 ISD::BITCAST, DL, VT,
7876 DAG.getNode(X86ISD::PSHUFHW, DL, MVT::v8i16,
7877 DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, InputV),
7878 getV4X86ShuffleImm8ForMask(PSHUFHWMask, DAG)));
7881 // If this would require more than 2 unpack instructions to expand, use
7882 // pshufb when available. We can only use more than 2 unpack instructions
7883 // when zero extending i8 elements which also makes it easier to use pshufb.
7884 if (Scale > 4 && EltBits == 8 && Subtarget->hasSSSE3()) {
7885 assert(NumElements == 16 && "Unexpected byte vector width!");
7886 SDValue PSHUFBMask[16];
7887 for (int i = 0; i < 16; ++i)
7889 DAG.getConstant((i % Scale == 0) ? i / Scale : 0x80, MVT::i8);
7890 InputV = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, InputV);
7891 return DAG.getNode(ISD::BITCAST, DL, VT,
7892 DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, InputV,
7893 DAG.getNode(ISD::BUILD_VECTOR, DL,
7894 MVT::v16i8, PSHUFBMask)));
7897 // Otherwise emit a sequence of unpacks.
7899 MVT InputVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits), NumElements);
7900 SDValue Ext = AnyExt ? DAG.getUNDEF(InputVT)
7901 : getZeroVector(InputVT, Subtarget, DAG, DL);
7902 InputV = DAG.getNode(ISD::BITCAST, DL, InputVT, InputV);
7903 InputV = DAG.getNode(X86ISD::UNPCKL, DL, InputVT, InputV, Ext);
7907 } while (Scale > 1);
7908 return DAG.getNode(ISD::BITCAST, DL, VT, InputV);
7911 /// \brief Try to lower a vector shuffle as a zero extension on any microarch.
7913 /// This routine will try to do everything in its power to cleverly lower
7914 /// a shuffle which happens to match the pattern of a zero extend. It doesn't
7915 /// check for the profitability of this lowering, it tries to aggressively
7916 /// match this pattern. It will use all of the micro-architectural details it
7917 /// can to emit an efficient lowering. It handles both blends with all-zero
7918 /// inputs to explicitly zero-extend and undef-lanes (sometimes undef due to
7919 /// masking out later).
7921 /// The reason we have dedicated lowering for zext-style shuffles is that they
7922 /// are both incredibly common and often quite performance sensitive.
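///
/// For example, the v8i16 mask [0, zz, 1, zz, 2, zz, 3, zz] (zz denoting a
/// zeroable element) is matched here with a scale of 2 and lowered as a zero
/// extension of the four low i16 elements of V1.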
7923 static SDValue lowerVectorShuffleAsZeroOrAnyExtend(
7924 SDLoc DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
7925 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
7926 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
7928 int Bits = VT.getSizeInBits();
7929 int NumElements = Mask.size();
7931 // Define a helper function to check a particular ext-scale and lower to it if
7933 auto Lower = [&](int Scale) -> SDValue {
7936 for (int i = 0; i < NumElements; ++i) {
7938 continue; // Valid anywhere but doesn't tell us anything.
7939 if (i % Scale != 0) {
7940 // Each of the extend elements needs to be zeroable.
7944 // We are no longer in the anyext case.
7949 // Each of the base elements needs to be consecutive indices into the
7950 // same input vector.
7951 SDValue V = Mask[i] < NumElements ? V1 : V2;
7954 else if (InputV != V)
7955 return SDValue(); // Flip-flopping inputs.
7957 if (Mask[i] % NumElements != i / Scale)
7958 return SDValue(); // Non-consecutive strided elements.
7961 // If we fail to find an input, we have a zero-shuffle which should always
7962 // have already been handled.
7963 // FIXME: Maybe handle this here in case during blending we end up with one?
7967 return lowerVectorShuffleAsSpecificZeroOrAnyExtend(
7968 DL, VT, NumElements, Scale, AnyExt, InputV, Subtarget, DAG);
7971 // The widest scale possible for extending is to a 64-bit integer.
7972 assert(Bits % 64 == 0 &&
7973 "The number of bits in a vector must be divisible by 64 on x86!");
7974 int NumExtElements = Bits / 64;
7976 // Each iteration, try extending the elements half as much, but into twice as
7977 // many elements.
7978 for (; NumExtElements < NumElements; NumExtElements *= 2) {
7979 assert(NumElements % NumExtElements == 0 &&
7980 "The input vector size must be divisble by the extended size.");
7981 if (SDValue V = Lower(NumElements / NumExtElements))
7985 // No viable ext lowering found.
7989 /// \brief Try to get a scalar value for a specific element of a vector.
7991 /// Looks through BUILD_VECTOR and SCALAR_TO_VECTOR nodes to find a scalar.
7992 static SDValue getScalarValueForVectorElement(SDValue V, int Idx,
7993 SelectionDAG &DAG) {
7994 MVT VT = V.getSimpleValueType();
7995 MVT EltVT = VT.getVectorElementType();
7996 while (V.getOpcode() == ISD::BITCAST)
7997 V = V.getOperand(0);
7998 // If the bitcasts shift the element size, we can't extract an equivalent
7999 // element from it.
8000 MVT NewVT = V.getSimpleValueType();
8001 if (!NewVT.isVector() || NewVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
8004 if (V.getOpcode() == ISD::BUILD_VECTOR ||
8005 (Idx == 0 && V.getOpcode() == ISD::SCALAR_TO_VECTOR))
8006 return DAG.getNode(ISD::BITCAST, SDLoc(V), EltVT, V.getOperand(Idx));
8011 /// \brief Helper to test for a load that can be folded with x86 shuffles.
8013 /// This is particularly important because the set of instructions varies
8014 /// significantly based on whether the operand is a load or not.
8015 static bool isShuffleFoldableLoad(SDValue V) {
8016 while (V.getOpcode() == ISD::BITCAST)
8017 V = V.getOperand(0);
8019 return ISD::isNON_EXTLoad(V.getNode());
8022 /// \brief Try to lower insertion of a single element into a zero vector.
8024 /// This is a common pattern for which we have especially efficient lowering
8025 /// across all subtarget feature sets.
8026 static SDValue lowerVectorShuffleAsElementInsertion(
8027 MVT VT, SDLoc DL, SDValue V1, SDValue V2, ArrayRef<int> Mask,
8028 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
8029 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
8030 MVT ExtVT = VT;
8031 MVT EltVT = VT.getVectorElementType();
8033 int V2Index = std::find_if(Mask.begin(), Mask.end(),
8034 [&Mask](int M) { return M >= (int)Mask.size(); }) -
8036 bool IsV1Zeroable = true;
8037 for (int i = 0, Size = Mask.size(); i < Size; ++i)
8038 if (i != V2Index && !Zeroable[i]) {
8039 IsV1Zeroable = false;
8043 // Check for a single input from a SCALAR_TO_VECTOR node.
8044 // FIXME: All of this should be canonicalized into INSERT_VECTOR_ELT and
8045 // all the smarts here sunk into that routine. However, the current
8046 // lowering of BUILD_VECTOR makes that nearly impossible until the old
8047 // vector shuffle lowering is dead.
8048 if (SDValue V2S = getScalarValueForVectorElement(
8049 V2, Mask[V2Index] - Mask.size(), DAG)) {
8050 // We need to zext the scalar if it is smaller than an i32.
8051 V2S = DAG.getNode(ISD::BITCAST, DL, EltVT, V2S);
8052 if (EltVT == MVT::i8 || EltVT == MVT::i16) {
8053 // Using zext to expand a narrow element won't work for non-zero
8054 // elements.
8055 if (!IsV1Zeroable)
8056 return SDValue();
8057 
8058 // Zero-extend directly to i32.
8059 ExtVT = MVT::v4i32;
8060 V2S = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, V2S);
8061 }
8062 V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, ExtVT, V2S);
8063 } else if (Mask[V2Index] != (int)Mask.size() || EltVT == MVT::i8 ||
8064 EltVT == MVT::i16) {
8065 // Either not inserting from the low element of the input or the input
8066 // element size is too small to use VZEXT_MOVL to clear the high bits.
8070 if (!IsV1Zeroable) {
8071 // If V1 can't be treated as a zero vector we have fewer options to lower
8072 // this. We can't support integer vectors or non-zero targets cheaply, and
8073 // the V1 elements can't be permuted in any way.
8074 assert(VT == ExtVT && "Cannot change extended type when non-zeroable!");
8075 if (!VT.isFloatingPoint() || V2Index != 0)
8077 SmallVector<int, 8> V1Mask(Mask.begin(), Mask.end());
8078 V1Mask[V2Index] = -1;
8079 if (!isNoopShuffleMask(V1Mask))
8081 // This is essentially a special case blend operation, but if we have
8082 // general purpose blend operations, they are always faster. Bail and let
8083 // the rest of the lowering handle these as blends.
8084 if (Subtarget->hasSSE41())
8087 // Otherwise, use MOVSD or MOVSS.
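// Both MOVSS and MOVSD copy the low scalar element from their second operand
// and pass the remaining elements through from the first operand, which is
// exactly the single-element blend shape we need here.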
8088 assert((EltVT == MVT::f32 || EltVT == MVT::f64) &&
8089 "Only two types of floating point element types to handle!");
8090 return DAG.getNode(EltVT == MVT::f32 ? X86ISD::MOVSS : X86ISD::MOVSD, DL,
8094 V2 = DAG.getNode(X86ISD::VZEXT_MOVL, DL, ExtVT, V2);
8096 V2 = DAG.getNode(ISD::BITCAST, DL, VT, V2);
8099 // If we have 4 or fewer lanes we can cheaply shuffle the element into
8100 // the desired position. Otherwise it is more efficient to do a vector
8101 // shift left. We know that we can do a vector shift left because all
8102 // the inputs are zero.
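// A whole-register byte shift (VSHLDQ / PSLLDQ) fills the vacated low bytes
// with zeros; since every other lane of the result must be zero anyway,
// shifting the scalar up from lane 0 produces exactly the vector we want.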
8103 if (VT.isFloatingPoint() || VT.getVectorNumElements() <= 4) {
8104 SmallVector<int, 4> V2Shuffle(Mask.size(), 1);
8105 V2Shuffle[V2Index] = 0;
8106 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Shuffle);
8108 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, V2);
8110 X86ISD::VSHLDQ, DL, MVT::v2i64, V2,
8112 V2Index * EltVT.getSizeInBits(),
8113 DAG.getTargetLoweringInfo().getScalarShiftAmountTy(MVT::v2i64)));
8114 V2 = DAG.getNode(ISD::BITCAST, DL, VT, V2);
8120 /// \brief Try to lower broadcast of a single element.
8122 /// For convenience, this code also bundles all of the subtarget feature set
8123 /// filtering. While a little annoying to re-dispatch on type here, there isn't
8124 /// a convenient way to factor it out.
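///
/// A broadcast candidate mask repeats a single element index in every defined
/// lane, e.g. <3, 3, 3, 3> or <1, u, 1, 1>.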
8125 static SDValue lowerVectorShuffleAsBroadcast(MVT VT, SDLoc DL, SDValue V,
8127 const X86Subtarget *Subtarget,
8128 SelectionDAG &DAG) {
8129 if (!Subtarget->hasAVX())
8131 if (VT.isInteger() && !Subtarget->hasAVX2())
8134 // Check that the mask is a broadcast.
8135 int BroadcastIdx = -1;
8136 for (int M : Mask)
8137 if (M >= 0 && BroadcastIdx == -1)
8138 BroadcastIdx = M;
8139 else if (M >= 0 && M != BroadcastIdx)
8140 return SDValue();
8142 assert(BroadcastIdx < (int)Mask.size() && "We only expect to be called with "
8143 "a sorted mask where the broadcast "
8146 // Go up the chain of (vector) values to try and find a scalar load that
8147 // we can combine with the broadcast.
8149 switch (V.getOpcode()) {
8150 case ISD::CONCAT_VECTORS: {
8151 int OperandSize = Mask.size() / V.getNumOperands();
8152 V = V.getOperand(BroadcastIdx / OperandSize);
8153 BroadcastIdx %= OperandSize;
8157 case ISD::INSERT_SUBVECTOR: {
8158 SDValue VOuter = V.getOperand(0), VInner = V.getOperand(1);
8159 auto ConstantIdx = dyn_cast<ConstantSDNode>(V.getOperand(2));
8163 int BeginIdx = (int)ConstantIdx->getZExtValue();
8165 BeginIdx + (int)VInner.getValueType().getVectorNumElements();
8166 if (BroadcastIdx >= BeginIdx && BroadcastIdx < EndIdx) {
8167 BroadcastIdx -= BeginIdx;
8178 // Check if this is a broadcast of a scalar. We special case lowering
8179 // for scalars so that we can more effectively fold with loads.
8180 if (V.getOpcode() == ISD::BUILD_VECTOR ||
8181 (V.getOpcode() == ISD::SCALAR_TO_VECTOR && BroadcastIdx == 0)) {
8182 V = V.getOperand(BroadcastIdx);
8184 // If the scalar isn't a load we can't broadcast from it in AVX1, only with
8185 // AVX2.
8186 if (!Subtarget->hasAVX2() && !isShuffleFoldableLoad(V))
8188 } else if (BroadcastIdx != 0 || !Subtarget->hasAVX2()) {
8189 // We can't broadcast from a vector register w/o AVX2, and we can only
8190 // broadcast from the zero-element of a vector register.
8194 return DAG.getNode(X86ISD::VBROADCAST, DL, VT, V);
8197 // Check for whether we can use INSERTPS to perform the shuffle. We only use
8198 // INSERTPS when the V1 elements are already in the correct locations
8199 // because otherwise we can just always use two SHUFPS instructions which
8200 // are much smaller to encode than a SHUFPS and an INSERTPS. We can also
8201 // perform INSERTPS if a single V1 element is out of place and all V2
8202 // elements are zeroable.
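// The INSERTPS immediate byte packs three fields: bits [7:6] select the source
// element of the second operand, bits [5:4] select the destination element in
// the result, and bits [3:0] are a mask of result elements forced to zero.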
8203 static SDValue lowerVectorShuffleAsInsertPS(SDValue Op, SDValue V1, SDValue V2,
8205 SelectionDAG &DAG) {
8206 assert(Op.getSimpleValueType() == MVT::v4f32 && "Bad shuffle type!");
8207 assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
8208 assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
8209 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
8211 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
8214 int V1DstIndex = -1;
8215 int V2DstIndex = -1;
8216 bool V1UsedInPlace = false;
8218 for (int i = 0; i < 4; i++) {
8219 // Synthesize a zero mask from the zeroable elements (includes undefs).
8225 // Flag if we use any V1 inputs in place.
8227 V1UsedInPlace = true;
8231 // We can only insert a single non-zeroable element.
8232 if (V1DstIndex != -1 || V2DstIndex != -1)
8236 // V1 input out of place for insertion.
8239 // V2 input for insertion.
8244 // Don't bother if we have no (non-zeroable) element for insertion.
8245 if (V1DstIndex == -1 && V2DstIndex == -1)
8248 // Determine element insertion src/dst indices. The src index is from the
8249 // start of the inserted vector, not the start of the concatenated vector.
8250 unsigned V2SrcIndex = 0;
8251 if (V1DstIndex != -1) {
8252 // If we have a V1 input out of place, we use V1 as the V2 element insertion
8253 // and don't use the original V2 at all.
8254 V2SrcIndex = Mask[V1DstIndex];
8255 V2DstIndex = V1DstIndex;
8258 V2SrcIndex = Mask[V2DstIndex] - 4;
8261 // If no V1 inputs are used in place, then the result is created only from
8262 // the zero mask and the V2 insertion - so remove V1 dependency.
8264 V1 = DAG.getUNDEF(MVT::v4f32);
8266 unsigned InsertPSMask = V2SrcIndex << 6 | V2DstIndex << 4 | ZMask;
8267 assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
8269 // Insert the V2 element into the desired position.
8271 return DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
8272 DAG.getConstant(InsertPSMask, MVT::i8));
8275 /// \brief Handle lowering of 2-lane 64-bit floating point shuffles.
8277 /// This is the basis function for the 2-lane 64-bit shuffles as we have full
8278 /// support for floating point shuffles but not integer shuffles. These
8279 /// instructions will incur a domain crossing penalty on some chips though so
8280 /// it is better to avoid lowering through this for integer vectors where
8281 /// possible.
8282 static SDValue lowerV2F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
8283 const X86Subtarget *Subtarget,
8284 SelectionDAG &DAG) {
8286 assert(Op.getSimpleValueType() == MVT::v2f64 && "Bad shuffle type!");
8287 assert(V1.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
8288 assert(V2.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
8289 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8290 ArrayRef<int> Mask = SVOp->getMask();
8291 assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
8293 if (isSingleInputShuffleMask(Mask)) {
8294 // Use low duplicate instructions for masks that match their pattern.
8295 if (Subtarget->hasSSE3())
8296 if (isShuffleEquivalent(Mask, 0, 0))
8297 return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v2f64, V1);
8299 // Straight shuffle of a single input vector. Simulate this by using the
8300 // single input as both of the "inputs" to this instruction.
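// The SHUFPD immediate uses one bit per result lane: bit 0 picks the element
// placed in the low lane and bit 1 picks the element placed in the high lane.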
8301 unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1);
8303 if (Subtarget->hasAVX()) {
8304 // If we have AVX, we can use VPERMILPS which will allow folding a load
8305 // into the shuffle.
8306 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v2f64, V1,
8307 DAG.getConstant(SHUFPDMask, MVT::i8));
8310 return DAG.getNode(X86ISD::SHUFP, SDLoc(Op), MVT::v2f64, V1, V1,
8311 DAG.getConstant(SHUFPDMask, MVT::i8));
8313 assert(Mask[0] >= 0 && Mask[0] < 2 && "Non-canonicalized blend!");
8314 assert(Mask[1] >= 2 && "Non-canonicalized blend!");
8316 // Use dedicated unpack instructions for masks that match their pattern.
8317 if (isShuffleEquivalent(Mask, 0, 2))
8318 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2f64, V1, V2);
8319 if (isShuffleEquivalent(Mask, 1, 3))
8320 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v2f64, V1, V2);
8322 // If we have a single input, insert that into V1 if we can do so cheaply.
8323 if ((Mask[0] >= 2) + (Mask[1] >= 2) == 1) {
8324 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
8325 MVT::v2f64, DL, V1, V2, Mask, Subtarget, DAG))
8327 // Try inverting the insertion since for v2 masks it is easy to do and we
8328 // can't reliably sort the mask one way or the other.
8329 int InverseMask[2] = {Mask[0] < 0 ? -1 : (Mask[0] ^ 2),
8330 Mask[1] < 0 ? -1 : (Mask[1] ^ 2)};
8331 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
8332 MVT::v2f64, DL, V2, V1, InverseMask, Subtarget, DAG))
8336 // Try to use one of the special instruction patterns to handle two common
8337 // blend patterns if a zero-blend above didn't work.
8338 if (isShuffleEquivalent(Mask, 0, 3) || isShuffleEquivalent(Mask, 1, 3))
8339 if (SDValue V1S = getScalarValueForVectorElement(V1, Mask[0], DAG))
8340 // We can either use a special instruction to load over the low double or
8341 // to move just the low double.
8343 isShuffleFoldableLoad(V1S) ? X86ISD::MOVLPD : X86ISD::MOVSD,
8345 DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64, V1S));
8347 if (Subtarget->hasSSE41())
8348 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v2f64, V1, V2, Mask,
8352 unsigned SHUFPDMask = (Mask[0] == 1) | (((Mask[1] - 2) == 1) << 1);
8353 return DAG.getNode(X86ISD::SHUFP, SDLoc(Op), MVT::v2f64, V1, V2,
8354 DAG.getConstant(SHUFPDMask, MVT::i8));
8357 /// \brief Handle lowering of 2-lane 64-bit integer shuffles.
8359 /// Tries to lower a 2-lane 64-bit shuffle using shuffle operations provided by
8360 /// the integer unit to minimize domain crossing penalties. However, for blends
8361 /// it falls back to the floating point shuffle operation with appropriate bit
8362 /// casting.
8363 static SDValue lowerV2I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
8364 const X86Subtarget *Subtarget,
8365 SelectionDAG &DAG) {
8367 assert(Op.getSimpleValueType() == MVT::v2i64 && "Bad shuffle type!");
8368 assert(V1.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
8369 assert(V2.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
8370 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8371 ArrayRef<int> Mask = SVOp->getMask();
8372 assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
8374 if (isSingleInputShuffleMask(Mask)) {
8375 // Check for being able to broadcast a single element.
8376 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v2i64, DL, V1,
8377 Mask, Subtarget, DAG))
8380 // Straight shuffle of a single input vector. For everything from SSE2
8381 // onward this has a single fast instruction with no scary immediates.
8382 // We have to map the mask as it is actually a v4i32 shuffle instruction.
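// Each 64-bit lane maps to a pair of adjacent 32-bit lanes, so mask element M
// becomes the pair {2*M, 2*M+1} (undef lanes are clamped to element 0).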
8383 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V1);
8384 int WidenedMask[4] = {
8385 std::max(Mask[0], 0) * 2, std::max(Mask[0], 0) * 2 + 1,
8386 std::max(Mask[1], 0) * 2, std::max(Mask[1], 0) * 2 + 1};
8387 return DAG.getNode(
8388 ISD::BITCAST, DL, MVT::v2i64,
8389 DAG.getNode(X86ISD::PSHUFD, SDLoc(Op), MVT::v4i32, V1,
8390 getV4X86ShuffleImm8ForMask(WidenedMask, DAG)));
8393 // Try to use byte shift instructions.
8394 if (SDValue Shift = lowerVectorShuffleAsByteShift(
8395 DL, MVT::v2i64, V1, V2, Mask, DAG))
8398 // If we have a single input from V2 insert that into V1 if we can do so
8400 if ((Mask[0] >= 2) + (Mask[1] >= 2) == 1) {
8401 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
8402 MVT::v2i64, DL, V1, V2, Mask, Subtarget, DAG))
8404 // Try inverting the insertion since for v2 masks it is easy to do and we
8405 // can't reliably sort the mask one way or the other.
8406 int InverseMask[2] = {Mask[0] < 0 ? -1 : (Mask[0] ^ 2),
8407 Mask[1] < 0 ? -1 : (Mask[1] ^ 2)};
8408 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
8409 MVT::v2i64, DL, V2, V1, InverseMask, Subtarget, DAG))
8413 // Use dedicated unpack instructions for masks that match their pattern.
8414 if (isShuffleEquivalent(Mask, 0, 2))
8415 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2i64, V1, V2);
8416 if (isShuffleEquivalent(Mask, 1, 3))
8417 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v2i64, V1, V2);
8419 if (Subtarget->hasSSE41())
8420 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v2i64, V1, V2, Mask,
8424 // Try to use byte rotation instructions.
8425 // It's more profitable for pre-SSSE3 targets to use shuffles/unpacks.
8426 if (Subtarget->hasSSSE3())
8427 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
8428 DL, MVT::v2i64, V1, V2, Mask, Subtarget, DAG))
8431 // We implement this with SHUFPD which is pretty lame because it will likely
8432 // incur 2 cycles of stall for integer vectors on Nehalem and older chips.
8433 // However, all the alternatives are still more cycles and newer chips don't
8434 // have this problem. It would be really nice if x86 had better shuffles here.
8435 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v2f64, V1);
8436 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v2f64, V2);
8437 return DAG.getNode(ISD::BITCAST, DL, MVT::v2i64,
8438 DAG.getVectorShuffle(MVT::v2f64, DL, V1, V2, Mask));
8441 /// \brief Lower a vector shuffle using the SHUFPS instruction.
8443 /// This is a helper routine dedicated to lowering vector shuffles using SHUFPS.
8444 /// It makes no assumptions about whether this is the *best* lowering, it simply
8445 /// uses it.
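///
/// SHUFPS builds the two low result lanes from its first operand and the two
/// high result lanes from its second operand, selecting each lane with a
/// two-bit field of the immediate; the fix-ups below work within that shape.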
8446 static SDValue lowerVectorShuffleWithSHUFPS(SDLoc DL, MVT VT,
8447 ArrayRef<int> Mask, SDValue V1,
8448 SDValue V2, SelectionDAG &DAG) {
8449 SDValue LowV = V1, HighV = V2;
8450 int NewMask[4] = {Mask[0], Mask[1], Mask[2], Mask[3]};
8453 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
8455 if (NumV2Elements == 1) {
8457 std::find_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; }) -
8460 // Compute the index adjacent to V2Index and in the same half by toggling
8462 int V2AdjIndex = V2Index ^ 1;
8464 if (Mask[V2AdjIndex] == -1) {
8465 // Handles all the cases where we have a single V2 element and an undef.
8466 // This will only ever happen in the high lanes because we commute the
8467 // vector otherwise.
8469 std::swap(LowV, HighV);
8470 NewMask[V2Index] -= 4;
8472 // Handle the case where the V2 element ends up adjacent to a V1 element.
8473 // To make this work, blend them together as the first step.
8474 int V1Index = V2AdjIndex;
8475 int BlendMask[4] = {Mask[V2Index] - 4, 0, Mask[V1Index], 0};
8476 V2 = DAG.getNode(X86ISD::SHUFP, DL, VT, V2, V1,
8477 getV4X86ShuffleImm8ForMask(BlendMask, DAG));
8479 // Now proceed to reconstruct the final blend as we have the necessary
8480 // high or low half formed.
8487 NewMask[V1Index] = 2; // We put the V1 element in V2[2].
8488 NewMask[V2Index] = 0; // We shifted the V2 element into V2[0].
8490 } else if (NumV2Elements == 2) {
8491 if (Mask[0] < 4 && Mask[1] < 4) {
8492 // Handle the easy case where we have V1 in the low lanes and V2 in the
8496 } else if (Mask[2] < 4 && Mask[3] < 4) {
8497 // We also handle the reversed case because this utility may get called
8498 // when we detect a SHUFPS pattern but can't easily commute the shuffle to
8499 // arrange things in the right direction.
8505 // We have a mixture of V1 and V2 in both low and high lanes. Rather than
8506 // trying to place elements directly, just blend them and set up the final
8507 // shuffle to place them.
8509 // The first two blend mask elements are for V1, the second two are for
8511 int BlendMask[4] = {Mask[0] < 4 ? Mask[0] : Mask[1],
8512 Mask[2] < 4 ? Mask[2] : Mask[3],
8513 (Mask[0] >= 4 ? Mask[0] : Mask[1]) - 4,
8514 (Mask[2] >= 4 ? Mask[2] : Mask[3]) - 4};
8515 V1 = DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2,
8516 getV4X86ShuffleImm8ForMask(BlendMask, DAG));
8518 // Now we do a normal shuffle of V1 by giving V1 as both operands to
8521 NewMask[0] = Mask[0] < 4 ? 0 : 2;
8522 NewMask[1] = Mask[0] < 4 ? 2 : 0;
8523 NewMask[2] = Mask[2] < 4 ? 1 : 3;
8524 NewMask[3] = Mask[2] < 4 ? 3 : 1;
8527 return DAG.getNode(X86ISD::SHUFP, DL, VT, LowV, HighV,
8528 getV4X86ShuffleImm8ForMask(NewMask, DAG));
8531 /// \brief Lower 4-lane 32-bit floating point shuffles.
8533 /// Uses instructions exclusively from the floating point unit to minimize
8534 /// domain crossing penalties, as these are sufficient to implement all v4f32
8535 /// shuffles.
8536 static SDValue lowerV4F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
8537 const X86Subtarget *Subtarget,
8538 SelectionDAG &DAG) {
8540 assert(Op.getSimpleValueType() == MVT::v4f32 && "Bad shuffle type!");
8541 assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
8542 assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
8543 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8544 ArrayRef<int> Mask = SVOp->getMask();
8545 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
8548 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
8550 if (NumV2Elements == 0) {
8551 // Check for being able to broadcast a single element.
8552 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4f32, DL, V1,
8553 Mask, Subtarget, DAG))
8556 // Use even/odd duplicate instructions for masks that match their pattern.
8557 if (Subtarget->hasSSE3()) {
8558 if (isShuffleEquivalent(Mask, 0, 0, 2, 2))
8559 return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v4f32, V1);
8560 if (isShuffleEquivalent(Mask, 1, 1, 3, 3))
8561 return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v4f32, V1);
8564 if (Subtarget->hasAVX()) {
8565 // If we have AVX, we can use VPERMILPS which will allow folding a load
8566 // into the shuffle.
8567 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f32, V1,
8568 getV4X86ShuffleImm8ForMask(Mask, DAG));
8571 // Otherwise, use a straight shuffle of a single input vector. We pass the
8572 // input vector to both operands to simulate this with a SHUFPS.
8573 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f32, V1, V1,
8574 getV4X86ShuffleImm8ForMask(Mask, DAG));
8577 // Use dedicated unpack instructions for masks that match their pattern.
8578 if (isShuffleEquivalent(Mask, 0, 4, 1, 5))
8579 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4f32, V1, V2);
8580 if (isShuffleEquivalent(Mask, 2, 6, 3, 7))
8581 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4f32, V1, V2);
8583 // There are special ways we can lower some single-element blends. However, we
8584 // have custom ways we can lower more complex single-element blends below that
8585 // we defer to if both this and BLENDPS fail to match, so restrict this to
8586 // when the V2 input is targeting element 0 of the mask -- that is the fast
8588 if (NumV2Elements == 1 && Mask[0] >= 4)
8589 if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v4f32, DL, V1, V2,
8590 Mask, Subtarget, DAG))
8593 if (Subtarget->hasSSE41()) {
8594 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4f32, V1, V2, Mask,
8598 // Use INSERTPS if we can complete the shuffle efficiently.
8599 if (SDValue V = lowerVectorShuffleAsInsertPS(Op, V1, V2, Mask, DAG))
8603 // Otherwise fall back to a SHUFPS lowering strategy.
8604 return lowerVectorShuffleWithSHUFPS(DL, MVT::v4f32, Mask, V1, V2, DAG);
8607 /// \brief Lower 4-lane i32 vector shuffles.
8609 /// We try to handle these with integer-domain shuffles where we can, but for
8610 /// blends we use the floating point domain blend instructions.
8611 static SDValue lowerV4I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
8612 const X86Subtarget *Subtarget,
8613 SelectionDAG &DAG) {
8615 assert(Op.getSimpleValueType() == MVT::v4i32 && "Bad shuffle type!");
8616 assert(V1.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
8617 assert(V2.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
8618 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8619 ArrayRef<int> Mask = SVOp->getMask();
8620 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
8622 // Whenever we can lower this as a zext, that instruction is strictly faster
8623 // than any alternative. It also allows us to fold memory operands into the
8624 // shuffle in many cases.
8625 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v4i32, V1, V2,
8626 Mask, Subtarget, DAG))
8630 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
8632 if (NumV2Elements == 0) {
8633 // Check for being able to broadcast a single element.
8634 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4i32, DL, V1,
8635 Mask, Subtarget, DAG))
8638 // Straight shuffle of a single input vector. For everything from SSE2
8639 // onward this has a single fast instruction with no scary immediates.
8640 // We coerce the shuffle pattern to be compatible with UNPCK instructions
8641 // but we aren't actually going to use the UNPCK instruction because doing
8642 // so prevents folding a load into this instruction or making a copy.
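// For example, PSHUFD with the mask <0, 0, 1, 1> produces the same value as
// UNPCKLDQ of the vector with itself, but keeps a memory operand foldable and
// avoids tying up a second copy of the register.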
8643 const int UnpackLoMask[] = {0, 0, 1, 1};
8644 const int UnpackHiMask[] = {2, 2, 3, 3};
8645 if (isShuffleEquivalent(Mask, 0, 0, 1, 1))
8646 Mask = UnpackLoMask;
8647 else if (isShuffleEquivalent(Mask, 2, 2, 3, 3))
8648 Mask = UnpackHiMask;
8650 return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
8651 getV4X86ShuffleImm8ForMask(Mask, DAG));
8654 // Try to use byte shift instructions.
8655 if (SDValue Shift = lowerVectorShuffleAsByteShift(
8656 DL, MVT::v4i32, V1, V2, Mask, DAG))
8659 // There are special ways we can lower some single-element blends.
8660 if (NumV2Elements == 1)
8661 if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v4i32, DL, V1, V2,
8662 Mask, Subtarget, DAG))
8665 // Use dedicated unpack instructions for masks that match their pattern.
8666 if (isShuffleEquivalent(Mask, 0, 4, 1, 5))
8667 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4i32, V1, V2);
8668 if (isShuffleEquivalent(Mask, 2, 6, 3, 7))
8669 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4i32, V1, V2);
8671 if (Subtarget->hasSSE41())
8672 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4i32, V1, V2, Mask,
8676 // Try to use byte rotation instructions.
8677 // It's more profitable for pre-SSSE3 targets to use shuffles/unpacks.
8678 if (Subtarget->hasSSSE3())
8679 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
8680 DL, MVT::v4i32, V1, V2, Mask, Subtarget, DAG))
8683 // We implement this with SHUFPS because it can blend from two vectors.
8684 // Because we're going to eventually use SHUFPS, we use SHUFPS even to build
8685 // up the inputs, bypassing domain shift penalties that we would incur if we
8686 // directly used PSHUFD on Nehalem and older. For newer chips, this isn't
8687 // an issue.
8688 return DAG.getNode(ISD::BITCAST, DL, MVT::v4i32,
8689 DAG.getVectorShuffle(
8691 DAG.getNode(ISD::BITCAST, DL, MVT::v4f32, V1),
8692 DAG.getNode(ISD::BITCAST, DL, MVT::v4f32, V2), Mask));
8695 /// \brief Lowering of single-input v8i16 shuffles is the cornerstone of SSE2
8696 /// shuffle lowering, and the most complex part.
8698 /// The lowering strategy is to try to form pairs of input lanes which are
8699 /// targeted at the same half of the final vector, and then use a dword shuffle
8700 /// to place them onto the right half, and finally unpack the paired lanes into
8701 /// their final position.
8703 /// The exact breakdown of how to form these dword pairs and align them on the
8704 /// correct sides is really tricky. See the comments within the function for
8705 /// more of the details.
8706 static SDValue lowerV8I16SingleInputVectorShuffle(
8707 SDLoc DL, SDValue V, MutableArrayRef<int> Mask,
8708 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
8709 assert(V.getSimpleValueType() == MVT::v8i16 && "Bad input type!");
8710 MutableArrayRef<int> LoMask = Mask.slice(0, 4);
8711 MutableArrayRef<int> HiMask = Mask.slice(4, 4);
8713 SmallVector<int, 4> LoInputs;
8714 std::copy_if(LoMask.begin(), LoMask.end(), std::back_inserter(LoInputs),
8715 [](int M) { return M >= 0; });
8716 std::sort(LoInputs.begin(), LoInputs.end());
8717 LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()), LoInputs.end());
8718 SmallVector<int, 4> HiInputs;
8719 std::copy_if(HiMask.begin(), HiMask.end(), std::back_inserter(HiInputs),
8720 [](int M) { return M >= 0; });
8721 std::sort(HiInputs.begin(), HiInputs.end());
8722 HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()), HiInputs.end());
8724 std::lower_bound(LoInputs.begin(), LoInputs.end(), 4) - LoInputs.begin();
8725 int NumHToL = LoInputs.size() - NumLToL;
8727 std::lower_bound(HiInputs.begin(), HiInputs.end(), 4) - HiInputs.begin();
8728 int NumHToH = HiInputs.size() - NumLToH;
8729 MutableArrayRef<int> LToLInputs(LoInputs.data(), NumLToL);
8730 MutableArrayRef<int> LToHInputs(HiInputs.data(), NumLToH);
8731 MutableArrayRef<int> HToLInputs(LoInputs.data() + NumLToL, NumHToL);
8732 MutableArrayRef<int> HToHInputs(HiInputs.data() + NumLToH, NumHToH);
8734 // Check for being able to broadcast a single element.
8735 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v8i16, DL, V,
8736 Mask, Subtarget, DAG))
8739 // Try to use byte shift instructions.
8740 if (SDValue Shift = lowerVectorShuffleAsByteShift(
8741 DL, MVT::v8i16, V, V, Mask, DAG))
8744 // Use dedicated unpack instructions for masks that match their pattern.
8745 if (isShuffleEquivalent(Mask, 0, 0, 1, 1, 2, 2, 3, 3))
8746 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i16, V, V);
8747 if (isShuffleEquivalent(Mask, 4, 4, 5, 5, 6, 6, 7, 7))
8748 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i16, V, V);
8750 // Try to use byte rotation instructions.
8751 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
8752 DL, MVT::v8i16, V, V, Mask, Subtarget, DAG))
8755 // Simplify the 1-into-3 and 3-into-1 cases with a single pshufd. For all
8756 // such inputs we can swap two of the dwords across the half mark and end up
8757 // with <=2 inputs to each half in each half. Once there, we can fall through
8758 // to the generic code below. For example:
8760 // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
8761 // Mask: [0, 1, 2, 7, 4, 5, 6, 3] -----------------> [0, 1, 4, 7, 2, 3, 6, 5]
8763 // However in some very rare cases we have a 1-into-3 or 3-into-1 on one half
8764 // and an existing 2-into-2 on the other half. In this case we may have to
8765 // pre-shuffle the 2-into-2 half to avoid turning it into a 3-into-1 or
8766 // 1-into-3 which could cause us to cycle endlessly fixing each side in turn.
8767 // Fortunately, we don't have to handle anything but a 2-into-2 pattern
8768 // because any other situation (including a 3-into-1 or 1-into-3 in the other
8769 // half than the one we target for fixing) will be fixed when we re-enter this
8770 // path. We will also combine away any sequence of PSHUFD instructions that
8771 // result into a single instruction. Here is an example of the tricky case:
8773 // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
8774 // Mask: [3, 7, 1, 0, 2, 7, 3, 5] -THIS-IS-BAD!!!!-> [5, 7, 1, 0, 4, 7, 5, 3]
8776 // This now has a 1-into-3 in the high half! Instead, we do two shuffles:
8778 // Input: [a, b, c, d, e, f, g, h] PSHUFHW[0,2,1,3]-> [a, b, c, d, e, g, f, h]
8779 // Mask: [3, 7, 1, 0, 2, 7, 3, 5] -----------------> [3, 7, 1, 0, 2, 7, 3, 6]
8781 // Input: [a, b, c, d, e, g, f, h] -PSHUFD[0,2,1,3]-> [a, b, e, g, c, d, f, h]
8782 // Mask: [3, 7, 1, 0, 2, 7, 3, 6] -----------------> [5, 7, 1, 0, 4, 7, 5, 6]
8784 // The result is fine to be handled by the generic logic.
8785 auto balanceSides = [&](ArrayRef<int> AToAInputs, ArrayRef<int> BToAInputs,
8786 ArrayRef<int> BToBInputs, ArrayRef<int> AToBInputs,
8787 int AOffset, int BOffset) {
8788 assert((AToAInputs.size() == 3 || AToAInputs.size() == 1) &&
8789 "Must call this with A having 3 or 1 inputs from the A half.");
8790 assert((BToAInputs.size() == 1 || BToAInputs.size() == 3) &&
8791 "Must call this with B having 1 or 3 inputs from the B half.");
8792 assert(AToAInputs.size() + BToAInputs.size() == 4 &&
8793 "Must call this with either 3:1 or 1:3 inputs (summing to 4).");
8795 // Compute the index of dword with only one word among the three inputs in
8796 // a half by taking the sum of the half with three inputs and subtracting
8797 // the sum of the actual three inputs. The difference is the remaining
8798 // non-input word.
8799 int ADWord, BDWord;
8800 int &TripleDWord = AToAInputs.size() == 3 ? ADWord : BDWord;
8801 int &OneInputDWord = AToAInputs.size() == 3 ? BDWord : ADWord;
8802 int TripleInputOffset = AToAInputs.size() == 3 ? AOffset : BOffset;
8803 ArrayRef<int> TripleInputs = AToAInputs.size() == 3 ? AToAInputs : BToAInputs;
8804 int OneInput = AToAInputs.size() == 3 ? BToAInputs[0] : AToAInputs[0];
8805 int TripleInputSum = 0 + 1 + 2 + 3 + (4 * TripleInputOffset);
8806 int TripleNonInputIdx =
8807 TripleInputSum - std::accumulate(TripleInputs.begin(), TripleInputs.end(), 0);
8808 TripleDWord = TripleNonInputIdx / 2;
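// For example, if the A half (offset 4) contributes inputs {4, 5, 7}, the
// indices of that half sum to 4 + 5 + 6 + 7 = 22, so the remaining word is
// 22 - (4 + 5 + 7) = 6 and the dword holding it is 6 / 2 = 3.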
8810 // We use xor with one to compute the adjacent DWord to whichever one the
8811 // single input word is in.
8812 OneInputDWord = (OneInput / 2) ^ 1;
8814 // Check for one tricky case: We're fixing a 3<-1 or a 1<-3 shuffle for AToA
8815 // and BToA inputs. If there is also such a problem with the BToB and AToB
8816 // inputs, we don't try to fix it necessarily -- we'll recurse and see it in
8817 // the next pass. However, if we have a 2<-2 in the BToB and AToB inputs, it
8818 // is essential that we don't *create* a 3<-1 as then we might oscillate.
8819 if (BToBInputs.size() == 2 && AToBInputs.size() == 2) {
8820 // Compute how many inputs will be flipped by swapping these DWords. We
8821 // need to balance this to ensure we don't form a 3-1 shuffle in the
8822 // other half.
8824 int NumFlippedAToBInputs =
8825 std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord) +
8826 std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord + 1);
8827 int NumFlippedBToBInputs =
8828 std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord) +
8829 std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord + 1);
8830 if ((NumFlippedAToBInputs == 1 &&
8831 (NumFlippedBToBInputs == 0 || NumFlippedBToBInputs == 2)) ||
8832 (NumFlippedBToBInputs == 1 &&
8833 (NumFlippedAToBInputs == 0 || NumFlippedAToBInputs == 2))) {
8834 // We choose whether to fix the A half or B half based on whether that
8835 // half has zero flipped inputs. At zero, we may not be able to fix it
8836 // with that half. We also bias towards fixing the B half because that
8837 // will more commonly be the high half, and we have to bias one way.
8838 auto FixFlippedInputs = [&V, &DL, &Mask, &DAG](int PinnedIdx, int DWord,
8839 ArrayRef<int> Inputs) {
8840 int FixIdx = PinnedIdx ^ 1; // The adjacent slot to the pinned slot.
8841 bool IsFixIdxInput = std::find(Inputs.begin(), Inputs.end(),
8842 PinnedIdx ^ 1) != Inputs.end();
8843 // Determine whether the free index is in the flipped dword or the
8844 // unflipped dword based on where the pinned index is. We use this bit
8845 // in an xor to conditionally select the adjacent dword.
8846 int FixFreeIdx = 2 * (DWord ^ (PinnedIdx / 2 == DWord));
8847 bool IsFixFreeIdxInput = std::find(Inputs.begin(), Inputs.end(),
8848 FixFreeIdx) != Inputs.end();
8849 if (IsFixIdxInput == IsFixFreeIdxInput)
8851 IsFixFreeIdxInput = std::find(Inputs.begin(), Inputs.end(),
8852 FixFreeIdx) != Inputs.end();
8853 assert(IsFixIdxInput != IsFixFreeIdxInput &&
8854 "We need to be changing the number of flipped inputs!");
8855 int PSHUFHalfMask[] = {0, 1, 2, 3};
8856 std::swap(PSHUFHalfMask[FixFreeIdx % 4], PSHUFHalfMask[FixIdx % 4]);
8857 V = DAG.getNode(FixIdx < 4 ? X86ISD::PSHUFLW : X86ISD::PSHUFHW, DL,
8859 getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DAG));
8862 if (M != -1 && M == FixIdx)
8864 else if (M != -1 && M == FixFreeIdx)
8867 if (NumFlippedBToBInputs != 0) {
8868 int BPinnedIdx =
8869 BToAInputs.size() == 3 ? TripleNonInputIdx : OneInput;
8870 FixFlippedInputs(BPinnedIdx, BDWord, BToBInputs);
8871 } else {
8872 assert(NumFlippedAToBInputs != 0 && "Impossible given predicates!");
8873 int APinnedIdx =
8874 AToAInputs.size() == 3 ? TripleNonInputIdx : OneInput;
8875 FixFlippedInputs(APinnedIdx, ADWord, AToBInputs);
8876 }
8880 int PSHUFDMask[] = {0, 1, 2, 3};
8881 PSHUFDMask[ADWord] = BDWord;
8882 PSHUFDMask[BDWord] = ADWord;
8883 V = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
8884 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
8885 DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V),
8886 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
8888 // Adjust the mask to match the new locations of A and B.
8890 if (M != -1 && M/2 == ADWord)
8891 M = 2 * BDWord + M % 2;
8892 else if (M != -1 && M/2 == BDWord)
8893 M = 2 * ADWord + M % 2;
8895 // Recurse back into this routine to re-compute state now that this isn't
8896 // a 3 and 1 problem.
8897 return DAG.getVectorShuffle(MVT::v8i16, DL, V, DAG.getUNDEF(MVT::v8i16),
8900 if ((NumLToL == 3 && NumHToL == 1) || (NumLToL == 1 && NumHToL == 3))
8901 return balanceSides(LToLInputs, HToLInputs, HToHInputs, LToHInputs, 0, 4);
8902 else if ((NumHToH == 3 && NumLToH == 1) || (NumHToH == 1 && NumLToH == 3))
8903 return balanceSides(HToHInputs, LToHInputs, LToLInputs, HToLInputs, 4, 0);
8905 // At this point there are at most two inputs to the low and high halves from
8906 // each half. That means the inputs can always be grouped into dwords and
8907 // those dwords can then be moved to the correct half with a dword shuffle.
8908 // We use at most one low and one high word shuffle to collect these paired
8909 // inputs into dwords, and finally a dword shuffle to place them.
8910 int PSHUFLMask[4] = {-1, -1, -1, -1};
8911 int PSHUFHMask[4] = {-1, -1, -1, -1};
8912 int PSHUFDMask[4] = {-1, -1, -1, -1};
8914 // First fix the masks for all the inputs that are staying in their
8915 // original halves. This will then dictate the targets of the cross-half
8917 auto fixInPlaceInputs =
8918 [&PSHUFDMask](ArrayRef<int> InPlaceInputs, ArrayRef<int> IncomingInputs,
8919 MutableArrayRef<int> SourceHalfMask,
8920 MutableArrayRef<int> HalfMask, int HalfOffset) {
8921 if (InPlaceInputs.empty())
8923 if (InPlaceInputs.size() == 1) {
8924 SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
8925 InPlaceInputs[0] - HalfOffset;
8926 PSHUFDMask[InPlaceInputs[0] / 2] = InPlaceInputs[0] / 2;
8929 if (IncomingInputs.empty()) {
8930 // Just fix all of the in place inputs.
8931 for (int Input : InPlaceInputs) {
8932 SourceHalfMask[Input - HalfOffset] = Input - HalfOffset;
8933 PSHUFDMask[Input / 2] = Input / 2;
8938 assert(InPlaceInputs.size() == 2 && "Cannot handle 3 or 4 inputs!");
8939 SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
8940 InPlaceInputs[0] - HalfOffset;
8941 // Put the second input next to the first so that they are packed into
8942 // a dword. We find the adjacent index by toggling the low bit.
8943 int AdjIndex = InPlaceInputs[0] ^ 1;
8944 SourceHalfMask[AdjIndex - HalfOffset] = InPlaceInputs[1] - HalfOffset;
8945 std::replace(HalfMask.begin(), HalfMask.end(), InPlaceInputs[1], AdjIndex);
8946 PSHUFDMask[AdjIndex / 2] = AdjIndex / 2;
8948 fixInPlaceInputs(LToLInputs, HToLInputs, PSHUFLMask, LoMask, 0);
8949 fixInPlaceInputs(HToHInputs, LToHInputs, PSHUFHMask, HiMask, 4);
8951 // Now gather the cross-half inputs and place them into a free dword of
8952 // their target half.
8953 // FIXME: This operation could almost certainly be simplified dramatically to
8954 // look more like the 3-1 fixing operation.
8955 auto moveInputsToRightHalf = [&PSHUFDMask](
8956 MutableArrayRef<int> IncomingInputs, ArrayRef<int> ExistingInputs,
8957 MutableArrayRef<int> SourceHalfMask, MutableArrayRef<int> HalfMask,
8958 MutableArrayRef<int> FinalSourceHalfMask, int SourceOffset,
8960 auto isWordClobbered = [](ArrayRef<int> SourceHalfMask, int Word) {
8961 return SourceHalfMask[Word] != -1 && SourceHalfMask[Word] != Word;
8963 auto isDWordClobbered = [&isWordClobbered](ArrayRef<int> SourceHalfMask,
8965 int LowWord = Word & ~1;
8966 int HighWord = Word | 1;
8967 return isWordClobbered(SourceHalfMask, LowWord) ||
8968 isWordClobbered(SourceHalfMask, HighWord);
8971 if (IncomingInputs.empty())
8974 if (ExistingInputs.empty()) {
8975 // Map any dwords with inputs from them into the right half.
8976 for (int Input : IncomingInputs) {
8977 // If the source half mask maps over the inputs, turn those into
8978 // swaps and use the swapped lane.
8979 if (isWordClobbered(SourceHalfMask, Input - SourceOffset)) {
8980 if (SourceHalfMask[SourceHalfMask[Input - SourceOffset]] == -1) {
8981 SourceHalfMask[SourceHalfMask[Input - SourceOffset]] =
8982 Input - SourceOffset;
8983 // We have to swap the uses in our half mask in one sweep.
8984 for (int &M : HalfMask)
8985 if (M == SourceHalfMask[Input - SourceOffset] + SourceOffset)
8987 else if (M == Input)
8988 M = SourceHalfMask[Input - SourceOffset] + SourceOffset;
8990 assert(SourceHalfMask[SourceHalfMask[Input - SourceOffset]] ==
8991 Input - SourceOffset &&
8992 "Previous placement doesn't match!");
8994 // Note that this correctly re-maps both when we do a swap and when
8995 // we observe the other side of the swap above. We rely on that to
8996 // avoid swapping the members of the input list directly.
8997 Input = SourceHalfMask[Input - SourceOffset] + SourceOffset;
9000 // Map the input's dword into the correct half.
9001 if (PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] == -1)
9002 PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] = Input / 2;
9004 assert(PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] ==
9006 "Previous placement doesn't match!");
9009 // And just directly shift any other-half mask elements to be same-half
9010 // as we will have mirrored the dword containing the element into the
9011 // same position within that half.
9012 for (int &M : HalfMask)
9013 if (M >= SourceOffset && M < SourceOffset + 4) {
9014 M = M - SourceOffset + DestOffset;
9015 assert(M >= 0 && "This should never wrap below zero!");
9020 // Ensure we have the input in a viable dword of its current half. This
9021 // is particularly tricky because the original position may be clobbered
9022 // by inputs being moved and *staying* in that half.
9023 if (IncomingInputs.size() == 1) {
9024 if (isWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
9025 int InputFixed = std::find(std::begin(SourceHalfMask),
9026 std::end(SourceHalfMask), -1) -
9027 std::begin(SourceHalfMask) + SourceOffset;
9028 SourceHalfMask[InputFixed - SourceOffset] =
9029 IncomingInputs[0] - SourceOffset;
9030 std::replace(HalfMask.begin(), HalfMask.end(), IncomingInputs[0],
9032 IncomingInputs[0] = InputFixed;
9034 } else if (IncomingInputs.size() == 2) {
9035 if (IncomingInputs[0] / 2 != IncomingInputs[1] / 2 ||
9036 isDWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
9037 // We have two non-adjacent or clobbered inputs we need to extract from
9038 // the source half. To do this, we need to map them into some adjacent
9039 // dword slot in the source mask.
9040 int InputsFixed[2] = {IncomingInputs[0] - SourceOffset,
9041 IncomingInputs[1] - SourceOffset};
9043 // If there is a free slot in the source half mask adjacent to one of
9044 // the inputs, place the other input in it. We use (Index XOR 1) to
9045 // compute an adjacent index.
9046 if (!isWordClobbered(SourceHalfMask, InputsFixed[0]) &&
9047 SourceHalfMask[InputsFixed[0] ^ 1] == -1) {
9048 SourceHalfMask[InputsFixed[0]] = InputsFixed[0];
9049 SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
9050 InputsFixed[1] = InputsFixed[0] ^ 1;
9051 } else if (!isWordClobbered(SourceHalfMask, InputsFixed[1]) &&
9052 SourceHalfMask[InputsFixed[1] ^ 1] == -1) {
9053 SourceHalfMask[InputsFixed[1]] = InputsFixed[1];
9054 SourceHalfMask[InputsFixed[1] ^ 1] = InputsFixed[0];
9055 InputsFixed[0] = InputsFixed[1] ^ 1;
9056 } else if (SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] == -1 &&
9057 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] == -1) {
9058 // The two inputs are in the same DWord but it is clobbered and the
9059 // adjacent DWord isn't used at all. Move both inputs to the free
9061 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] = InputsFixed[0];
9062 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] = InputsFixed[1];
9063 InputsFixed[0] = 2 * ((InputsFixed[0] / 2) ^ 1);
9064 InputsFixed[1] = 2 * ((InputsFixed[0] / 2) ^ 1) + 1;
9066 // The only way we hit this point is if there is no clobbering
9067 // (because there are no off-half inputs to this half) and there is no
9068 // free slot adjacent to one of the inputs. In this case, we have to
9069 // swap an input with a non-input.
9070 for (int i = 0; i < 4; ++i)
9071 assert((SourceHalfMask[i] == -1 || SourceHalfMask[i] == i) &&
9072 "We can't handle any clobbers here!");
9073 assert(InputsFixed[1] != (InputsFixed[0] ^ 1) &&
9074 "Cannot have adjacent inputs here!");
9076 SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
9077 SourceHalfMask[InputsFixed[1]] = InputsFixed[0] ^ 1;
9079 // We also have to update the final source mask in this case because
9080 // it may need to undo the above swap.
9081 for (int &M : FinalSourceHalfMask)
9082 if (M == (InputsFixed[0] ^ 1) + SourceOffset)
9083 M = InputsFixed[1] + SourceOffset;
9084 else if (M == InputsFixed[1] + SourceOffset)
9085 M = (InputsFixed[0] ^ 1) + SourceOffset;
9087 InputsFixed[1] = InputsFixed[0] ^ 1;
9090 // Point everything at the fixed inputs.
9091 for (int &M : HalfMask)
9092 if (M == IncomingInputs[0])
9093 M = InputsFixed[0] + SourceOffset;
9094 else if (M == IncomingInputs[1])
9095 M = InputsFixed[1] + SourceOffset;
9097 IncomingInputs[0] = InputsFixed[0] + SourceOffset;
9098 IncomingInputs[1] = InputsFixed[1] + SourceOffset;
9101 llvm_unreachable("Unhandled input size!");
9104 // Now hoist the DWord down to the right half.
9105 int FreeDWord = (PSHUFDMask[DestOffset / 2] == -1 ? 0 : 1) + DestOffset / 2;
9106 assert(PSHUFDMask[FreeDWord] == -1 && "DWord not free");
9107 PSHUFDMask[FreeDWord] = IncomingInputs[0] / 2;
9108 for (int &M : HalfMask)
9109 for (int Input : IncomingInputs)
9111 M = FreeDWord * 2 + Input % 2;
9113 moveInputsToRightHalf(HToLInputs, LToLInputs, PSHUFHMask, LoMask, HiMask,
9114 /*SourceOffset*/ 4, /*DestOffset*/ 0);
9115 moveInputsToRightHalf(LToHInputs, HToHInputs, PSHUFLMask, HiMask, LoMask,
9116 /*SourceOffset*/ 0, /*DestOffset*/ 4);
9118 // Now enact all the shuffles we've computed to move the inputs into their
9120 if (!isNoopShuffleMask(PSHUFLMask))
9121 V = DAG.getNode(X86ISD::PSHUFLW, DL, MVT::v8i16, V,
9122 getV4X86ShuffleImm8ForMask(PSHUFLMask, DAG));
9123 if (!isNoopShuffleMask(PSHUFHMask))
9124 V = DAG.getNode(X86ISD::PSHUFHW, DL, MVT::v8i16, V,
9125 getV4X86ShuffleImm8ForMask(PSHUFHMask, DAG));
9126 if (!isNoopShuffleMask(PSHUFDMask))
9127 V = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
9128 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
9129 DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V),
9130 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
9132 // At this point, each half should contain all its inputs, and we can then
9133 // just shuffle them into their final position.
9134 assert(std::count_if(LoMask.begin(), LoMask.end(),
9135 [](int M) { return M >= 4; }) == 0 &&
9136 "Failed to lift all the high half inputs to the low mask!");
9137 assert(std::count_if(HiMask.begin(), HiMask.end(),
9138 [](int M) { return M >= 0 && M < 4; }) == 0 &&
9139 "Failed to lift all the low half inputs to the high mask!");
9141 // Do a half shuffle for the low mask.
9142 if (!isNoopShuffleMask(LoMask))
9143 V = DAG.getNode(X86ISD::PSHUFLW, DL, MVT::v8i16, V,
9144 getV4X86ShuffleImm8ForMask(LoMask, DAG));
9146 // Do a half shuffle with the high mask after shifting its values down.
9147 for (int &M : HiMask)
9150 if (!isNoopShuffleMask(HiMask))
9151 V = DAG.getNode(X86ISD::PSHUFHW, DL, MVT::v8i16, V,
9152 getV4X86ShuffleImm8ForMask(HiMask, DAG));
9157 /// \brief Detect whether the mask pattern should be lowered through
9158 /// interleaving.
9159 ///
9160 /// This essentially tests whether viewing the mask as an interleaving of two
9161 /// sub-sequences reduces the cross-input traffic of a blend operation. If so,
9162 /// lowering it through interleaving is a significantly better strategy.
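///
/// For example, the mask <0, 8, 1, 9, 2, 10, 3, 11> alternates between the two
/// inputs and is served far better by an unpack-style interleave than by
/// blending each half separately.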
9163 static bool shouldLowerAsInterleaving(ArrayRef<int> Mask) {
9164 int NumEvenInputs[2] = {0, 0};
9165 int NumOddInputs[2] = {0, 0};
9166 int NumLoInputs[2] = {0, 0};
9167 int NumHiInputs[2] = {0, 0};
9168 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
9172 int InputIdx = Mask[i] >= Size;
9175 ++NumLoInputs[InputIdx];
9177 ++NumHiInputs[InputIdx];
9180 ++NumEvenInputs[InputIdx];
9182 ++NumOddInputs[InputIdx];
9185 // The minimum number of cross-input results for both the interleaved and
9186 // split cases. If interleaving results in fewer cross-input results, return
9188 int InterleavedCrosses = std::min(NumEvenInputs[1] + NumOddInputs[0],
9189 NumEvenInputs[0] + NumOddInputs[1]);
9190 int SplitCrosses = std::min(NumLoInputs[1] + NumHiInputs[0],
9191 NumLoInputs[0] + NumHiInputs[1]);
9192 return InterleavedCrosses < SplitCrosses;
9195 /// \brief Blend two v8i16 vectors using a naive unpack strategy.
9197 /// This strategy only works when the inputs from each vector fit into a single
9198 /// half of that vector, and generally there are not so many inputs as to leave
9199 /// the in-place shuffles required highly constrained (and thus expensive). It
9200 /// shifts all the inputs into a single side of both input vectors and then
9201 /// uses an unpack to interleave these inputs in a single vector. At that
9202 /// point, we will fall back on the generic single input shuffle lowering.
9203 static SDValue lowerV8I16BasicBlendVectorShuffle(SDLoc DL, SDValue V1,
9205 MutableArrayRef<int> Mask,
9206 const X86Subtarget *Subtarget,
9207 SelectionDAG &DAG) {
9208 assert(V1.getSimpleValueType() == MVT::v8i16 && "Bad input type!");
9209 assert(V2.getSimpleValueType() == MVT::v8i16 && "Bad input type!");
9210 SmallVector<int, 3> LoV1Inputs, HiV1Inputs, LoV2Inputs, HiV2Inputs;
9211 for (int i = 0; i < 8; ++i)
9212 if (Mask[i] >= 0 && Mask[i] < 4)
9213 LoV1Inputs.push_back(i);
9214 else if (Mask[i] >= 4 && Mask[i] < 8)
9215 HiV1Inputs.push_back(i);
9216 else if (Mask[i] >= 8 && Mask[i] < 12)
9217 LoV2Inputs.push_back(i);
9218 else if (Mask[i] >= 12)
9219 HiV2Inputs.push_back(i);
9221 int NumV1Inputs = LoV1Inputs.size() + HiV1Inputs.size();
9222 int NumV2Inputs = LoV2Inputs.size() + HiV2Inputs.size();
9225 assert(NumV1Inputs > 0 && NumV1Inputs <= 3 && "At most 3 inputs supported");
9226 assert(NumV2Inputs > 0 && NumV2Inputs <= 3 && "At most 3 inputs supported");
9227 assert(NumV1Inputs + NumV2Inputs <= 4 && "At most 4 combined inputs");
9229 bool MergeFromLo = LoV1Inputs.size() + LoV2Inputs.size() >=
9230 HiV1Inputs.size() + HiV2Inputs.size();
9232 auto moveInputsToHalf = [&](SDValue V, ArrayRef<int> LoInputs,
9233 ArrayRef<int> HiInputs, bool MoveToLo,
9235 ArrayRef<int> GoodInputs = MoveToLo ? LoInputs : HiInputs;
9236 ArrayRef<int> BadInputs = MoveToLo ? HiInputs : LoInputs;
9237 if (BadInputs.empty())
9240 int MoveMask[] = {-1, -1, -1, -1, -1, -1, -1, -1};
9241 int MoveOffset = MoveToLo ? 0 : 4;
9243 if (GoodInputs.empty()) {
9244 for (int BadInput : BadInputs) {
9245 MoveMask[Mask[BadInput] % 4 + MoveOffset] = Mask[BadInput] - MaskOffset;
9246 Mask[BadInput] = Mask[BadInput] % 4 + MoveOffset + MaskOffset;
9249 if (GoodInputs.size() == 2) {
9250 // If the low inputs are spread across two dwords, pack them into
9252 MoveMask[MoveOffset] = Mask[GoodInputs[0]] - MaskOffset;
9253 MoveMask[MoveOffset + 1] = Mask[GoodInputs[1]] - MaskOffset;
9254 Mask[GoodInputs[0]] = MoveOffset + MaskOffset;
9255 Mask[GoodInputs[1]] = MoveOffset + 1 + MaskOffset;
9257 // Otherwise pin the good inputs.
9258 for (int GoodInput : GoodInputs)
9259 MoveMask[Mask[GoodInput] - MaskOffset] = Mask[GoodInput] - MaskOffset;
9262 if (BadInputs.size() == 2) {
9263 // If we have two bad inputs then there may be either one or two good
9264 // inputs fixed in place. Find a fixed input, and then find the *other*
9265 // two adjacent indices by using modular arithmetic.
9267 std::find_if(std::begin(MoveMask) + MoveOffset, std::end(MoveMask),
9268 [](int M) { return M >= 0; }) -
9269 std::begin(MoveMask);
9271 ((((GoodMaskIdx - MoveOffset) & ~1) + 2) % 4) + MoveOffset;
9272 assert(MoveMask[MoveMaskIdx] == -1 && "Expected empty slot");
9273 assert(MoveMask[MoveMaskIdx + 1] == -1 && "Expected empty slot");
9274 MoveMask[MoveMaskIdx] = Mask[BadInputs[0]] - MaskOffset;
9275 MoveMask[MoveMaskIdx + 1] = Mask[BadInputs[1]] - MaskOffset;
9276 Mask[BadInputs[0]] = MoveMaskIdx + MaskOffset;
9277 Mask[BadInputs[1]] = MoveMaskIdx + 1 + MaskOffset;
9279 assert(BadInputs.size() == 1 && "All sizes handled");
9280 int MoveMaskIdx = std::find(std::begin(MoveMask) + MoveOffset,
9281 std::end(MoveMask), -1) -
9282 std::begin(MoveMask);
9283 MoveMask[MoveMaskIdx] = Mask[BadInputs[0]] - MaskOffset;
9284 Mask[BadInputs[0]] = MoveMaskIdx + MaskOffset;
9288 return DAG.getVectorShuffle(MVT::v8i16, DL, V, DAG.getUNDEF(MVT::v8i16),
9291 V1 = moveInputsToHalf(V1, LoV1Inputs, HiV1Inputs, MergeFromLo,
9293 V2 = moveInputsToHalf(V2, LoV2Inputs, HiV2Inputs, MergeFromLo,
9296 // FIXME: Select an interleaving of the merge of V1 and V2 that minimizes
9297 // cross-half traffic in the final shuffle.
9299 // Munge the mask to be a single-input mask after the unpack merges the
9300 // results.
9301 for (int &M : Mask)
9302 if (M != -1)
9303 M = 2 * (M % 4) + (M / 8);
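// After the UNPCK merge, element i of V1's surviving half lands in lane 2*i
// and element i of V2's surviving half lands in lane 2*i + 1, which is what
// the remapping above encodes (M % 4 is the within-half index and M / 8
// selects the input).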
9305 return DAG.getVectorShuffle(
9306 MVT::v8i16, DL, DAG.getNode(MergeFromLo ? X86ISD::UNPCKL : X86ISD::UNPCKH,
9307 DL, MVT::v8i16, V1, V2),
9308 DAG.getUNDEF(MVT::v8i16), Mask);
9311 /// \brief Generic lowering of 8-lane i16 shuffles.
9313 /// This handles both single-input shuffles and combined shuffle/blends with
9314 /// two inputs. The single input shuffles are immediately delegated to
9315 /// a dedicated lowering routine.
9317 /// The blends are lowered in one of three fundamental ways. If there are few
9318 /// enough inputs, it delegates to a basic UNPCK-based strategy. If the shuffle
9319 /// of the input is significantly cheaper when lowered as an interleaving of
9320 /// the two inputs, try to interleave them. Otherwise, blend the low and high
9321 /// halves of the inputs separately (making them have relatively few inputs)
9322 /// and then concatenate them.
9323 static SDValue lowerV8I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
9324 const X86Subtarget *Subtarget,
9325 SelectionDAG &DAG) {
9327 assert(Op.getSimpleValueType() == MVT::v8i16 && "Bad shuffle type!");
9328 assert(V1.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
9329 assert(V2.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
9330 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
9331 ArrayRef<int> OrigMask = SVOp->getMask();
9332 int MaskStorage[8] = {OrigMask[0], OrigMask[1], OrigMask[2], OrigMask[3],
9333 OrigMask[4], OrigMask[5], OrigMask[6], OrigMask[7]};
9334 MutableArrayRef<int> Mask(MaskStorage);
9336 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
9338 // Whenever we can lower this as a zext, that instruction is strictly faster
9339 // than any alternative.
9340 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
9341 DL, MVT::v8i16, V1, V2, OrigMask, Subtarget, DAG))
9344 auto isV1 = [](int M) { return M >= 0 && M < 8; };
9345 auto isV2 = [](int M) { return M >= 8; };
9347 int NumV1Inputs = std::count_if(Mask.begin(), Mask.end(), isV1);
9348 int NumV2Inputs = std::count_if(Mask.begin(), Mask.end(), isV2);
9350 if (NumV2Inputs == 0)
9351 return lowerV8I16SingleInputVectorShuffle(DL, V1, Mask, Subtarget, DAG);
9353 assert(NumV1Inputs > 0 && "All single-input shuffles should be canonicalized "
9354 "to be V1-input shuffles.");
9356 // Try to use byte shift instructions.
9357 if (SDValue Shift = lowerVectorShuffleAsByteShift(
9358 DL, MVT::v8i16, V1, V2, Mask, DAG))
9361 // There are special ways we can lower some single-element blends.
9362 if (NumV2Inputs == 1)
9363 if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v8i16, DL, V1, V2,
9364 Mask, Subtarget, DAG))
9367 // Use dedicated unpack instructions for masks that match their pattern.
9368 if (isShuffleEquivalent(Mask, 0, 8, 1, 9, 2, 10, 3, 11))
9369 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i16, V1, V2);
9370 if (isShuffleEquivalent(Mask, 4, 12, 5, 13, 6, 14, 7, 15))
9371 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i16, V1, V2);
9373 if (Subtarget->hasSSE41())
9374 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8i16, V1, V2, Mask,
9378 // Try to use byte rotation instructions.
9379 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
9380 DL, MVT::v8i16, V1, V2, Mask, Subtarget, DAG))
9383 if (NumV1Inputs + NumV2Inputs <= 4)
9384 return lowerV8I16BasicBlendVectorShuffle(DL, V1, V2, Mask, Subtarget, DAG);
9386 // Check whether an interleaving lowering is likely to be more efficient.
9387 // This isn't perfect but it is a strong heuristic that tends to work well on
9388 // the kinds of shuffles that show up in practice.
9390 // FIXME: Handle 1x, 2x, and 4x interleaving.
9391 if (shouldLowerAsInterleaving(Mask)) {
9392 // FIXME: Figure out whether we should pack these into the low or high
9395 int EMask[8], OMask[8];
9396 for (int i = 0; i < 4; ++i) {
9397 EMask[i] = Mask[2*i];
9398 OMask[i] = Mask[2*i + 1];
9403 SDValue Evens = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, EMask);
9404 SDValue Odds = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, OMask);
9406 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i16, Evens, Odds);
9409 int LoBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9410 int HiBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9412 for (int i = 0; i < 4; ++i) {
9413 LoBlendMask[i] = Mask[i];
9414 HiBlendMask[i] = Mask[i + 4];
9417 SDValue LoV = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, LoBlendMask);
9418 SDValue HiV = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, HiBlendMask);
9419 LoV = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, LoV);
9420 HiV = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, HiV);
9422 return DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
9423 DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2i64, LoV, HiV));
9426 /// \brief Check whether a compaction lowering can be done by dropping even
9427 /// elements and compute how many times even elements must be dropped.
9429 /// This handles shuffles which take every Nth element where N is a power of
9430 /// two. Example shuffle masks:
9432 /// N = 1: 0, 2, 4, 6, 8, 10, 12, 14, 0, 2, 4, 6, 8, 10, 12, 14
9433 /// N = 1: 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
9434 /// N = 2: 0, 4, 8, 12, 0, 4, 8, 12, 0, 4, 8, 12, 0, 4, 8, 12
9435 /// N = 2: 0, 4, 8, 12, 16, 20, 24, 28, 0, 4, 8, 12, 16, 20, 24, 28
9436 /// N = 3: 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8
9437 /// N = 3: 0, 8, 16, 24, 0, 8, 16, 24, 0, 8, 16, 24, 0, 8, 16, 24
9439 /// Any of these lanes can of course be undef.
9441 /// This routine only supports N <= 3.
9442 /// FIXME: Evaluate whether either AVX or AVX-512 have any opportunities here
9445 /// \returns N above, or the number of times even elements must be dropped if
9446 /// there is such a number. Otherwise returns zero.
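///
/// Hand-worked illustration (not compiler-verified): for the single-input
/// v16i8 mask <0, 2, 4, 6, 8, 10, 12, 14, 0, 2, 4, 6, 8, 10, 12, 14> every
/// entry equals (i * 2) % 16, so the routine returns N = 1 and a single
/// masking + PACKUS round in the caller reproduces the requested shuffle.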
9447 static int canLowerByDroppingEvenElements(ArrayRef<int> Mask) {
9448 // Figure out whether we're looping over two inputs or just one.
9449 bool IsSingleInput = isSingleInputShuffleMask(Mask);
9451 // The modulus for the shuffle vector entries is based on whether this is
9452 // a single input or not.
9453 int ShuffleModulus = Mask.size() * (IsSingleInput ? 1 : 2);
9454 assert(isPowerOf2_32((uint32_t)ShuffleModulus) &&
9455 "We should only be called with masks with a power-of-2 size!");
9457 uint64_t ModMask = (uint64_t)ShuffleModulus - 1;
9459 // We track whether the input is viable for all power-of-2 strides 2^1, 2^2,
9460 // and 2^3 simultaneously. This is because we may have ambiguity with
9461 // partially undef inputs.
9462 bool ViableForN[3] = {true, true, true};
9464 for (int i = 0, e = Mask.size(); i < e; ++i) {
9465 // Ignore undef lanes, we'll optimistically collapse them to the pattern we
9470 bool IsAnyViable = false;
9471 for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
9472 if (ViableForN[j]) {
9475 // The shuffle mask must be equal to (i * 2^N) % M.
9476 if ((uint64_t)Mask[i] == (((uint64_t)i << N) & ModMask))
9479 ViableForN[j] = false;
9481 // Early exit if we exhaust the possible powers of two.
9486 for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
9490 // Return 0 as there is no viable power of two.
9494 /// \brief Generic lowering of v16i8 shuffles.
9496 /// This is a hybrid strategy to lower v16i8 vectors. It first attempts to
9497 /// detect any complexity reducing interleaving. If that doesn't help, it uses
9498 /// UNPCK to spread the i8 elements across two i16-element vectors, and uses
9499 /// the existing lowering for v8i16 blends on each half, finally PACK-ing them back together.
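///
/// (A rough sketch of that fallback path, hand-worked rather than
/// compiler-verified: widen the bytes to words with an AND mask or an UNPCK
/// against zero, run two v8i16 shuffles, then PACKUS the halves back into a
/// v16i8.)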
9501 static SDValue lowerV16I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
9502 const X86Subtarget *Subtarget,
9503 SelectionDAG &DAG) {
9505 assert(Op.getSimpleValueType() == MVT::v16i8 && "Bad shuffle type!");
9506 assert(V1.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
9507 assert(V2.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
9508 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
9509 ArrayRef<int> OrigMask = SVOp->getMask();
9510 assert(OrigMask.size() == 16 && "Unexpected mask size for v16 shuffle!");
9512 // Try to use byte shift instructions.
9513 if (SDValue Shift = lowerVectorShuffleAsByteShift(
9514 DL, MVT::v16i8, V1, V2, OrigMask, DAG))
9517 // Try to use byte rotation instructions.
9518 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
9519 DL, MVT::v16i8, V1, V2, OrigMask, Subtarget, DAG))
9522 // Try to use a zext lowering.
9523 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
9524 DL, MVT::v16i8, V1, V2, OrigMask, Subtarget, DAG))
9527 int MaskStorage[16] = {
9528 OrigMask[0], OrigMask[1], OrigMask[2], OrigMask[3],
9529 OrigMask[4], OrigMask[5], OrigMask[6], OrigMask[7],
9530 OrigMask[8], OrigMask[9], OrigMask[10], OrigMask[11],
9531 OrigMask[12], OrigMask[13], OrigMask[14], OrigMask[15]};
9532 MutableArrayRef<int> Mask(MaskStorage);
9533 MutableArrayRef<int> LoMask = Mask.slice(0, 8);
9534 MutableArrayRef<int> HiMask = Mask.slice(8, 8);
9537 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 16; });
9539 // For single-input shuffles, there are some nicer lowering tricks we can use.
9540 if (NumV2Elements == 0) {
9541 // Check for being able to broadcast a single element.
9542 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v16i8, DL, V1,
9543 Mask, Subtarget, DAG))
9546 // Check whether we can widen this to an i16 shuffle by duplicating bytes.
9547 // Notably, this handles splat and partial-splat shuffles more efficiently.
9548 // However, it only makes sense if the pre-duplication shuffle simplifies
9549 // things significantly. Currently, this means we need to be able to
9550 // express the pre-duplication shuffle as an i16 shuffle.
9552 // FIXME: We should check for other patterns which can be widened into an
9553 // i16 shuffle as well.
9554 auto canWidenViaDuplication = [](ArrayRef<int> Mask) {
9555 for (int i = 0; i < 16; i += 2)
9556 if (Mask[i] != -1 && Mask[i + 1] != -1 && Mask[i] != Mask[i + 1])
9561 auto tryToWidenViaDuplication = [&]() -> SDValue {
9562 if (!canWidenViaDuplication(Mask))
9564 SmallVector<int, 4> LoInputs;
9565 std::copy_if(Mask.begin(), Mask.end(), std::back_inserter(LoInputs),
9566 [](int M) { return M >= 0 && M < 8; });
9567 std::sort(LoInputs.begin(), LoInputs.end());
9568 LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()),
9570 SmallVector<int, 4> HiInputs;
9571 std::copy_if(Mask.begin(), Mask.end(), std::back_inserter(HiInputs),
9572 [](int M) { return M >= 8; });
9573 std::sort(HiInputs.begin(), HiInputs.end());
9574 HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()),
9577 bool TargetLo = LoInputs.size() >= HiInputs.size();
9578 ArrayRef<int> InPlaceInputs = TargetLo ? LoInputs : HiInputs;
9579 ArrayRef<int> MovingInputs = TargetLo ? HiInputs : LoInputs;
9581 int PreDupI16Shuffle[] = {-1, -1, -1, -1, -1, -1, -1, -1};
9582 SmallDenseMap<int, int, 8> LaneMap;
9583 for (int I : InPlaceInputs) {
9584 PreDupI16Shuffle[I/2] = I/2;
9587 int j = TargetLo ? 0 : 4, je = j + 4;
9588 for (int i = 0, ie = MovingInputs.size(); i < ie; ++i) {
9589 // Check if j is already a shuffle of this input. This happens when
9590 // there are two adjacent bytes after we move the low one.
9591 if (PreDupI16Shuffle[j] != MovingInputs[i] / 2) {
9592 // If we haven't yet mapped the input, search for a slot into which
9594 while (j < je && PreDupI16Shuffle[j] != -1)
9598 // We can't place the inputs into a single half with a simple i16 shuffle, so bail.
9601 // Map this input with the i16 shuffle.
9602 PreDupI16Shuffle[j] = MovingInputs[i] / 2;
9605 // Update the lane map based on the mapping we ended up with.
9606 LaneMap[MovingInputs[i]] = 2 * j + MovingInputs[i] % 2;
9609 ISD::BITCAST, DL, MVT::v16i8,
9610 DAG.getVectorShuffle(MVT::v8i16, DL,
9611 DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1),
9612 DAG.getUNDEF(MVT::v8i16), PreDupI16Shuffle));
9614 // Unpack the bytes to form the i16s that will be shuffled into place.
9615 V1 = DAG.getNode(TargetLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
9616 MVT::v16i8, V1, V1);
9618 int PostDupI16Shuffle[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9619 for (int i = 0; i < 16; ++i)
9620 if (Mask[i] != -1) {
9621 int MappedMask = LaneMap[Mask[i]] - (TargetLo ? 0 : 8);
9622 assert(MappedMask < 8 && "Invalid v8 shuffle mask!");
9623 if (PostDupI16Shuffle[i / 2] == -1)
9624 PostDupI16Shuffle[i / 2] = MappedMask;
9626 assert(PostDupI16Shuffle[i / 2] == MappedMask &&
9627 "Conflicting entrties in the original shuffle!");
9630 ISD::BITCAST, DL, MVT::v16i8,
9631 DAG.getVectorShuffle(MVT::v8i16, DL,
9632 DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1),
9633 DAG.getUNDEF(MVT::v8i16), PostDupI16Shuffle));
9635 if (SDValue V = tryToWidenViaDuplication())
9639 // Check whether an interleaving lowering is likely to be more efficient.
9640 // This isn't perfect but it is a strong heuristic that tends to work well on
9641 // the kinds of shuffles that show up in practice.
9643 // FIXME: We need to handle other interleaving widths (i16, i32, ...).
9644 if (shouldLowerAsInterleaving(Mask)) {
9645 int NumLoHalf = std::count_if(Mask.begin(), Mask.end(), [](int M) {
9646 return (M >= 0 && M < 8) || (M >= 16 && M < 24);
9648 int NumHiHalf = std::count_if(Mask.begin(), Mask.end(), [](int M) {
9649 return (M >= 8 && M < 16) || M >= 24;
9651 int EMask[16] = {-1, -1, -1, -1, -1, -1, -1, -1,
9652 -1, -1, -1, -1, -1, -1, -1, -1};
9653 int OMask[16] = {-1, -1, -1, -1, -1, -1, -1, -1,
9654 -1, -1, -1, -1, -1, -1, -1, -1};
9655 bool UnpackLo = NumLoHalf >= NumHiHalf;
9656 MutableArrayRef<int> TargetEMask(UnpackLo ? EMask : EMask + 8, 8);
9657 MutableArrayRef<int> TargetOMask(UnpackLo ? OMask : OMask + 8, 8);
9658 for (int i = 0; i < 8; ++i) {
9659 TargetEMask[i] = Mask[2 * i];
9660 TargetOMask[i] = Mask[2 * i + 1];
9663 SDValue Evens = DAG.getVectorShuffle(MVT::v16i8, DL, V1, V2, EMask);
9664 SDValue Odds = DAG.getVectorShuffle(MVT::v16i8, DL, V1, V2, OMask);
9666 return DAG.getNode(UnpackLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
9667 MVT::v16i8, Evens, Odds);
9670 // Check for SSSE3 which lets us lower all v16i8 shuffles much more directly
9671 // with PSHUFB. It is important to do this before we attempt to generate any
9672 // blends but after all of the single-input lowerings. If the single input
9673 // lowerings can find an instruction sequence that is faster than a PSHUFB, we
9674 // want to preserve that and we can DAG combine any longer sequences into
9675 // a PSHUFB in the end. But once we start blending from multiple inputs,
9676 // the complexity of DAG combining bad patterns back into PSHUFB is too high,
9677 // and there are *very* few patterns that would actually be faster than the
9678 // PSHUFB approach because of its ability to zero lanes.
9680 // FIXME: The only exceptions to the above are blends which are exact
9681 // interleavings with direct instructions supporting them. We currently don't
9682 // handle those well here.
9683 if (Subtarget->hasSSSE3()) {
9686 bool V1InUse = false;
9687 bool V2InUse = false;
9688 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
9690 for (int i = 0; i < 16; ++i) {
9691 if (Mask[i] == -1) {
9692 V1Mask[i] = V2Mask[i] = DAG.getUNDEF(MVT::i8);
9694 const int ZeroMask = 0x80;
9695 int V1Idx = (Mask[i] < 16 ? Mask[i] : ZeroMask);
9696 int V2Idx = (Mask[i] < 16 ? ZeroMask : Mask[i] - 16);
9698 V1Idx = V2Idx = ZeroMask;
9699 V1Mask[i] = DAG.getConstant(V1Idx, MVT::i8);
9700 V2Mask[i] = DAG.getConstant(V2Idx, MVT::i8);
9701 V1InUse |= (ZeroMask != V1Idx);
9702 V2InUse |= (ZeroMask != V2Idx);
9707 V1 = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, V1,
9708 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, V1Mask));
9710 V2 = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, V2,
9711 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, V2Mask));
9713 // If we need shuffled inputs from both, blend the two.
9714 if (V1InUse && V2InUse)
9715 return DAG.getNode(ISD::OR, DL, MVT::v16i8, V1, V2);
9717 return V1; // Single inputs are easy.
9719 return V2; // Single inputs are easy.
9720 // Shuffling to a zeroable vector.
9721 return getZeroVector(MVT::v16i8, Subtarget, DAG, DL);
9724 // There are special ways we can lower some single-element blends.
9725 if (NumV2Elements == 1)
9726 if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v16i8, DL, V1, V2,
9727 Mask, Subtarget, DAG))
9730 // Check whether a compaction lowering can be done. This handles shuffles
9731 // which take every Nth element where N is a power of two. See the helper function for details.
9734 // We special case these as they can be particularly efficiently handled with
9735 // the PACKUSWB instruction on x86 and they show up in common patterns of
9736 // rearranging bytes to truncate wide elements.
9737 if (int NumEvenDrops = canLowerByDroppingEvenElements(Mask)) {
9738 // NumEvenDrops is the power of two stride of the elements. Another way of
9739 // thinking about it is that we need to drop the even elements this many
9740 // times to get the original input.
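// For instance (hand-worked, not compiler-verified): a mask keeping every
// other byte gives NumEvenDrops == 1, so one AND with the 0x00FF-per-word
// pattern plus a single PACKUS suffices; a mask keeping every fourth byte
// gives NumEvenDrops == 2 and needs one extra PACKUS round in the loop below.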
9741 bool IsSingleInput = isSingleInputShuffleMask(Mask);
9743 // First we need to zero all the dropped bytes.
9744 assert(NumEvenDrops <= 3 &&
9745 "No support for dropping even elements more than 3 times.");
9746 // We use the mask type to pick which bytes are preserved based on how many
9747 // elements are dropped.
9748 MVT MaskVTs[] = { MVT::v8i16, MVT::v4i32, MVT::v2i64 };
9749 SDValue ByteClearMask =
9750 DAG.getNode(ISD::BITCAST, DL, MVT::v16i8,
9751 DAG.getConstant(0xFF, MaskVTs[NumEvenDrops - 1]));
9752 V1 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V1, ByteClearMask);
9754 V2 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V2, ByteClearMask);
9756 // Now pack things back together.
9757 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1);
9758 V2 = IsSingleInput ? V1 : DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V2);
9759 SDValue Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, V1, V2);
9760 for (int i = 1; i < NumEvenDrops; ++i) {
9761 Result = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, Result);
9762 Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, Result, Result);
9768 int V1LoBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9769 int V1HiBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9770 int V2LoBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9771 int V2HiBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9773 auto buildBlendMasks = [](MutableArrayRef<int> HalfMask,
9774 MutableArrayRef<int> V1HalfBlendMask,
9775 MutableArrayRef<int> V2HalfBlendMask) {
9776 for (int i = 0; i < 8; ++i)
9777 if (HalfMask[i] >= 0 && HalfMask[i] < 16) {
9778 V1HalfBlendMask[i] = HalfMask[i];
9780 } else if (HalfMask[i] >= 16) {
9781 V2HalfBlendMask[i] = HalfMask[i] - 16;
9782 HalfMask[i] = i + 8;
9785 buildBlendMasks(LoMask, V1LoBlendMask, V2LoBlendMask);
9786 buildBlendMasks(HiMask, V1HiBlendMask, V2HiBlendMask);
9788 SDValue Zero = getZeroVector(MVT::v8i16, Subtarget, DAG, DL);
9790 auto buildLoAndHiV8s = [&](SDValue V, MutableArrayRef<int> LoBlendMask,
9791 MutableArrayRef<int> HiBlendMask) {
9793 // Check if any of the odd lanes in the v16i8 are used. If not, we can mask
9794 // them out and avoid using UNPCK{L,H} to extract the elements of V as i16s.
9796 if (std::none_of(LoBlendMask.begin(), LoBlendMask.end(),
9797 [](int M) { return M >= 0 && M % 2 == 1; }) &&
9798 std::none_of(HiBlendMask.begin(), HiBlendMask.end(),
9799 [](int M) { return M >= 0 && M % 2 == 1; })) {
9800 // Use a mask to drop the high bytes.
9801 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V);
9802 V1 = DAG.getNode(ISD::AND, DL, MVT::v8i16, V1,
9803 DAG.getConstant(0x00FF, MVT::v8i16));
9805 // This will be a single vector shuffle instead of a blend so nuke V2.
9806 V2 = DAG.getUNDEF(MVT::v8i16);
9808 // Squash the masks to point directly into V1.
9809 for (int &M : LoBlendMask)
9812 for (int &M : HiBlendMask)
9816 // Otherwise just unpack the low half of V into V1 and the high half into
9817 // V2 so that we can blend them as i16s.
9818 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
9819 DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i8, V, Zero));
9820 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
9821 DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i8, V, Zero));
9824 SDValue BlendedLo = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, LoBlendMask);
9825 SDValue BlendedHi = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, HiBlendMask);
9826 return std::make_pair(BlendedLo, BlendedHi);
9828 SDValue V1Lo, V1Hi, V2Lo, V2Hi;
9829 std::tie(V1Lo, V1Hi) = buildLoAndHiV8s(V1, V1LoBlendMask, V1HiBlendMask);
9830 std::tie(V2Lo, V2Hi) = buildLoAndHiV8s(V2, V2LoBlendMask, V2HiBlendMask);
9832 SDValue LoV = DAG.getVectorShuffle(MVT::v8i16, DL, V1Lo, V2Lo, LoMask);
9833 SDValue HiV = DAG.getVectorShuffle(MVT::v8i16, DL, V1Hi, V2Hi, HiMask);
9835 return DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, LoV, HiV);
9838 /// \brief Dispatching routine to lower various 128-bit x86 vector shuffles.
9840 /// This routine breaks down the specific type of 128-bit shuffle and
9841 /// dispatches to the lowering routines accordingly.
9842 static SDValue lower128BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
9843 MVT VT, const X86Subtarget *Subtarget,
9844 SelectionDAG &DAG) {
9845 switch (VT.SimpleTy) {
9847 return lowerV2I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
9849 return lowerV2F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
9851 return lowerV4I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
9853 return lowerV4F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
9855 return lowerV8I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
9857 return lowerV16I8VectorShuffle(Op, V1, V2, Subtarget, DAG);
9860 llvm_unreachable("Unimplemented!");
9864 /// \brief Helper function to test whether a shuffle mask could be
9865 /// simplified by widening the elements being shuffled.
9867 /// Appends the mask for wider elements in WidenedMask if valid. Otherwise
9868 /// leaves it in an unspecified state.
9870 /// NOTE: This must handle normal vector shuffle masks and *target* vector
9871 /// shuffle masks. The latter have the special property of a '-2' representing
9872 /// a zeroed lane of a vector.
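///
/// Hand-worked example (not compiler-verified): the v4 mask <0, 1, 6, 7>
/// widens to the v2 mask <0, 3>, since each pair is adjacent and starts on
/// an even element, whereas <0, 2, 4, 6> cannot be widened because no pair
/// forms an aligned, contiguous pair of elements.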
9873 static bool canWidenShuffleElements(ArrayRef<int> Mask,
9874 SmallVectorImpl<int> &WidenedMask) {
9875 for (int i = 0, Size = Mask.size(); i < Size; i += 2) {
9876 // If both elements are undef, it's trivial.
9877 if (Mask[i] == SM_SentinelUndef && Mask[i + 1] == SM_SentinelUndef) {
9878 WidenedMask.push_back(SM_SentinelUndef);
9882 // Check for an undef mask and a mask value properly aligned to fit with
9883 // a pair of values. If we find such a case, use the non-undef mask's value.
9884 if (Mask[i] == SM_SentinelUndef && Mask[i + 1] >= 0 && Mask[i + 1] % 2 == 1) {
9885 WidenedMask.push_back(Mask[i + 1] / 2);
9888 if (Mask[i + 1] == SM_SentinelUndef && Mask[i] >= 0 && Mask[i] % 2 == 0) {
9889 WidenedMask.push_back(Mask[i] / 2);
9893 // When zeroing, we need to spread the zeroing across both lanes to widen.
9894 if (Mask[i] == SM_SentinelZero || Mask[i + 1] == SM_SentinelZero) {
9895 if ((Mask[i] == SM_SentinelZero || Mask[i] == SM_SentinelUndef) &&
9896 (Mask[i + 1] == SM_SentinelZero || Mask[i + 1] == SM_SentinelUndef)) {
9897 WidenedMask.push_back(SM_SentinelZero);
9903 // Finally check if the two mask values are adjacent and aligned with an element pair.
9905 if (Mask[i] != SM_SentinelUndef && Mask[i] % 2 == 0 && Mask[i] + 1 == Mask[i + 1]) {
9906 WidenedMask.push_back(Mask[i] / 2);
9910 // Otherwise we can't safely widen the elements used in this shuffle.
9913 assert(WidenedMask.size() == Mask.size() / 2 &&
9914 "Incorrect size of mask after widening the elements!");
9919 /// \brief Generic routine to split a vector shuffle into half-sized shuffles.
9921 /// This routine just extracts two subvectors, shuffles them independently, and
9922 /// then concatenates them back together. This should work effectively with all
9923 /// AVX vector shuffle types.
9924 static SDValue splitAndLowerVectorShuffle(SDLoc DL, MVT VT, SDValue V1,
9925 SDValue V2, ArrayRef<int> Mask,
9926 SelectionDAG &DAG) {
9927 assert(VT.getSizeInBits() >= 256 &&
9928 "Only for 256-bit or wider vector shuffles!");
9929 assert(V1.getSimpleValueType() == VT && "Bad operand type!");
9930 assert(V2.getSimpleValueType() == VT && "Bad operand type!");
9932 ArrayRef<int> LoMask = Mask.slice(0, Mask.size() / 2);
9933 ArrayRef<int> HiMask = Mask.slice(Mask.size() / 2);
9935 int NumElements = VT.getVectorNumElements();
9936 int SplitNumElements = NumElements / 2;
9937 MVT ScalarVT = VT.getScalarType();
9938 MVT SplitVT = MVT::getVectorVT(ScalarVT, NumElements / 2);
9940 SDValue LoV1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, V1,
9941 DAG.getIntPtrConstant(0));
9942 SDValue HiV1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, V1,
9943 DAG.getIntPtrConstant(SplitNumElements));
9944 SDValue LoV2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, V2,
9945 DAG.getIntPtrConstant(0));
9946 SDValue HiV2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, V2,
9947 DAG.getIntPtrConstant(SplitNumElements));
9949 // Now create two 4-way blends of these half-width vectors.
9950 auto HalfBlend = [&](ArrayRef<int> HalfMask) {
9951 bool UseLoV1 = false, UseHiV1 = false, UseLoV2 = false, UseHiV2 = false;
9952 SmallVector<int, 32> V1BlendMask, V2BlendMask, BlendMask;
9953 for (int i = 0; i < SplitNumElements; ++i) {
9954 int M = HalfMask[i];
9955 if (M >= NumElements) {
9956 if (M >= NumElements + SplitNumElements)
9960 V2BlendMask.push_back(M - NumElements);
9961 V1BlendMask.push_back(-1);
9962 BlendMask.push_back(SplitNumElements + i);
9963 } else if (M >= 0) {
9964 if (M >= SplitNumElements)
9968 V2BlendMask.push_back(-1);
9969 V1BlendMask.push_back(M);
9970 BlendMask.push_back(i);
9972 V2BlendMask.push_back(-1);
9973 V1BlendMask.push_back(-1);
9974 BlendMask.push_back(-1);
9978 // Because the lowering happens after all combining takes place, we need to
9979 // manually combine these blend masks as much as possible so that we create
9980 // a minimal number of high-level vector shuffle nodes.
9982 // First try just blending the halves of V1 or V2.
9983 if (!UseLoV1 && !UseHiV1 && !UseLoV2 && !UseHiV2)
9984 return DAG.getUNDEF(SplitVT);
9985 if (!UseLoV2 && !UseHiV2)
9986 return DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
9987 if (!UseLoV1 && !UseHiV1)
9988 return DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
9990 SDValue V1Blend, V2Blend;
9991 if (UseLoV1 && UseHiV1) {
9993 DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
9995 // We only use half of V1 so map the usage down into the final blend mask.
9996 V1Blend = UseLoV1 ? LoV1 : HiV1;
9997 for (int i = 0; i < SplitNumElements; ++i)
9998 if (BlendMask[i] >= 0 && BlendMask[i] < SplitNumElements)
9999 BlendMask[i] = V1BlendMask[i] - (UseLoV1 ? 0 : SplitNumElements);
10001 if (UseLoV2 && UseHiV2) {
10003 DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
10005 // We only use half of V2 so map the usage down into the final blend mask.
10006 V2Blend = UseLoV2 ? LoV2 : HiV2;
10007 for (int i = 0; i < SplitNumElements; ++i)
10008 if (BlendMask[i] >= SplitNumElements)
10009 BlendMask[i] = V2BlendMask[i] + (UseLoV2 ? SplitNumElements : 0);
10011 return DAG.getVectorShuffle(SplitVT, DL, V1Blend, V2Blend, BlendMask);
10013 SDValue Lo = HalfBlend(LoMask);
10014 SDValue Hi = HalfBlend(HiMask);
10015 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
10018 /// \brief Either split a vector in halves or decompose the shuffles and the blend.
10021 /// This is provided as a good fallback for many lowerings of non-single-input
10022 /// shuffles with more than one 128-bit lane. In those cases, we want to select
10023 /// between splitting the shuffle into 128-bit components and stitching those
10024 /// back together vs. extracting the single-input shuffles and blending those results.
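///
/// Rough, hand-worked illustration (not compiler-verified): a v4f64 mask
/// such as <0, 4, 0, 4>, where every V1 reference is element 0 and every V2
/// reference is element 4, is handled as two broadcasts plus a blend, while
/// a mask whose inputs all come from one 128-bit lane of each operand is
/// split into two 128-bit shuffles instead.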
10026 static SDValue lowerVectorShuffleAsSplitOrBlend(SDLoc DL, MVT VT, SDValue V1,
10027 SDValue V2, ArrayRef<int> Mask,
10028 SelectionDAG &DAG) {
10029 assert(!isSingleInputShuffleMask(Mask) && "This routine must not be used to "
10030 "lower single-input shuffles as it "
10031 "could then recurse on itself.");
10032 int Size = Mask.size();
10034 // If this can be modeled as a broadcast of two elements followed by a blend,
10035 // prefer that lowering. This is especially important because broadcasts can
10036 // often fold with memory operands.
10037 auto DoBothBroadcast = [&] {
10038 int V1BroadcastIdx = -1, V2BroadcastIdx = -1;
10041 if (V2BroadcastIdx == -1)
10042 V2BroadcastIdx = M - Size;
10043 else if (M - Size != V2BroadcastIdx)
10045 } else if (M >= 0) {
10046 if (V1BroadcastIdx == -1)
10047 V1BroadcastIdx = M;
10048 else if (M != V1BroadcastIdx)
10053 if (DoBothBroadcast())
10054 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask,
10057 // If the inputs all stem from a single 128-bit lane of each input, then we
10058 // split them rather than blending because the split will decompose to
10059 // unusually few instructions.
10060 int LaneCount = VT.getSizeInBits() / 128;
10061 int LaneSize = Size / LaneCount;
10062 SmallBitVector LaneInputs[2];
10063 LaneInputs[0].resize(LaneCount, false);
10064 LaneInputs[1].resize(LaneCount, false);
10065 for (int i = 0; i < Size; ++i)
10067 LaneInputs[Mask[i] / Size][(Mask[i] % Size) / LaneSize] = true;
10068 if (LaneInputs[0].count() <= 1 && LaneInputs[1].count() <= 1)
10069 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
10071 // Otherwise, just fall back to decomposed shuffles and a blend. This requires
10072 // that the decomposed single-input shuffles don't end up here.
10073 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask, DAG);
10076 /// \brief Lower a vector shuffle crossing multiple 128-bit lanes as
10077 /// a permutation and blend of those lanes.
10079 /// This essentially blends the out-of-lane inputs to each lane into the lane
10080 /// from a permuted copy of the vector. This lowering strategy results in four
10081 /// instructions in the worst case for a single-input cross lane shuffle which
10082 /// is lower than any other fully general cross-lane shuffle strategy I'm aware
10083 /// of. Special cases for each particular shuffle pattern should be handled
10084 /// prior to trying this lowering.
10085 static SDValue lowerVectorShuffleAsLanePermuteAndBlend(SDLoc DL, MVT VT,
10086 SDValue V1, SDValue V2,
10087 ArrayRef<int> Mask,
10088 SelectionDAG &DAG) {
10089 // FIXME: This should probably be generalized for 512-bit vectors as well.
10090 assert(VT.getSizeInBits() == 256 && "Only for 256-bit vector shuffles!");
10091 int LaneSize = Mask.size() / 2;
10093 // If there are only inputs from one 128-bit lane, splitting will in fact be
10094 // less expensive. The flags track whether the given lane contains an element
10095 // that crosses to another lane.
10096 bool LaneCrossing[2] = {false, false};
10097 for (int i = 0, Size = Mask.size(); i < Size; ++i)
10098 if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
10099 LaneCrossing[(Mask[i] % Size) / LaneSize] = true;
10100 if (!LaneCrossing[0] || !LaneCrossing[1])
10101 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
10103 if (isSingleInputShuffleMask(Mask)) {
10104 SmallVector<int, 32> FlippedBlendMask;
10105 for (int i = 0, Size = Mask.size(); i < Size; ++i)
10106 FlippedBlendMask.push_back(
10107 Mask[i] < 0 ? -1 : (((Mask[i] % Size) / LaneSize == i / LaneSize)
10109 : Mask[i] % LaneSize +
10110 (i / LaneSize) * LaneSize + Size));
10112 // Flip the vector, and blend the results which should now be in-lane. The
10113 // VPERM2X128 mask uses the low 2 bits for the low source and bits 4 and
10114 // 5 for the high source. The value 3 selects the high half of source 2 and
10115 // the value 2 selects the low half of source 2. We only use source 2 to
10116 // allow folding it into a memory operand.
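// As a hand-worked example (not compiler-verified): with this immediate
// (0x23) the low 128 bits of Flipped are V1's high half and the high 128
// bits are V1's low half, so every cross-lane element of the original mask
// is now available in-lane for the final shuffle below.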
10117 unsigned PERMMask = 3 | 2 << 4;
10118 SDValue Flipped = DAG.getNode(X86ISD::VPERM2X128, DL, VT, DAG.getUNDEF(VT),
10119 V1, DAG.getConstant(PERMMask, MVT::i8));
10120 return DAG.getVectorShuffle(VT, DL, V1, Flipped, FlippedBlendMask);
10123 // This now reduces to two single-input shuffles of V1 and V2 which at worst
10124 // will be handled by the above logic and a blend of the results, much like
10125 // other patterns in AVX.
10126 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask, DAG);
10129 /// \brief Handle lowering 2-lane 128-bit shuffles.
10130 static SDValue lowerV2X128VectorShuffle(SDLoc DL, MVT VT, SDValue V1,
10131 SDValue V2, ArrayRef<int> Mask,
10132 const X86Subtarget *Subtarget,
10133 SelectionDAG &DAG) {
10134 // Blends are faster and handle all the non-lane-crossing cases.
10135 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, VT, V1, V2, Mask,
10139 MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(),
10140 VT.getVectorNumElements() / 2);
10141 // Check for patterns which can be matched with a single insert of a 128-bit subvector.
10143 if (isShuffleEquivalent(Mask, 0, 1, 0, 1) ||
10144 isShuffleEquivalent(Mask, 0, 1, 4, 5)) {
10145 SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
10146 DAG.getIntPtrConstant(0));
10147 SDValue HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT,
10148 Mask[2] < 4 ? V1 : V2, DAG.getIntPtrConstant(0));
10149 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LoV, HiV);
10151 if (isShuffleEquivalent(Mask, 0, 1, 6, 7)) {
10152 SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
10153 DAG.getIntPtrConstant(0));
10154 SDValue HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V2,
10155 DAG.getIntPtrConstant(2));
10156 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LoV, HiV);
10159 // Otherwise form a 128-bit permutation.
10160 // FIXME: Detect zero-vector inputs and use the VPERM2X128 to zero that half.
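// Hand-worked example (not compiler-verified): a widened mask selecting
// V1's high half and then V2's low half has Mask[0] == 2 and Mask[2] == 4,
// giving PermMask = 1 | (2 << 4) = 0x21.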
10161 unsigned PermMask = Mask[0] / 2 | (Mask[2] / 2) << 4;
10162 return DAG.getNode(X86ISD::VPERM2X128, DL, VT, V1, V2,
10163 DAG.getConstant(PermMask, MVT::i8));
10166 /// \brief Lower a vector shuffle by first fixing the 128-bit lanes and then
10167 /// shuffling each lane.
10169 /// This will only succeed when the result of fixing the 128-bit lanes results
10170 /// in a single-input non-lane-crossing shuffle with a repeating shuffle mask in
10171 /// each 128-bit lane. This handles many cases where we can quickly blend away
10172 /// the lane crosses early and then use simpler shuffles within each lane.
10174 /// FIXME: It might be worthwhile at some point to support this without
10175 /// requiring the 128-bit lane-relative shuffles to be repeating, but currently
10176 /// in x86 only floating point has interesting non-repeating shuffles, and even
10177 /// those are still *marginally* more expensive.
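///
/// Hand-worked example (not compiler-verified): the v8f32 mask
/// <9, 8, 11, 10, 5, 4, 7, 6> first gets its lanes fixed by a 64-bit-element
/// shuffle placing V2's low lane in the low half and V1's high lane in the
/// high half; the repeating in-lane mask <1, 0, 3, 2> then finishes the job
/// without crossing lanes.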
10178 static SDValue lowerVectorShuffleByMerging128BitLanes(
10179 SDLoc DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
10180 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
10181 assert(!isSingleInputShuffleMask(Mask) &&
10182 "This is only useful with multiple inputs.");
10184 int Size = Mask.size();
10185 int LaneSize = 128 / VT.getScalarSizeInBits();
10186 int NumLanes = Size / LaneSize;
10187 assert(NumLanes > 1 && "Only handles 256-bit and wider shuffles.");
10189 // See if we can build a hypothetical 128-bit lane-fixing shuffle mask. Also
10190 // check whether the in-128-bit lane shuffles share a repeating pattern.
10191 SmallVector<int, 4> Lanes;
10192 Lanes.resize(NumLanes, -1);
10193 SmallVector<int, 4> InLaneMask;
10194 InLaneMask.resize(LaneSize, -1);
10195 for (int i = 0; i < Size; ++i) {
10199 int j = i / LaneSize;
10201 if (Lanes[j] < 0) {
10202 // First entry we've seen for this lane.
10203 Lanes[j] = Mask[i] / LaneSize;
10204 } else if (Lanes[j] != Mask[i] / LaneSize) {
10205 // This doesn't match the lane selected previously!
10209 // Check that within each lane we have a consistent shuffle mask.
10210 int k = i % LaneSize;
10211 if (InLaneMask[k] < 0) {
10212 InLaneMask[k] = Mask[i] % LaneSize;
10213 } else if (InLaneMask[k] != Mask[i] % LaneSize) {
10214 // This doesn't fit a repeating in-lane mask.
10219 // First shuffle the lanes into place.
10220 MVT LaneVT = MVT::getVectorVT(VT.isFloatingPoint() ? MVT::f64 : MVT::i64,
10221 VT.getSizeInBits() / 64);
10222 SmallVector<int, 8> LaneMask;
10223 LaneMask.resize(NumLanes * 2, -1);
10224 for (int i = 0; i < NumLanes; ++i)
10225 if (Lanes[i] >= 0) {
10226 LaneMask[2 * i + 0] = 2*Lanes[i] + 0;
10227 LaneMask[2 * i + 1] = 2*Lanes[i] + 1;
10230 V1 = DAG.getNode(ISD::BITCAST, DL, LaneVT, V1);
10231 V2 = DAG.getNode(ISD::BITCAST, DL, LaneVT, V2);
10232 SDValue LaneShuffle = DAG.getVectorShuffle(LaneVT, DL, V1, V2, LaneMask);
10234 // Cast it back to the type we actually want.
10235 LaneShuffle = DAG.getNode(ISD::BITCAST, DL, VT, LaneShuffle);
10237 // Now do a simple shuffle that isn't lane crossing.
10238 SmallVector<int, 8> NewMask;
10239 NewMask.resize(Size, -1);
10240 for (int i = 0; i < Size; ++i)
10242 NewMask[i] = (i / LaneSize) * LaneSize + Mask[i] % LaneSize;
10243 assert(!is128BitLaneCrossingShuffleMask(VT, NewMask) &&
10244 "Must not introduce lane crosses at this point!");
10246 return DAG.getVectorShuffle(VT, DL, LaneShuffle, DAG.getUNDEF(VT), NewMask);
10249 /// \brief Test whether the specified input (0 or 1) is in-place blended by the given mask.
10252 /// This returns true if the elements from a particular input are already in the
10253 /// slot required by the given mask and require no permutation.
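///
/// For example (hand-worked): with the v4 mask <0, 5, 2, 7> both inputs are
/// in place: elements 0 and 2 come from V1 at their own positions, and
/// elements 5 and 7 come from V2 at positions 1 and 3 respectively.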
10254 static bool isShuffleMaskInputInPlace(int Input, ArrayRef<int> Mask) {
10255 assert((Input == 0 || Input == 1) && "Only two inputs to shuffles.");
10256 int Size = Mask.size();
10257 for (int i = 0; i < Size; ++i)
10258 if (Mask[i] >= 0 && Mask[i] / Size == Input && Mask[i] % Size != i)
10264 /// \brief Handle lowering of 4-lane 64-bit floating point shuffles.
10266 /// Also ends up handling lowering of 4-lane 64-bit integer shuffles when AVX2
10267 /// isn't available.
10268 static SDValue lowerV4F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10269 const X86Subtarget *Subtarget,
10270 SelectionDAG &DAG) {
10272 assert(V1.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
10273 assert(V2.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
10274 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10275 ArrayRef<int> Mask = SVOp->getMask();
10276 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
10278 SmallVector<int, 4> WidenedMask;
10279 if (canWidenShuffleElements(Mask, WidenedMask))
10280 return lowerV2X128VectorShuffle(DL, MVT::v4f64, V1, V2, Mask, Subtarget,
10283 if (isSingleInputShuffleMask(Mask)) {
10284 // Check for being able to broadcast a single element.
10285 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4f64, DL, V1,
10286 Mask, Subtarget, DAG))
10289 // Use low duplicate instructions for masks that match their pattern.
10290 if (isShuffleEquivalent(Mask, 0, 0, 2, 2))
10291 return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v4f64, V1);
10293 if (!is128BitLaneCrossingShuffleMask(MVT::v4f64, Mask)) {
10294 // Non-half-crossing single input shuffles can be lowered with an
10295 // interleaved permutation.
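// Hand-worked example (not compiler-verified): the mask <1, 0, 3, 2> sets
// bits 0 and 2 of the immediate (0b0101), swapping the two elements within
// each 128-bit half.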
10296 unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
10297 ((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3);
10298 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f64, V1,
10299 DAG.getConstant(VPERMILPMask, MVT::i8));
10302 // With AVX2 we have direct support for this permutation.
10303 if (Subtarget->hasAVX2())
10304 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4f64, V1,
10305 getV4X86ShuffleImm8ForMask(Mask, DAG));
10307 // Otherwise, fall back.
10308 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v4f64, V1, V2, Mask,
10312 // X86 has dedicated unpack instructions that can handle specific blend
10313 // operations: UNPCKH and UNPCKL.
10314 if (isShuffleEquivalent(Mask, 0, 4, 2, 6))
10315 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4f64, V1, V2);
10316 if (isShuffleEquivalent(Mask, 1, 5, 3, 7))
10317 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4f64, V1, V2);
10319 // If we have a single input to the zero element, insert that into V1 if we
10320 // can do so cheaply.
10321 int NumV2Elements =
10322 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
10323 if (NumV2Elements == 1 && Mask[0] >= 4)
10324 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
10325 MVT::v4f64, DL, V1, V2, Mask, Subtarget, DAG))
10328 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4f64, V1, V2, Mask,
10332 // Check if the blend happens to exactly fit that of SHUFPD.
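// Hand-worked example (not compiler-verified): the mask <0, 5, 2, 7> fits
// this pattern with SHUFPDMask = 0b1010, i.e. take the low element of V1
// and the high element of V2 within each 128-bit half.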
10333 if ((Mask[0] == -1 || Mask[0] < 2) &&
10334 (Mask[1] == -1 || (Mask[1] >= 4 && Mask[1] < 6)) &&
10335 (Mask[2] == -1 || (Mask[2] >= 2 && Mask[2] < 4)) &&
10336 (Mask[3] == -1 || Mask[3] >= 6)) {
10337 unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 5) << 1) |
10338 ((Mask[2] == 3) << 2) | ((Mask[3] == 7) << 3);
10339 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f64, V1, V2,
10340 DAG.getConstant(SHUFPDMask, MVT::i8));
10342 if ((Mask[0] == -1 || (Mask[0] >= 4 && Mask[0] < 6)) &&
10343 (Mask[1] == -1 || Mask[1] < 2) &&
10344 (Mask[2] == -1 || Mask[2] >= 6) &&
10345 (Mask[3] == -1 || (Mask[3] >= 2 && Mask[3] < 4))) {
10346 unsigned SHUFPDMask = (Mask[0] == 5) | ((Mask[1] == 1) << 1) |
10347 ((Mask[2] == 7) << 2) | ((Mask[3] == 3) << 3);
10348 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f64, V2, V1,
10349 DAG.getConstant(SHUFPDMask, MVT::i8));
10352 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10353 // shuffle. However, if we have AVX2 and either input is already in place,
10354 // we will be able to shuffle the other input even across lanes in a single
10355 // instruction, so skip this pattern.
10356 if (!(Subtarget->hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) ||
10357 isShuffleMaskInputInPlace(1, Mask))))
10358 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10359 DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
10362 // If we have AVX2 then we always want to lower with a blend because at v4 we
10363 // can fully permute the elements.
10364 if (Subtarget->hasAVX2())
10365 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4f64, V1, V2,
10368 // Otherwise fall back on generic lowering.
10369 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v4f64, V1, V2, Mask, DAG);
10372 /// \brief Handle lowering of 4-lane 64-bit integer shuffles.
10374 /// This routine is only called when we have AVX2 and thus a reasonable
10375 /// instruction set for v4i64 shuffling.
10376 static SDValue lowerV4I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10377 const X86Subtarget *Subtarget,
10378 SelectionDAG &DAG) {
10380 assert(V1.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
10381 assert(V2.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
10382 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10383 ArrayRef<int> Mask = SVOp->getMask();
10384 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
10385 assert(Subtarget->hasAVX2() && "We can only lower v4i64 with AVX2!");
10387 SmallVector<int, 4> WidenedMask;
10388 if (canWidenShuffleElements(Mask, WidenedMask))
10389 return lowerV2X128VectorShuffle(DL, MVT::v4i64, V1, V2, Mask, Subtarget,
10392 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4i64, V1, V2, Mask,
10396 // Check for being able to broadcast a single element.
10397 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4i64, DL, V1,
10398 Mask, Subtarget, DAG))
10401 // When the shuffle is mirrored between the 128-bit lanes of the unit, we can
10402 // use lower latency instructions that will operate on both 128-bit lanes.
10403 SmallVector<int, 2> RepeatedMask;
10404 if (is128BitLaneRepeatedShuffleMask(MVT::v4i64, Mask, RepeatedMask)) {
10405 if (isSingleInputShuffleMask(Mask)) {
10406 int PSHUFDMask[] = {-1, -1, -1, -1};
10407 for (int i = 0; i < 2; ++i)
10408 if (RepeatedMask[i] >= 0) {
10409 PSHUFDMask[2 * i] = 2 * RepeatedMask[i];
10410 PSHUFDMask[2 * i + 1] = 2 * RepeatedMask[i] + 1;
10412 return DAG.getNode(
10413 ISD::BITCAST, DL, MVT::v4i64,
10414 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32,
10415 DAG.getNode(ISD::BITCAST, DL, MVT::v8i32, V1),
10416 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
10419 // Use dedicated unpack instructions for masks that match their pattern.
10420 if (isShuffleEquivalent(Mask, 0, 4, 2, 6))
10421 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4i64, V1, V2);
10422 if (isShuffleEquivalent(Mask, 1, 5, 3, 7))
10423 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4i64, V1, V2);
10426 // AVX2 provides a direct instruction for permuting a single input across lanes.
10428 if (isSingleInputShuffleMask(Mask))
10429 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4i64, V1,
10430 getV4X86ShuffleImm8ForMask(Mask, DAG));
10432 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10433 // shuffle. However, if we have AVX2 and either input is already in place,
10434 // we will be able to shuffle the other input even across lanes in a single
10435 // instruction, so skip this pattern.
10436 if (!(Subtarget->hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) ||
10437 isShuffleMaskInputInPlace(1, Mask))))
10438 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10439 DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG))
10442 // Otherwise fall back on generic blend lowering.
10443 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4i64, V1, V2,
10447 /// \brief Handle lowering of 8-lane 32-bit floating point shuffles.
10449 /// Also ends up handling lowering of 8-lane 32-bit integer shuffles when AVX2
10450 /// isn't available.
10451 static SDValue lowerV8F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10452 const X86Subtarget *Subtarget,
10453 SelectionDAG &DAG) {
10455 assert(V1.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
10456 assert(V2.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
10457 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10458 ArrayRef<int> Mask = SVOp->getMask();
10459 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
10461 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8f32, V1, V2, Mask,
10465 // Check for being able to broadcast a single element.
10466 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v8f32, DL, V1,
10467 Mask, Subtarget, DAG))
10470 // If the shuffle mask is repeated in each 128-bit lane, we have many more
10471 // options to efficiently lower the shuffle.
10472 SmallVector<int, 4> RepeatedMask;
10473 if (is128BitLaneRepeatedShuffleMask(MVT::v8f32, Mask, RepeatedMask)) {
10474 assert(RepeatedMask.size() == 4 &&
10475 "Repeated masks must be half the mask width!");
10477 // Use even/odd duplicate instructions for masks that match their pattern.
10478 if (isShuffleEquivalent(Mask, 0, 0, 2, 2, 4, 4, 6, 6))
10479 return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v8f32, V1);
10480 if (isShuffleEquivalent(Mask, 1, 1, 3, 3, 5, 5, 7, 7))
10481 return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v8f32, V1);
10483 if (isSingleInputShuffleMask(Mask))
10484 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f32, V1,
10485 getV4X86ShuffleImm8ForMask(RepeatedMask, DAG));
10487 // Use dedicated unpack instructions for masks that match their pattern.
10488 if (isShuffleEquivalent(Mask, 0, 8, 1, 9, 4, 12, 5, 13))
10489 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8f32, V1, V2);
10490 if (isShuffleEquivalent(Mask, 2, 10, 3, 11, 6, 14, 7, 15))
10491 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8f32, V1, V2);
10493 // Otherwise, fall back to a SHUFPS sequence. Here it is important that we
10494 // have already handled any direct blends. We also need to squash the
10495 // repeated mask into a simulated v4f32 mask.
10496 for (int i = 0; i < 4; ++i)
10497 if (RepeatedMask[i] >= 8)
10498 RepeatedMask[i] -= 4;
10499 return lowerVectorShuffleWithSHUFPS(DL, MVT::v8f32, RepeatedMask, V1, V2, DAG);
10502 // If we have a single input shuffle with different shuffle patterns in the
10503 // two 128-bit lanes use the variable mask to VPERMILPS.
10504 if (isSingleInputShuffleMask(Mask)) {
10505 SDValue VPermMask[8];
10506 for (int i = 0; i < 8; ++i)
10507 VPermMask[i] = Mask[i] < 0 ? DAG.getUNDEF(MVT::i32)
10508 : DAG.getConstant(Mask[i], MVT::i32);
10509 if (!is128BitLaneCrossingShuffleMask(MVT::v8f32, Mask))
10510 return DAG.getNode(
10511 X86ISD::VPERMILPV, DL, MVT::v8f32, V1,
10512 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i32, VPermMask));
10514 if (Subtarget->hasAVX2())
10515 return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8f32,
10516 DAG.getNode(ISD::BITCAST, DL, MVT::v8f32,
10517 DAG.getNode(ISD::BUILD_VECTOR, DL,
10518 MVT::v8i32, VPermMask)),
10521 // Otherwise, fall back.
10522 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v8f32, V1, V2, Mask,
10526 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10528 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10529 DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
10532 // If we have AVX2 then we always want to lower with a blend because at v8 we
10533 // can fully permute the elements.
10534 if (Subtarget->hasAVX2())
10535 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8f32, V1, V2,
10538 // Otherwise fall back on generic lowering.
10539 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v8f32, V1, V2, Mask, DAG);
10542 /// \brief Handle lowering of 8-lane 32-bit integer shuffles.
10544 /// This routine is only called when we have AVX2 and thus a reasonable
10545 /// instruction set for v8i32 shuffling.
10546 static SDValue lowerV8I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10547 const X86Subtarget *Subtarget,
10548 SelectionDAG &DAG) {
10550 assert(V1.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
10551 assert(V2.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
10552 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10553 ArrayRef<int> Mask = SVOp->getMask();
10554 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
10555 assert(Subtarget->hasAVX2() && "We can only lower v8i32 with AVX2!");
10557 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8i32, V1, V2, Mask,
10561 // Check for being able to broadcast a single element.
10562 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v8i32, DL, V1,
10563 Mask, Subtarget, DAG))
10566 // If the shuffle mask is repeated in each 128-bit lane we can use more
10567 // efficient instructions that mirror the shuffles across the two 128-bit lanes.
10569 SmallVector<int, 4> RepeatedMask;
10570 if (is128BitLaneRepeatedShuffleMask(MVT::v8i32, Mask, RepeatedMask)) {
10571 assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
10572 if (isSingleInputShuffleMask(Mask))
10573 return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32, V1,
10574 getV4X86ShuffleImm8ForMask(RepeatedMask, DAG));
10576 // Use dedicated unpack instructions for masks that match their pattern.
10577 if (isShuffleEquivalent(Mask, 0, 8, 1, 9, 4, 12, 5, 13))
10578 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i32, V1, V2);
10579 if (isShuffleEquivalent(Mask, 2, 10, 3, 11, 6, 14, 7, 15))
10580 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i32, V1, V2);
10583 // If the shuffle patterns aren't repeated but it is a single input, directly
10584 // generate a cross-lane VPERMD instruction.
10585 if (isSingleInputShuffleMask(Mask)) {
10586 SDValue VPermMask[8];
10587 for (int i = 0; i < 8; ++i)
10588 VPermMask[i] = Mask[i] < 0 ? DAG.getUNDEF(MVT::i32)
10589 : DAG.getConstant(Mask[i], MVT::i32);
10590 return DAG.getNode(
10591 X86ISD::VPERMV, DL, MVT::v8i32,
10592 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i32, VPermMask), V1);
10595 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10597 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10598 DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
10601 // Otherwise fall back on generic blend lowering.
10602 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8i32, V1, V2,
10606 /// \brief Handle lowering of 16-lane 16-bit integer shuffles.
10608 /// This routine is only called when we have AVX2 and thus a reasonable
10609 /// instruction set for v16i16 shuffling.
10610 static SDValue lowerV16I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10611 const X86Subtarget *Subtarget,
10612 SelectionDAG &DAG) {
10614 assert(V1.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
10615 assert(V2.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
10616 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10617 ArrayRef<int> Mask = SVOp->getMask();
10618 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
10619 assert(Subtarget->hasAVX2() && "We can only lower v16i16 with AVX2!");
10621 // Check for being able to broadcast a single element.
10622 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v16i16, DL, V1,
10623 Mask, Subtarget, DAG))
10626 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v16i16, V1, V2, Mask,
10630 // Use dedicated unpack instructions for masks that match their pattern.
10631 if (isShuffleEquivalent(Mask,
10632 // First 128-bit lane:
10633 0, 16, 1, 17, 2, 18, 3, 19,
10634 // Second 128-bit lane:
10635 8, 24, 9, 25, 10, 26, 11, 27))
10636 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i16, V1, V2);
10637 if (isShuffleEquivalent(Mask,
10638 // First 128-bit lane:
10639 4, 20, 5, 21, 6, 22, 7, 23,
10640 // Second 128-bit lane:
10641 12, 28, 13, 29, 14, 30, 15, 31))
10642 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i16, V1, V2);
10644 if (isSingleInputShuffleMask(Mask)) {
10645 // There are no generalized cross-lane shuffle operations available on i16 element types.
10647 if (is128BitLaneCrossingShuffleMask(MVT::v16i16, Mask))
10648 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v16i16, V1, V2,
10651 SDValue PSHUFBMask[32];
10652 for (int i = 0; i < 16; ++i) {
10653 if (Mask[i] == -1) {
10654 PSHUFBMask[2 * i] = PSHUFBMask[2 * i + 1] = DAG.getUNDEF(MVT::i8);
10658 int M = i < 8 ? Mask[i] : Mask[i] - 8;
10659 assert(M >= 0 && M < 8 && "Invalid single-input mask!");
10660 PSHUFBMask[2 * i] = DAG.getConstant(2 * M, MVT::i8);
10661 PSHUFBMask[2 * i + 1] = DAG.getConstant(2 * M + 1, MVT::i8);
10663 return DAG.getNode(
10664 ISD::BITCAST, DL, MVT::v16i16,
10666 X86ISD::PSHUFB, DL, MVT::v32i8,
10667 DAG.getNode(ISD::BITCAST, DL, MVT::v32i8, V1),
10668 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, PSHUFBMask)));
10671 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10673 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10674 DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
10677 // Otherwise fall back on generic lowering.
10678 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v16i16, V1, V2, Mask, DAG);
10681 /// \brief Handle lowering of 32-lane 8-bit integer shuffles.
10683 /// This routine is only called when we have AVX2 and thus a reasonable
10684 /// instruction set for v32i8 shuffling.
10685 static SDValue lowerV32I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10686 const X86Subtarget *Subtarget,
10687 SelectionDAG &DAG) {
10689 assert(V1.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
10690 assert(V2.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
10691 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10692 ArrayRef<int> Mask = SVOp->getMask();
10693 assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
10694 assert(Subtarget->hasAVX2() && "We can only lower v32i8 with AVX2!");
10696 // Check for being able to broadcast a single element.
10697 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v32i8, DL, V1,
10698 Mask, Subtarget, DAG))
10701 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v32i8, V1, V2, Mask,
10705 // Use dedicated unpack instructions for masks that match their pattern.
10706 // Note that these are repeated 128-bit lane unpacks, not unpacks across all 256 bits.
10708 if (isShuffleEquivalent(
10710 // First 128-bit lane:
10711 0, 32, 1, 33, 2, 34, 3, 35, 4, 36, 5, 37, 6, 38, 7, 39,
10712 // Second 128-bit lane:
10713 16, 48, 17, 49, 18, 50, 19, 51, 20, 52, 21, 53, 22, 54, 23, 55))
10714 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v32i8, V1, V2);
10715 if (isShuffleEquivalent(
10717 // First 128-bit lane:
10718 8, 40, 9, 41, 10, 42, 11, 43, 12, 44, 13, 45, 14, 46, 15, 47,
10719 // Second 128-bit lane:
10720 24, 56, 25, 57, 26, 58, 27, 59, 28, 60, 29, 61, 30, 62, 31, 63))
10721 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v32i8, V1, V2);
10723 if (isSingleInputShuffleMask(Mask)) {
10724 // There are no generalized cross-lane shuffle operations available on i8 element types.
10726 if (is128BitLaneCrossingShuffleMask(MVT::v32i8, Mask))
10727 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v32i8, V1, V2,
10730 SDValue PSHUFBMask[32];
10731 for (int i = 0; i < 32; ++i)
10734 ? DAG.getUNDEF(MVT::i8)
10735 : DAG.getConstant(Mask[i] < 16 ? Mask[i] : Mask[i] - 16, MVT::i8);
10737 return DAG.getNode(
10738 X86ISD::PSHUFB, DL, MVT::v32i8, V1,
10739 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, PSHUFBMask));
10742 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10744 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10745 DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
10748 // Otherwise fall back on generic lowering.
10749 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v32i8, V1, V2, Mask, DAG);
10752 /// \brief High-level routine to lower various 256-bit x86 vector shuffles.
10754 /// This routine either breaks down the specific type of a 256-bit x86 vector
10755 /// shuffle or splits it into two 128-bit shuffles and fuses the results back
10756 /// together based on the available instructions.
10757 static SDValue lower256BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10758 MVT VT, const X86Subtarget *Subtarget,
10759 SelectionDAG &DAG) {
10761 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10762 ArrayRef<int> Mask = SVOp->getMask();
10764 // There is a really nice hard cut-over between AVX1 and AVX2 that means we can
10765 // check for those subtargets here and avoid much of the subtarget querying in
10766 // the per-vector-type lowering routines. With AVX1 we have essentially *zero*
10767 // ability to manipulate a 256-bit vector with integer types. Since we'll use
10768 // floating point types there eventually, just immediately cast everything to
10769 // a float and operate entirely in that domain.
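// For instance (a sketch of the intent, not compiler-verified): on AVX1 a
// v4i64 shuffle is bitcast to v4f64, shuffled in the floating point domain,
// and bitcast back, while sub-32-bit element types are simply split into
// two 128-bit shuffles below.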
10770 if (VT.isInteger() && !Subtarget->hasAVX2()) {
10771 int ElementBits = VT.getScalarSizeInBits();
10772 if (ElementBits < 32)
10773 // No floating point type available, decompose into 128-bit vectors.
10774 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
10776 MVT FpVT = MVT::getVectorVT(MVT::getFloatingPointVT(ElementBits),
10777 VT.getVectorNumElements());
10778 V1 = DAG.getNode(ISD::BITCAST, DL, FpVT, V1);
10779 V2 = DAG.getNode(ISD::BITCAST, DL, FpVT, V2);
10780 return DAG.getNode(ISD::BITCAST, DL, VT,
10781 DAG.getVectorShuffle(FpVT, DL, V1, V2, Mask));
10784 switch (VT.SimpleTy) {
10786 return lowerV4F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
10788 return lowerV4I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
10790 return lowerV8F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
10792 return lowerV8I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
10794 return lowerV16I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
10796 return lowerV32I8VectorShuffle(Op, V1, V2, Subtarget, DAG);
10799 llvm_unreachable("Not a valid 256-bit x86 vector type!");
10803 /// \brief Handle lowering of 8-lane 64-bit floating point shuffles.
10804 static SDValue lowerV8F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10805 const X86Subtarget *Subtarget,
10806 SelectionDAG &DAG) {
10808 assert(V1.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
10809 assert(V2.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
10810 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10811 ArrayRef<int> Mask = SVOp->getMask();
10812 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
10814 // X86 has dedicated unpack instructions that can handle specific blend
10815 // operations: UNPCKH and UNPCKL.
10816 if (isShuffleEquivalent(Mask, 0, 8, 2, 10, 4, 12, 6, 14))
10817 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8f64, V1, V2);
10818 if (isShuffleEquivalent(Mask, 1, 9, 3, 11, 5, 13, 7, 15))
10819 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8f64, V1, V2);
10821 // FIXME: Implement direct support for this type!
10822 return splitAndLowerVectorShuffle(DL, MVT::v8f64, V1, V2, Mask, DAG);
10825 /// \brief Handle lowering of 16-lane 32-bit floating point shuffles.
10826 static SDValue lowerV16F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10827 const X86Subtarget *Subtarget,
10828 SelectionDAG &DAG) {
10830 assert(V1.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
10831 assert(V2.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
10832 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10833 ArrayRef<int> Mask = SVOp->getMask();
10834 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
10836 // Use dedicated unpack instructions for masks that match their pattern.
10837 if (isShuffleEquivalent(Mask,
10838 0, 16, 1, 17, 4, 20, 5, 21,
10839 8, 24, 9, 25, 12, 28, 13, 29))
10840 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16f32, V1, V2);
10841 if (isShuffleEquivalent(Mask,
10842 2, 18, 3, 19, 6, 22, 7, 23,
10843 10, 26, 11, 27, 14, 30, 15, 31))
10844 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16f32, V1, V2);
10846 // FIXME: Implement direct support for this type!
10847 return splitAndLowerVectorShuffle(DL, MVT::v16f32, V1, V2, Mask, DAG);
10850 /// \brief Handle lowering of 8-lane 64-bit integer shuffles.
10851 static SDValue lowerV8I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10852 const X86Subtarget *Subtarget,
10853 SelectionDAG &DAG) {
10855 assert(V1.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
10856 assert(V2.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
10857 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10858 ArrayRef<int> Mask = SVOp->getMask();
10859 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
10861 // X86 has dedicated unpack instructions that can handle specific blend
10862 // operations: UNPCKH and UNPCKL.
10863 if (isShuffleEquivalent(Mask, 0, 8, 2, 10, 4, 12, 6, 14))
10864 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i64, V1, V2);
10865 if (isShuffleEquivalent(Mask, 1, 9, 3, 11, 5, 13, 7, 15))
10866 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i64, V1, V2);
10868 // FIXME: Implement direct support for this type!
10869 return splitAndLowerVectorShuffle(DL, MVT::v8i64, V1, V2, Mask, DAG);
10872 /// \brief Handle lowering of 16-lane 32-bit integer shuffles.
10873 static SDValue lowerV16I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10874 const X86Subtarget *Subtarget,
10875 SelectionDAG &DAG) {
10877 assert(V1.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
10878 assert(V2.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
10879 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10880 ArrayRef<int> Mask = SVOp->getMask();
10881 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
10883 // Use dedicated unpack instructions for masks that match their pattern.
10884 if (isShuffleEquivalent(Mask,
10885 0, 16, 1, 17, 4, 20, 5, 21,
10886 8, 24, 9, 25, 12, 28, 13, 29))
10887 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i32, V1, V2);
10888 if (isShuffleEquivalent(Mask,
10889 2, 18, 3, 19, 6, 22, 7, 23,
10890 10, 26, 11, 27, 14, 30, 15, 31))
10891 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i32, V1, V2);
10893 // FIXME: Implement direct support for this type!
10894 return splitAndLowerVectorShuffle(DL, MVT::v16i32, V1, V2, Mask, DAG);
10897 /// \brief Handle lowering of 32-lane 16-bit integer shuffles.
10898 static SDValue lowerV32I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10899 const X86Subtarget *Subtarget,
10900 SelectionDAG &DAG) {
10902 assert(V1.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
10903 assert(V2.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
10904 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10905 ArrayRef<int> Mask = SVOp->getMask();
10906 assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
10907 assert(Subtarget->hasBWI() && "We can only lower v32i16 with AVX-512-BWI!");
10909 // FIXME: Implement direct support for this type!
10910 return splitAndLowerVectorShuffle(DL, MVT::v32i16, V1, V2, Mask, DAG);
10913 /// \brief Handle lowering of 64-lane 8-bit integer shuffles.
10914 static SDValue lowerV64I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10915 const X86Subtarget *Subtarget,
10916 SelectionDAG &DAG) {
10918 assert(V1.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
10919 assert(V2.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
10920 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10921 ArrayRef<int> Mask = SVOp->getMask();
10922 assert(Mask.size() == 64 && "Unexpected mask size for v64 shuffle!");
10923 assert(Subtarget->hasBWI() && "We can only lower v64i8 with AVX-512-BWI!");
10925 // FIXME: Implement direct support for this type!
10926 return splitAndLowerVectorShuffle(DL, MVT::v64i8, V1, V2, Mask, DAG);
10929 /// \brief High-level routine to lower various 512-bit x86 vector shuffles.
10931 /// This routine either breaks down the specific type of a 512-bit x86 vector
10932 /// shuffle or splits it into two 256-bit shuffles and fuses the results back
10933 /// together based on the available instructions.
10934 static SDValue lower512BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10935 MVT VT, const X86Subtarget *Subtarget,
10936 SelectionDAG &DAG) {
10938 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10939 ArrayRef<int> Mask = SVOp->getMask();
10940 assert(Subtarget->hasAVX512() &&
10941 "Cannot lower 512-bit vectors w/ basic ISA!");
10943 // Check for being able to broadcast a single element.
10944 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(VT.SimpleTy, DL, V1,
10945 Mask, Subtarget, DAG))
10946 return Broadcast;
10948 // Dispatch to each element type for lowering. If we don't have support for
10949 // specific element type shuffles at 512 bits, immediately split them and
10950 // lower them. Each lowering routine of a given type is allowed to assume that
10951 // the requisite ISA extensions for that element type are available.
10952 switch (VT.SimpleTy) {
10953 case MVT::v8f64:
10954 return lowerV8F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
10955 case MVT::v16f32:
10956 return lowerV16F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
10957 case MVT::v8i64:
10958 return lowerV8I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
10959 case MVT::v16i32:
10960 return lowerV16I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
10961 case MVT::v32i16:
10962 if (Subtarget->hasBWI())
10963 return lowerV32I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
10964 break;
10965 case MVT::v64i8:
10966 if (Subtarget->hasBWI())
10967 return lowerV64I8VectorShuffle(Op, V1, V2, Subtarget, DAG);
10968 break;
10970 default:
10971 llvm_unreachable("Not a valid 512-bit x86 vector type!");
10972 }
10974 // Otherwise fall back on splitting.
10975 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
10978 /// \brief Top-level lowering for x86 vector shuffles.
10980 /// This handles decomposition, canonicalization, and lowering of all x86
10981 /// vector shuffles. Most of the specific lowering strategies are encapsulated
10982 /// above in helper routines. The canonicalization attempts to widen shuffles
10983 /// to involve fewer lanes of wider elements, consolidate symmetric patterns
10984 /// s.t. only one of the two inputs needs to be tested, etc.
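/// For instance (illustrative), a v4f32 shuffle with mask <4, 5, 0, 6> draws
/// three of its four elements from V2, so it is commuted and the mask is
/// rewritten as <0, 1, 4, 2> so that most elements come from the first input.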
10985 static SDValue lowerVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
10986 SelectionDAG &DAG) {
10987 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10988 ArrayRef<int> Mask = SVOp->getMask();
10989 SDValue V1 = Op.getOperand(0);
10990 SDValue V2 = Op.getOperand(1);
10991 MVT VT = Op.getSimpleValueType();
10992 int NumElements = VT.getVectorNumElements();
10995 assert(VT.getSizeInBits() != 64 && "Can't lower MMX shuffles");
10997 bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
10998 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
10999 if (V1IsUndef && V2IsUndef)
11000 return DAG.getUNDEF(VT);
11002 // When we create a shuffle node we put the UNDEF node in the second operand,
11003 // but in some cases the first operand may be transformed to UNDEF.
11004 // In this case we should just commute the node.
11005 if (V1IsUndef)
11006 return DAG.getCommutedVectorShuffle(*SVOp);
11008 // Check for non-undef masks pointing at an undef vector and make the masks
11009 // undef as well. This makes it easier to match the shuffle based solely on
11010 // the mask.
11011 if (V2IsUndef)
11012 for (int M : Mask)
11013 if (M >= NumElements) {
11014 SmallVector<int, 8> NewMask(Mask.begin(), Mask.end());
11015 for (int &M : NewMask)
11016 if (M >= NumElements)
11017 M = -1;
11018 return DAG.getVectorShuffle(VT, dl, V1, V2, NewMask);
11021 // Try to collapse shuffles into using a vector type with fewer elements but
11022 // wider element types. We cap this to not form integers or floating point
11023 // elements wider than 64 bits, but it might be interesting to form i128
11024 // integers to handle flipping the low and high halves of AVX 256-bit vectors.
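// Illustrative example: the v4i32 mask <0, 1, 6, 7> moves whole 64-bit pairs,
// so it can be widened to the v2i64 mask <0, 3> and lowered there.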
11025 SmallVector<int, 16> WidenedMask;
11026 if (VT.getScalarSizeInBits() < 64 &&
11027 canWidenShuffleElements(Mask, WidenedMask)) {
11028 MVT NewEltVT = VT.isFloatingPoint()
11029 ? MVT::getFloatingPointVT(VT.getScalarSizeInBits() * 2)
11030 : MVT::getIntegerVT(VT.getScalarSizeInBits() * 2);
11031 MVT NewVT = MVT::getVectorVT(NewEltVT, VT.getVectorNumElements() / 2);
11032 // Make sure that the new vector type is legal. For example, v2f64 isn't
11033 // legal on SSE1.
11034 if (DAG.getTargetLoweringInfo().isTypeLegal(NewVT)) {
11035 V1 = DAG.getNode(ISD::BITCAST, dl, NewVT, V1);
11036 V2 = DAG.getNode(ISD::BITCAST, dl, NewVT, V2);
11037 return DAG.getNode(ISD::BITCAST, dl, VT,
11038 DAG.getVectorShuffle(NewVT, dl, V1, V2, WidenedMask));
11042 int NumV1Elements = 0, NumUndefElements = 0, NumV2Elements = 0;
11043 for (int M : SVOp->getMask())
11044 if (M < 0)
11045 ++NumUndefElements;
11046 else if (M < NumElements)
11047 ++NumV1Elements;
11048 else
11049 ++NumV2Elements;
11051 // Commute the shuffle as needed such that more elements come from V1 than
11052 // V2. This allows us to match the shuffle pattern strictly on how many
11053 // elements come from V1 without handling the symmetric cases.
11054 if (NumV2Elements > NumV1Elements)
11055 return DAG.getCommutedVectorShuffle(*SVOp);
11057 // When the number of V1 and V2 elements are the same, try to minimize the
11058 // number of uses of V2 in the low half of the vector. When that is tied,
11059 // ensure that the sum of indices for V1 is equal to or lower than the sum of
11060 // indices for V2. When those are equal, try to ensure that the number of odd
11061 // indices for V1 is lower than the number of odd indices for V2.
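// Illustrative example: for a v4i32 mask <4, 1, 6, 3>, each input supplies two
// elements and the low half uses one element of each, but the V2 elements sit
// at positions {0, 2} (sum 2) while the V1 elements sit at {1, 3} (sum 4), so
// the shuffle is commuted.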
11062 if (NumV1Elements == NumV2Elements) {
11063 int LowV1Elements = 0, LowV2Elements = 0;
11064 for (int M : SVOp->getMask().slice(0, NumElements / 2))
11065 if (M >= NumElements)
11066 ++LowV2Elements;
11067 else if (M >= 0)
11068 ++LowV1Elements;
11069 if (LowV2Elements > LowV1Elements) {
11070 return DAG.getCommutedVectorShuffle(*SVOp);
11071 } else if (LowV2Elements == LowV1Elements) {
11072 int SumV1Indices = 0, SumV2Indices = 0;
11073 for (int i = 0, Size = SVOp->getMask().size(); i < Size; ++i)
11074 if (SVOp->getMask()[i] >= NumElements)
11075 SumV2Indices += i;
11076 else if (SVOp->getMask()[i] >= 0)
11077 SumV1Indices += i;
11078 if (SumV2Indices < SumV1Indices) {
11079 return DAG.getCommutedVectorShuffle(*SVOp);
11080 } else if (SumV2Indices == SumV1Indices) {
11081 int NumV1OddIndices = 0, NumV2OddIndices = 0;
11082 for (int i = 0, Size = SVOp->getMask().size(); i < Size; ++i)
11083 if (SVOp->getMask()[i] >= NumElements)
11084 NumV2OddIndices += i % 2;
11085 else if (SVOp->getMask()[i] >= 0)
11086 NumV1OddIndices += i % 2;
11087 if (NumV2OddIndices < NumV1OddIndices)
11088 return DAG.getCommutedVectorShuffle(*SVOp);
11093 // For each vector width, delegate to a specialized lowering routine.
11094 if (VT.getSizeInBits() == 128)
11095 return lower128BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);
11097 if (VT.getSizeInBits() == 256)
11098 return lower256BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);
11100 // Force AVX-512 vectors to be scalarized for now.
11101 // FIXME: Implement AVX-512 support!
11102 if (VT.getSizeInBits() == 512)
11103 return lower512BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);
11105 llvm_unreachable("Unimplemented!");
11109 //===----------------------------------------------------------------------===//
11110 // Legacy vector shuffle lowering
11112 // This is the legacy code for handling vector shuffles until the code above
11113 // replaces it in both functionality and performance.
11114 //===----------------------------------------------------------------------===//
11116 static bool isBlendMask(ArrayRef<int> MaskVals, MVT VT, bool hasSSE41,
11117 bool hasInt256, unsigned *MaskOut = nullptr) {
11118 MVT EltVT = VT.getVectorElementType();
11120 // There is no blend with immediate in AVX-512.
11121 if (VT.is512BitVector())
11124 if (!hasSSE41 || EltVT == MVT::i8)
11126 if (!hasInt256 && VT == MVT::v16i16)
11129 unsigned MaskValue = 0;
11130 unsigned NumElems = VT.getVectorNumElements();
11131 // There are 2 lanes if (NumElems > 8), and 1 lane otherwise.
11132 unsigned NumLanes = (NumElems - 1) / 8 + 1;
11133 unsigned NumElemsInLane = NumElems / NumLanes;
11135 // Blend for v16i16 should be symmetric for both lanes.
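// Illustrative example: for a v8i16 mask <0, 9, 2, 11, 4, 13, 6, 15>,
// elements 1, 3, 5 and 7 come from V2, so MaskValue ends up as 0b10101010
// (0xAA).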
11136 for (unsigned i = 0; i < NumElemsInLane; ++i) {
11138 int SndLaneEltIdx = (NumLanes == 2) ? MaskVals[i + NumElemsInLane] : -1;
11139 int EltIdx = MaskVals[i];
11141 if ((EltIdx < 0 || EltIdx == (int)i) &&
11142 (SndLaneEltIdx < 0 || SndLaneEltIdx == (int)(i + NumElemsInLane)))
11143 continue;
11145 if (((unsigned)EltIdx == (i + NumElems)) &&
11146 (SndLaneEltIdx < 0 ||
11147 (unsigned)SndLaneEltIdx == i + NumElems + NumElemsInLane))
11148 MaskValue |= (1 << i);
11149 else
11150 return false;
11151 }
11153 if (MaskOut)
11154 *MaskOut = MaskValue;
11155 return true;
11156 }
11158 // Try to lower a shuffle node into a simple blend instruction.
11159 // This function assumes isBlendMask returns true for this
11160 // ShuffleVectorSDNode
11161 static SDValue LowerVECTOR_SHUFFLEtoBlend(ShuffleVectorSDNode *SVOp,
11162 unsigned MaskValue,
11163 const X86Subtarget *Subtarget,
11164 SelectionDAG &DAG) {
11165 MVT VT = SVOp->getSimpleValueType(0);
11166 MVT EltVT = VT.getVectorElementType();
11167 assert(isBlendMask(SVOp->getMask(), VT, Subtarget->hasSSE41(),
11168 Subtarget->hasInt256()) &&
11169 "Trying to lower a VECTOR_SHUFFLE to a Blend but "
11170 "with the wrong mask");
11171 SDValue V1 = SVOp->getOperand(0);
11172 SDValue V2 = SVOp->getOperand(1);
11174 unsigned NumElems = VT.getVectorNumElements();
11176 // Convert i32 vectors to floating point if it is not AVX2.
11177 // AVX2 introduced VPBLENDD instruction for 128 and 256-bit vectors.
11178 MVT BlendVT = VT;
11179 if (EltVT == MVT::i64 || (EltVT == MVT::i32 && !Subtarget->hasInt256())) {
11180 BlendVT = MVT::getVectorVT(MVT::getFloatingPointVT(EltVT.getSizeInBits()),
11181 NumElems);
11182 V1 = DAG.getNode(ISD::BITCAST, dl, VT, V1);
11183 V2 = DAG.getNode(ISD::BITCAST, dl, VT, V2);
11186 SDValue Ret = DAG.getNode(X86ISD::BLENDI, dl, BlendVT, V1, V2,
11187 DAG.getConstant(MaskValue, MVT::i32));
11188 return DAG.getNode(ISD::BITCAST, dl, VT, Ret);
11191 /// In vector type \p VT, return true if the element at index \p InputIdx
11192 /// falls on a different 128-bit lane than \p OutputIdx.
11193 static bool ShuffleCrosses128bitLane(MVT VT, unsigned InputIdx,
11194 unsigned OutputIdx) {
11195 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
11196 return InputIdx * EltSize / 128 != OutputIdx * EltSize / 128;
11199 /// Generate a PSHUFB if possible. Selects elements from \p V1 according to
11200 /// \p MaskVals. MaskVals[OutputIdx] = InputIdx specifies that we want to
11201 /// shuffle the element at InputIdx in V1 to OutputIdx in the result. If \p
11202 /// MaskVals refers to elements outside of \p V1 or is undef (-1), insert a
11203 /// zero.
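/// Illustrative example: for a v8i16 input, MaskVals[0] = 2 selects bytes 4
/// and 5 of V1 for the first result word, so the pshufb mask starts with
/// <4, 5, ...>; an undef or out-of-range index instead emits <0x80, 0x80>,
/// which zeroes those result bytes.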
11204 static SDValue getPSHUFB(ArrayRef<int> MaskVals, SDValue V1, SDLoc &dl,
11205 SelectionDAG &DAG) {
11206 MVT VT = V1.getSimpleValueType();
11207 assert(VT.is128BitVector() || VT.is256BitVector());
11209 MVT EltVT = VT.getVectorElementType();
11210 unsigned EltSizeInBytes = EltVT.getSizeInBits() / 8;
11211 unsigned NumElts = VT.getVectorNumElements();
11213 SmallVector<SDValue, 32> PshufbMask;
11214 for (unsigned OutputIdx = 0; OutputIdx < NumElts; ++OutputIdx) {
11215 int InputIdx = MaskVals[OutputIdx];
11216 unsigned InputByteIdx;
11218 if (InputIdx < 0 || NumElts <= (unsigned)InputIdx)
11219 InputByteIdx = 0x80;
11220 else {
11221 // Cross lane is not allowed.
11222 if (ShuffleCrosses128bitLane(VT, InputIdx, OutputIdx))
11223 return SDValue();
11224 InputByteIdx = InputIdx * EltSizeInBytes;
11225 // Index is a byte offset within the 128-bit lane.
11226 InputByteIdx &= 0xf;
11229 for (unsigned j = 0; j < EltSizeInBytes; ++j) {
11230 PshufbMask.push_back(DAG.getConstant(InputByteIdx, MVT::i8));
11231 if (InputByteIdx != 0x80)
11232 ++InputByteIdx;
11236 MVT ShufVT = MVT::getVectorVT(MVT::i8, PshufbMask.size());
11238 V1 = DAG.getNode(ISD::BITCAST, dl, ShufVT, V1);
11239 return DAG.getNode(X86ISD::PSHUFB, dl, ShufVT, V1,
11240 DAG.getNode(ISD::BUILD_VECTOR, dl, ShufVT, PshufbMask));
11243 // v8i16 shuffles - Prefer shuffles in the following order:
11244 // 1. [all] pshuflw, pshufhw, optional move
11245 // 2. [ssse3] 1 x pshufb
11246 // 3. [ssse3] 2 x pshufb + 1 x por
11247 // 4. [all] mov + pshuflw + pshufhw + N x (pextrw + pinsrw)
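// Illustrative examples: <2, 1, 0, 3, 4, 5, 6, 7> only permutes the low
// quadword and maps to a single pshuflw; <0, 1, 2, 3, 5, 4, 7, 6> maps to a
// single pshufhw; a mask that mixes words across quadwords needs pshufb
// (SSSE3) or the pextrw/pinsrw fallback below.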
11249 LowerVECTOR_SHUFFLEv8i16(SDValue Op, const X86Subtarget *Subtarget,
11250 SelectionDAG &DAG) {
11251 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11252 SDValue V1 = SVOp->getOperand(0);
11253 SDValue V2 = SVOp->getOperand(1);
11255 SmallVector<int, 8> MaskVals;
11257 // Determine if more than 1 of the words in each of the low and high quadwords
11258 // of the result come from the same quadword of one of the two inputs. Undef
11259 // mask values count as coming from any quadword, for better codegen.
11261 // Lo/HiQuad[i] = j indicates how many words from the ith quad of the input
11262 // feed this quad. For i, 0 and 1 refer to V1, 2 and 3 refer to V2.
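// Illustrative example: for the mask <0, 1, 2, 3, 8, 9, 10, 11>, LoQuad
// becomes {4, 0, 0, 0} and HiQuad becomes {0, 0, 4, 0}, so BestLoQuad = 0
// (V1's low quad) and BestHiQuad = 2 (V2's low quad).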
11263 unsigned LoQuad[] = { 0, 0, 0, 0 };
11264 unsigned HiQuad[] = { 0, 0, 0, 0 };
11265 // Indices of quads used.
11266 std::bitset<4> InputQuads;
11267 for (unsigned i = 0; i < 8; ++i) {
11268 unsigned *Quad = i < 4 ? LoQuad : HiQuad;
11269 int EltIdx = SVOp->getMaskElt(i);
11270 MaskVals.push_back(EltIdx);
11278 ++Quad[EltIdx / 4];
11279 InputQuads.set(EltIdx / 4);
11282 int BestLoQuad = -1;
11283 unsigned MaxQuad = 1;
11284 for (unsigned i = 0; i < 4; ++i) {
11285 if (LoQuad[i] > MaxQuad) {
11287 MaxQuad = LoQuad[i];
11291 int BestHiQuad = -1;
11293 for (unsigned i = 0; i < 4; ++i) {
11294 if (HiQuad[i] > MaxQuad) {
11296 MaxQuad = HiQuad[i];
11300 // For SSSE3, If all 8 words of the result come from only 1 quadword of each
11301 // of the two input vectors, shuffle them into one input vector so only a
11302 // single pshufb instruction is necessary. If there are more than 2 input
11303 // quads, disable the next transformation since it does not help SSSE3.
11304 bool V1Used = InputQuads[0] || InputQuads[1];
11305 bool V2Used = InputQuads[2] || InputQuads[3];
11306 if (Subtarget->hasSSSE3()) {
11307 if (InputQuads.count() == 2 && V1Used && V2Used) {
11308 BestLoQuad = InputQuads[0] ? 0 : 1;
11309 BestHiQuad = InputQuads[2] ? 2 : 3;
11311 if (InputQuads.count() > 2) {
11317 // If BestLoQuad or BestHiQuad are set, shuffle the quads together and update
11318 // the shuffle mask. If a quad is scored as -1, that means that it contains
11319 // words from all 4 input quadwords.
11321 if (BestLoQuad >= 0 || BestHiQuad >= 0) {
11322 int MaskV[] = {
11323 BestLoQuad < 0 ? 0 : BestLoQuad,
11324 BestHiQuad < 0 ? 1 : BestHiQuad
11325 };
11326 NewV = DAG.getVectorShuffle(MVT::v2i64, dl,
11327 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1),
11328 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2), &MaskV[0]);
11329 NewV = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, NewV);
11331 // Rewrite the MaskVals and assign NewV to V1 if NewV now contains all the
11332 // source words for the shuffle, to aid later transformations.
11333 bool AllWordsInNewV = true;
11334 bool InOrder[2] = { true, true };
11335 for (unsigned i = 0; i != 8; ++i) {
11336 int idx = MaskVals[i];
11338 InOrder[i/4] = false;
11339 if (idx < 0 || (idx/4) == BestLoQuad || (idx/4) == BestHiQuad)
11341 AllWordsInNewV = false;
11345 bool pshuflw = AllWordsInNewV, pshufhw = AllWordsInNewV;
11346 if (AllWordsInNewV) {
11347 for (int i = 0; i != 8; ++i) {
11348 int idx = MaskVals[i];
11351 idx = MaskVals[i] = (idx / 4) == BestLoQuad ? (idx & 3) : (idx & 3) + 4;
11352 if ((idx != i) && idx < 4)
11354 if ((idx != i) && idx > 3)
11363 // If we've eliminated the use of V2, and the new mask is a pshuflw or
11364 // pshufhw, that's as cheap as it gets. Return the new shuffle.
11365 if ((pshufhw && InOrder[0]) || (pshuflw && InOrder[1])) {
11366 unsigned Opc = pshufhw ? X86ISD::PSHUFHW : X86ISD::PSHUFLW;
11367 unsigned TargetMask = 0;
11368 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV,
11369 DAG.getUNDEF(MVT::v8i16), &MaskVals[0]);
11370 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode());
11371 TargetMask = pshufhw ? getShufflePSHUFHWImmediate(SVOp):
11372 getShufflePSHUFLWImmediate(SVOp);
11373 V1 = NewV.getOperand(0);
11374 return getTargetShuffleNode(Opc, dl, MVT::v8i16, V1, TargetMask, DAG);
11378 // Promote splats to a larger type which usually leads to more efficient code.
11379 // FIXME: Is this true if pshufb is available?
11380 if (SVOp->isSplat())
11381 return PromoteSplat(SVOp, DAG);
11383 // If we have SSSE3, and all words of the result are from 1 input vector,
11384 // case 2 is generated, otherwise case 3 is generated. If no SSSE3
11385 // is present, fall back to case 4.
11386 if (Subtarget->hasSSSE3()) {
11387 SmallVector<SDValue,16> pshufbMask;
11389 // If we have elements from both input vectors, set the high bit of the
11390 // shuffle mask element to zero out elements that come from V2 in the V1
11391 // mask, and elements that come from V1 in the V2 mask, so that the two
11392 // results can be OR'd together.
11393 bool TwoInputs = V1Used && V2Used;
11394 V1 = getPSHUFB(MaskVals, V1, dl, DAG);
11396 return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
11398 // Calculate the shuffle mask for the second input, shuffle it, and
11399 // OR it with the first shuffled input.
11400 CommuteVectorShuffleMask(MaskVals, 8);
11401 V2 = getPSHUFB(MaskVals, V2, dl, DAG);
11402 V1 = DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2);
11403 return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
11406 // If BestLoQuad >= 0, generate a pshuflw to put the low elements in order,
11407 // and update MaskVals with new element order.
11408 std::bitset<8> InOrder;
11409 if (BestLoQuad >= 0) {
11410 int MaskV[] = { -1, -1, -1, -1, 4, 5, 6, 7 };
11411 for (int i = 0; i != 4; ++i) {
11412 int idx = MaskVals[i];
11415 } else if ((idx / 4) == BestLoQuad) {
11416 MaskV[i] = idx & 3;
11420 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16),
11423 if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSE2()) {
11424 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode());
11425 NewV = getTargetShuffleNode(X86ISD::PSHUFLW, dl, MVT::v8i16,
11426 NewV.getOperand(0),
11427 getShufflePSHUFLWImmediate(SVOp), DAG);
11431 // If BestHi >= 0, generate a pshufhw to put the high elements in order,
11432 // and update MaskVals with the new element order.
11433 if (BestHiQuad >= 0) {
11434 int MaskV[] = { 0, 1, 2, 3, -1, -1, -1, -1 };
11435 for (unsigned i = 4; i != 8; ++i) {
11436 int idx = MaskVals[i];
11439 } else if ((idx / 4) == BestHiQuad) {
11440 MaskV[i] = (idx & 3) + 4;
11444 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16),
11447 if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSE2()) {
11448 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode());
11449 NewV = getTargetShuffleNode(X86ISD::PSHUFHW, dl, MVT::v8i16,
11450 NewV.getOperand(0),
11451 getShufflePSHUFHWImmediate(SVOp), DAG);
11455 // In case BestHi & BestLo were both -1, which means each quadword has a word
11456 // from each of the four input quadwords, calculate the InOrder bitvector now
11457 // before falling through to the insert/extract cleanup.
11458 if (BestLoQuad == -1 && BestHiQuad == -1) {
11460 for (int i = 0; i != 8; ++i)
11461 if (MaskVals[i] < 0 || MaskVals[i] == i)
11465 // The other elements are put in the right place using pextrw and pinsrw.
11466 for (unsigned i = 0; i != 8; ++i) {
11469 int EltIdx = MaskVals[i];
11472 SDValue ExtOp = (EltIdx < 8) ?
11473 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V1,
11474 DAG.getIntPtrConstant(EltIdx)) :
11475 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V2,
11476 DAG.getIntPtrConstant(EltIdx - 8));
11477 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, ExtOp,
11478 DAG.getIntPtrConstant(i));
11483 /// \brief v16i16 shuffles
11485 /// FIXME: We only support generation of a single pshufb currently. We can
11486 /// generalize the other applicable cases from LowerVECTOR_SHUFFLEv8i16 as
11487 /// well (e.g. 2 x pshufb + 1 x por).
11489 LowerVECTOR_SHUFFLEv16i16(SDValue Op, SelectionDAG &DAG) {
11490 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11491 SDValue V1 = SVOp->getOperand(0);
11492 SDValue V2 = SVOp->getOperand(1);
11495 if (V2.getOpcode() != ISD::UNDEF)
11498 SmallVector<int, 16> MaskVals(SVOp->getMask().begin(), SVOp->getMask().end());
11499 return getPSHUFB(MaskVals, V1, dl, DAG);
11502 // v16i8 shuffles - Prefer shuffles in the following order:
11503 // 1. [ssse3] 1 x pshufb
11504 // 2. [ssse3] 2 x pshufb + 1 x por
11505 // 3. [all] v8i16 shuffle + N x pextrw + rotate + pinsrw
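// Illustrative example: the interleaving mask <0, 16, 1, 17, ...> takes bytes
// from both inputs, so case 2 applies: V1 is shuffled with 0x80 in the slots
// that belong to V2, V2 is shuffled with 0x80 in the slots that belong to V1,
// and the two results are OR'd together.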
11506 static SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp,
11507 const X86Subtarget* Subtarget,
11508 SelectionDAG &DAG) {
11509 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11510 SDValue V1 = SVOp->getOperand(0);
11511 SDValue V2 = SVOp->getOperand(1);
11513 ArrayRef<int> MaskVals = SVOp->getMask();
11515 // Promote splats to a larger type which usually leads to more efficient code.
11516 // FIXME: Is this true if pshufb is available?
11517 if (SVOp->isSplat())
11518 return PromoteSplat(SVOp, DAG);
11520 // If we have SSSE3, case 1 is generated when all result bytes come from
11521 // one of the inputs. Otherwise, case 2 is generated. If no SSSE3 is
11522 // present, fall back to case 3.
11524 // If SSSE3, use 1 pshufb instruction per vector with elements in the result.
11525 if (Subtarget->hasSSSE3()) {
11526 SmallVector<SDValue,16> pshufbMask;
11528 // If all result elements are from one input vector, then only translate
11529 // undef mask values to 0x80 (zero out result) in the pshufb mask.
11531 // Otherwise, we have elements from both input vectors, and must zero out
11532 // elements that come from V2 in the first mask, and V1 in the second mask
11533 // so that we can OR them together.
11534 for (unsigned i = 0; i != 16; ++i) {
11535 int EltIdx = MaskVals[i];
11536 if (EltIdx < 0 || EltIdx >= 16)
11538 pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8));
11540 V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1,
11541 DAG.getNode(ISD::BUILD_VECTOR, dl,
11542 MVT::v16i8, pshufbMask));
11544 // As PSHUFB will zero elements with negative indices, it's safe to ignore
11545 // the 2nd operand if it's undefined or zero.
11546 if (V2.getOpcode() == ISD::UNDEF ||
11547 ISD::isBuildVectorAllZeros(V2.getNode()))
11550 // Calculate the shuffle mask for the second input, shuffle it, and
11551 // OR it with the first shuffled input.
11552 pshufbMask.clear();
11553 for (unsigned i = 0; i != 16; ++i) {
11554 int EltIdx = MaskVals[i];
11555 EltIdx = (EltIdx < 16) ? 0x80 : EltIdx - 16;
11556 pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8));
11558 V2 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V2,
11559 DAG.getNode(ISD::BUILD_VECTOR, dl,
11560 MVT::v16i8, pshufbMask));
11561 return DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2);
11564 // No SSSE3 - Calculate in place words and then fix all out of place words
11565 // with 0-16 extracts & inserts. Worst case is 16 bytes out of order from
11566 // the 16 different words that comprise the two doublequadword input vectors.
11567 V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
11568 V2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2);
11570 for (int i = 0; i != 8; ++i) {
11571 int Elt0 = MaskVals[i*2];
11572 int Elt1 = MaskVals[i*2+1];
11574 // This word of the result is all undef, skip it.
11575 if (Elt0 < 0 && Elt1 < 0)
11578 // This word of the result is already in the correct place, skip it.
11579 if ((Elt0 == i*2) && (Elt1 == i*2+1))
11582 SDValue Elt0Src = Elt0 < 16 ? V1 : V2;
11583 SDValue Elt1Src = Elt1 < 16 ? V1 : V2;
11586 // If Elt0 and Elt1 are defined, are consecutive, and can be loaded
11587 // using a single extract together, load it and store it.
11588 if ((Elt0 >= 0) && ((Elt0 + 1) == Elt1) && ((Elt0 & 1) == 0)) {
11589 InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src,
11590 DAG.getIntPtrConstant(Elt1 / 2));
11591 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt,
11592 DAG.getIntPtrConstant(i));
11596 // If Elt1 is defined, extract it from the appropriate source. If the
11597 // source byte is not also odd, shift the extracted word left 8 bits
11598 // otherwise clear the bottom 8 bits if we need to do an or.
11600 InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src,
11601 DAG.getIntPtrConstant(Elt1 / 2));
11602 if ((Elt1 & 1) == 0)
11603 InsElt = DAG.getNode(ISD::SHL, dl, MVT::i16, InsElt,
11605 TLI.getShiftAmountTy(InsElt.getValueType())));
11606 else if (Elt0 >= 0)
11607 InsElt = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt,
11608 DAG.getConstant(0xFF00, MVT::i16));
11610 // If Elt0 is defined, extract it from the appropriate source. If the
11611 // source byte is not also even, shift the extracted word right 8 bits. If
11612 // Elt1 was also defined, OR the extracted values together before
11613 // inserting them in the result.
11615 SDValue InsElt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16,
11616 Elt0Src, DAG.getIntPtrConstant(Elt0 / 2));
11617 if ((Elt0 & 1) != 0)
11618 InsElt0 = DAG.getNode(ISD::SRL, dl, MVT::i16, InsElt0,
11620 TLI.getShiftAmountTy(InsElt0.getValueType())));
11621 else if (Elt1 >= 0)
11622 InsElt0 = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt0,
11623 DAG.getConstant(0x00FF, MVT::i16));
11624 InsElt = Elt1 >= 0 ? DAG.getNode(ISD::OR, dl, MVT::i16, InsElt, InsElt0)
11627 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt,
11628 DAG.getIntPtrConstant(i));
11630 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, NewV);
11633 // v32i8 shuffles - Translate to VPSHUFB if possible.
11635 SDValue LowerVECTOR_SHUFFLEv32i8(ShuffleVectorSDNode *SVOp,
11636 const X86Subtarget *Subtarget,
11637 SelectionDAG &DAG) {
11638 MVT VT = SVOp->getSimpleValueType(0);
11639 SDValue V1 = SVOp->getOperand(0);
11640 SDValue V2 = SVOp->getOperand(1);
11642 SmallVector<int, 32> MaskVals(SVOp->getMask().begin(), SVOp->getMask().end());
11644 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
11645 bool V1IsAllZero = ISD::isBuildVectorAllZeros(V1.getNode());
11646 bool V2IsAllZero = ISD::isBuildVectorAllZeros(V2.getNode());
11648 // VPSHUFB may be generated if
11649 // (1) one of input vector is undefined or zeroinitializer.
11650 // The mask value 0x80 puts 0 in the corresponding slot of the vector.
11651 // And (2) the mask indexes don't cross the 128-bit lane.
11652 if (VT != MVT::v32i8 || !Subtarget->hasInt256() ||
11653 (!V2IsUndef && !V2IsAllZero && !V1IsAllZero))
11656 if (V1IsAllZero && !V2IsAllZero) {
11657 CommuteVectorShuffleMask(MaskVals, 32);
11660 return getPSHUFB(MaskVals, V1, dl, DAG);
11663 /// RewriteAsNarrowerShuffle - Try rewriting v8i16 and v16i8 shuffles as 4 wide
11664 /// ones, or rewriting v4i32 / v4f32 as 2 wide ones if possible. This can be
11665 /// done when every pair / quad of shuffle mask elements point to elements in
11666 /// the right sequence. e.g.
11667 /// vector_shuffle X, Y, <2, 3, | 10, 11, | 0, 1, | 14, 15>
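/// (i.e., after widening it becomes the v4i32 shuffle <1, 5, 0, 7>)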
11669 SDValue RewriteAsNarrowerShuffle(ShuffleVectorSDNode *SVOp,
11670 SelectionDAG &DAG) {
11671 MVT VT = SVOp->getSimpleValueType(0);
11673 unsigned NumElems = VT.getVectorNumElements();
11676 switch (VT.SimpleTy) {
11677 default: llvm_unreachable("Unexpected!");
11680 return SDValue(SVOp, 0);
11681 case MVT::v4f32: NewVT = MVT::v2f64; Scale = 2; break;
11682 case MVT::v4i32: NewVT = MVT::v2i64; Scale = 2; break;
11683 case MVT::v8i16: NewVT = MVT::v4i32; Scale = 2; break;
11684 case MVT::v16i8: NewVT = MVT::v4i32; Scale = 4; break;
11685 case MVT::v16i16: NewVT = MVT::v8i32; Scale = 2; break;
11686 case MVT::v32i8: NewVT = MVT::v8i32; Scale = 4; break;
11689 SmallVector<int, 8> MaskVec;
11690 for (unsigned i = 0; i != NumElems; i += Scale) {
11692 for (unsigned j = 0; j != Scale; ++j) {
11693 int EltIdx = SVOp->getMaskElt(i+j);
11697 StartIdx = (EltIdx / Scale);
11698 if (EltIdx != (int)(StartIdx*Scale + j))
11701 MaskVec.push_back(StartIdx);
11704 SDValue V1 = DAG.getNode(ISD::BITCAST, dl, NewVT, SVOp->getOperand(0));
11705 SDValue V2 = DAG.getNode(ISD::BITCAST, dl, NewVT, SVOp->getOperand(1));
11706 return DAG.getVectorShuffle(NewVT, dl, V1, V2, &MaskVec[0]);
11709 /// getVZextMovL - Return a zero-extending vector move low node.
11711 static SDValue getVZextMovL(MVT VT, MVT OpVT,
11712 SDValue SrcOp, SelectionDAG &DAG,
11713 const X86Subtarget *Subtarget, SDLoc dl) {
11714 if (VT == MVT::v2f64 || VT == MVT::v4f32) {
11715 LoadSDNode *LD = nullptr;
11716 if (!isScalarLoadToVector(SrcOp.getNode(), &LD))
11717 LD = dyn_cast<LoadSDNode>(SrcOp);
11719 // movssrr and movsdrr do not clear top bits. Try to use movd, movq
11721 MVT ExtVT = (OpVT == MVT::v2f64) ? MVT::i64 : MVT::i32;
11722 if ((ExtVT != MVT::i64 || Subtarget->is64Bit()) &&
11723 SrcOp.getOpcode() == ISD::SCALAR_TO_VECTOR &&
11724 SrcOp.getOperand(0).getOpcode() == ISD::BITCAST &&
11725 SrcOp.getOperand(0).getOperand(0).getValueType() == ExtVT) {
11727 OpVT = (OpVT == MVT::v2f64) ? MVT::v2i64 : MVT::v4i32;
11728 return DAG.getNode(ISD::BITCAST, dl, VT,
11729 DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT,
11730 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
11732 SrcOp.getOperand(0)
11738 return DAG.getNode(ISD::BITCAST, dl, VT,
11739 DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT,
11740 DAG.getNode(ISD::BITCAST, dl,
11744 /// LowerVECTOR_SHUFFLE_256 - Handle all 256-bit wide vector shuffles
11745 /// which could not be matched by any known target specific shuffle
11747 LowerVECTOR_SHUFFLE_256(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
11749 SDValue NewOp = Compact8x32ShuffleNode(SVOp, DAG);
11750 if (NewOp.getNode())
11753 MVT VT = SVOp->getSimpleValueType(0);
11755 unsigned NumElems = VT.getVectorNumElements();
11756 unsigned NumLaneElems = NumElems / 2;
11759 MVT EltVT = VT.getVectorElementType();
11760 MVT NVT = MVT::getVectorVT(EltVT, NumLaneElems);
11763 SmallVector<int, 16> Mask;
11764 for (unsigned l = 0; l < 2; ++l) {
11765 // Build a shuffle mask for the output, discovering on the fly which
11766 // input vectors to use as shuffle operands (recorded in InputUsed).
11767 // If building a suitable shuffle vector proves too hard, then bail
11768 // out with UseBuildVector set.
11769 bool UseBuildVector = false;
11770 int InputUsed[2] = { -1, -1 }; // Not yet discovered.
11771 unsigned LaneStart = l * NumLaneElems;
11772 for (unsigned i = 0; i != NumLaneElems; ++i) {
11773 // The mask element. This indexes into the input.
11774 int Idx = SVOp->getMaskElt(i+LaneStart);
11776 // the mask element does not index into any input vector.
11777 Mask.push_back(-1);
11781 // The input vector this mask element indexes into.
11782 int Input = Idx / NumLaneElems;
11784 // Turn the index into an offset from the start of the input vector.
11785 Idx -= Input * NumLaneElems;
11787 // Find or create a shuffle vector operand to hold this input.
11789 for (OpNo = 0; OpNo < array_lengthof(InputUsed); ++OpNo) {
11790 if (InputUsed[OpNo] == Input)
11791 // This input vector is already an operand.
11793 if (InputUsed[OpNo] < 0) {
11794 // Create a new operand for this input vector.
11795 InputUsed[OpNo] = Input;
11800 if (OpNo >= array_lengthof(InputUsed)) {
11801 // More than two input vectors used! Give up on trying to create a
11802 // shuffle vector. Insert all elements into a BUILD_VECTOR instead.
11803 UseBuildVector = true;
11807 // Add the mask index for the new shuffle vector.
11808 Mask.push_back(Idx + OpNo * NumLaneElems);
11811 if (UseBuildVector) {
11812 SmallVector<SDValue, 16> SVOps;
11813 for (unsigned i = 0; i != NumLaneElems; ++i) {
11814 // The mask element. This indexes into the input.
11815 int Idx = SVOp->getMaskElt(i+LaneStart);
11817 SVOps.push_back(DAG.getUNDEF(EltVT));
11821 // The input vector this mask element indexes into.
11822 int Input = Idx / NumElems;
11824 // Turn the index into an offset from the start of the input vector.
11825 Idx -= Input * NumElems;
11827 // Extract the vector element by hand.
11828 SVOps.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
11829 SVOp->getOperand(Input),
11830 DAG.getIntPtrConstant(Idx)));
11833 // Construct the output using a BUILD_VECTOR.
11834 Output[l] = DAG.getNode(ISD::BUILD_VECTOR, dl, NVT, SVOps);
11835 } else if (InputUsed[0] < 0) {
11836 // No input vectors were used! The result is undefined.
11837 Output[l] = DAG.getUNDEF(NVT);
11839 SDValue Op0 = Extract128BitVector(SVOp->getOperand(InputUsed[0] / 2),
11840 (InputUsed[0] % 2) * NumLaneElems,
11842 // If only one input was used, use an undefined vector for the other.
11843 SDValue Op1 = (InputUsed[1] < 0) ? DAG.getUNDEF(NVT) :
11844 Extract128BitVector(SVOp->getOperand(InputUsed[1] / 2),
11845 (InputUsed[1] % 2) * NumLaneElems, DAG, dl);
11846 // At least one input vector was used. Create a new shuffle vector.
11847 Output[l] = DAG.getVectorShuffle(NVT, dl, Op0, Op1, &Mask[0]);
11853 // Concatenate the result back
11854 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Output[0], Output[1]);
11857 /// LowerVECTOR_SHUFFLE_128v4 - Handle all 128-bit wide vectors with
11858 /// 4 elements, and match them with several different shuffle types.
11860 LowerVECTOR_SHUFFLE_128v4(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
11861 SDValue V1 = SVOp->getOperand(0);
11862 SDValue V2 = SVOp->getOperand(1);
11864 MVT VT = SVOp->getSimpleValueType(0);
11866 assert(VT.is128BitVector() && "Unsupported vector size");
11868 std::pair<int, int> Locs[4];
11869 int Mask1[] = { -1, -1, -1, -1 };
11870 SmallVector<int, 8> PermMask(SVOp->getMask().begin(), SVOp->getMask().end());
11872 unsigned NumHi = 0;
11873 unsigned NumLo = 0;
11874 for (unsigned i = 0; i != 4; ++i) {
11875 int Idx = PermMask[i];
11877 Locs[i] = std::make_pair(-1, -1);
11879 assert(Idx < 8 && "Invalid VECTOR_SHUFFLE index!");
11881 Locs[i] = std::make_pair(0, NumLo);
11882 Mask1[NumLo] = Idx;
11885 Locs[i] = std::make_pair(1, NumHi);
11887 Mask1[2+NumHi] = Idx;
11893 if (NumLo <= 2 && NumHi <= 2) {
11894 // If no more than two elements come from either vector, this can be
11895 // implemented with two shuffles. The first shuffle gathers the elements.
11896 // The second shuffle, which takes the first shuffle as both of its
11897 // vector operands, puts the elements into the right order.
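// Illustrative example: for the mask <0, 4, 1, 5>, the first shuffle builds
// <a0, a1, b0, b1> with mask <0, 1, 4, 5>, and the second shuffle reorders it
// into <a0, b0, a1, b1> with mask <0, 2, 5, 7>.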
11898 V1 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);
11900 int Mask2[] = { -1, -1, -1, -1 };
11902 for (unsigned i = 0; i != 4; ++i)
11903 if (Locs[i].first != -1) {
11904 unsigned Idx = (i < 2) ? 0 : 4;
11905 Idx += Locs[i].first * 2 + Locs[i].second;
11909 return DAG.getVectorShuffle(VT, dl, V1, V1, &Mask2[0]);
11912 if (NumLo == 3 || NumHi == 3) {
11913 // Otherwise, we must have three elements from one vector, call it X, and
11914 // one element from the other, call it Y. First, use a shufps to build an
11915 // intermediate vector with the one element from Y and the element from X
11916 // that will be in the same half in the final destination (the indexes don't
11917 // matter). Then, use a shufps to build the final vector, taking the half
11918 // containing the element from Y from the intermediate, and the other half
11919 // from X.
11921 // Normalize it so the 3 elements come from V1.
11922 CommuteVectorShuffleMask(PermMask, 4);
11926 // Find the element from V2.
11928 for (HiIndex = 0; HiIndex < 3; ++HiIndex) {
11929 int Val = PermMask[HiIndex];
11936 Mask1[0] = PermMask[HiIndex];
11938 Mask1[2] = PermMask[HiIndex^1];
11940 V2 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);
11942 if (HiIndex >= 2) {
11943 Mask1[0] = PermMask[0];
11944 Mask1[1] = PermMask[1];
11945 Mask1[2] = HiIndex & 1 ? 6 : 4;
11946 Mask1[3] = HiIndex & 1 ? 4 : 6;
11947 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);
11950 Mask1[0] = HiIndex & 1 ? 2 : 0;
11951 Mask1[1] = HiIndex & 1 ? 0 : 2;
11952 Mask1[2] = PermMask[2];
11953 Mask1[3] = PermMask[3];
11958 return DAG.getVectorShuffle(VT, dl, V2, V1, &Mask1[0]);
11961 // Break it into (shuffle shuffle_hi, shuffle_lo).
11962 int LoMask[] = { -1, -1, -1, -1 };
11963 int HiMask[] = { -1, -1, -1, -1 };
11965 int *MaskPtr = LoMask;
11966 unsigned MaskIdx = 0;
11967 unsigned LoIdx = 0;
11968 unsigned HiIdx = 2;
11969 for (unsigned i = 0; i != 4; ++i) {
11976 int Idx = PermMask[i];
11978 Locs[i] = std::make_pair(-1, -1);
11979 } else if (Idx < 4) {
11980 Locs[i] = std::make_pair(MaskIdx, LoIdx);
11981 MaskPtr[LoIdx] = Idx;
11984 Locs[i] = std::make_pair(MaskIdx, HiIdx);
11985 MaskPtr[HiIdx] = Idx;
11990 SDValue LoShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &LoMask[0]);
11991 SDValue HiShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &HiMask[0]);
11992 int MaskOps[] = { -1, -1, -1, -1 };
11993 for (unsigned i = 0; i != 4; ++i)
11994 if (Locs[i].first != -1)
11995 MaskOps[i] = Locs[i].first * 4 + Locs[i].second;
11996 return DAG.getVectorShuffle(VT, dl, LoShuffle, HiShuffle, &MaskOps[0]);
11999 static bool MayFoldVectorLoad(SDValue V) {
12000 while (V.hasOneUse() && V.getOpcode() == ISD::BITCAST)
12001 V = V.getOperand(0);
12003 if (V.hasOneUse() && V.getOpcode() == ISD::SCALAR_TO_VECTOR)
12004 V = V.getOperand(0);
12005 if (V.hasOneUse() && V.getOpcode() == ISD::BUILD_VECTOR &&
12006 V.getNumOperands() == 2 && V.getOperand(1).getOpcode() == ISD::UNDEF)
12007 // BUILD_VECTOR (load), undef
12008 V = V.getOperand(0);
12010 return MayFoldLoad(V);
12014 SDValue getMOVDDup(SDValue &Op, SDLoc &dl, SDValue V1, SelectionDAG &DAG) {
12015 MVT VT = Op.getSimpleValueType();
12017 // Canonicalize to v2f64.
12018 V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1);
12019 return DAG.getNode(ISD::BITCAST, dl, VT,
12020 getTargetShuffleNode(X86ISD::MOVDDUP, dl, MVT::v2f64,
12025 SDValue getMOVLowToHigh(SDValue &Op, SDLoc &dl, SelectionDAG &DAG,
12027 SDValue V1 = Op.getOperand(0);
12028 SDValue V2 = Op.getOperand(1);
12029 MVT VT = Op.getSimpleValueType();
12031 assert(VT != MVT::v2i64 && "unsupported shuffle type");
12033 if (HasSSE2 && VT == MVT::v2f64)
12034 return getTargetShuffleNode(X86ISD::MOVLHPD, dl, VT, V1, V2, DAG);
12036 // v4f32 or v4i32: canonicalized to v4f32 (which is legal for SSE1)
12037 return DAG.getNode(ISD::BITCAST, dl, VT,
12038 getTargetShuffleNode(X86ISD::MOVLHPS, dl, MVT::v4f32,
12039 DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V1),
12040 DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V2), DAG));
12044 SDValue getMOVHighToLow(SDValue &Op, SDLoc &dl, SelectionDAG &DAG) {
12045 SDValue V1 = Op.getOperand(0);
12046 SDValue V2 = Op.getOperand(1);
12047 MVT VT = Op.getSimpleValueType();
12049 assert((VT == MVT::v4i32 || VT == MVT::v4f32) &&
12050 "unsupported shuffle type");
12052 if (V2.getOpcode() == ISD::UNDEF)
12056 return getTargetShuffleNode(X86ISD::MOVHLPS, dl, VT, V1, V2, DAG);
12060 SDValue getMOVLP(SDValue &Op, SDLoc &dl, SelectionDAG &DAG, bool HasSSE2) {
12061 SDValue V1 = Op.getOperand(0);
12062 SDValue V2 = Op.getOperand(1);
12063 MVT VT = Op.getSimpleValueType();
12064 unsigned NumElems = VT.getVectorNumElements();
12066 // Use MOVLPS and MOVLPD in case V1 or V2 are loads. During isel, the second
12067 // operand of these instructions is only memory, so check if there's a
12068 // potential load folding here, otherwise use SHUFPS or MOVSD to match the
12070 bool CanFoldLoad = false;
12072 // Trivial case, when V2 comes from a load.
12073 if (MayFoldVectorLoad(V2))
12074 CanFoldLoad = true;
12076 // When V1 is a load, it can be folded later into a store in isel, example:
12077 // (store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)), addr:$src1)
12079 // (MOVLPSmr addr:$src1, VR128:$src2)
12080 // So, recognize this potential and also use MOVLPS or MOVLPD
12081 else if (MayFoldVectorLoad(V1) && MayFoldIntoStore(Op))
12082 CanFoldLoad = true;
12084 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
12086 if (HasSSE2 && NumElems == 2)
12087 return getTargetShuffleNode(X86ISD::MOVLPD, dl, VT, V1, V2, DAG);
12090 // If we don't care about the second element, proceed to use movss.
12091 if (SVOp->getMaskElt(1) != -1)
12092 return getTargetShuffleNode(X86ISD::MOVLPS, dl, VT, V1, V2, DAG);
12095 // movl and movlp will both match v2i64, but v2i64 is never matched by
12096 // movl earlier because we make it strict to avoid messing with the movlp load
12097 // folding logic (see the code above getMOVLP call). Match it here then,
12098 // this is horrible, but will stay like this until we move all shuffle
12099 // matching to x86 specific nodes. Note that for the 1st condition all
12100 // types are matched with movsd.
12102 // FIXME: isMOVLMask should be checked and matched before getMOVLP,
12103 // as to remove this logic from here, as much as possible
12104 if (NumElems == 2 || !isMOVLMask(SVOp->getMask(), VT))
12105 return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG);
12106 return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG);
12109 assert(VT != MVT::v4i32 && "unsupported shuffle type");
12111 // Invert the operand order and use SHUFPS to match it.
12112 return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V2, V1,
12113 getShuffleSHUFImmediate(SVOp), DAG);
12116 static SDValue NarrowVectorLoadToElement(LoadSDNode *Load, unsigned Index,
12117 SelectionDAG &DAG) {
12119 MVT VT = Load->getSimpleValueType(0);
12120 MVT EVT = VT.getVectorElementType();
12121 SDValue Addr = Load->getOperand(1);
12122 SDValue NewAddr = DAG.getNode(
12123 ISD::ADD, dl, Addr.getSimpleValueType(), Addr,
12124 DAG.getConstant(Index * EVT.getStoreSize(), Addr.getSimpleValueType()));
12127 DAG.getLoad(EVT, dl, Load->getChain(), NewAddr,
12128 DAG.getMachineFunction().getMachineMemOperand(
12129 Load->getMemOperand(), 0, EVT.getStoreSize()));
12133 // It is only safe to call this function if isINSERTPSMask is true for
12134 // this shufflevector mask.
12135 static SDValue getINSERTPS(ShuffleVectorSDNode *SVOp, SDLoc &dl,
12136 SelectionDAG &DAG) {
12137 // Generate an insertps instruction when inserting an f32 from memory onto a
12138 // v4f32 or when copying a member from one v4f32 to another.
12139 // We also use it for transferring i32 from one register to another,
12140 // since it simply copies the same bits.
12141 // If we're transferring an i32 from memory to a specific element in a
12142 // register, we output a generic DAG that will match the PINSRD
12144 MVT VT = SVOp->getSimpleValueType(0);
12145 MVT EVT = VT.getVectorElementType();
12146 SDValue V1 = SVOp->getOperand(0);
12147 SDValue V2 = SVOp->getOperand(1);
12148 auto Mask = SVOp->getMask();
12149 assert((VT == MVT::v4f32 || VT == MVT::v4i32) &&
12150 "unsupported vector type for insertps/pinsrd");
12152 auto FromV1Predicate = [](const int &i) { return i < 4 && i > -1; };
12153 auto FromV2Predicate = [](const int &i) { return i >= 4; };
12154 int FromV1 = std::count_if(Mask.begin(), Mask.end(), FromV1Predicate);
12158 unsigned DestIndex;
12162 DestIndex = std::find_if(Mask.begin(), Mask.end(), FromV1Predicate) -
12165 // If we have 1 element from each vector, we have to check if we're
12166 // changing V1's element's place. If so, we're done. Otherwise, we
12167 // should assume we're changing V2's element's place and behave
12169 int FromV2 = std::count_if(Mask.begin(), Mask.end(), FromV2Predicate);
12170 assert(DestIndex <= INT32_MAX && "truncated destination index");
12171 if (FromV1 == FromV2 &&
12172 static_cast<int>(DestIndex) == Mask[DestIndex] % 4) {
12176 std::find_if(Mask.begin(), Mask.end(), FromV2Predicate) - Mask.begin();
12179 assert(std::count_if(Mask.begin(), Mask.end(), FromV2Predicate) == 1 &&
12180 "More than one element from V1 and from V2, or no elements from one "
12181 "of the vectors. This case should not have returned true from "
12186 std::find_if(Mask.begin(), Mask.end(), FromV2Predicate) - Mask.begin();
12189 // Get an index into the source vector in the range [0,4) (the mask is
12190 // in the range [0,8) because it can address V1 and V2)
12191 unsigned SrcIndex = Mask[DestIndex] % 4;
12192 if (MayFoldLoad(From)) {
12193 // Trivial case, when From comes from a load and is only used by the
12194 // shuffle. Make it use insertps from the vector that we need from that
12197 NarrowVectorLoadToElement(cast<LoadSDNode>(From), SrcIndex, DAG);
12198 if (!NewLoad.getNode())
12201 if (EVT == MVT::f32) {
12202 // Create this as a scalar to vector to match the instruction pattern.
12203 SDValue LoadScalarToVector =
12204 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, NewLoad);
12205 SDValue InsertpsMask = DAG.getIntPtrConstant(DestIndex << 4);
12206 return DAG.getNode(X86ISD::INSERTPS, dl, VT, To, LoadScalarToVector,
12208 } else { // EVT == MVT::i32
12209 // If we're getting an i32 from memory, use an INSERT_VECTOR_ELT
12210 // instruction, to match the PINSRD instruction, which loads an i32 to a
12211 // certain vector element.
12212 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, To, NewLoad,
12213 DAG.getConstant(DestIndex, MVT::i32));
12217 // Vector-element-to-vector
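// Illustrative example: copying element 2 of From into element 1 of To yields
// the insertps immediate (1 << 4) | (2 << 6) == 0x90 (bits 7:6 select the
// source element, bits 5:4 the destination slot).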
12218 SDValue InsertpsMask = DAG.getIntPtrConstant(DestIndex << 4 | SrcIndex << 6);
12219 return DAG.getNode(X86ISD::INSERTPS, dl, VT, To, From, InsertpsMask);
12222 // Reduce a vector shuffle to zext.
12223 static SDValue LowerVectorIntExtend(SDValue Op, const X86Subtarget *Subtarget,
12224 SelectionDAG &DAG) {
12225 // PMOVZX is only available from SSE41.
12226 if (!Subtarget->hasSSE41())
12229 MVT VT = Op.getSimpleValueType();
12231 // Only AVX2 supports 256-bit vector integer extension.
12232 if (!Subtarget->hasInt256() && VT.is256BitVector())
12235 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
12237 SDValue V1 = Op.getOperand(0);
12238 SDValue V2 = Op.getOperand(1);
12239 unsigned NumElems = VT.getVectorNumElements();
12241 // Extending is a unary operation and the element type of the source vector
12242 // won't be equal to or larger than i64.
12243 if (V2.getOpcode() != ISD::UNDEF || !VT.isInteger() ||
12244 VT.getVectorElementType() == MVT::i64)
12247 // Find the expansion ratio, e.g. expanding from i8 to i32 has a ratio of 4.
12248 unsigned Shift = 1; // Start from 2, i.e. 1 << 1.
12249 while ((1U << Shift) < NumElems) {
12250 if (SVOp->getMaskElt(1U << Shift) == 1)
12253 // The maximal ratio is 8, i.e. from i8 to i64.
12258 // Check the shuffle mask.
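// Illustrative example: a v8i16 -> v4i32 zero-extension shows up here as the
// mask <0, -1, 1, -1, 2, -1, 3, -1> with Shift == 1: every odd position is
// undef and every even position i holds i >> 1.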
12259 unsigned Mask = (1U << Shift) - 1;
12260 for (unsigned i = 0; i != NumElems; ++i) {
12261 int EltIdx = SVOp->getMaskElt(i);
12262 if ((i & Mask) != 0 && EltIdx != -1)
12264 if ((i & Mask) == 0 && (unsigned)EltIdx != (i >> Shift))
12268 unsigned NBits = VT.getVectorElementType().getSizeInBits() << Shift;
12269 MVT NeVT = MVT::getIntegerVT(NBits);
12270 MVT NVT = MVT::getVectorVT(NeVT, NumElems >> Shift);
12272 if (!DAG.getTargetLoweringInfo().isTypeLegal(NVT))
12275 return DAG.getNode(ISD::BITCAST, DL, VT,
12276 DAG.getNode(X86ISD::VZEXT, DL, NVT, V1));
12279 static SDValue NormalizeVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
12280 SelectionDAG &DAG) {
12281 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
12282 MVT VT = Op.getSimpleValueType();
12284 SDValue V1 = Op.getOperand(0);
12285 SDValue V2 = Op.getOperand(1);
12287 if (isZeroShuffle(SVOp))
12288 return getZeroVector(VT, Subtarget, DAG, dl);
12290 // Handle splat operations
12291 if (SVOp->isSplat()) {
12292 // Use vbroadcast whenever the splat comes from a foldable load
12293 SDValue Broadcast = LowerVectorBroadcast(Op, Subtarget, DAG);
12294 if (Broadcast.getNode())
12298 // Check integer expanding shuffles.
12299 SDValue NewOp = LowerVectorIntExtend(Op, Subtarget, DAG);
12300 if (NewOp.getNode())
12303 // If the shuffle can be profitably rewritten as a narrower shuffle, then
12305 if (VT == MVT::v8i16 || VT == MVT::v16i8 || VT == MVT::v16i16 ||
12306 VT == MVT::v32i8) {
12307 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG);
12308 if (NewOp.getNode())
12309 return DAG.getNode(ISD::BITCAST, dl, VT, NewOp);
12310 } else if (VT.is128BitVector() && Subtarget->hasSSE2()) {
12311 // FIXME: Figure out a cleaner way to do this.
12312 if (ISD::isBuildVectorAllZeros(V2.getNode())) {
12313 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG);
12314 if (NewOp.getNode()) {
12315 MVT NewVT = NewOp.getSimpleValueType();
12316 if (isCommutedMOVLMask(cast<ShuffleVectorSDNode>(NewOp)->getMask(),
12317 NewVT, true, false))
12318 return getVZextMovL(VT, NewVT, NewOp.getOperand(0), DAG, Subtarget,
12321 } else if (ISD::isBuildVectorAllZeros(V1.getNode())) {
12322 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG);
12323 if (NewOp.getNode()) {
12324 MVT NewVT = NewOp.getSimpleValueType();
12325 if (isMOVLMask(cast<ShuffleVectorSDNode>(NewOp)->getMask(), NewVT))
12326 return getVZextMovL(VT, NewVT, NewOp.getOperand(1), DAG, Subtarget,
12335 X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
12336 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
12337 SDValue V1 = Op.getOperand(0);
12338 SDValue V2 = Op.getOperand(1);
12339 MVT VT = Op.getSimpleValueType();
12341 unsigned NumElems = VT.getVectorNumElements();
12342 bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
12343 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
12344 bool V1IsSplat = false;
12345 bool V2IsSplat = false;
12346 bool HasSSE2 = Subtarget->hasSSE2();
12347 bool HasFp256 = Subtarget->hasFp256();
12348 bool HasInt256 = Subtarget->hasInt256();
12349 MachineFunction &MF = DAG.getMachineFunction();
12350 bool OptForSize = MF.getFunction()->getAttributes().
12351 hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
12353 // Check if we should use the experimental vector shuffle lowering. If so,
12354 // delegate completely to that code path.
12355 if (ExperimentalVectorShuffleLowering)
12356 return lowerVectorShuffle(Op, Subtarget, DAG);
12358 assert(VT.getSizeInBits() != 64 && "Can't lower MMX shuffles");
12360 if (V1IsUndef && V2IsUndef)
12361 return DAG.getUNDEF(VT);
12363 // When we create a shuffle node we put the UNDEF node to second operand,
12364 // but in some cases the first operand may be transformed to UNDEF.
12365 // In this case we should just commute the node.
12367 return DAG.getCommutedVectorShuffle(*SVOp);
12369 // Vector shuffle lowering takes 3 steps:
12371 // 1) Normalize the input vectors. Here splats, zeroed vectors, profitable
12372 // narrowing and commutation of operands should be handled.
12373 // 2) Matching of shuffles with known shuffle masks to x86 target specific
12375 // 3) Rewriting of unmatched masks into new generic shuffle operations,
12376 // so the shuffle can be broken into other shuffles and the legalizer can
12377 // try the lowering again.
12379 // The general idea is that no vector_shuffle operation should be left to
12380 // be matched during isel, all of them must be converted to a target specific
12383 // Normalize the input vectors. Here splats, zeroed vectors, profitable
12384 // narrowing and commutation of operands should be handled. The actual code
12385 // doesn't include all of those, work in progress...
12386 SDValue NewOp = NormalizeVectorShuffle(Op, Subtarget, DAG);
12387 if (NewOp.getNode())
12390 SmallVector<int, 8> M(SVOp->getMask().begin(), SVOp->getMask().end());
12392 // NOTE: isPSHUFDMask can also match both masks below (unpckl_undef and
12393 // unpckh_undef). Only use pshufd if speed is more important than size.
12394 if (OptForSize && isUNPCKL_v_undef_Mask(M, VT, HasInt256))
12395 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
12396 if (OptForSize && isUNPCKH_v_undef_Mask(M, VT, HasInt256))
12397 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG);
12399 if (isMOVDDUPMask(M, VT) && Subtarget->hasSSE3() &&
12400 V2IsUndef && MayFoldVectorLoad(V1))
12401 return getMOVDDup(Op, dl, V1, DAG);
12403 if (isMOVHLPS_v_undef_Mask(M, VT))
12404 return getMOVHighToLow(Op, dl, DAG);
12406 // Used to match splats
12407 if (HasSSE2 && isUNPCKHMask(M, VT, HasInt256) && V2IsUndef &&
12408 (VT == MVT::v2f64 || VT == MVT::v2i64))
12409 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG);
12411 if (isPSHUFDMask(M, VT)) {
12412 // The actual implementation will match the mask in the if above and then,
12413 // during isel, it can match several different instructions, not only pshufd
12414 // as its name suggests. Sad but true; emulate that behavior for now...
12415 if (isMOVDDUPMask(M, VT) && ((VT == MVT::v4f32 || VT == MVT::v2i64)))
12416 return getTargetShuffleNode(X86ISD::MOVLHPS, dl, VT, V1, V1, DAG);
12418 unsigned TargetMask = getShuffleSHUFImmediate(SVOp);
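// Illustrative example: element i of the result is encoded in bits
// [2*i+1 : 2*i] of this immediate, so the v4i32 mask <2,3,0,1> becomes
// 2 | 3<<2 | 0<<4 | 1<<6 = 0x4E, i.e. "pshufd $0x4E", which swaps the two
// 64-bit halves of the vector.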
12420 if (HasSSE2 && (VT == MVT::v4f32 || VT == MVT::v4i32))
12421 return getTargetShuffleNode(X86ISD::PSHUFD, dl, VT, V1, TargetMask, DAG);
12423 if (HasFp256 && (VT == MVT::v4f32 || VT == MVT::v2f64))
12424 return getTargetShuffleNode(X86ISD::VPERMILPI, dl, VT, V1, TargetMask,
12427 return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V1, V1,
12431 if (isPALIGNRMask(M, VT, Subtarget))
12432 return getTargetShuffleNode(X86ISD::PALIGNR, dl, VT, V1, V2,
12433 getShufflePALIGNRImmediate(SVOp),
12436 if (isVALIGNMask(M, VT, Subtarget))
12437 return getTargetShuffleNode(X86ISD::VALIGN, dl, VT, V1, V2,
12438 getShuffleVALIGNImmediate(SVOp),
12441 // Check if this can be converted into a logical shift.
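// Illustrative example: with V2 an all-zeros vector, the v4i32 mask <4,0,1,2>
// moves every element of V1 up one slot and zero-fills element 0, which is
// exactly "pslldq $4", so it is emitted below as a single vector shift rather
// than a shuffle.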
12442 bool isLeft = false;
12443 unsigned ShAmt = 0;
12445 bool isShift = HasSSE2 && isVectorShift(SVOp, DAG, isLeft, ShVal, ShAmt);
12446 if (isShift && ShVal.hasOneUse()) {
12447 // If the shifted value has multiple uses, it may be cheaper to use
12448 // v_set0 + movlhps or movhlps, etc.
12449 MVT EltVT = VT.getVectorElementType();
12450 ShAmt *= EltVT.getSizeInBits();
12451 return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl);
12454 if (isMOVLMask(M, VT)) {
12455 if (ISD::isBuildVectorAllZeros(V1.getNode()))
12456 return getVZextMovL(VT, VT, V2, DAG, Subtarget, dl);
12457 if (!isMOVLPMask(M, VT)) {
12458 if (HasSSE2 && (VT == MVT::v2i64 || VT == MVT::v2f64))
12459 return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG);
12461 if (VT == MVT::v4i32 || VT == MVT::v4f32)
12462 return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG);
12466 // FIXME: fold these into legal mask.
12467 if (isMOVLHPSMask(M, VT) && !isUNPCKLMask(M, VT, HasInt256))
12468 return getMOVLowToHigh(Op, dl, DAG, HasSSE2);
12470 if (isMOVHLPSMask(M, VT))
12471 return getMOVHighToLow(Op, dl, DAG);
12473 if (V2IsUndef && isMOVSHDUPMask(M, VT, Subtarget))
12474 return getTargetShuffleNode(X86ISD::MOVSHDUP, dl, VT, V1, DAG);
12476 if (V2IsUndef && isMOVSLDUPMask(M, VT, Subtarget))
12477 return getTargetShuffleNode(X86ISD::MOVSLDUP, dl, VT, V1, DAG);
12479 if (isMOVLPMask(M, VT))
12480 return getMOVLP(Op, dl, DAG, HasSSE2);
12482 if (ShouldXformToMOVHLPS(M, VT) ||
12483 ShouldXformToMOVLP(V1.getNode(), V2.getNode(), M, VT))
12484 return DAG.getCommutedVectorShuffle(*SVOp);
12487 // No better options. Use a vshldq / vsrldq.
12488 MVT EltVT = VT.getVectorElementType();
12489 ShAmt *= EltVT.getSizeInBits();
12490 return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl);
12493 bool Commuted = false;
12494 // FIXME: This should also accept a bitcast of a splat? Be careful, not
12495 // 1,1,1,1 -> v8i16 though.
12496 BitVector UndefElements;
12497 if (auto *BVOp = dyn_cast<BuildVectorSDNode>(V1.getNode()))
12498 if (BVOp->getConstantSplatNode(&UndefElements) && UndefElements.none())
12500 if (auto *BVOp = dyn_cast<BuildVectorSDNode>(V2.getNode()))
12501 if (BVOp->getConstantSplatNode(&UndefElements) && UndefElements.none())
12504 // Canonicalize the splat or undef, if present, to be on the RHS.
12505 if (!V2IsUndef && V1IsSplat && !V2IsSplat) {
12506 CommuteVectorShuffleMask(M, NumElems);
12508 std::swap(V1IsSplat, V2IsSplat);
12512 if (isCommutedMOVLMask(M, VT, V2IsSplat, V2IsUndef)) {
12513 // Shuffling low element of v1 into undef, just return v1.
12516 // If V2 is a splat, the mask may be malformed such as <4,3,3,3>, which
12517 // the instruction selector will not match, so get a canonical MOVL with
12518 // swapped operands to undo the commute.
12519 return getMOVL(DAG, dl, VT, V2, V1);
12522 if (isUNPCKLMask(M, VT, HasInt256))
12523 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);
12525 if (isUNPCKHMask(M, VT, HasInt256))
12526 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);
12529 // Normalize the mask so all entries that point to V2 point to its first
12530 // element, then try to match unpck{h|l} again. If it matches, return a
12531 // new vector_shuffle with the corrected mask.
12532 SmallVector<int, 8> NewMask(M.begin(), M.end());
12533 NormalizeMask(NewMask, NumElems);
12534 if (isUNPCKLMask(NewMask, VT, HasInt256, true))
12535 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);
12536 if (isUNPCKHMask(NewMask, VT, HasInt256, true))
12537 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);
12541 // Commute it back and try unpck* again.
12542 // FIXME: this seems wrong.
12543 CommuteVectorShuffleMask(M, NumElems);
12545 std::swap(V1IsSplat, V2IsSplat);
12547 if (isUNPCKLMask(M, VT, HasInt256))
12548 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);
12550 if (isUNPCKHMask(M, VT, HasInt256))
12551 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);
12554 // Normalize the node to match x86 shuffle ops if needed
12555 if (!V2IsUndef && (isSHUFPMask(M, VT, /* Commuted */ true)))
12556 return DAG.getCommutedVectorShuffle(*SVOp);
12558 // The checks below are all present in isShuffleMaskLegal, but they are
12559 // inlined here right now to enable us to directly emit target specific
12560 // nodes, and remove one by one until they don't return Op anymore.
12562 if (ShuffleVectorSDNode::isSplatMask(&M[0], VT) &&
12563 SVOp->getSplatIndex() == 0 && V2IsUndef) {
12564 if (VT == MVT::v2f64 || VT == MVT::v2i64)
12565 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
12568 if (isPSHUFHWMask(M, VT, HasInt256))
12569 return getTargetShuffleNode(X86ISD::PSHUFHW, dl, VT, V1,
12570 getShufflePSHUFHWImmediate(SVOp),
12573 if (isPSHUFLWMask(M, VT, HasInt256))
12574 return getTargetShuffleNode(X86ISD::PSHUFLW, dl, VT, V1,
12575 getShufflePSHUFLWImmediate(SVOp),
12578 unsigned MaskValue;
12579 if (isBlendMask(M, VT, Subtarget->hasSSE41(), Subtarget->hasInt256(),
12581 return LowerVECTOR_SHUFFLEtoBlend(SVOp, MaskValue, Subtarget, DAG);
12583 if (isSHUFPMask(M, VT))
12584 return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V1, V2,
12585 getShuffleSHUFImmediate(SVOp), DAG);
12587 if (isUNPCKL_v_undef_Mask(M, VT, HasInt256))
12588 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
12589 if (isUNPCKH_v_undef_Mask(M, VT, HasInt256))
12590 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG);
12592 //===--------------------------------------------------------------------===//
12593 // Generate target specific nodes for 128 or 256-bit shuffles only
12594 // supported in the AVX instruction set.
12597 // Handle VMOVDDUPY permutations
12598 if (V2IsUndef && isMOVDDUPYMask(M, VT, HasFp256))
12599 return getTargetShuffleNode(X86ISD::MOVDDUP, dl, VT, V1, DAG);
12601 // Handle VPERMILPS/D* permutations
12602 if (isVPERMILPMask(M, VT)) {
12603 if ((HasInt256 && VT == MVT::v8i32) || VT == MVT::v16i32)
12604 return getTargetShuffleNode(X86ISD::PSHUFD, dl, VT, V1,
12605 getShuffleSHUFImmediate(SVOp), DAG);
12606 return getTargetShuffleNode(X86ISD::VPERMILPI, dl, VT, V1,
12607 getShuffleSHUFImmediate(SVOp), DAG);
12611 if (VT.is512BitVector() && isINSERT64x4Mask(M, VT, &Idx))
12612 return Insert256BitVector(V1, Extract256BitVector(V2, 0, DAG, dl),
12613 Idx*(NumElems/2), DAG, dl);
12615 // Handle VPERM2F128/VPERM2I128 permutations
12616 if (isVPERM2X128Mask(M, VT, HasFp256))
12617 return getTargetShuffleNode(X86ISD::VPERM2X128, dl, VT, V1,
12618 V2, getShuffleVPERM2X128Immediate(SVOp), DAG);
12620 if (Subtarget->hasSSE41() && isINSERTPSMask(M, VT))
12621 return getINSERTPS(SVOp, dl, DAG);
12624 if (V2IsUndef && HasInt256 && isPermImmMask(M, VT, Imm8))
12625 return getTargetShuffleNode(X86ISD::VPERMI, dl, VT, V1, Imm8, DAG);
12627 if ((V2IsUndef && HasInt256 && VT.is256BitVector() && NumElems == 8) ||
12628 VT.is512BitVector()) {
12629 MVT MaskEltVT = MVT::getIntegerVT(VT.getVectorElementType().getSizeInBits());
12630 MVT MaskVectorVT = MVT::getVectorVT(MaskEltVT, NumElems);
12631 SmallVector<SDValue, 16> permclMask;
12632 for (unsigned i = 0; i != NumElems; ++i) {
12633 permclMask.push_back(DAG.getConstant((M[i]>=0) ? M[i] : 0, MaskEltVT));
12636 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVectorVT, permclMask);
12638 // Bitcast is for VPERMPS since mask is v8i32 but node takes v8f32
12639 return DAG.getNode(X86ISD::VPERMV, dl, VT,
12640 DAG.getNode(ISD::BITCAST, dl, VT, Mask), V1);
12641 return DAG.getNode(X86ISD::VPERMV3, dl, VT, V1,
12642 DAG.getNode(ISD::BITCAST, dl, VT, Mask), V2);
12645 //===--------------------------------------------------------------------===//
12646 // Since no target specific shuffle was selected for this generic one,
12647 // lower it into other known shuffles. FIXME: this isn't true yet, but
12648 // this is the plan.
12651 // Handle v8i16 specifically since SSE can do byte extraction and insertion.
12652 if (VT == MVT::v8i16) {
12653 SDValue NewOp = LowerVECTOR_SHUFFLEv8i16(Op, Subtarget, DAG);
12654 if (NewOp.getNode())
12658 if (VT == MVT::v16i16 && Subtarget->hasInt256()) {
12659 SDValue NewOp = LowerVECTOR_SHUFFLEv16i16(Op, DAG);
12660 if (NewOp.getNode())
12664 if (VT == MVT::v16i8) {
12665 SDValue NewOp = LowerVECTOR_SHUFFLEv16i8(SVOp, Subtarget, DAG);
12666 if (NewOp.getNode())
12670 if (VT == MVT::v32i8) {
12671 SDValue NewOp = LowerVECTOR_SHUFFLEv32i8(SVOp, Subtarget, DAG);
12672 if (NewOp.getNode())
12676 // Handle all 128-bit wide vectors with 4 elements, and match them with
12677 // several different shuffle types.
12678 if (NumElems == 4 && VT.is128BitVector())
12679 return LowerVECTOR_SHUFFLE_128v4(SVOp, DAG);
12681 // Handle general 256-bit shuffles
12682 if (VT.is256BitVector())
12683 return LowerVECTOR_SHUFFLE_256(SVOp, DAG);
12688 // This function assumes its argument is a BUILD_VECTOR of constants or
12689 // undef SDNodes, i.e., ISD::isBuildVectorOfConstantSDNodes(BuildVector) is
12691 static bool BUILD_VECTORtoBlendMask(BuildVectorSDNode *BuildVector,
12692 unsigned &MaskValue) {
12694 unsigned NumElems = BuildVector->getNumOperands();
12695 // There are 2 lanes if (NumElems > 8), and 1 lane otherwise.
12696 unsigned NumLanes = (NumElems - 1) / 8 + 1;
12697 unsigned NumElemsInLane = NumElems / NumLanes;
12699 // Blend for v16i16 should be symmetric for both lanes.
12700 for (unsigned i = 0; i < NumElemsInLane; ++i) {
12701 SDValue EltCond = BuildVector->getOperand(i);
12702 SDValue SndLaneEltCond =
12703 (NumLanes == 2) ? BuildVector->getOperand(i + NumElemsInLane) : EltCond;
12705 int Lane1Cond = -1, Lane2Cond = -1;
12706 if (isa<ConstantSDNode>(EltCond))
12707 Lane1Cond = !isZero(EltCond);
12708 if (isa<ConstantSDNode>(SndLaneEltCond))
12709 Lane2Cond = !isZero(SndLaneEltCond);
12711 if (Lane1Cond == Lane2Cond || Lane2Cond < 0)
12712 // Lane1Cond != 0 means we want the first argument.
12713 // Lane1Cond == 0 means we want the second argument.
12714 // The encoding of this argument is 0 for the first argument, 1
12715 // for the second. Therefore, invert the condition.
12716 MaskValue |= !Lane1Cond << i;
12717 else if (Lane1Cond < 0)
12718 MaskValue |= !Lane2Cond << i;
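// Worked example (illustrative): for a v4i32 condition <-1, 0, -1, -1> the loop
// above sets bit i to the inverted condition, giving MaskValue = 0b0010; in the
// BLEND immediate a clear bit picks the first operand and a set bit picks the
// second, which matches the vselect semantics element by element.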
12725 /// \brief Try to lower a VSELECT instruction to an immediate-controlled blend
12727 static SDValue lowerVSELECTtoBLENDI(SDValue Op, const X86Subtarget *Subtarget,
12728 SelectionDAG &DAG) {
12729 SDValue Cond = Op.getOperand(0);
12730 SDValue LHS = Op.getOperand(1);
12731 SDValue RHS = Op.getOperand(2);
12733 MVT VT = Op.getSimpleValueType();
12734 MVT EltVT = VT.getVectorElementType();
12735 unsigned NumElems = VT.getVectorNumElements();
12737 // There is no blend with immediate in AVX-512.
12738 if (VT.is512BitVector())
12741 if (!Subtarget->hasSSE41() || EltVT == MVT::i8)
12743 if (!Subtarget->hasInt256() && VT == MVT::v16i16)
12746 if (!ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
12749 // Check the mask for BLEND and build the value.
12750 unsigned MaskValue = 0;
12751 if (!BUILD_VECTORtoBlendMask(cast<BuildVectorSDNode>(Cond), MaskValue))
12754 // Convert i32 vectors to floating point if it is not AVX2.
12755 // AVX2 introduced VPBLENDD instruction for 128 and 256-bit vectors.
12757 if (EltVT == MVT::i64 || (EltVT == MVT::i32 && !Subtarget->hasInt256())) {
12758 BlendVT = MVT::getVectorVT(MVT::getFloatingPointVT(EltVT.getSizeInBits()),
12760 LHS = DAG.getNode(ISD::BITCAST, dl, BlendVT, LHS);
12761 RHS = DAG.getNode(ISD::BITCAST, dl, BlendVT, RHS);
12764 SDValue Ret = DAG.getNode(X86ISD::BLENDI, dl, BlendVT, LHS, RHS,
12765 DAG.getConstant(MaskValue, MVT::i32));
12766 return DAG.getNode(ISD::BITCAST, dl, VT, Ret);
12769 SDValue X86TargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG) const {
12770 // A vselect where all conditions and data are constants can be optimized into
12771 // a single vector load by SelectionDAGLegalize::ExpandBUILD_VECTOR().
12772 if (ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(0).getNode()) &&
12773 ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(1).getNode()) &&
12774 ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(2).getNode()))
12777 SDValue BlendOp = lowerVSELECTtoBLENDI(Op, Subtarget, DAG);
12778 if (BlendOp.getNode())
12781 // Some types for vselect were previously set to Expand, not Legal or
12782 // Custom. Return an empty SDValue so we fall through to Expand, after
12783 // the Custom lowering phase.
12784 MVT VT = Op.getSimpleValueType();
12785 switch (VT.SimpleTy) {
12790 if (Subtarget->hasBWI() && Subtarget->hasVLX())
12795 // We couldn't create a "Blend with immediate" node.
12796 // This node should still be legal, but we'll have to emit a blendv*
12801 static SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) {
12802 MVT VT = Op.getSimpleValueType();
12805 if (!Op.getOperand(0).getSimpleValueType().is128BitVector())
12808 if (VT.getSizeInBits() == 8) {
12809 SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, MVT::i32,
12810 Op.getOperand(0), Op.getOperand(1));
12811 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract,
12812 DAG.getValueType(VT));
12813 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
12816 if (VT.getSizeInBits() == 16) {
12817 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
12818 // If Idx is 0, it's cheaper to do a move instead of a pextrw.
12820 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
12821 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
12822 DAG.getNode(ISD::BITCAST, dl,
12825 Op.getOperand(1)));
12826 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, MVT::i32,
12827 Op.getOperand(0), Op.getOperand(1));
12828 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract,
12829 DAG.getValueType(VT));
12830 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
12833 if (VT == MVT::f32) {
12834 // EXTRACTPS outputs to a GPR32 register which will require a movd to copy
12835 // the result back to FR32 register. It's only worth matching if the
12836 // result has a single use which is a store or a bitcast to i32. And in
12837 // the case of a store, it's not worth it if the index is a constant 0,
12838 // because a MOVSSmr can be used instead, which is smaller and faster.
12839 if (!Op.hasOneUse())
12841 SDNode *User = *Op.getNode()->use_begin();
12842 if ((User->getOpcode() != ISD::STORE ||
12843 (isa<ConstantSDNode>(Op.getOperand(1)) &&
12844 cast<ConstantSDNode>(Op.getOperand(1))->isNullValue())) &&
12845 (User->getOpcode() != ISD::BITCAST ||
12846 User->getValueType(0) != MVT::i32))
12848 SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
12849 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32,
12852 return DAG.getNode(ISD::BITCAST, dl, MVT::f32, Extract);
12855 if (VT == MVT::i32 || VT == MVT::i64) {
12856 // ExtractPS/pextrq work with a constant index.
12857 if (isa<ConstantSDNode>(Op.getOperand(1)))
12863 /// Extract one bit from a mask vector, like v16i1 or v8i1.
12864 /// This is an AVX-512 feature.
12866 X86TargetLowering::ExtractBitFromMaskVector(SDValue Op, SelectionDAG &DAG) const {
12867 SDValue Vec = Op.getOperand(0);
12869 MVT VecVT = Vec.getSimpleValueType();
12870 SDValue Idx = Op.getOperand(1);
12871 MVT EltVT = Op.getSimpleValueType();
12873 assert((EltVT == MVT::i1) && "Unexpected operands in ExtractBitFromMaskVector");
12875 // A variable index can't be handled in mask registers;
12876 // extend the vector to VR512.
12877 if (!isa<ConstantSDNode>(Idx)) {
12878 MVT ExtVT = (VecVT == MVT::v8i1 ? MVT::v8i64 : MVT::v16i32);
12879 SDValue Ext = DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVT, Vec);
12880 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
12881 ExtVT.getVectorElementType(), Ext, Idx);
12882 return DAG.getNode(ISD::TRUNCATE, dl, EltVT, Elt);
12885 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
12886 const TargetRegisterClass* rc = getRegClassFor(VecVT);
12887 unsigned MaxShift = rc->getSize()*8 - 1;
12888 Vec = DAG.getNode(X86ISD::VSHLI, dl, VecVT, Vec,
12889 DAG.getConstant(MaxShift - IdxVal, MVT::i8));
12890 Vec = DAG.getNode(X86ISD::VSRLI, dl, VecVT, Vec,
12891 DAG.getConstant(MaxShift, MVT::i8));
12892 return DAG.getNode(X86ISD::VEXTRACT, dl, MVT::i1, Vec,
12893 DAG.getIntPtrConstant(0));
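// Worked example (illustrative): a v16i1 mask lives in a 16-bit register, so
// MaxShift is 15. To read bit 5, the code above shifts left by 15 - 5 = 10 to
// park it in the top bit, then logically shifts right by 15 so it lands in
// bit 0 with every other bit cleared, and extracts element 0.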
12897 X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
12898 SelectionDAG &DAG) const {
12900 SDValue Vec = Op.getOperand(0);
12901 MVT VecVT = Vec.getSimpleValueType();
12902 SDValue Idx = Op.getOperand(1);
12904 if (Op.getSimpleValueType() == MVT::i1)
12905 return ExtractBitFromMaskVector(Op, DAG);
12907 if (!isa<ConstantSDNode>(Idx)) {
12908 if (VecVT.is512BitVector() ||
12909 (VecVT.is256BitVector() && Subtarget->hasInt256() &&
12910 VecVT.getVectorElementType().getSizeInBits() == 32)) {
12913 MVT::getIntegerVT(VecVT.getVectorElementType().getSizeInBits());
12914 MVT MaskVT = MVT::getVectorVT(MaskEltVT, VecVT.getSizeInBits() /
12915 MaskEltVT.getSizeInBits());
12917 Idx = DAG.getZExtOrTrunc(Idx, dl, MaskEltVT);
12918 SDValue Mask = DAG.getNode(X86ISD::VINSERT, dl, MaskVT,
12919 getZeroVector(MaskVT, Subtarget, DAG, dl),
12920 Idx, DAG.getConstant(0, getPointerTy()));
12921 SDValue Perm = DAG.getNode(X86ISD::VPERMV, dl, VecVT, Mask, Vec);
12922 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(),
12923 Perm, DAG.getConstant(0, getPointerTy()));
12928 // If this is a 256-bit vector result, first extract the 128-bit vector and
12929 // then extract the element from the 128-bit vector.
12930 if (VecVT.is256BitVector() || VecVT.is512BitVector()) {
12932 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
12933 // Get the 128-bit vector.
12934 Vec = Extract128BitVector(Vec, IdxVal, DAG, dl);
12935 MVT EltVT = VecVT.getVectorElementType();
12937 unsigned ElemsPerChunk = 128 / EltVT.getSizeInBits();
12939 //if (IdxVal >= NumElems/2)
12940 // IdxVal -= NumElems/2;
12941 IdxVal -= (IdxVal/ElemsPerChunk)*ElemsPerChunk;
12942 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
12943 DAG.getConstant(IdxVal, MVT::i32));
12946 assert(VecVT.is128BitVector() && "Unexpected vector length");
12948 if (Subtarget->hasSSE41()) {
12949 SDValue Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG);
12954 MVT VT = Op.getSimpleValueType();
12955 // TODO: handle v16i8.
12956 if (VT.getSizeInBits() == 16) {
12957 SDValue Vec = Op.getOperand(0);
12958 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
12960 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
12961 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
12962 DAG.getNode(ISD::BITCAST, dl,
12964 Op.getOperand(1)));
12965 // Transform it so it matches pextrw, which produces a 32-bit result.
12966 MVT EltVT = MVT::i32;
12967 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, EltVT,
12968 Op.getOperand(0), Op.getOperand(1));
12969 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, EltVT, Extract,
12970 DAG.getValueType(VT));
12971 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
12974 if (VT.getSizeInBits() == 32) {
12975 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
12979 // SHUFPS the element to the lowest double word, then movss.
12980 int Mask[4] = { static_cast<int>(Idx), -1, -1, -1 };
12981 MVT VVT = Op.getOperand(0).getSimpleValueType();
12982 SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0),
12983 DAG.getUNDEF(VVT), Mask);
12984 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
12985 DAG.getIntPtrConstant(0));
12988 if (VT.getSizeInBits() == 64) {
12989 // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b
12990 // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught
12991 // to match extract_elt for f64.
12992 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
12996 // UNPCKHPD the element to the lowest double word, then movsd.
12997 // Note if the lower 64 bits of the result of the UNPCKHPD are then stored
12998 // to a f64mem, the whole operation is folded into a single MOVHPDmr.
12999 int Mask[2] = { 1, -1 };
13000 MVT VVT = Op.getOperand(0).getSimpleValueType();
13001 SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0),
13002 DAG.getUNDEF(VVT), Mask);
13003 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
13004 DAG.getIntPtrConstant(0));
13010 /// Insert one bit into a mask vector, like v16i1 or v8i1.
13011 /// This is an AVX-512 feature.
13013 X86TargetLowering::InsertBitToMaskVector(SDValue Op, SelectionDAG &DAG) const {
13015 SDValue Vec = Op.getOperand(0);
13016 SDValue Elt = Op.getOperand(1);
13017 SDValue Idx = Op.getOperand(2);
13018 MVT VecVT = Vec.getSimpleValueType();
13020 if (!isa<ConstantSDNode>(Idx)) {
13021 // Non-constant index. Extend the source and destination,
13022 // insert the element, and then truncate the result.
13023 MVT ExtVecVT = (VecVT == MVT::v8i1 ? MVT::v8i64 : MVT::v16i32);
13024 MVT ExtEltVT = (VecVT == MVT::v8i1 ? MVT::i64 : MVT::i32);
13025 SDValue ExtOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ExtVecVT,
13026 DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVecVT, Vec),
13027 DAG.getNode(ISD::ZERO_EXTEND, dl, ExtEltVT, Elt), Idx);
13028 return DAG.getNode(ISD::TRUNCATE, dl, VecVT, ExtOp);
13031 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13032 SDValue EltInVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Elt);
13033 if (Vec.getOpcode() == ISD::UNDEF)
13034 return DAG.getNode(X86ISD::VSHLI, dl, VecVT, EltInVec,
13035 DAG.getConstant(IdxVal, MVT::i8));
13036 const TargetRegisterClass* rc = getRegClassFor(VecVT);
13037 unsigned MaxShift = rc->getSize()*8 - 1;
13038 EltInVec = DAG.getNode(X86ISD::VSHLI, dl, VecVT, EltInVec,
13039 DAG.getConstant(MaxShift, MVT::i8));
13040 EltInVec = DAG.getNode(X86ISD::VSRLI, dl, VecVT, EltInVec,
13041 DAG.getConstant(MaxShift - IdxVal, MVT::i8));
13042 return DAG.getNode(ISD::OR, dl, VecVT, Vec, EltInVec);
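// Worked example (illustrative): to set bit 3 of a v16i1 value, the sequence
// above shifts the new element left by MaxShift = 15 (discarding any garbage
// above bit 0), then right by 15 - 3 = 12 so it sits in bit 3, and ORs it into
// the existing mask.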
13045 SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
13046 SelectionDAG &DAG) const {
13047 MVT VT = Op.getSimpleValueType();
13048 MVT EltVT = VT.getVectorElementType();
13050 if (EltVT == MVT::i1)
13051 return InsertBitToMaskVector(Op, DAG);
13054 SDValue N0 = Op.getOperand(0);
13055 SDValue N1 = Op.getOperand(1);
13056 SDValue N2 = Op.getOperand(2);
13057 if (!isa<ConstantSDNode>(N2))
13059 auto *N2C = cast<ConstantSDNode>(N2);
13060 unsigned IdxVal = N2C->getZExtValue();
13062 // If the vector is wider than 128 bits, extract the 128-bit subvector, insert
13063 // into that, and then insert the subvector back into the result.
13064 if (VT.is256BitVector() || VT.is512BitVector()) {
13065 // Get the desired 128-bit vector half.
13066 SDValue V = Extract128BitVector(N0, IdxVal, DAG, dl);
13068 // Insert the element into the desired half.
13069 unsigned NumEltsIn128 = 128 / EltVT.getSizeInBits();
13070 unsigned IdxIn128 = IdxVal - (IdxVal / NumEltsIn128) * NumEltsIn128;
13072 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, V.getValueType(), V, N1,
13073 DAG.getConstant(IdxIn128, MVT::i32));
13075 // Insert the changed part back to the 256-bit vector
13076 return Insert128BitVector(N0, V, IdxVal, DAG, dl);
13078 assert(VT.is128BitVector() && "Only 128-bit vector types should be left!");
13080 if (Subtarget->hasSSE41()) {
13081 if (EltVT.getSizeInBits() == 8 || EltVT.getSizeInBits() == 16) {
13083 if (VT == MVT::v8i16) {
13084 Opc = X86ISD::PINSRW;
13086 assert(VT == MVT::v16i8);
13087 Opc = X86ISD::PINSRB;
13090 // Transform it so it matches pinsr{b,w}, which expects a GR32 as its second
13092 if (N1.getValueType() != MVT::i32)
13093 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
13094 if (N2.getValueType() != MVT::i32)
13095 N2 = DAG.getIntPtrConstant(IdxVal);
13096 return DAG.getNode(Opc, dl, VT, N0, N1, N2);
13099 if (EltVT == MVT::f32) {
13100 // Bits [7:6] of the constant are the source select. This will always be
13101 // zero here. The DAG Combiner may combine an extract_elt index into
13103 // bits. For example (insert (extract, 3), 2) could be matched by
13105 // the '3' into bits [7:6] of X86ISD::INSERTPS.
13106 // Bits [5:4] of the constant are the destination select. This is the
13107 // value of the incoming immediate.
13108 // Bits [3:0] of the constant are the zero mask. The DAG Combiner may
13109 // combine either bitwise AND or insert of float 0.0 to set these bits.
13110 N2 = DAG.getIntPtrConstant(IdxVal << 4);
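// Worked example (illustrative): inserting into element 2 yields the immediate
// 2 << 4 = 0x20, i.e. "insertps $0x20": source select (bits [7:6]) = 0,
// destination select (bits [5:4]) = 2, zero mask (bits [3:0]) = 0.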
13111 // Create this as a scalar to vector.
13112 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
13113 return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1, N2);
13116 if (EltVT == MVT::i32 || EltVT == MVT::i64) {
13117 // PINSR* works with a constant index.
13122 if (EltVT == MVT::i8)
13125 if (EltVT.getSizeInBits() == 16) {
13126 // Transform it so it matches pinsrw, which expects a 16-bit value in a GR32
13127 // as its second argument.
13128 if (N1.getValueType() != MVT::i32)
13129 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
13130 if (N2.getValueType() != MVT::i32)
13131 N2 = DAG.getIntPtrConstant(IdxVal);
13132 return DAG.getNode(X86ISD::PINSRW, dl, VT, N0, N1, N2);
13137 static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) {
13139 MVT OpVT = Op.getSimpleValueType();
13141 // If this is a 256-bit vector result, first insert into a 128-bit
13142 // vector and then insert into the 256-bit vector.
13143 if (!OpVT.is128BitVector()) {
13144 // Insert into a 128-bit vector.
13145 unsigned SizeFactor = OpVT.getSizeInBits()/128;
13146 MVT VT128 = MVT::getVectorVT(OpVT.getVectorElementType(),
13147 OpVT.getVectorNumElements() / SizeFactor);
13149 Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT128, Op.getOperand(0));
13151 // Insert the 128-bit vector.
13152 return Insert128BitVector(DAG.getUNDEF(OpVT), Op, 0, DAG, dl);
13155 if (OpVT == MVT::v1i64 &&
13156 Op.getOperand(0).getValueType() == MVT::i64)
13157 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i64, Op.getOperand(0));
13159 SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0));
13160 assert(OpVT.is128BitVector() && "Expected an SSE type!");
13161 return DAG.getNode(ISD::BITCAST, dl, OpVT,
13162 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,AnyExt));
13165 // Lower a node with an EXTRACT_SUBVECTOR opcode. This may result in
13166 // a simple subregister reference or explicit instructions to grab
13167 // upper bits of a vector.
13168 static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
13169 SelectionDAG &DAG) {
13171 SDValue In = Op.getOperand(0);
13172 SDValue Idx = Op.getOperand(1);
13173 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13174 MVT ResVT = Op.getSimpleValueType();
13175 MVT InVT = In.getSimpleValueType();
13177 if (Subtarget->hasFp256()) {
13178 if (ResVT.is128BitVector() &&
13179 (InVT.is256BitVector() || InVT.is512BitVector()) &&
13180 isa<ConstantSDNode>(Idx)) {
13181 return Extract128BitVector(In, IdxVal, DAG, dl);
13183 if (ResVT.is256BitVector() && InVT.is512BitVector() &&
13184 isa<ConstantSDNode>(Idx)) {
13185 return Extract256BitVector(In, IdxVal, DAG, dl);
13191 // Lower a node with an INSERT_SUBVECTOR opcode. This may result in a
13192 // simple superregister reference or explicit instructions to insert
13193 // the upper bits of a vector.
13194 static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
13195 SelectionDAG &DAG) {
13196 if (Subtarget->hasFp256()) {
13197 SDLoc dl(Op.getNode());
13198 SDValue Vec = Op.getNode()->getOperand(0);
13199 SDValue SubVec = Op.getNode()->getOperand(1);
13200 SDValue Idx = Op.getNode()->getOperand(2);
13202 if ((Op.getNode()->getSimpleValueType(0).is256BitVector() ||
13203 Op.getNode()->getSimpleValueType(0).is512BitVector()) &&
13204 SubVec.getNode()->getSimpleValueType(0).is128BitVector() &&
13205 isa<ConstantSDNode>(Idx)) {
13206 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13207 return Insert128BitVector(Vec, SubVec, IdxVal, DAG, dl);
13210 if (Op.getNode()->getSimpleValueType(0).is512BitVector() &&
13211 SubVec.getNode()->getSimpleValueType(0).is256BitVector() &&
13212 isa<ConstantSDNode>(Idx)) {
13213 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13214 return Insert256BitVector(Vec, SubVec, IdxVal, DAG, dl);
13220 // ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
13221 // their target counterpart wrapped in the X86ISD::Wrapper node. Suppose N is
13222 // one of the above-mentioned nodes. It has to be wrapped because otherwise
13223 // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
13224 // be used to form an addressing mode. These wrapped nodes will be selected
13227 X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
13228 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
13230 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
13231 // global base reg.
13232 unsigned char OpFlag = 0;
13233 unsigned WrapperKind = X86ISD::Wrapper;
13234 CodeModel::Model M = DAG.getTarget().getCodeModel();
13236 if (Subtarget->isPICStyleRIPRel() &&
13237 (M == CodeModel::Small || M == CodeModel::Kernel))
13238 WrapperKind = X86ISD::WrapperRIP;
13239 else if (Subtarget->isPICStyleGOT())
13240 OpFlag = X86II::MO_GOTOFF;
13241 else if (Subtarget->isPICStyleStubPIC())
13242 OpFlag = X86II::MO_PIC_BASE_OFFSET;
13244 SDValue Result = DAG.getTargetConstantPool(CP->getConstVal(), getPointerTy(),
13245 CP->getAlignment(),
13246 CP->getOffset(), OpFlag);
13248 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
13249 // With PIC, the address is actually $g + Offset.
13251 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
13252 DAG.getNode(X86ISD::GlobalBaseReg,
13253 SDLoc(), getPointerTy()),
13260 SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
13261 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
13263 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
13264 // global base reg.
13265 unsigned char OpFlag = 0;
13266 unsigned WrapperKind = X86ISD::Wrapper;
13267 CodeModel::Model M = DAG.getTarget().getCodeModel();
13269 if (Subtarget->isPICStyleRIPRel() &&
13270 (M == CodeModel::Small || M == CodeModel::Kernel))
13271 WrapperKind = X86ISD::WrapperRIP;
13272 else if (Subtarget->isPICStyleGOT())
13273 OpFlag = X86II::MO_GOTOFF;
13274 else if (Subtarget->isPICStyleStubPIC())
13275 OpFlag = X86II::MO_PIC_BASE_OFFSET;
13277 SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy(),
13280 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
13282 // With PIC, the address is actually $g + Offset.
13284 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
13285 DAG.getNode(X86ISD::GlobalBaseReg,
13286 SDLoc(), getPointerTy()),
13293 X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const {
13294 const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();
13296 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
13297 // global base reg.
13298 unsigned char OpFlag = 0;
13299 unsigned WrapperKind = X86ISD::Wrapper;
13300 CodeModel::Model M = DAG.getTarget().getCodeModel();
13302 if (Subtarget->isPICStyleRIPRel() &&
13303 (M == CodeModel::Small || M == CodeModel::Kernel)) {
13304 if (Subtarget->isTargetDarwin() || Subtarget->isTargetELF())
13305 OpFlag = X86II::MO_GOTPCREL;
13306 WrapperKind = X86ISD::WrapperRIP;
13307 } else if (Subtarget->isPICStyleGOT()) {
13308 OpFlag = X86II::MO_GOT;
13309 } else if (Subtarget->isPICStyleStubPIC()) {
13310 OpFlag = X86II::MO_DARWIN_NONLAZY_PIC_BASE;
13311 } else if (Subtarget->isPICStyleStubNoDynamic()) {
13312 OpFlag = X86II::MO_DARWIN_NONLAZY;
13315 SDValue Result = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlag);
13318 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
13320 // With PIC, the address is actually $g + Offset.
13321 if (DAG.getTarget().getRelocationModel() == Reloc::PIC_ &&
13322 !Subtarget->is64Bit()) {
13323 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
13324 DAG.getNode(X86ISD::GlobalBaseReg,
13325 SDLoc(), getPointerTy()),
13329 // For symbols that require a load from a stub to get the address, emit the
13331 if (isGlobalStubReference(OpFlag))
13332 Result = DAG.getLoad(getPointerTy(), DL, DAG.getEntryNode(), Result,
13333 MachinePointerInfo::getGOT(), false, false, false, 0);
13339 X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
13340 // Create the TargetBlockAddress node.
13341 unsigned char OpFlags =
13342 Subtarget->ClassifyBlockAddressReference();
13343 CodeModel::Model M = DAG.getTarget().getCodeModel();
13344 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
13345 int64_t Offset = cast<BlockAddressSDNode>(Op)->getOffset();
13347 SDValue Result = DAG.getTargetBlockAddress(BA, getPointerTy(), Offset,
13350 if (Subtarget->isPICStyleRIPRel() &&
13351 (M == CodeModel::Small || M == CodeModel::Kernel))
13352 Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result);
13354 Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result);
13356 // With PIC, the address is actually $g + Offset.
13357 if (isGlobalRelativeToPICBase(OpFlags)) {
13358 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(),
13359 DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()),
13367 X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV, SDLoc dl,
13368 int64_t Offset, SelectionDAG &DAG) const {
13369 // Create the TargetGlobalAddress node, folding in the constant
13370 // offset if it is legal.
13371 unsigned char OpFlags =
13372 Subtarget->ClassifyGlobalReference(GV, DAG.getTarget());
13373 CodeModel::Model M = DAG.getTarget().getCodeModel();
13375 if (OpFlags == X86II::MO_NO_FLAG &&
13376 X86::isOffsetSuitableForCodeModel(Offset, M)) {
13377 // A direct static reference to a global.
13378 Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), Offset);
13381 Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 0, OpFlags);
13384 if (Subtarget->isPICStyleRIPRel() &&
13385 (M == CodeModel::Small || M == CodeModel::Kernel))
13386 Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result);
13388 Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result);
13390 // With PIC, the address is actually $g + Offset.
13391 if (isGlobalRelativeToPICBase(OpFlags)) {
13392 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(),
13393 DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()),
13397 // For globals that require a load from a stub to get the address, emit the
13399 if (isGlobalStubReference(OpFlags))
13400 Result = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Result,
13401 MachinePointerInfo::getGOT(), false, false, false, 0);
13403 // If there was a non-zero offset that we didn't fold, create an explicit
13404 // addition for it.
13406 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(), Result,
13407 DAG.getConstant(Offset, getPointerTy()));
13413 X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
13414 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
13415 int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
13416 return LowerGlobalAddress(GV, SDLoc(Op), Offset, DAG);
13420 GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA,
13421 SDValue *InFlag, const EVT PtrVT, unsigned ReturnReg,
13422 unsigned char OperandFlags, bool LocalDynamic = false) {
13423 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
13424 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
13426 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
13427 GA->getValueType(0),
13431 X86ISD::NodeType CallType = LocalDynamic ? X86ISD::TLSBASEADDR
13435 SDValue Ops[] = { Chain, TGA, *InFlag };
13436 Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
13438 SDValue Ops[] = { Chain, TGA };
13439 Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
13442 // TLSADDR will be codegen'ed as a call. Inform MFI that the function has calls.
13443 MFI->setAdjustsStack(true);
13444 MFI->setHasCalls(true);
13446 SDValue Flag = Chain.getValue(1);
13447 return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Flag);
13450 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit
13452 LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
13455 SDLoc dl(GA); // ? function entry point might be better
13456 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
13457 DAG.getNode(X86ISD::GlobalBaseReg,
13458 SDLoc(), PtrVT), InFlag);
13459 InFlag = Chain.getValue(1);
13461 return GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, X86II::MO_TLSGD);
13464 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit
13466 LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG,
13468 return GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT,
13469 X86::RAX, X86II::MO_TLSGD);
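// In essence (simplified, illustrative), the node built here ends up as
//   leaq  x@tlsgd(%rip), %rdi
//   call  __tls_get_addr@PLT
// and the address of x comes back in RAX, which is why X86::RAX is requested
// as the return register above.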
13472 static SDValue LowerToTLSLocalDynamicModel(GlobalAddressSDNode *GA,
13478 // Get the start address of the TLS block for this module.
13479 X86MachineFunctionInfo* MFI = DAG.getMachineFunction()
13480 .getInfo<X86MachineFunctionInfo>();
13481 MFI->incNumLocalDynamicTLSAccesses();
13485 Base = GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT, X86::RAX,
13486 X86II::MO_TLSLD, /*LocalDynamic=*/true);
13489 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
13490 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), InFlag);
13491 InFlag = Chain.getValue(1);
13492 Base = GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX,
13493 X86II::MO_TLSLDM, /*LocalDynamic=*/true);
13496 // Note: the CleanupLocalDynamicTLSPass will remove redundant computations
13500 unsigned char OperandFlags = X86II::MO_DTPOFF;
13501 unsigned WrapperKind = X86ISD::Wrapper;
13502 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
13503 GA->getValueType(0),
13504 GA->getOffset(), OperandFlags);
13505 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
13507 // Add x@dtpoff to the base.
13508 return DAG.getNode(ISD::ADD, dl, PtrVT, Offset, Base);
13511 // Lower ISD::GlobalTLSAddress using the "initial exec" or "local exec" model.
13512 static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
13513 const EVT PtrVT, TLSModel::Model model,
13514 bool is64Bit, bool isPIC) {
13517 // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit).
13518 Value *Ptr = Constant::getNullValue(Type::getInt8PtrTy(*DAG.getContext(),
13519 is64Bit ? 257 : 256));
13521 SDValue ThreadPointer =
13522 DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), DAG.getIntPtrConstant(0),
13523 MachinePointerInfo(Ptr), false, false, false, 0);
13525 unsigned char OperandFlags = 0;
13526 // Most TLS accesses are not RIP relative, even on x86-64. One exception is
13528 unsigned WrapperKind = X86ISD::Wrapper;
13529 if (model == TLSModel::LocalExec) {
13530 OperandFlags = is64Bit ? X86II::MO_TPOFF : X86II::MO_NTPOFF;
13531 } else if (model == TLSModel::InitialExec) {
13533 OperandFlags = X86II::MO_GOTTPOFF;
13534 WrapperKind = X86ISD::WrapperRIP;
13536 OperandFlags = isPIC ? X86II::MO_GOTNTPOFF : X86II::MO_INDNTPOFF;
13539 llvm_unreachable("Unexpected model");
13542 // emit "addl x@ntpoff,%eax" (local exec)
13543 // or "addl x@indntpoff,%eax" (initial exec)
13544 // or "addl x@gotntpoff(%ebx) ,%eax" (initial exec, 32-bit pic)
13546 DAG.getTargetGlobalAddress(GA->getGlobal(), dl, GA->getValueType(0),
13547 GA->getOffset(), OperandFlags);
13548 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
13550 if (model == TLSModel::InitialExec) {
13551 if (isPIC && !is64Bit) {
13552 Offset = DAG.getNode(ISD::ADD, dl, PtrVT,
13553 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT),
13557 Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset,
13558 MachinePointerInfo::getGOT(), false, false, false, 0);
13561 // The address of the thread local variable is the sum of the thread
13562 // pointer and the offset of the variable.
13563 return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
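// Put differently (illustrative): for local exec on x86-64 the address of a
// TLS variable x is simply %fs:0 + x@tpoff, i.e. the thread pointer loaded
// above plus a link-time constant, while initial exec loads the offset from
// the GOT (x@gottpoff) instead of using an immediate.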
13567 X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
13569 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
13570 const GlobalValue *GV = GA->getGlobal();
13572 if (Subtarget->isTargetELF()) {
13573 TLSModel::Model model = DAG.getTarget().getTLSModel(GV);
13576 case TLSModel::GeneralDynamic:
13577 if (Subtarget->is64Bit())
13578 return LowerToTLSGeneralDynamicModel64(GA, DAG, getPointerTy());
13579 return LowerToTLSGeneralDynamicModel32(GA, DAG, getPointerTy());
13580 case TLSModel::LocalDynamic:
13581 return LowerToTLSLocalDynamicModel(GA, DAG, getPointerTy(),
13582 Subtarget->is64Bit());
13583 case TLSModel::InitialExec:
13584 case TLSModel::LocalExec:
13585 return LowerToTLSExecModel(
13586 GA, DAG, getPointerTy(), model, Subtarget->is64Bit(),
13587 DAG.getTarget().getRelocationModel() == Reloc::PIC_);
13589 llvm_unreachable("Unknown TLS model.");
13592 if (Subtarget->isTargetDarwin()) {
13593 // Darwin only has one model of TLS. Lower to that.
13594 unsigned char OpFlag = 0;
13595 unsigned WrapperKind = Subtarget->isPICStyleRIPRel() ?
13596 X86ISD::WrapperRIP : X86ISD::Wrapper;
13598 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
13599 // global base reg.
13600 bool PIC32 = (DAG.getTarget().getRelocationModel() == Reloc::PIC_) &&
13601 !Subtarget->is64Bit();
13603 OpFlag = X86II::MO_TLVP_PIC_BASE;
13605 OpFlag = X86II::MO_TLVP;
13607 SDValue Result = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
13608 GA->getValueType(0),
13609 GA->getOffset(), OpFlag);
13610 SDValue Offset = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
13612 // With PIC32, the address is actually $g + Offset.
13614 Offset = DAG.getNode(ISD::ADD, DL, getPointerTy(),
13615 DAG.getNode(X86ISD::GlobalBaseReg,
13616 SDLoc(), getPointerTy()),
13619 // Lowering the machine isd will make sure everything is in the right
13621 SDValue Chain = DAG.getEntryNode();
13622 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
13623 SDValue Args[] = { Chain, Offset };
13624 Chain = DAG.getNode(X86ISD::TLSCALL, DL, NodeTys, Args);
13626 // TLSCALL will be codegen'ed as a call. Inform MFI that the function has calls.
13627 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
13628 MFI->setAdjustsStack(true);
13630 // And our return value (tls address) is in the standard call return value
13632 unsigned Reg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
13633 return DAG.getCopyFromReg(Chain, DL, Reg, getPointerTy(),
13634 Chain.getValue(1));
13637 if (Subtarget->isTargetKnownWindowsMSVC() ||
13638 Subtarget->isTargetWindowsGNU()) {
13639 // Just use the implicit TLS architecture
13640 // Need to generate something similar to:
13641 // mov rdx, qword [gs:abs 58H]; Load pointer to ThreadLocalStorage
13643 // mov ecx, dword [rel _tls_index]: Load index (from C runtime)
13644 // mov rcx, qword [rdx+rcx*8]
13645 // mov eax, .tls$:tlsvar
13646 // [rax+rcx] contains the address
13647 // Windows 64bit: gs:0x58
13648 // Windows 32bit: fs:__tls_array
13651 SDValue Chain = DAG.getEntryNode();
13653 // Get the Thread Pointer, which is %fs:__tls_array (32-bit) or
13654 // %gs:0x58 (64-bit). On MinGW, __tls_array is not available, so directly
13655 // use its literal value of 0x2C.
13656 Value *Ptr = Constant::getNullValue(Subtarget->is64Bit()
13657 ? Type::getInt8PtrTy(*DAG.getContext(),
13659 : Type::getInt32PtrTy(*DAG.getContext(),
13663 Subtarget->is64Bit()
13664 ? DAG.getIntPtrConstant(0x58)
13665 : (Subtarget->isTargetWindowsGNU()
13666 ? DAG.getIntPtrConstant(0x2C)
13667 : DAG.getExternalSymbol("_tls_array", getPointerTy()));
13669 SDValue ThreadPointer =
13670 DAG.getLoad(getPointerTy(), dl, Chain, TlsArray,
13671 MachinePointerInfo(Ptr), false, false, false, 0);
13673 // Load the _tls_index variable
13674 SDValue IDX = DAG.getExternalSymbol("_tls_index", getPointerTy());
13675 if (Subtarget->is64Bit())
13676 IDX = DAG.getExtLoad(ISD::ZEXTLOAD, dl, getPointerTy(), Chain,
13677 IDX, MachinePointerInfo(), MVT::i32,
13678 false, false, false, 0);
13680 IDX = DAG.getLoad(getPointerTy(), dl, Chain, IDX, MachinePointerInfo(),
13681 false, false, false, 0);
13683 SDValue Scale = DAG.getConstant(Log2_64_Ceil(TD->getPointerSize()),
13685 IDX = DAG.getNode(ISD::SHL, dl, getPointerTy(), IDX, Scale);
13687 SDValue res = DAG.getNode(ISD::ADD, dl, getPointerTy(), ThreadPointer, IDX);
13688 res = DAG.getLoad(getPointerTy(), dl, Chain, res, MachinePointerInfo(),
13689 false, false, false, 0);
13691 // Get the offset of the start of the .tls section
13692 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
13693 GA->getValueType(0),
13694 GA->getOffset(), X86II::MO_SECREL);
13695 SDValue Offset = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), TGA);
13697 // The address of the thread local variable is the sum of the thread
13698 // pointer and the offset of the variable.
13699 return DAG.getNode(ISD::ADD, dl, getPointerTy(), res, Offset);
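// Roughly, the sequence above computes (illustrative C sketch, 64-bit case,
// names made up):
//   char **TlsArray = *(char ***)(TEB + 0x58);   // ThreadLocalStoragePointer, via %gs
//   char  *TlsBlock = TlsArray[_tls_index];      // this module's TLS block
//   char  *Addr     = TlsBlock + x_secrel;       // x@SECREL within that block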
13702 llvm_unreachable("TLS not implemented for this target.");
13705 /// LowerShiftParts - Lower SRA_PARTS and friends, which return two i32 values
13706 /// and take a 2 x i32 value to shift plus a shift amount.
13707 static SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) {
13708 assert(Op.getNumOperands() == 3 && "Not a double-shift!");
13709 MVT VT = Op.getSimpleValueType();
13710 unsigned VTBits = VT.getSizeInBits();
13712 bool isSRA = Op.getOpcode() == ISD::SRA_PARTS;
13713 SDValue ShOpLo = Op.getOperand(0);
13714 SDValue ShOpHi = Op.getOperand(1);
13715 SDValue ShAmt = Op.getOperand(2);
13716 // X86ISD::SHLD and X86ISD::SHRD have defined overflow behavior but the
13717 // generic ISD nodes don't. Insert an AND to be safe; it's optimized away
13719 SDValue SafeShAmt = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
13720 DAG.getConstant(VTBits - 1, MVT::i8));
13721 SDValue Tmp1 = isSRA ? DAG.getNode(ISD::SRA, dl, VT, ShOpHi,
13722 DAG.getConstant(VTBits - 1, MVT::i8))
13723 : DAG.getConstant(0, VT);
13725 SDValue Tmp2, Tmp3;
13726 if (Op.getOpcode() == ISD::SHL_PARTS) {
13727 Tmp2 = DAG.getNode(X86ISD::SHLD, dl, VT, ShOpHi, ShOpLo, ShAmt);
13728 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, SafeShAmt);
13730 Tmp2 = DAG.getNode(X86ISD::SHRD, dl, VT, ShOpLo, ShOpHi, ShAmt);
13731 Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, dl, VT, ShOpHi, SafeShAmt);
13734 // If the shift amount is larger than or equal to the width of a part, we can't
13735 // rely on the results of shld/shrd. Insert a test and select the appropriate
13736 // values for large shift amounts.
13737 SDValue AndNode = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
13738 DAG.getConstant(VTBits, MVT::i8));
13739 SDValue Cond = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
13740 AndNode, DAG.getConstant(0, MVT::i8));
13743 SDValue CC = DAG.getConstant(X86::COND_NE, MVT::i8);
13744 SDValue Ops0[4] = { Tmp2, Tmp3, CC, Cond };
13745 SDValue Ops1[4] = { Tmp3, Tmp1, CC, Cond };
13747 if (Op.getOpcode() == ISD::SHL_PARTS) {
13748 Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0);
13749 Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1);
13751 Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0);
13752 Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1);
13755 SDValue Ops[2] = { Lo, Hi };
13756 return DAG.getMergeValues(Ops, dl);
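// A scalar sketch of what the SHL_PARTS lowering above computes, assuming
// 32-bit parts (illustrative only, not used by this file):
//   unsigned Safe = Amt & 31;                              // the AND inserted above
//   Hi = (Hi << Safe) | (Safe ? Lo >> (32 - Safe) : 0);    // what SHLD produces
//   Lo =  Lo << Safe;
//   if (Amt & 32) { Hi = Lo; Lo = 0; }                     // the CMOVs on (Amt & VTBits)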
13759 SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op,
13760 SelectionDAG &DAG) const {
13761 MVT SrcVT = Op.getOperand(0).getSimpleValueType();
13764 if (SrcVT.isVector()) {
13765 if (SrcVT.getVectorElementType() == MVT::i1) {
13766 MVT IntegerVT = MVT::getVectorVT(MVT::i32, SrcVT.getVectorNumElements());
13767 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(),
13768 DAG.getNode(ISD::SIGN_EXTEND, dl, IntegerVT,
13769 Op.getOperand(0)));
13774 assert(SrcVT <= MVT::i64 && SrcVT >= MVT::i16 &&
13775 "Unknown SINT_TO_FP to lower!");
13777 // These are really Legal; return the operand so the caller accepts it as
13779 if (SrcVT == MVT::i32 && isScalarFPTypeInSSEReg(Op.getValueType()))
13781 if (SrcVT == MVT::i64 && isScalarFPTypeInSSEReg(Op.getValueType()) &&
13782 Subtarget->is64Bit()) {
13786 unsigned Size = SrcVT.getSizeInBits()/8;
13787 MachineFunction &MF = DAG.getMachineFunction();
13788 int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size, false);
13789 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
13790 SDValue Chain = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
13792 MachinePointerInfo::getFixedStack(SSFI),
13794 return BuildFILD(Op, SrcVT, Chain, StackSlot, DAG);
13797 SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain,
13799 SelectionDAG &DAG) const {
13803 bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType());
13805 Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Glue);
13807 Tys = DAG.getVTList(Op.getValueType(), MVT::Other);
13809 unsigned ByteSize = SrcVT.getSizeInBits()/8;
13811 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(StackSlot);
13812 MachineMemOperand *MMO;
13814 int SSFI = FI->getIndex();
13816 DAG.getMachineFunction()
13817 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
13818 MachineMemOperand::MOLoad, ByteSize, ByteSize);
13820 MMO = cast<LoadSDNode>(StackSlot)->getMemOperand();
13821 StackSlot = StackSlot.getOperand(1);
13823 SDValue Ops[] = { Chain, StackSlot, DAG.getValueType(SrcVT) };
13824 SDValue Result = DAG.getMemIntrinsicNode(useSSE ? X86ISD::FILD_FLAG :
13826 Tys, Ops, SrcVT, MMO);
13829 Chain = Result.getValue(1);
13830 SDValue InFlag = Result.getValue(2);
13832 // FIXME: Currently the FST is flagged to the FILD_FLAG. This
13833 // shouldn't be necessary except that RFP cannot be live across
13834 // multiple blocks. When stackifier is fixed, they can be uncoupled.
13835 MachineFunction &MF = DAG.getMachineFunction();
13836 unsigned SSFISize = Op.getValueType().getSizeInBits()/8;
13837 int SSFI = MF.getFrameInfo()->CreateStackObject(SSFISize, SSFISize, false);
13838 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
13839 Tys = DAG.getVTList(MVT::Other);
13841 Chain, Result, StackSlot, DAG.getValueType(Op.getValueType()), InFlag
13843 MachineMemOperand *MMO =
13844 DAG.getMachineFunction()
13845 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
13846 MachineMemOperand::MOStore, SSFISize, SSFISize);
13848 Chain = DAG.getMemIntrinsicNode(X86ISD::FST, DL, Tys,
13849 Ops, Op.getValueType(), MMO);
13850 Result = DAG.getLoad(Op.getValueType(), DL, Chain, StackSlot,
13851 MachinePointerInfo::getFixedStack(SSFI),
13852 false, false, false, 0);
13858 // LowerUINT_TO_FP_i64 - 64-bit unsigned integer to double expansion.
13859 SDValue X86TargetLowering::LowerUINT_TO_FP_i64(SDValue Op,
13860 SelectionDAG &DAG) const {
13861 // This algorithm is not obvious. Here is what we're trying to output:
13864 punpckldq (c0), %xmm0 // c0: (uint4){ 0x43300000U, 0x45300000U, 0U, 0U }
13865 subpd (c1), %xmm0 // c1: (double2){ 0x1.0p52, 0x1.0p52 * 0x1.0p32 }
13867 haddpd %xmm0, %xmm0
13869 pshufd $0x4e, %xmm0, %xmm1
13875 LLVMContext *Context = DAG.getContext();
13877 // Build some magic constants.
13878 static const uint32_t CV0[] = { 0x43300000, 0x45300000, 0, 0 };
13879 Constant *C0 = ConstantDataVector::get(*Context, CV0);
13880 SDValue CPIdx0 = DAG.getConstantPool(C0, getPointerTy(), 16);
13882 SmallVector<Constant*,2> CV1;
13884 ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble,
13885 APInt(64, 0x4330000000000000ULL))));
13887 ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble,
13888 APInt(64, 0x4530000000000000ULL))));
13889 Constant *C1 = ConstantVector::get(CV1);
13890 SDValue CPIdx1 = DAG.getConstantPool(C1, getPointerTy(), 16);
13892 // Load the 64-bit value into an XMM register.
13893 SDValue XR1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
13895 SDValue CLod0 = DAG.getLoad(MVT::v4i32, dl, DAG.getEntryNode(), CPIdx0,
13896 MachinePointerInfo::getConstantPool(),
13897 false, false, false, 16);
13898 SDValue Unpck1 = getUnpackl(DAG, dl, MVT::v4i32,
13899 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, XR1),
13902 SDValue CLod1 = DAG.getLoad(MVT::v2f64, dl, CLod0.getValue(1), CPIdx1,
13903 MachinePointerInfo::getConstantPool(),
13904 false, false, false, 16);
13905 SDValue XR2F = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Unpck1);
13906 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, XR2F, CLod1);
13909 if (Subtarget->hasSSE3()) {
13910 // FIXME: The 'haddpd' instruction may be slower than 'movhlps + addsd'.
13911 Result = DAG.getNode(X86ISD::FHADD, dl, MVT::v2f64, Sub, Sub);
13913 SDValue S2F = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Sub);
13914 SDValue Shuffle = getTargetShuffleNode(X86ISD::PSHUFD, dl, MVT::v4i32,
13916 Result = DAG.getNode(ISD::FADD, dl, MVT::v2f64,
13917 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Shuffle),
13921 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Result,
13922 DAG.getIntPtrConstant(0));
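// Why the magic constants work (illustrative): after the punpckldq the two
// doubles have the bit patterns 0x43300000'lo32 and 0x45300000'hi32, whose
// exact values are 2^52 + lo and 2^84 + hi * 2^32. Subtracting {2^52, 2^84}
// leaves {lo, hi * 2^32} exactly, and the final horizontal add yields
// lo + hi * 2^32 with a single rounding.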
13925 // LowerUINT_TO_FP_i32 - 32-bit unsigned integer to float expansion.
13926 SDValue X86TargetLowering::LowerUINT_TO_FP_i32(SDValue Op,
13927 SelectionDAG &DAG) const {
13929 // FP constant to bias correct the final result.
13930 SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL),
13933 // Load the 32-bit value into an XMM register.
13934 SDValue Load = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,
13937 // Zero out the upper parts of the register.
13938 Load = getShuffleVectorZeroOrUndef(Load, 0, true, Subtarget, DAG);
13940 Load = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
13941 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Load),
13942 DAG.getIntPtrConstant(0));
13944 // Or the load with the bias.
13945 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64,
13946 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
13947 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
13948 MVT::v2f64, Load)),
13949 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
13950 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
13951 MVT::v2f64, Bias)));
13952 Or = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
13953 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Or),
13954 DAG.getIntPtrConstant(0));
13956 // Subtract the bias.
13957 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Or, Bias);
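// Worked example (illustrative): ORing the zero-extended 32-bit value x into
// the low half of the bias pattern 0x4330000000000000 gives a double whose
// value is exactly 2^52 + x, so the subtraction above recovers x exactly; only
// the optional FP_ROUND below can round.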
13959 // Handle final rounding.
13960 EVT DestVT = Op.getValueType();
13962 if (DestVT.bitsLT(MVT::f64))
13963 return DAG.getNode(ISD::FP_ROUND, dl, DestVT, Sub,
13964 DAG.getIntPtrConstant(0));
13965 if (DestVT.bitsGT(MVT::f64))
13966 return DAG.getNode(ISD::FP_EXTEND, dl, DestVT, Sub);
13968 // Otherwise the destination is already f64 and no final rounding is needed.
13972 static SDValue lowerUINT_TO_FP_vXi32(SDValue Op, SelectionDAG &DAG,
13973 const X86Subtarget &Subtarget) {
13974 // The algorithm is the following:
13975 // #ifdef __SSE4_1__
13976 // uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
13977 // uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
13978 // (uint4) 0x53000000, 0xaa);
13980 // uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
13981 // uint4 hi = (v >> 16) | (uint4) 0x53000000;
13983 // float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
13984 // return (float4) lo + fhi;
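// Why this works (illustrative): "lo" carries the bit pattern
// 0x4b000000 | (v & 0xffff), i.e. the exact float 2^23 + (v & 0xffff), and
// "hi" is 2^39 + (v >> 16) * 2^16. Subtracting (0x1.0p39f + 0x1.0p23f) from hi
// and adding lo cancels both magic exponents, leaving v with one final rounding.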
13987 SDValue V = Op->getOperand(0);
13988 EVT VecIntVT = V.getValueType();
13989 bool Is128 = VecIntVT == MVT::v4i32;
13990 EVT VecFloatVT = Is128 ? MVT::v4f32 : MVT::v8f32;
13991 // If we convert to something other than the supported type, e.g., to v4f64,
13993 if (VecFloatVT != Op->getValueType(0))
13996 unsigned NumElts = VecIntVT.getVectorNumElements();
13997 assert((VecIntVT == MVT::v4i32 || VecIntVT == MVT::v8i32) &&
13998 "Unsupported custom type");
13999 assert(NumElts <= 8 && "The size of the constant array must be fixed");
14001 // In the #ifdef/#else code, we have in common:
14002 // - The vector of constants:
14008 // Create the splat vector for 0x4b000000.
14009 SDValue CstLow = DAG.getConstant(0x4b000000, MVT::i32);
14010 SDValue CstLowArray[] = {CstLow, CstLow, CstLow, CstLow,
14011 CstLow, CstLow, CstLow, CstLow};
14012 SDValue VecCstLow = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
14013 makeArrayRef(&CstLowArray[0], NumElts));
14014 // Create the splat vector for 0x53000000.
14015 SDValue CstHigh = DAG.getConstant(0x53000000, MVT::i32);
14016 SDValue CstHighArray[] = {CstHigh, CstHigh, CstHigh, CstHigh,
14017 CstHigh, CstHigh, CstHigh, CstHigh};
14018 SDValue VecCstHigh = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
14019 makeArrayRef(&CstHighArray[0], NumElts));
14021 // Create the right shift.
14022 SDValue CstShift = DAG.getConstant(16, MVT::i32);
14023 SDValue CstShiftArray[] = {CstShift, CstShift, CstShift, CstShift,
14024 CstShift, CstShift, CstShift, CstShift};
14025 SDValue VecCstShift = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
14026 makeArrayRef(&CstShiftArray[0], NumElts));
14027 SDValue HighShift = DAG.getNode(ISD::SRL, DL, VecIntVT, V, VecCstShift);
14029 SDValue Low, High;
14030 if (Subtarget.hasSSE41()) {
14031 EVT VecI16VT = Is128 ? MVT::v8i16 : MVT::v16i16;
14032 // uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
14033 SDValue VecCstLowBitcast =
14034 DAG.getNode(ISD::BITCAST, DL, VecI16VT, VecCstLow);
14035 SDValue VecBitcast = DAG.getNode(ISD::BITCAST, DL, VecI16VT, V);
14036 // Low will be bitcasted right away, so do not bother bitcasting back to its
14037 // original type.
14038 Low = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecBitcast,
14039 VecCstLowBitcast, DAG.getConstant(0xaa, MVT::i32));
14040 // uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
14041 // (uint4) 0x53000000, 0xaa);
14042 SDValue VecCstHighBitcast =
14043 DAG.getNode(ISD::BITCAST, DL, VecI16VT, VecCstHigh);
14044 SDValue VecShiftBitcast =
14045 DAG.getNode(ISD::BITCAST, DL, VecI16VT, HighShift);
14046 // High will be bitcasted right away, so do not bother bitcasting back to
14047 // its original type.
14048 High = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecShiftBitcast,
14049 VecCstHighBitcast, DAG.getConstant(0xaa, MVT::i32));
14050 } else {
14051 SDValue CstMask = DAG.getConstant(0xffff, MVT::i32);
14052 SDValue VecCstMask = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT, CstMask,
14053 CstMask, CstMask, CstMask);
14054 // uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
14055 SDValue LowAnd = DAG.getNode(ISD::AND, DL, VecIntVT, V, VecCstMask);
14056 Low = DAG.getNode(ISD::OR, DL, VecIntVT, LowAnd, VecCstLow);
14058 // uint4 hi = (v >> 16) | (uint4) 0x53000000;
14059 High = DAG.getNode(ISD::OR, DL, VecIntVT, HighShift, VecCstHigh);
14060 }
14062 // Create the vector constant for -(0x1.0p39f + 0x1.0p23f).
14063 SDValue CstFAdd = DAG.getConstantFP(
14064 APFloat(APFloat::IEEEsingle, APInt(32, 0xD3000080)), MVT::f32);
14065 SDValue CstFAddArray[] = {CstFAdd, CstFAdd, CstFAdd, CstFAdd,
14066 CstFAdd, CstFAdd, CstFAdd, CstFAdd};
14067 SDValue VecCstFAdd = DAG.getNode(ISD::BUILD_VECTOR, DL, VecFloatVT,
14068 makeArrayRef(&CstFAddArray[0], NumElts));
14070 // float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
14071 SDValue HighBitcast = DAG.getNode(ISD::BITCAST, DL, VecFloatVT, High);
14072 SDValue FHigh =
14073 DAG.getNode(ISD::FADD, DL, VecFloatVT, HighBitcast, VecCstFAdd);
14074 // return (float4) lo + fhi;
14075 SDValue LowBitcast = DAG.getNode(ISD::BITCAST, DL, VecFloatVT, Low);
14076 return DAG.getNode(ISD::FADD, DL, VecFloatVT, LowBitcast, FHigh);
14079 SDValue X86TargetLowering::lowerUINT_TO_FP_vec(SDValue Op,
14080 SelectionDAG &DAG) const {
14081 SDValue N0 = Op.getOperand(0);
14082 MVT SVT = N0.getSimpleValueType();
14085 switch (SVT.SimpleTy) {
14086 default:
14087 llvm_unreachable("Custom UINT_TO_FP is not supported!");
14092 MVT NVT = MVT::getVectorVT(MVT::i32, SVT.getVectorNumElements());
14093 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(),
14094 DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, N0));
14096 case MVT::v4i32:
14097 case MVT::v8i32:
14098 return lowerUINT_TO_FP_vXi32(Op, DAG, *Subtarget);
14100 llvm_unreachable(nullptr);
14103 SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
14104 SelectionDAG &DAG) const {
14105 SDValue N0 = Op.getOperand(0);
14108 if (Op.getValueType().isVector())
14109 return lowerUINT_TO_FP_vec(Op, DAG);
14111 // Since UINT_TO_FP is legal (it's marked custom), dag combiner won't
14112 // optimize it to a SINT_TO_FP when the sign bit is known zero. Perform
14113 // the optimization here.
14114 if (DAG.SignBitIsZero(N0))
14115 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(), N0);
14117 MVT SrcVT = N0.getSimpleValueType();
14118 MVT DstVT = Op.getSimpleValueType();
14119 if (SrcVT == MVT::i64 && DstVT == MVT::f64 && X86ScalarSSEf64)
14120 return LowerUINT_TO_FP_i64(Op, DAG);
14121 if (SrcVT == MVT::i32 && X86ScalarSSEf64)
14122 return LowerUINT_TO_FP_i32(Op, DAG);
14123 if (Subtarget->is64Bit() && SrcVT == MVT::i64 && DstVT == MVT::f32)
14126 // Make a 64-bit buffer, and use it to build an FILD.
14127 SDValue StackSlot = DAG.CreateStackTemporary(MVT::i64);
14128 if (SrcVT == MVT::i32) {
14129 SDValue WordOff = DAG.getConstant(4, getPointerTy());
14130 SDValue OffsetSlot = DAG.getNode(ISD::ADD, dl,
14131 getPointerTy(), StackSlot, WordOff);
14132 SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
14133 StackSlot, MachinePointerInfo(),
14135 SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, MVT::i32),
14136 OffsetSlot, MachinePointerInfo(),
14138 SDValue Fild = BuildFILD(Op, MVT::i64, Store2, StackSlot, DAG);
14142 assert(SrcVT == MVT::i64 && "Unexpected type in UINT_TO_FP");
14143 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
14144 StackSlot, MachinePointerInfo(),
14146 // For i64 source, we need to add the appropriate power of 2 if the input
14147 // was negative. This is the same as the optimization in
14148 // DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP, and for it to be safe here,
14149 // we must be careful to do the computation in x87 extended precision, not
14150 // in SSE. (The generic code can't know it's OK to do this, or how to.)
14151 int SSFI = cast<FrameIndexSDNode>(StackSlot)->getIndex();
14152 MachineMemOperand *MMO =
14153 DAG.getMachineFunction()
14154 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14155 MachineMemOperand::MOLoad, 8, 8);
14157 SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
14158 SDValue Ops[] = { Store, StackSlot, DAG.getValueType(MVT::i64) };
14159 SDValue Fild = DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, Ops,
14162 APInt FF(32, 0x5F800000ULL);
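// 0x5F800000 is the IEEE-754 single-precision encoding of 2^64. FILD treats
// the stored i64 as signed, so when the sign bit of the input is set the
// result is off by exactly 2^64; the selected fudge factor (2^64 or 0.0) is
// added back below in x87 extended precision to correct it.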
14164 // Check whether the sign bit is set.
14165 SDValue SignSet = DAG.getSetCC(dl,
14166 getSetCCResultType(*DAG.getContext(), MVT::i64),
14167 Op.getOperand(0), DAG.getConstant(0, MVT::i64),
14170 // Build a 64 bit pair (0, FF) in the constant pool, with FF in the lo bits.
14171 SDValue FudgePtr = DAG.getConstantPool(
14172 ConstantInt::get(*DAG.getContext(), FF.zext(64)),
14175 // Get a pointer to FF if the sign bit was set, or to 0 otherwise.
14176 SDValue Zero = DAG.getIntPtrConstant(0);
14177 SDValue Four = DAG.getIntPtrConstant(4);
14178 SDValue Offset = DAG.getNode(ISD::SELECT, dl, Zero.getValueType(), SignSet,
14180 FudgePtr = DAG.getNode(ISD::ADD, dl, getPointerTy(), FudgePtr, Offset);
14182 // Load the value out, extending it from f32 to f80.
14183 // FIXME: Avoid the extend by constructing the right constant pool?
14184 SDValue Fudge = DAG.getExtLoad(ISD::EXTLOAD, dl, MVT::f80, DAG.getEntryNode(),
14185 FudgePtr, MachinePointerInfo::getConstantPool(),
14186 MVT::f32, false, false, false, 4);
14187 // Extend everything to 80 bits to force it to be done on x87.
14188 SDValue Add = DAG.getNode(ISD::FADD, dl, MVT::f80, Fild, Fudge);
14189 return DAG.getNode(ISD::FP_ROUND, dl, DstVT, Add, DAG.getIntPtrConstant(0));
14192 std::pair<SDValue,SDValue>
14193 X86TargetLowering::FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
14194 bool IsSigned, bool IsReplace) const {
14197 EVT DstTy = Op.getValueType();
14199 if (!IsSigned && !isIntegerTypeFTOL(DstTy)) {
14200 assert(DstTy == MVT::i32 && "Unexpected FP_TO_UINT");
14201 DstTy = MVT::i64;
14202 }
14204 assert(DstTy.getSimpleVT() <= MVT::i64 &&
14205 DstTy.getSimpleVT() >= MVT::i16 &&
14206 "Unknown FP_TO_INT to lower!");
14208 // These are really Legal.
14209 if (DstTy == MVT::i32 &&
14210 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
14211 return std::make_pair(SDValue(), SDValue());
14212 if (Subtarget->is64Bit() &&
14213 DstTy == MVT::i64 &&
14214 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
14215 return std::make_pair(SDValue(), SDValue());
14217 // We lower FP->int64 either into FISTP64 followed by a load from a temporary
14218 // stack slot, or into the FTOL runtime function.
14219 MachineFunction &MF = DAG.getMachineFunction();
14220 unsigned MemSize = DstTy.getSizeInBits()/8;
14221 int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false);
14222 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
14224 unsigned Opc;
14225 if (!IsSigned && isIntegerTypeFTOL(DstTy))
14226 Opc = X86ISD::WIN_FTOL;
14227 else
14228 switch (DstTy.getSimpleVT().SimpleTy) {
14229 default: llvm_unreachable("Invalid FP_TO_SINT to lower!");
14230 case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break;
14231 case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break;
14232 case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break;
14235 SDValue Chain = DAG.getEntryNode();
14236 SDValue Value = Op.getOperand(0);
14237 EVT TheVT = Op.getOperand(0).getValueType();
14238 // FIXME This causes a redundant load/store if the SSE-class value is already
14239 // in memory, such as if it is on the callstack.
14240 if (isScalarFPTypeInSSEReg(TheVT)) {
14241 assert(DstTy == MVT::i64 && "Invalid FP_TO_SINT to lower!");
14242 Chain = DAG.getStore(Chain, DL, Value, StackSlot,
14243 MachinePointerInfo::getFixedStack(SSFI),
14245 SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other);
14246 SDValue Ops[] = {
14247 Chain, StackSlot, DAG.getValueType(TheVT)
14248 };
14250 MachineMemOperand *MMO =
14251 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14252 MachineMemOperand::MOLoad, MemSize, MemSize);
14253 Value = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, DstTy, MMO);
14254 Chain = Value.getValue(1);
14255 SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false);
14256 StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
14259 MachineMemOperand *MMO =
14260 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14261 MachineMemOperand::MOStore, MemSize, MemSize);
14263 if (Opc != X86ISD::WIN_FTOL) {
14264 // Build the FP_TO_INT*_IN_MEM
14265 SDValue Ops[] = { Chain, Value, StackSlot };
14266 SDValue FIST = DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::Other),
14267 Ops, DstTy, MMO);
14268 return std::make_pair(FIST, StackSlot);
14269 } else {
14270 SDValue ftol = DAG.getNode(X86ISD::WIN_FTOL, DL,
14271 DAG.getVTList(MVT::Other, MVT::Glue),
14272 Chain, Value);
14273 SDValue eax = DAG.getCopyFromReg(ftol, DL, X86::EAX,
14274 MVT::i32, ftol.getValue(1));
14275 SDValue edx = DAG.getCopyFromReg(eax.getValue(1), DL, X86::EDX,
14276 MVT::i32, eax.getValue(2));
14277 SDValue Ops[] = { eax, edx };
14278 SDValue pair = IsReplace
14279 ? DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops)
14280 : DAG.getMergeValues(Ops, DL);
14281 return std::make_pair(pair, SDValue());
14285 static SDValue LowerAVXExtend(SDValue Op, SelectionDAG &DAG,
14286 const X86Subtarget *Subtarget) {
14287 MVT VT = Op->getSimpleValueType(0);
14288 SDValue In = Op->getOperand(0);
14289 MVT InVT = In.getSimpleValueType();
14292 // Optimize vectors in AVX mode:
14295 // Use vpunpcklwd for 4 lower elements v8i16 -> v4i32.
14296 // Use vpunpckhwd for 4 upper elements v8i16 -> v4i32.
14297 // Concat upper and lower parts.
14300 // Use vpunpckldq for 4 lower elements v4i32 -> v2i64.
14301 // Use vpunpckhdq for 4 upper elements v4i32 -> v2i64.
14302 // Concat upper and lower parts.
14305 if (((VT != MVT::v16i16) || (InVT != MVT::v16i8)) &&
14306 ((VT != MVT::v8i32) || (InVT != MVT::v8i16)) &&
14307 ((VT != MVT::v4i64) || (InVT != MVT::v4i32)))
14310 if (Subtarget->hasInt256())
14311 return DAG.getNode(X86ISD::VZEXT, dl, VT, In);
14313 SDValue ZeroVec = getZeroVector(InVT, Subtarget, DAG, dl);
14314 SDValue Undef = DAG.getUNDEF(InVT);
14315 bool NeedZero = Op.getOpcode() == ISD::ZERO_EXTEND;
14316 SDValue OpLo = getUnpackl(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);
14317 SDValue OpHi = getUnpackh(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);
14319 MVT HVT = MVT::getVectorVT(VT.getVectorElementType(),
14320 VT.getVectorNumElements()/2);
14322 OpLo = DAG.getNode(ISD::BITCAST, dl, HVT, OpLo);
14323 OpHi = DAG.getNode(ISD::BITCAST, dl, HVT, OpHi);
14325 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
14328 static SDValue LowerZERO_EXTEND_AVX512(SDValue Op,
14329 SelectionDAG &DAG) {
14330 MVT VT = Op->getSimpleValueType(0);
14331 SDValue In = Op->getOperand(0);
14332 MVT InVT = In.getSimpleValueType();
14334 unsigned int NumElts = VT.getVectorNumElements();
14335 if (NumElts != 8 && NumElts != 16)
14336 return SDValue();
14338 if (VT.is512BitVector() && InVT.getVectorElementType() != MVT::i1)
14339 return DAG.getNode(X86ISD::VZEXT, DL, VT, In);
14341 EVT ExtVT = (NumElts == 8)? MVT::v8i64 : MVT::v16i32;
14342 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
14343 // Now we have only mask extension
14344 assert(InVT.getVectorElementType() == MVT::i1);
14345 SDValue Cst = DAG.getTargetConstant(1, ExtVT.getScalarType());
14346 const Constant *C = (dyn_cast<ConstantSDNode>(Cst))->getConstantIntValue();
14347 SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy());
14348 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
14349 SDValue Ld = DAG.getLoad(Cst.getValueType(), DL, DAG.getEntryNode(), CP,
14350 MachinePointerInfo::getConstantPool(),
14351 false, false, false, Alignment);
14353 SDValue Brcst = DAG.getNode(X86ISD::VBROADCASTM, DL, ExtVT, In, Ld);
14354 if (VT.is512BitVector())
14355 return Brcst;
14356 return DAG.getNode(X86ISD::VTRUNC, DL, VT, Brcst);
14359 static SDValue LowerANY_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
14360 SelectionDAG &DAG) {
14361 if (Subtarget->hasFp256()) {
14362 SDValue Res = LowerAVXExtend(Op, DAG, Subtarget);
14370 static SDValue LowerZERO_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
14371 SelectionDAG &DAG) {
14373 MVT VT = Op.getSimpleValueType();
14374 SDValue In = Op.getOperand(0);
14375 MVT SVT = In.getSimpleValueType();
14377 if (VT.is512BitVector() || SVT.getVectorElementType() == MVT::i1)
14378 return LowerZERO_EXTEND_AVX512(Op, DAG);
14380 if (Subtarget->hasFp256()) {
14381 SDValue Res = LowerAVXExtend(Op, DAG, Subtarget);
14386 assert(!VT.is256BitVector() || !SVT.is128BitVector() ||
14387 VT.getVectorNumElements() != SVT.getVectorNumElements());
14391 SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
14393 MVT VT = Op.getSimpleValueType();
14394 SDValue In = Op.getOperand(0);
14395 MVT InVT = In.getSimpleValueType();
14397 if (VT == MVT::i1) {
14398 assert((InVT.isInteger() && (InVT.getSizeInBits() <= 64)) &&
14399 "Invalid scalar TRUNCATE operation");
14400 if (InVT.getSizeInBits() >= 32)
14402 In = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, In);
14403 return DAG.getNode(ISD::TRUNCATE, DL, VT, In);
14405 assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
14406 "Invalid TRUNCATE operation");
14408 if (InVT.is512BitVector() || VT.getVectorElementType() == MVT::i1) {
14409 if (VT.getVectorElementType().getSizeInBits() >=8)
14410 return DAG.getNode(X86ISD::VTRUNC, DL, VT, In);
14412 assert(VT.getVectorElementType() == MVT::i1 && "Unexpected vector type");
14413 unsigned NumElts = InVT.getVectorNumElements();
14414 assert ((NumElts == 8 || NumElts == 16) && "Unexpected vector type");
14415 if (InVT.getSizeInBits() < 512) {
14416 MVT ExtVT = (NumElts == 16)? MVT::v16i32 : MVT::v8i64;
14417 In = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, In);
14418 InVT = ExtVT;
14419 }
14421 SDValue Cst = DAG.getTargetConstant(1, InVT.getVectorElementType());
14422 const Constant *C = (dyn_cast<ConstantSDNode>(Cst))->getConstantIntValue();
14423 SDValue CP = DAG.getConstantPool(C, getPointerTy());
14424 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
14425 SDValue Ld = DAG.getLoad(Cst.getValueType(), DL, DAG.getEntryNode(), CP,
14426 MachinePointerInfo::getConstantPool(),
14427 false, false, false, Alignment);
14428 SDValue OneV = DAG.getNode(X86ISD::VBROADCAST, DL, InVT, Ld);
14429 SDValue And = DAG.getNode(ISD::AND, DL, InVT, OneV, In);
14430 return DAG.getNode(X86ISD::TESTM, DL, VT, And, And);
14433 if ((VT == MVT::v4i32) && (InVT == MVT::v4i64)) {
14434 // On AVX2, v4i64 -> v4i32 becomes VPERMD.
14435 if (Subtarget->hasInt256()) {
14436 static const int ShufMask[] = {0, 2, 4, 6, -1, -1, -1, -1};
14437 In = DAG.getNode(ISD::BITCAST, DL, MVT::v8i32, In);
14438 In = DAG.getVectorShuffle(MVT::v8i32, DL, In, DAG.getUNDEF(MVT::v8i32),
14439 ShufMask);
14440 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, In,
14441 DAG.getIntPtrConstant(0));
14444 SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
14445 DAG.getIntPtrConstant(0));
14446 SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
14447 DAG.getIntPtrConstant(2));
14448 OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpLo);
14449 OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpHi);
14450 static const int ShufMask[] = {0, 2, 4, 6};
14451 return DAG.getVectorShuffle(VT, DL, OpLo, OpHi, ShufMask);
14454 if ((VT == MVT::v8i16) && (InVT == MVT::v8i32)) {
14455 // On AVX2, v8i32 -> v8i16 becomes PSHUFB.
14456 if (Subtarget->hasInt256()) {
14457 In = DAG.getNode(ISD::BITCAST, DL, MVT::v32i8, In);
14459 SmallVector<SDValue,32> pshufbMask;
14460 for (unsigned i = 0; i < 2; ++i) {
14461 pshufbMask.push_back(DAG.getConstant(0x0, MVT::i8));
14462 pshufbMask.push_back(DAG.getConstant(0x1, MVT::i8));
14463 pshufbMask.push_back(DAG.getConstant(0x4, MVT::i8));
14464 pshufbMask.push_back(DAG.getConstant(0x5, MVT::i8));
14465 pshufbMask.push_back(DAG.getConstant(0x8, MVT::i8));
14466 pshufbMask.push_back(DAG.getConstant(0x9, MVT::i8));
14467 pshufbMask.push_back(DAG.getConstant(0xc, MVT::i8));
14468 pshufbMask.push_back(DAG.getConstant(0xd, MVT::i8));
14469 for (unsigned j = 0; j < 8; ++j)
14470 pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8));
14472 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, pshufbMask);
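// In a PSHUFB mask, a byte with the high bit set (0x80) zeroes the
// corresponding output byte. Within each 128-bit lane the mask above keeps
// bytes {0,1,4,5,8,9,12,13}, i.e. the low 16 bits of each i32, and zeroes the
// upper half of the lane, which the following shuffle then compacts away.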
14473 In = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v32i8, In, BV);
14474 In = DAG.getNode(ISD::BITCAST, DL, MVT::v4i64, In);
14476 static const int ShufMask[] = {0, 2, -1, -1};
14477 In = DAG.getVectorShuffle(MVT::v4i64, DL, In, DAG.getUNDEF(MVT::v4i64),
14478 ShufMask);
14479 In = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
14480 DAG.getIntPtrConstant(0));
14481 return DAG.getNode(ISD::BITCAST, DL, VT, In);
14484 SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
14485 DAG.getIntPtrConstant(0));
14487 SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
14488 DAG.getIntPtrConstant(4));
14490 OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, OpLo);
14491 OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, OpHi);
14493 // The PSHUFB mask:
14494 static const int ShufMask1[] = {0, 1, 4, 5, 8, 9, 12, 13,
14495 -1, -1, -1, -1, -1, -1, -1, -1};
14497 SDValue Undef = DAG.getUNDEF(MVT::v16i8);
14498 OpLo = DAG.getVectorShuffle(MVT::v16i8, DL, OpLo, Undef, ShufMask1);
14499 OpHi = DAG.getVectorShuffle(MVT::v16i8, DL, OpHi, Undef, ShufMask1);
14501 OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpLo);
14502 OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpHi);
14504 // The MOVLHPS Mask:
14505 static const int ShufMask2[] = {0, 1, 4, 5};
14506 SDValue res = DAG.getVectorShuffle(MVT::v4i32, DL, OpLo, OpHi, ShufMask2);
14507 return DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, res);
14510 // Handle truncation of V256 to V128 using shuffles.
14511 if (!VT.is128BitVector() || !InVT.is256BitVector())
14514 assert(Subtarget->hasFp256() && "256-bit vector without AVX!");
14516 unsigned NumElems = VT.getVectorNumElements();
14517 MVT NVT = MVT::getVectorVT(VT.getVectorElementType(), NumElems * 2);
14519 SmallVector<int, 16> MaskVec(NumElems * 2, -1);
14520 // Prepare truncation shuffle mask
14521 for (unsigned i = 0; i != NumElems; ++i)
14522 MaskVec[i] = i * 2;
14523 SDValue V = DAG.getVectorShuffle(NVT, DL,
14524 DAG.getNode(ISD::BITCAST, DL, NVT, In),
14525 DAG.getUNDEF(NVT), &MaskVec[0]);
14526 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V,
14527 DAG.getIntPtrConstant(0));
14530 SDValue X86TargetLowering::LowerFP_TO_SINT(SDValue Op,
14531 SelectionDAG &DAG) const {
14532 assert(!Op.getSimpleValueType().isVector());
14534 std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG,
14535 /*IsSigned=*/ true, /*IsReplace=*/ false);
14536 SDValue FIST = Vals.first, StackSlot = Vals.second;
14537 // If FP_TO_INTHelper failed, the node is actually supposed to be Legal.
14538 if (!FIST.getNode()) return Op;
14540 if (StackSlot.getNode())
14541 // Load the result.
14542 return DAG.getLoad(Op.getValueType(), SDLoc(Op),
14543 FIST, StackSlot, MachinePointerInfo(),
14544 false, false, false, 0);
14546 // The node is the result.
14547 return FIST;
14550 SDValue X86TargetLowering::LowerFP_TO_UINT(SDValue Op,
14551 SelectionDAG &DAG) const {
14552 std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG,
14553 /*IsSigned=*/ false, /*IsReplace=*/ false);
14554 SDValue FIST = Vals.first, StackSlot = Vals.second;
14555 assert(FIST.getNode() && "Unexpected failure");
14557 if (StackSlot.getNode())
14558 // Load the result.
14559 return DAG.getLoad(Op.getValueType(), SDLoc(Op),
14560 FIST, StackSlot, MachinePointerInfo(),
14561 false, false, false, 0);
14563 // The node is the result.
14564 return FIST;
14567 static SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) {
14569 MVT VT = Op.getSimpleValueType();
14570 SDValue In = Op.getOperand(0);
14571 MVT SVT = In.getSimpleValueType();
14573 assert(SVT == MVT::v2f32 && "Only customize MVT::v2f32 type legalization!");
14575 return DAG.getNode(X86ISD::VFPEXT, DL, VT,
14576 DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4f32,
14577 In, DAG.getUNDEF(SVT)));
14580 /// The only differences between FABS and FNEG are the mask and the logic op.
14581 /// FNEG also has a folding opportunity for FNEG(FABS(x)).
14582 static SDValue LowerFABSorFNEG(SDValue Op, SelectionDAG &DAG) {
14583 assert((Op.getOpcode() == ISD::FABS || Op.getOpcode() == ISD::FNEG) &&
14584 "Wrong opcode for lowering FABS or FNEG.");
14586 bool IsFABS = (Op.getOpcode() == ISD::FABS);
14588 // If this is a FABS and it has an FNEG user, bail out to fold the combination
14589 // into an FNABS. We'll lower the FABS after that if it is still in use.
14590 if (IsFABS)
14591 for (SDNode *User : Op->uses())
14592 if (User->getOpcode() == ISD::FNEG)
14593 return Op;
14595 SDValue Op0 = Op.getOperand(0);
14596 bool IsFNABS = !IsFABS && (Op0.getOpcode() == ISD::FABS);
14599 MVT VT = Op.getSimpleValueType();
14600 // Assume scalar op for initialization; update for vector if needed.
14601 // Note that there are no scalar bitwise logical SSE/AVX instructions, so we
14602 // generate a 16-byte vector constant and logic op even for the scalar case.
14603 // Using a 16-byte mask allows folding the load of the mask with
14604 // the logic op, so it can save (~4 bytes) on code size.
14605 MVT EltVT = VT;
14606 unsigned NumElts = VT == MVT::f64 ? 2 : 4;
14607 // FIXME: Use function attribute "OptimizeForSize" and/or CodeGenOpt::Level to
14608 // decide if we should generate a 16-byte constant mask when we only need 4 or
14609 // 8 bytes for the scalar case.
14610 if (VT.isVector()) {
14611 EltVT = VT.getVectorElementType();
14612 NumElts = VT.getVectorNumElements();
14615 unsigned EltBits = EltVT.getSizeInBits();
14616 LLVMContext *Context = DAG.getContext();
14617 // For FABS, mask is 0x7f...; for FNEG, mask is 0x80...
14618 APInt MaskElt =
14619 IsFABS ? APInt::getSignedMaxValue(EltBits) : APInt::getSignBit(EltBits);
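// For example, with f32 elements FABS uses a splat of 0x7FFFFFFF (AND clears
// the sign bit), FNEG uses 0x80000000 (XOR flips it), and the FNABS case
// below reuses the sign mask with OR to force the sign bit on.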
14620 Constant *C = ConstantInt::get(*Context, MaskElt);
14621 C = ConstantVector::getSplat(NumElts, C);
14622 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
14623 SDValue CPIdx = DAG.getConstantPool(C, TLI.getPointerTy());
14624 unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
14625 SDValue Mask = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
14626 MachinePointerInfo::getConstantPool(),
14627 false, false, false, Alignment);
14629 if (VT.isVector()) {
14630 // For a vector, cast operands to a vector type, perform the logic op,
14631 // and cast the result back to the original value type.
14632 MVT VecVT = MVT::getVectorVT(MVT::i64, VT.getSizeInBits() / 64);
14633 SDValue MaskCasted = DAG.getNode(ISD::BITCAST, dl, VecVT, Mask);
14634 SDValue Operand = IsFNABS ?
14635 DAG.getNode(ISD::BITCAST, dl, VecVT, Op0.getOperand(0)) :
14636 DAG.getNode(ISD::BITCAST, dl, VecVT, Op0);
14637 unsigned BitOp = IsFABS ? ISD::AND : IsFNABS ? ISD::OR : ISD::XOR;
14638 return DAG.getNode(ISD::BITCAST, dl, VT,
14639 DAG.getNode(BitOp, dl, VecVT, Operand, MaskCasted));
14642 // If not vector, then scalar.
14643 unsigned BitOp = IsFABS ? X86ISD::FAND : IsFNABS ? X86ISD::FOR : X86ISD::FXOR;
14644 SDValue Operand = IsFNABS ? Op0.getOperand(0) : Op0;
14645 return DAG.getNode(BitOp, dl, VT, Operand, Mask);
14648 static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
14649 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
14650 LLVMContext *Context = DAG.getContext();
14651 SDValue Op0 = Op.getOperand(0);
14652 SDValue Op1 = Op.getOperand(1);
14654 MVT VT = Op.getSimpleValueType();
14655 MVT SrcVT = Op1.getSimpleValueType();
14657 // If second operand is smaller, extend it first.
14658 if (SrcVT.bitsLT(VT)) {
14659 Op1 = DAG.getNode(ISD::FP_EXTEND, dl, VT, Op1);
14660 SrcVT = VT;
14661 }
14662 // And if it is bigger, shrink it first.
14663 if (SrcVT.bitsGT(VT)) {
14664 Op1 = DAG.getNode(ISD::FP_ROUND, dl, VT, Op1, DAG.getIntPtrConstant(1));
14665 SrcVT = VT;
14666 }
14668 // At this point the operands and the result should have the same
14669 // type, and that won't be f80 since that is not custom lowered.
14671 const fltSemantics &Sem =
14672 VT == MVT::f64 ? APFloat::IEEEdouble : APFloat::IEEEsingle;
14673 const unsigned SizeInBits = VT.getSizeInBits();
14675 SmallVector<Constant *, 4> CV(
14676 VT == MVT::f64 ? 2 : 4,
14677 ConstantFP::get(*Context, APFloat(Sem, APInt(SizeInBits, 0))));
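// In effect this computes copysign(Mag, Sgn) as
//   (Mag & ~SignMask) | (Sgn & SignMask)
// using 16-byte vector constants so the mask loads can be folded into the
// FAND/FOR logic ops below.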
14679 // First, clear all bits but the sign bit from the second operand (sign).
14680 CV[0] = ConstantFP::get(*Context,
14681 APFloat(Sem, APInt::getHighBitsSet(SizeInBits, 1)));
14682 Constant *C = ConstantVector::get(CV);
14683 SDValue CPIdx = DAG.getConstantPool(C, TLI.getPointerTy(), 16);
14684 SDValue Mask1 = DAG.getLoad(SrcVT, dl, DAG.getEntryNode(), CPIdx,
14685 MachinePointerInfo::getConstantPool(),
14686 false, false, false, 16);
14687 SDValue SignBit = DAG.getNode(X86ISD::FAND, dl, SrcVT, Op1, Mask1);
14689 // Next, clear the sign bit from the first operand (magnitude).
14690 // If it's a constant, we can clear it here.
14691 if (ConstantFPSDNode *Op0CN = dyn_cast<ConstantFPSDNode>(Op0)) {
14692 APFloat APF = Op0CN->getValueAPF();
14693 // If the magnitude is a positive zero, the sign bit alone is enough.
14694 if (APF.isPosZero())
14695 return SignBit;
14696 APF.clearSign();
14697 CV[0] = ConstantFP::get(*Context, APF);
14698 } else {
14699 CV[0] = ConstantFP::get(
14700 *Context,
14701 APFloat(Sem, APInt::getLowBitsSet(SizeInBits, SizeInBits - 1)));
14702 }
14703 C = ConstantVector::get(CV);
14704 CPIdx = DAG.getConstantPool(C, TLI.getPointerTy(), 16);
14705 SDValue Val = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
14706 MachinePointerInfo::getConstantPool(),
14707 false, false, false, 16);
14708 // If the magnitude operand wasn't a constant, we need to AND out the sign.
14709 if (!isa<ConstantFPSDNode>(Op0))
14710 Val = DAG.getNode(X86ISD::FAND, dl, VT, Op0, Val);
14712 // OR the magnitude value with the sign bit.
14713 return DAG.getNode(X86ISD::FOR, dl, VT, Val, SignBit);
14716 static SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) {
14717 SDValue N0 = Op.getOperand(0);
14719 MVT VT = Op.getSimpleValueType();
14721 // Lower ISD::FGETSIGN to (AND (X86ISD::FGETSIGNx86 ...) 1).
14722 SDValue xFGETSIGN = DAG.getNode(X86ISD::FGETSIGNx86, dl, VT, N0,
14723 DAG.getConstant(1, VT));
14724 return DAG.getNode(ISD::AND, dl, VT, xFGETSIGN, DAG.getConstant(1, VT));
14727 // Check whether an OR'd tree is PTEST-able.
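// That is, a tree of the form
//   (extractelt V, 0) | (extractelt V, 1) | ... | (extractelt V, N-1)
// compared against zero. If every lane of each source vector is consumed,
// the whole tree can be replaced by PTEST(V, V), whose ZF is set exactly when
// V is all zeros.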
14728 static SDValue LowerVectorAllZeroTest(SDValue Op, const X86Subtarget *Subtarget,
14729 SelectionDAG &DAG) {
14730 assert(Op.getOpcode() == ISD::OR && "Only check OR'd tree.");
14732 if (!Subtarget->hasSSE41())
14735 if (!Op->hasOneUse())
14738 SDNode *N = Op.getNode();
14741 SmallVector<SDValue, 8> Opnds;
14742 DenseMap<SDValue, unsigned> VecInMap;
14743 SmallVector<SDValue, 8> VecIns;
14744 EVT VT = MVT::Other;
14746 // Recognize a special case where a vector is cast into a wide integer to
14747 // test all 0s.
14748 Opnds.push_back(N->getOperand(0));
14749 Opnds.push_back(N->getOperand(1));
14751 for (unsigned Slot = 0, e = Opnds.size(); Slot < e; ++Slot) {
14752 SmallVectorImpl<SDValue>::const_iterator I = Opnds.begin() + Slot;
14753 // BFS traverse all OR'd operands.
14754 if (I->getOpcode() == ISD::OR) {
14755 Opnds.push_back(I->getOperand(0));
14756 Opnds.push_back(I->getOperand(1));
14757 // Re-evaluate the number of nodes to be traversed.
14758 e += 2; // 2 more nodes (LHS and RHS) are pushed.
14762 // Quit if this is not an EXTRACT_VECTOR_ELT.
14763 if (I->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
14766 // Quit if the extract index is not a constant.
14767 SDValue Idx = I->getOperand(1);
14768 if (!isa<ConstantSDNode>(Idx))
14771 SDValue ExtractedFromVec = I->getOperand(0);
14772 DenseMap<SDValue, unsigned>::iterator M = VecInMap.find(ExtractedFromVec);
14773 if (M == VecInMap.end()) {
14774 VT = ExtractedFromVec.getValueType();
14775 // Quit if not 128/256-bit vector.
14776 if (!VT.is128BitVector() && !VT.is256BitVector())
14778 // Quit if not the same type.
14779 if (VecInMap.begin() != VecInMap.end() &&
14780 VT != VecInMap.begin()->first.getValueType())
14782 M = VecInMap.insert(std::make_pair(ExtractedFromVec, 0)).first;
14783 VecIns.push_back(ExtractedFromVec);
14785 M->second |= 1U << cast<ConstantSDNode>(Idx)->getZExtValue();
14788 assert((VT.is128BitVector() || VT.is256BitVector()) &&
14789 "Not extracted from 128-/256-bit vector.");
14791 unsigned FullMask = (1U << VT.getVectorNumElements()) - 1U;
14793 for (DenseMap<SDValue, unsigned>::const_iterator
14794 I = VecInMap.begin(), E = VecInMap.end(); I != E; ++I) {
14795 // Quit if not all elements are used.
14796 if (I->second != FullMask)
14800 EVT TestVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64;
14802 // Cast all vectors into TestVT for PTEST.
14803 for (unsigned i = 0, e = VecIns.size(); i < e; ++i)
14804 VecIns[i] = DAG.getNode(ISD::BITCAST, DL, TestVT, VecIns[i]);
14806 // If more than one full vector is evaluated, OR them first before PTEST.
14807 for (unsigned Slot = 0, e = VecIns.size(); e - Slot > 1; Slot += 2, e += 1) {
14808 // Each iteration will OR 2 nodes and append the result until there is only
14809 // 1 node left, i.e. the final OR'd value of all vectors.
14810 SDValue LHS = VecIns[Slot];
14811 SDValue RHS = VecIns[Slot + 1];
14812 VecIns.push_back(DAG.getNode(ISD::OR, DL, TestVT, LHS, RHS));
14815 return DAG.getNode(X86ISD::PTEST, DL, MVT::i32,
14816 VecIns.back(), VecIns.back());
14819 /// \brief return true if \c Op has a use that doesn't just read flags.
14820 static bool hasNonFlagsUse(SDValue Op) {
14821 for (SDNode::use_iterator UI = Op->use_begin(), UE = Op->use_end(); UI != UE;
14823 SDNode *User = *UI;
14824 unsigned UOpNo = UI.getOperandNo();
14825 if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) {
14826 // Look past the truncate.
14827 UOpNo = User->use_begin().getOperandNo();
14828 User = *User->use_begin();
14831 if (User->getOpcode() != ISD::BRCOND && User->getOpcode() != ISD::SETCC &&
14832 !(User->getOpcode() == ISD::SELECT && UOpNo == 0))
14838 /// Emit nodes that will be selected as "test Op0,Op0", or something
14839 /// equivalent.
14840 SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, SDLoc dl,
14841 SelectionDAG &DAG) const {
14842 if (Op.getValueType() == MVT::i1)
14843 // KORTEST instruction should be selected
14844 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
14845 DAG.getConstant(0, Op.getValueType()));
14847 // CF and OF aren't always set the way we want. Determine which
14848 // of these we need.
14849 bool NeedCF = false;
14850 bool NeedOF = false;
14851 switch (X86CC) {
14852 default: break;
14853 case X86::COND_A: case X86::COND_AE:
14854 case X86::COND_B: case X86::COND_BE:
14855 NeedCF = true;
14856 break;
14857 case X86::COND_G: case X86::COND_GE:
14858 case X86::COND_L: case X86::COND_LE:
14859 case X86::COND_O: case X86::COND_NO: {
14860 // Check if we really need to set the
14861 // Overflow flag. If NoSignedWrap is present
14862 // that is not actually needed.
14863 switch (Op->getOpcode()) {
14868 const BinaryWithFlagsSDNode *BinNode =
14869 cast<BinaryWithFlagsSDNode>(Op.getNode());
14870 if (BinNode->hasNoSignedWrap())
14880 // See if we can use the EFLAGS value from the operand instead of
14881 // doing a separate TEST. TEST always sets OF and CF to 0, so unless
14882 // we prove that the arithmetic won't overflow, we can't use OF or CF.
14883 if (Op.getResNo() != 0 || NeedOF || NeedCF) {
14884 // Emit a CMP with 0, which is the TEST pattern.
14885 //if (Op.getValueType() == MVT::i1)
14886 // return DAG.getNode(X86ISD::CMP, dl, MVT::i1, Op,
14887 // DAG.getConstant(0, MVT::i1));
14888 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
14889 DAG.getConstant(0, Op.getValueType()));
14891 unsigned Opcode = 0;
14892 unsigned NumOperands = 0;
14894 // Truncate operations may prevent the merge of the SETCC instruction
14895 // and the arithmetic instruction before it. Attempt to truncate the operands
14896 // of the arithmetic instruction and use a reduced bit-width instruction.
14897 bool NeedTruncation = false;
14898 SDValue ArithOp = Op;
14899 if (Op->getOpcode() == ISD::TRUNCATE && Op->hasOneUse()) {
14900 SDValue Arith = Op->getOperand(0);
14901 // Both the trunc and the arithmetic op need to have one user each.
14902 if (Arith->hasOneUse())
14903 switch (Arith.getOpcode()) {
14910 NeedTruncation = true;
14916 // NOTICE: In the code below we use ArithOp to hold the arithmetic operation
14917 // which may be the result of a CAST. We use the variable 'Op', which is the
14918 // non-casted variable when we check for possible users.
14919 switch (ArithOp.getOpcode()) {
14921 // Due to an isel shortcoming, be conservative if this add is likely to be
14922 // selected as part of a load-modify-store instruction. When the root node
14923 // in a match is a store, isel doesn't know how to remap non-chain non-flag
14924 // uses of other nodes in the match, such as the ADD in this case. This
14925 // leads to the ADD being left around and reselected, with the result being
14926 // two adds in the output. Alas, even if none of our users are stores, that
14927 // doesn't prove we're O.K. Ergo, if we have any parents that aren't
14928 // CopyToReg or SETCC, eschew INC/DEC. A better fix seems to require
14929 // climbing the DAG back to the root, and it doesn't seem to be worth the
14930 // effort.
14931 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
14932 UE = Op.getNode()->use_end(); UI != UE; ++UI)
14933 if (UI->getOpcode() != ISD::CopyToReg &&
14934 UI->getOpcode() != ISD::SETCC &&
14935 UI->getOpcode() != ISD::STORE)
14938 if (ConstantSDNode *C =
14939 dyn_cast<ConstantSDNode>(ArithOp.getNode()->getOperand(1))) {
14940 // An add of one will be selected as an INC.
14941 if (C->getAPIntValue() == 1 && !Subtarget->slowIncDec()) {
14942 Opcode = X86ISD::INC;
14947 // An add of negative one (subtract of one) will be selected as a DEC.
14948 if (C->getAPIntValue().isAllOnesValue() && !Subtarget->slowIncDec()) {
14949 Opcode = X86ISD::DEC;
14955 // Otherwise use a regular EFLAGS-setting add.
14956 Opcode = X86ISD::ADD;
14961 // If we have a constant logical shift that's only used in a comparison
14962 // against zero turn it into an equivalent AND. This allows turning it into
14963 // a TEST instruction later.
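// For example, with i32 operands "(srl X, 5) == 0" becomes
// "(and X, 0xFFFFFFE0) == 0", and "(shl X, 5) == 0" becomes
// "(and X, 0x07FFFFFF) == 0", both of which select to a single TEST.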
14964 if ((X86CC == X86::COND_E || X86CC == X86::COND_NE) && Op->hasOneUse() &&
14965 isa<ConstantSDNode>(Op->getOperand(1)) && !hasNonFlagsUse(Op)) {
14966 EVT VT = Op.getValueType();
14967 unsigned BitWidth = VT.getSizeInBits();
14968 unsigned ShAmt = Op->getConstantOperandVal(1);
14969 if (ShAmt >= BitWidth) // Avoid undefined shifts.
14970 break;
14971 APInt Mask = ArithOp.getOpcode() == ISD::SRL
14972 ? APInt::getHighBitsSet(BitWidth, BitWidth - ShAmt)
14973 : APInt::getLowBitsSet(BitWidth, BitWidth - ShAmt);
14974 if (!Mask.isSignedIntN(32)) // Avoid large immediates.
14975 break;
14976 SDValue New = DAG.getNode(ISD::AND, dl, VT, Op->getOperand(0),
14977 DAG.getConstant(Mask, VT));
14978 DAG.ReplaceAllUsesWith(Op, New);
14984 // If the primary and result isn't used, don't bother using X86ISD::AND,
14985 // because a TEST instruction will be better.
14986 if (!hasNonFlagsUse(Op))
14992 // Due to the ISEL shortcoming noted above, be conservative if this op is
14993 // likely to be selected as part of a load-modify-store instruction.
14994 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
14995 UE = Op.getNode()->use_end(); UI != UE; ++UI)
14996 if (UI->getOpcode() == ISD::STORE)
14999 // Otherwise use a regular EFLAGS-setting instruction.
15000 switch (ArithOp.getOpcode()) {
15001 default: llvm_unreachable("unexpected operator!");
15002 case ISD::SUB: Opcode = X86ISD::SUB; break;
15003 case ISD::XOR: Opcode = X86ISD::XOR; break;
15004 case ISD::AND: Opcode = X86ISD::AND; break;
15006 if (!NeedTruncation && (X86CC == X86::COND_E || X86CC == X86::COND_NE)) {
15007 SDValue EFLAGS = LowerVectorAllZeroTest(Op, Subtarget, DAG);
15008 if (EFLAGS.getNode())
15009 return EFLAGS;
15010 }
15011 Opcode = X86ISD::OR;
15025 return SDValue(Op.getNode(), 1);
15031 // If we found that truncation is beneficial, perform the truncation and
15033 if (NeedTruncation) {
15034 EVT VT = Op.getValueType();
15035 SDValue WideVal = Op->getOperand(0);
15036 EVT WideVT = WideVal.getValueType();
15037 unsigned ConvertedOp = 0;
15038 // Use a target machine opcode to prevent further DAGCombine
15039 // optimizations that may separate the arithmetic operations
15040 // from the setcc node.
15041 switch (WideVal.getOpcode()) {
15043 case ISD::ADD: ConvertedOp = X86ISD::ADD; break;
15044 case ISD::SUB: ConvertedOp = X86ISD::SUB; break;
15045 case ISD::AND: ConvertedOp = X86ISD::AND; break;
15046 case ISD::OR: ConvertedOp = X86ISD::OR; break;
15047 case ISD::XOR: ConvertedOp = X86ISD::XOR; break;
15048 }
15050 if (ConvertedOp) {
15051 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
15052 if (TLI.isOperationLegal(WideVal.getOpcode(), WideVT)) {
15053 SDValue V0 = DAG.getNode(ISD::TRUNCATE, dl, VT, WideVal.getOperand(0));
15054 SDValue V1 = DAG.getNode(ISD::TRUNCATE, dl, VT, WideVal.getOperand(1));
15055 Op = DAG.getNode(ConvertedOp, dl, VT, V0, V1);
15056 }
15057 }
15058 }
15060 if (Opcode == 0)
15061 // Emit a CMP with 0, which is the TEST pattern.
15062 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
15063 DAG.getConstant(0, Op.getValueType()));
15065 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
15066 SmallVector<SDValue, 4> Ops;
15067 for (unsigned i = 0; i != NumOperands; ++i)
15068 Ops.push_back(Op.getOperand(i));
15070 SDValue New = DAG.getNode(Opcode, dl, VTs, Ops);
15071 DAG.ReplaceAllUsesWith(Op, New);
15072 return SDValue(New.getNode(), 1);
15075 /// Emit nodes that will be selected as "cmp Op0,Op1", or something
15076 /// equivalent.
15077 SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
15078 SDLoc dl, SelectionDAG &DAG) const {
15079 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op1)) {
15080 if (C->getAPIntValue() == 0)
15081 return EmitTest(Op0, X86CC, dl, DAG);
15083 if (Op0.getValueType() == MVT::i1)
15084 llvm_unreachable("Unexpected comparison operation for MVT::i1 operands");
15087 if ((Op0.getValueType() == MVT::i8 || Op0.getValueType() == MVT::i16 ||
15088 Op0.getValueType() == MVT::i32 || Op0.getValueType() == MVT::i64)) {
15089 // Do the comparison at i32 if it's smaller, besides the Atom case.
15090 // This avoids subregister aliasing issues. Keep the smaller reference
15091 // if we're optimizing for size, however, as that'll allow better folding
15092 // of memory operations.
15093 if (Op0.getValueType() != MVT::i32 && Op0.getValueType() != MVT::i64 &&
15094 !DAG.getMachineFunction().getFunction()->getAttributes().hasAttribute(
15095 AttributeSet::FunctionIndex, Attribute::MinSize) &&
15096 !Subtarget->isAtom()) {
15097 unsigned ExtendOp =
15098 isX86CCUnsigned(X86CC) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
15099 Op0 = DAG.getNode(ExtendOp, dl, MVT::i32, Op0);
15100 Op1 = DAG.getNode(ExtendOp, dl, MVT::i32, Op1);
15102 // Use SUB instead of CMP to enable CSE between SUB and CMP.
15103 SDVTList VTs = DAG.getVTList(Op0.getValueType(), MVT::i32);
15104 SDValue Sub = DAG.getNode(X86ISD::SUB, dl, VTs,
15105 Op0, Op1);
15106 return SDValue(Sub.getNode(), 1);
15108 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op0, Op1);
15111 /// Convert a comparison if required by the subtarget.
15112 SDValue X86TargetLowering::ConvertCmpIfNecessary(SDValue Cmp,
15113 SelectionDAG &DAG) const {
15114 // If the subtarget does not support the FUCOMI instruction, floating-point
15115 // comparisons have to be converted.
15116 if (Subtarget->hasCMov() ||
15117 Cmp.getOpcode() != X86ISD::CMP ||
15118 !Cmp.getOperand(0).getValueType().isFloatingPoint() ||
15119 !Cmp.getOperand(1).getValueType().isFloatingPoint())
15120 return Cmp;
15122 // The instruction selector will select an FUCOM instruction instead of
15123 // FUCOMI, which writes the comparison result to FPSW instead of EFLAGS. Hence
15124 // build an SDNode sequence that transfers the result from FPSW into EFLAGS:
15125 // (X86sahf (trunc (srl (X86fp_stsw (trunc (X86cmp ...)), 8))))
15127 SDValue TruncFPSW = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, Cmp);
15128 SDValue FNStSW = DAG.getNode(X86ISD::FNSTSW16r, dl, MVT::i16, TruncFPSW);
15129 SDValue Srl = DAG.getNode(ISD::SRL, dl, MVT::i16, FNStSW,
15130 DAG.getConstant(8, MVT::i8));
15131 SDValue TruncSrl = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Srl);
15132 return DAG.getNode(X86ISD::SAHF, dl, MVT::i32, TruncSrl);
15135 /// The minimum architected relative accuracy is 2^-12. We need one
15136 /// Newton-Raphson step to have a good float result (24 bits of precision).
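/// Each refinement step applies the standard Newton-Raphson recurrence
///   Est' = Est * (1.5 - 0.5 * Op * Est * Est)
/// which roughly doubles the number of correct bits per iteration.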
15137 SDValue X86TargetLowering::getRsqrtEstimate(SDValue Op,
15138 DAGCombinerInfo &DCI,
15139 unsigned &RefinementSteps,
15140 bool &UseOneConstNR) const {
15141 // FIXME: We should use instruction latency models to calculate the cost of
15142 // each potential sequence, but this is very hard to do reliably because
15143 // at least Intel's Core* chips have variable timing based on the number of
15144 // significant digits in the divisor and/or sqrt operand.
15145 if (!Subtarget->useSqrtEst())
15146 return SDValue();
15148 EVT VT = Op.getValueType();
15150 // SSE1 has rsqrtss and rsqrtps.
15151 // TODO: Add support for AVX512 (v16f32).
15152 // It is likely not profitable to do this for f64 because a double-precision
15153 // rsqrt estimate with refinement on x86 prior to FMA requires at least 16
15154 // instructions: convert to single, rsqrtss, convert back to double, refine
15155 // (3 steps = at least 13 insts). If an 'rsqrtsd' variant was added to the ISA
15156 // along with FMA, this could be a throughput win.
15157 if ((Subtarget->hasSSE1() && (VT == MVT::f32 || VT == MVT::v4f32)) ||
15158 (Subtarget->hasAVX() && VT == MVT::v8f32)) {
15159 RefinementSteps = 1;
15160 UseOneConstNR = false;
15161 return DCI.DAG.getNode(X86ISD::FRSQRT, SDLoc(Op), VT, Op);
15166 /// The minimum architected relative accuracy is 2^-12. We need one
15167 /// Newton-Raphson step to have a good float result (24 bits of precision).
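/// Each refinement step applies the standard Newton-Raphson recurrence
///   Est' = Est * (2.0 - Op * Est)
/// which roughly doubles the number of correct bits, taking the ~12-bit
/// hardware estimate to full single precision in one step.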
15168 SDValue X86TargetLowering::getRecipEstimate(SDValue Op,
15169 DAGCombinerInfo &DCI,
15170 unsigned &RefinementSteps) const {
15171 // FIXME: We should use instruction latency models to calculate the cost of
15172 // each potential sequence, but this is very hard to do reliably because
15173 // at least Intel's Core* chips have variable timing based on the number of
15174 // significant digits in the divisor.
15175 if (!Subtarget->useReciprocalEst())
15176 return SDValue();
15178 EVT VT = Op.getValueType();
15180 // SSE1 has rcpss and rcpps. AVX adds a 256-bit variant for rcpps.
15181 // TODO: Add support for AVX512 (v16f32).
15182 // It is likely not profitable to do this for f64 because a double-precision
15183 // reciprocal estimate with refinement on x86 prior to FMA requires
15184 // 15 instructions: convert to single, rcpss, convert back to double, refine
15185 // (3 steps = 12 insts). If an 'rcpsd' variant was added to the ISA
15186 // along with FMA, this could be a throughput win.
15187 if ((Subtarget->hasSSE1() && (VT == MVT::f32 || VT == MVT::v4f32)) ||
15188 (Subtarget->hasAVX() && VT == MVT::v8f32)) {
15189 RefinementSteps = ReciprocalEstimateRefinementSteps;
15190 return DCI.DAG.getNode(X86ISD::FRCP, SDLoc(Op), VT, Op);
15195 static bool isAllOnes(SDValue V) {
15196 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V);
15197 return C && C->isAllOnesValue();
15200 /// LowerToBT - Result of 'and' is compared against zero. Turn it into a BT node
15201 /// if it's possible.
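/// For example, "(and X, (shl 1, N)) != 0" becomes (X86bt X, N) followed by a
/// SETCC on the carry flag; a power-of-two constant mask that is too large
/// for a TEST immediate is handled the same way.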
15202 SDValue X86TargetLowering::LowerToBT(SDValue And, ISD::CondCode CC,
15203 SDLoc dl, SelectionDAG &DAG) const {
15204 SDValue Op0 = And.getOperand(0);
15205 SDValue Op1 = And.getOperand(1);
15206 if (Op0.getOpcode() == ISD::TRUNCATE)
15207 Op0 = Op0.getOperand(0);
15208 if (Op1.getOpcode() == ISD::TRUNCATE)
15209 Op1 = Op1.getOperand(0);
15211 SDValue LHS, RHS;
15212 if (Op1.getOpcode() == ISD::SHL)
15213 std::swap(Op0, Op1);
15214 if (Op0.getOpcode() == ISD::SHL) {
15215 if (ConstantSDNode *And00C = dyn_cast<ConstantSDNode>(Op0.getOperand(0)))
15216 if (And00C->getZExtValue() == 1) {
15217 // If we looked past a truncate, check that it's only truncating away
15218 // known zeros.
15219 unsigned BitWidth = Op0.getValueSizeInBits();
15220 unsigned AndBitWidth = And.getValueSizeInBits();
15221 if (BitWidth > AndBitWidth) {
15222 APInt Zeros, Ones;
15223 DAG.computeKnownBits(Op0, Zeros, Ones);
15224 if (Zeros.countLeadingOnes() < BitWidth - AndBitWidth)
15225 return SDValue();
15226 }
15227 LHS = Op1;
15228 RHS = Op0.getOperand(1);
15230 } else if (Op1.getOpcode() == ISD::Constant) {
15231 ConstantSDNode *AndRHS = cast<ConstantSDNode>(Op1);
15232 uint64_t AndRHSVal = AndRHS->getZExtValue();
15233 SDValue AndLHS = Op0;
15235 if (AndRHSVal == 1 && AndLHS.getOpcode() == ISD::SRL) {
15236 LHS = AndLHS.getOperand(0);
15237 RHS = AndLHS.getOperand(1);
15240 // Use BT if the immediate can't be encoded in a TEST instruction.
15241 if (!isUInt<32>(AndRHSVal) && isPowerOf2_64(AndRHSVal)) {
15243 RHS = DAG.getConstant(Log2_64_Ceil(AndRHSVal), LHS.getValueType());
15247 if (LHS.getNode()) {
15248 // If LHS is i8, promote it to i32 with any_extend. There is no i8 BT
15249 // instruction. Since the shift amount is in-range-or-undefined, we know
15250 // that doing a bittest on the i32 value is ok. We extend to i32 because
15251 // the encoding for the i16 version is larger than the i32 version.
15252 // Also promote i16 to i32 for performance / code size reason.
15253 if (LHS.getValueType() == MVT::i8 ||
15254 LHS.getValueType() == MVT::i16)
15255 LHS = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, LHS);
15257 // If the operand types disagree, extend the shift amount to match. Since
15258 // BT ignores high bits (like shifts) we can use anyextend.
15259 if (LHS.getValueType() != RHS.getValueType())
15260 RHS = DAG.getNode(ISD::ANY_EXTEND, dl, LHS.getValueType(), RHS);
15262 SDValue BT = DAG.getNode(X86ISD::BT, dl, MVT::i32, LHS, RHS);
15263 X86::CondCode Cond = CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B;
15264 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
15265 DAG.getConstant(Cond, MVT::i8), BT);
15271 /// \brief - Turns an ISD::CondCode into a value suitable for SSE floating point
15272 /// condition code.
15273 static int translateX86FSETCC(ISD::CondCode SetCCOpcode, SDValue &Op0,
15274 SDValue &Op1) {
15275 unsigned SSECC;
15276 bool Swap = false;
15278 // SSE Condition code mapping:
15279 // 0 - EQ
15280 // 1 - LT
15281 // 2 - LE
15282 // 3 - UNORD
15283 // 4 - NEQ
15284 // 5 - NLT
15285 // 6 - NLE
15286 // 7 - ORD
15287 switch (SetCCOpcode) {
15288 default: llvm_unreachable("Unexpected SETCC condition");
15290 case ISD::SETEQ: SSECC = 0; break;
15292 case ISD::SETGT: Swap = true; // Fallthrough
15294 case ISD::SETOLT: SSECC = 1; break;
15296 case ISD::SETGE: Swap = true; // Fallthrough
15298 case ISD::SETOLE: SSECC = 2; break;
15299 case ISD::SETUO: SSECC = 3; break;
15301 case ISD::SETNE: SSECC = 4; break;
15302 case ISD::SETULE: Swap = true; // Fallthrough
15303 case ISD::SETUGE: SSECC = 5; break;
15304 case ISD::SETULT: Swap = true; // Fallthrough
15305 case ISD::SETUGT: SSECC = 6; break;
15306 case ISD::SETO: SSECC = 7; break;
15308 case ISD::SETONE: SSECC = 8; break;
15309 }
15310 if (Swap)
15311 std::swap(Op0, Op1);
15313 return SSECC;
15314 }
15316 // Lower256IntVSETCC - Break a VSETCC 256-bit integer VSETCC into two new 128
15317 // ones, and then concatenate the result back.
15318 static SDValue Lower256IntVSETCC(SDValue Op, SelectionDAG &DAG) {
15319 MVT VT = Op.getSimpleValueType();
15321 assert(VT.is256BitVector() && Op.getOpcode() == ISD::SETCC &&
15322 "Unsupported value type for operation");
15324 unsigned NumElems = VT.getVectorNumElements();
15326 SDValue CC = Op.getOperand(2);
15328 // Extract the LHS vectors
15329 SDValue LHS = Op.getOperand(0);
15330 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
15331 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);
15333 // Extract the RHS vectors
15334 SDValue RHS = Op.getOperand(1);
15335 SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, dl);
15336 SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, dl);
15338 // Issue the operation on the smaller types and concatenate the result back
15339 MVT EltVT = VT.getVectorElementType();
15340 MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
15341 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
15342 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1, CC),
15343 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2, CC));
15346 static SDValue LowerIntVSETCC_AVX512(SDValue Op, SelectionDAG &DAG,
15347 const X86Subtarget *Subtarget) {
15348 SDValue Op0 = Op.getOperand(0);
15349 SDValue Op1 = Op.getOperand(1);
15350 SDValue CC = Op.getOperand(2);
15351 MVT VT = Op.getSimpleValueType();
15354 assert(Op0.getValueType().getVectorElementType().getSizeInBits() >= 8 &&
15355 Op.getValueType().getScalarType() == MVT::i1 &&
15356 "Cannot set masked compare for this operation");
15358 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
15360 bool Unsigned = false;
15363 switch (SetCCOpcode) {
15364 default: llvm_unreachable("Unexpected SETCC condition");
15365 case ISD::SETNE: SSECC = 4; break;
15366 case ISD::SETEQ: Opc = X86ISD::PCMPEQM; break;
15367 case ISD::SETUGT: SSECC = 6; Unsigned = true; break;
15368 case ISD::SETLT: Swap = true; //fall-through
15369 case ISD::SETGT: Opc = X86ISD::PCMPGTM; break;
15370 case ISD::SETULT: SSECC = 1; Unsigned = true; break;
15371 case ISD::SETUGE: SSECC = 5; Unsigned = true; break; //NLT
15372 case ISD::SETGE: Swap = true; SSECC = 2; break; // LE + swap
15373 case ISD::SETULE: Unsigned = true; //fall-through
15374 case ISD::SETLE: SSECC = 2; break;
15375 }
15377 if (Swap)
15378 std::swap(Op0, Op1);
15379 if (Opc)
15380 return DAG.getNode(Opc, dl, VT, Op0, Op1);
15381 Opc = Unsigned ? X86ISD::CMPMU: X86ISD::CMPM;
15382 return DAG.getNode(Opc, dl, VT, Op0, Op1,
15383 DAG.getConstant(SSECC, MVT::i8));
15386 /// \brief Try to turn a VSETULT into a VSETULE by modifying its second
15387 /// operand \p Op1. If non-trivial (for example because it's not constant)
15388 /// return an empty value.
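/// For example, "x <u <4, 4, 4, 4>" can be rewritten as "x <=u <3, 3, 3, 3>",
/// which with PSUBUS avoids the operand swap a SETULT would need; the rewrite
/// is skipped if any element is 0, since subtracting 1 would underflow.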
15389 static SDValue ChangeVSETULTtoVSETULE(SDLoc dl, SDValue Op1, SelectionDAG &DAG)
15391 BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op1.getNode());
15395 MVT VT = Op1.getSimpleValueType();
15396 MVT EVT = VT.getVectorElementType();
15397 unsigned n = VT.getVectorNumElements();
15398 SmallVector<SDValue, 8> ULTOp1;
15400 for (unsigned i = 0; i < n; ++i) {
15401 ConstantSDNode *Elt = dyn_cast<ConstantSDNode>(BV->getOperand(i));
15402 if (!Elt || Elt->isOpaque() || Elt->getValueType(0) != EVT)
15405 // Avoid underflow.
15406 APInt Val = Elt->getAPIntValue();
15410 ULTOp1.push_back(DAG.getConstant(Val - 1, EVT));
15413 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, ULTOp1);
15416 static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget,
15417 SelectionDAG &DAG) {
15418 SDValue Op0 = Op.getOperand(0);
15419 SDValue Op1 = Op.getOperand(1);
15420 SDValue CC = Op.getOperand(2);
15421 MVT VT = Op.getSimpleValueType();
15422 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
15423 bool isFP = Op.getOperand(1).getSimpleValueType().isFloatingPoint();
15428 MVT EltVT = Op0.getSimpleValueType().getVectorElementType();
15429 assert(EltVT == MVT::f32 || EltVT == MVT::f64);
15432 unsigned SSECC = translateX86FSETCC(SetCCOpcode, Op0, Op1);
15433 unsigned Opc = X86ISD::CMPP;
15434 if (Subtarget->hasAVX512() && VT.getVectorElementType() == MVT::i1) {
15435 assert(VT.getVectorNumElements() <= 16);
15436 Opc = X86ISD::CMPM;
15438 // In the two special cases we can't handle, emit two comparisons.
15441 unsigned CombineOpc;
15442 if (SetCCOpcode == ISD::SETUEQ) {
15443 CC0 = 3; CC1 = 0; CombineOpc = ISD::OR;
15445 assert(SetCCOpcode == ISD::SETONE);
15446 CC0 = 7; CC1 = 4; CombineOpc = ISD::AND;
15449 SDValue Cmp0 = DAG.getNode(Opc, dl, VT, Op0, Op1,
15450 DAG.getConstant(CC0, MVT::i8));
15451 SDValue Cmp1 = DAG.getNode(Opc, dl, VT, Op0, Op1,
15452 DAG.getConstant(CC1, MVT::i8));
15453 return DAG.getNode(CombineOpc, dl, VT, Cmp0, Cmp1);
15455 // Handle all other FP comparisons here.
15456 return DAG.getNode(Opc, dl, VT, Op0, Op1,
15457 DAG.getConstant(SSECC, MVT::i8));
15460 // Break 256-bit integer vector compare into smaller ones.
15461 if (VT.is256BitVector() && !Subtarget->hasInt256())
15462 return Lower256IntVSETCC(Op, DAG);
15464 bool MaskResult = (VT.getVectorElementType() == MVT::i1);
15465 EVT OpVT = Op1.getValueType();
15466 if (Subtarget->hasAVX512()) {
15467 if (Op1.getValueType().is512BitVector() ||
15468 (Subtarget->hasBWI() && Subtarget->hasVLX()) ||
15469 (MaskResult && OpVT.getVectorElementType().getSizeInBits() >= 32))
15470 return LowerIntVSETCC_AVX512(Op, DAG, Subtarget);
15472 // In AVX-512 architecture setcc returns mask with i1 elements,
15473 // But there is no compare instruction for i8 and i16 elements in KNL.
15474 // We are not talking about 512-bit operands in this case, these
15475 // types are illegal.
15477 (OpVT.getVectorElementType().getSizeInBits() < 32 &&
15478 OpVT.getVectorElementType().getSizeInBits() >= 8))
15479 return DAG.getNode(ISD::TRUNCATE, dl, VT,
15480 DAG.getNode(ISD::SETCC, dl, OpVT, Op0, Op1, CC));
15483 // We are handling one of the integer comparisons here. Since SSE only has
15484 // GT and EQ comparisons for integer, swapping operands and multiple
15485 // operations may be required for some comparisons.
15487 bool Swap = false, Invert = false, FlipSigns = false, MinMax = false;
15488 bool Subus = false;
15490 switch (SetCCOpcode) {
15491 default: llvm_unreachable("Unexpected SETCC condition");
15492 case ISD::SETNE: Invert = true;
15493 case ISD::SETEQ: Opc = X86ISD::PCMPEQ; break;
15494 case ISD::SETLT: Swap = true;
15495 case ISD::SETGT: Opc = X86ISD::PCMPGT; break;
15496 case ISD::SETGE: Swap = true;
15497 case ISD::SETLE: Opc = X86ISD::PCMPGT;
15498 Invert = true; break;
15499 case ISD::SETULT: Swap = true;
15500 case ISD::SETUGT: Opc = X86ISD::PCMPGT;
15501 FlipSigns = true; break;
15502 case ISD::SETUGE: Swap = true;
15503 case ISD::SETULE: Opc = X86ISD::PCMPGT;
15504 FlipSigns = true; Invert = true; break;
15507 // Special case: Use min/max operations for SETULE/SETUGE
15508 MVT VET = VT.getVectorElementType();
15510 (Subtarget->hasSSE41() && (VET >= MVT::i8 && VET <= MVT::i32))
15511 || (Subtarget->hasSSE2() && (VET == MVT::i8));
15514 switch (SetCCOpcode) {
15516 case ISD::SETULE: Opc = X86ISD::UMIN; MinMax = true; break;
15517 case ISD::SETUGE: Opc = X86ISD::UMAX; MinMax = true; break;
15520 if (MinMax) { Swap = false; Invert = false; FlipSigns = false; }
15523 bool hasSubus = Subtarget->hasSSE2() && (VET == MVT::i8 || VET == MVT::i16);
15524 if (!MinMax && hasSubus) {
15525 // As another special case, use PSUBUS[BW] when it's profitable. E.g. for
15527 // t = psubus Op0, Op1
15528 // pcmpeq t, <0..0>
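// This works because the unsigned saturating subtract is zero exactly when
// Op0 <=u Op1 element-wise, so comparing the difference against zero yields
// the SETULE mask directly.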
15529 switch (SetCCOpcode) {
15531 case ISD::SETULT: {
15532 // If the comparison is against a constant we can turn this into a
15533 // setule. With psubus, setule does not require a swap. This is
15534 // beneficial because the constant in the register is no longer
15535 // clobbered as the destination, so it can be hoisted out of a loop.
15536 // Only do this pre-AVX since vpcmp* is no longer destructive.
15537 if (Subtarget->hasAVX())
15539 SDValue ULEOp1 = ChangeVSETULTtoVSETULE(dl, Op1, DAG);
15540 if (ULEOp1.getNode()) {
15542 Subus = true; Invert = false; Swap = false;
15546 // Psubus is better than flip-sign because it requires no inversion.
15547 case ISD::SETUGE: Subus = true; Invert = false; Swap = true; break;
15548 case ISD::SETULE: Subus = true; Invert = false; Swap = false; break;
15552 Opc = X86ISD::SUBUS;
15557 if (Swap)
15558 std::swap(Op0, Op1);
15560 // Check that the operation in question is available (most are plain SSE2,
15561 // but PCMPGTQ and PCMPEQQ have different requirements).
15562 if (VT == MVT::v2i64) {
15563 if (Opc == X86ISD::PCMPGT && !Subtarget->hasSSE42()) {
15564 assert(Subtarget->hasSSE2() && "Don't know how to lower!");
15566 // First cast everything to the right type.
15567 Op0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op0);
15568 Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op1);
15570 // Since SSE has no unsigned integer comparisons, we need to flip the sign
15571 // bits of the inputs before performing those operations. The lower
15572 // compare is always unsigned.
15575 SB = DAG.getConstant(0x80000000U, MVT::v4i32);
15577 SDValue Sign = DAG.getConstant(0x80000000U, MVT::i32);
15578 SDValue Zero = DAG.getConstant(0x00000000U, MVT::i32);
15579 SB = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
15580 Sign, Zero, Sign, Zero);
15582 Op0 = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Op0, SB);
15583 Op1 = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Op1, SB);
15585 // Emulate PCMPGTQ with (hi1 > hi2) | ((hi1 == hi2) & (lo1 > lo2))
15586 SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
15587 SDValue EQ = DAG.getNode(X86ISD::PCMPEQ, dl, MVT::v4i32, Op0, Op1);
15589 // Create masks for only the low parts/high parts of the 64 bit integers.
15590 static const int MaskHi[] = { 1, 1, 3, 3 };
15591 static const int MaskLo[] = { 0, 0, 2, 2 };
15592 SDValue EQHi = DAG.getVectorShuffle(MVT::v4i32, dl, EQ, EQ, MaskHi);
15593 SDValue GTLo = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskLo);
15594 SDValue GTHi = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);
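// Editorial note: MaskHi = {1,1,3,3} and MaskLo = {0,0,2,2} splat the dword
// comparison result of each 64-bit element's high (resp. low) half into both
// dword lanes of that element, so the AND/OR below combines them lane-wise
// into an all-ones/all-zeros 64-bit result realizing
// (hi1 > hi2) | ((hi1 == hi2) & (lo1 > lo2)).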
15596 SDValue Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, EQHi, GTLo);
15597 Result = DAG.getNode(ISD::OR, dl, MVT::v4i32, Result, GTHi);
15599 if (Invert)
15600   Result = DAG.getNOT(dl, Result, MVT::v4i32);
15602 return DAG.getNode(ISD::BITCAST, dl, VT, Result);
15605 if (Opc == X86ISD::PCMPEQ && !Subtarget->hasSSE41()) {
15606 // If pcmpeqq is missing but pcmpeqd is available synthesize pcmpeqq with
15607 // pcmpeqd + pshufd + pand.
15608 assert(Subtarget->hasSSE2() && !FlipSigns && "Don't know how to lower!");
15610 // First cast everything to the right type.
15611 Op0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op0);
15612 Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op1);
15615 SDValue Result = DAG.getNode(Opc, dl, MVT::v4i32, Op0, Op1);
15617 // Make sure the lower and upper halves are both all-ones.
15618 static const int Mask[] = { 1, 0, 3, 2 };
15619 SDValue Shuf = DAG.getVectorShuffle(MVT::v4i32, dl, Result, Result, Mask);
15620 Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, Result, Shuf);
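// Editorial example: if only the low dwords of a 64-bit element are equal,
// Result holds {-1, 0} for that element while Shuf (dword pairs swapped by
// {1,0,3,2}) holds {0, -1}; the AND gives 0, correctly reporting the 64-bit
// elements as unequal. Only when both dword halves match does the lane
// become all-ones.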
15622 if (Invert)
15623   Result = DAG.getNOT(dl, Result, MVT::v4i32);
15625 return DAG.getNode(ISD::BITCAST, dl, VT, Result);
15629 // Since SSE has no unsigned integer comparisons, we need to flip the sign
15630 // bits of the inputs before performing those operations.
15632 EVT EltVT = VT.getVectorElementType();
15633 SDValue SB = DAG.getConstant(APInt::getSignBit(EltVT.getSizeInBits()), VT);
15634 Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SB);
15635 Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SB);
15638 SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
15640 // If the logical-not of the result is required, perform that now.
15641 if (Invert)
15642   Result = DAG.getNOT(dl, Result, VT);
15644 if (MinMax)
15645   Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Op0, Result);
15647 if (Subus)
15648   Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Result,
15649                        getZeroVector(VT, Subtarget, DAG, dl));
15654 SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
15656 MVT VT = Op.getSimpleValueType();
15658 if (VT.isVector()) return LowerVSETCC(Op, Subtarget, DAG);
15660 assert(((!Subtarget->hasAVX512() && VT == MVT::i8) || (VT == MVT::i1))
15661 && "SetCC type must be 8-bit or 1-bit integer");
15662 SDValue Op0 = Op.getOperand(0);
15663 SDValue Op1 = Op.getOperand(1);
15665 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
15667 // Optimize to BT if possible.
15668 // Lower (X & (1 << N)) == 0 to BT(X, N).
15669 // Lower ((X >>u N) & 1) != 0 to BT(X, N).
15670 // Lower ((X >>s N) & 1) != 0 to BT(X, N).
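// Editorial example (illustrative only): for (X & 8) == 0, LowerToBT would
// test bit 3, i.e. emit a BT of X against 3 and read the result from the
// carry flag with an above-or-equal condition; the != 0 forms use the
// below condition instead.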
15671 if (Op0.getOpcode() == ISD::AND && Op0.hasOneUse() &&
15672 Op1.getOpcode() == ISD::Constant &&
15673 cast<ConstantSDNode>(Op1)->isNullValue() &&
15674 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
15675 SDValue NewSetCC = LowerToBT(Op0, CC, dl, DAG);
15676 if (NewSetCC.getNode()) {
15678 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewSetCC);
15683 // Look for X == 0, X == 1, X != 0, or X != 1. We can simplify some forms of
15684 // these.
15685 if (Op1.getOpcode() == ISD::Constant &&
15686 (cast<ConstantSDNode>(Op1)->getZExtValue() == 1 ||
15687 cast<ConstantSDNode>(Op1)->isNullValue()) &&
15688 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
15690 // If the input is a setcc, then reuse the input setcc or use a new one with
15691 // the inverted condition.
15692 if (Op0.getOpcode() == X86ISD::SETCC) {
15693 X86::CondCode CCode = (X86::CondCode)Op0.getConstantOperandVal(0);
15694 bool Invert = (CC == ISD::SETNE) ^
15695 cast<ConstantSDNode>(Op1)->isNullValue();
15699 CCode = X86::GetOppositeBranchCondition(CCode);
15700 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
15701 DAG.getConstant(CCode, MVT::i8),
15702 Op0.getOperand(1));
15704 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, SetCC);
15708 if ((Op0.getValueType() == MVT::i1) && (Op1.getOpcode() == ISD::Constant) &&
15709 (cast<ConstantSDNode>(Op1)->getZExtValue() == 1) &&
15710 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
15712 ISD::CondCode NewCC = ISD::getSetCCInverse(CC, true);
15713 return DAG.getSetCC(dl, VT, Op0, DAG.getConstant(0, MVT::i1), NewCC);
15716 bool isFP = Op1.getSimpleValueType().isFloatingPoint();
15717 unsigned X86CC = TranslateX86CC(CC, isFP, Op0, Op1, DAG);
15718 if (X86CC == X86::COND_INVALID)
15721 SDValue EFLAGS = EmitCmp(Op0, Op1, X86CC, dl, DAG);
15722 EFLAGS = ConvertCmpIfNecessary(EFLAGS, DAG);
15723 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
15724 DAG.getConstant(X86CC, MVT::i8), EFLAGS);
15726 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, SetCC);
15730 // isX86LogicalCmp - Return true if opcode is a X86 logical comparison.
15731 static bool isX86LogicalCmp(SDValue Op) {
15732 unsigned Opc = Op.getNode()->getOpcode();
15733 if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI ||
15734 Opc == X86ISD::SAHF)
15736 if (Op.getResNo() == 1 &&
15737 (Opc == X86ISD::ADD ||
15738 Opc == X86ISD::SUB ||
15739 Opc == X86ISD::ADC ||
15740 Opc == X86ISD::SBB ||
15741 Opc == X86ISD::SMUL ||
15742 Opc == X86ISD::UMUL ||
15743 Opc == X86ISD::INC ||
15744 Opc == X86ISD::DEC ||
15745 Opc == X86ISD::OR ||
15746 Opc == X86ISD::XOR ||
15747 Opc == X86ISD::AND))
15750 if (Op.getResNo() == 2 && Opc == X86ISD::UMUL)
15756 static bool isTruncWithZeroHighBitsInput(SDValue V, SelectionDAG &DAG) {
15757 if (V.getOpcode() != ISD::TRUNCATE)
15760 SDValue VOp0 = V.getOperand(0);
15761 unsigned InBits = VOp0.getValueSizeInBits();
15762 unsigned Bits = V.getValueSizeInBits();
15763 return DAG.MaskedValueIsZero(VOp0, APInt::getHighBitsSet(InBits,InBits-Bits));
15766 SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
15767 bool addTest = true;
15768 SDValue Cond = Op.getOperand(0);
15769 SDValue Op1 = Op.getOperand(1);
15770 SDValue Op2 = Op.getOperand(2);
15772 EVT VT = Op1.getValueType();
15775 // Lower fp selects into a CMP/AND/ANDN/OR sequence when the necessary SSE ops
15776 // are available. Otherwise fp cmovs get lowered into a less efficient branch
15777 // sequence later on.
15778 if (Cond.getOpcode() == ISD::SETCC &&
15779 ((Subtarget->hasSSE2() && (VT == MVT::f32 || VT == MVT::f64)) ||
15780 (Subtarget->hasSSE1() && VT == MVT::f32)) &&
15781 VT == Cond.getOperand(0).getValueType() && Cond->hasOneUse()) {
15782 SDValue CondOp0 = Cond.getOperand(0), CondOp1 = Cond.getOperand(1);
15783 int SSECC = translateX86FSETCC(
15784 cast<CondCodeSDNode>(Cond.getOperand(2))->get(), CondOp0, CondOp1);
15787 if (Subtarget->hasAVX512()) {
15788 SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, MVT::i1, CondOp0, CondOp1,
15789 DAG.getConstant(SSECC, MVT::i8));
15790 return DAG.getNode(X86ISD::SELECT, DL, VT, Cmp, Op1, Op2);
15792 SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, VT, CondOp0, CondOp1,
15793 DAG.getConstant(SSECC, MVT::i8));
15794 SDValue AndN = DAG.getNode(X86ISD::FANDN, DL, VT, Cmp, Op2);
15795 SDValue And = DAG.getNode(X86ISD::FAND, DL, VT, Cmp, Op1);
15796 return DAG.getNode(X86ISD::FOR, DL, VT, AndN, And);
15800 if (Cond.getOpcode() == ISD::SETCC) {
15801 SDValue NewCond = LowerSETCC(Cond, DAG);
15802 if (NewCond.getNode())
15806 // (select (x == 0), -1, y) -> (sign_bit (x - 1)) | y
15807 // (select (x == 0), y, -1) -> ~(sign_bit (x - 1)) | y
15808 // (select (x != 0), y, -1) -> (sign_bit (x - 1)) | y
15809 // (select (x != 0), -1, y) -> ~(sign_bit (x - 1)) | y
15810 if (Cond.getOpcode() == X86ISD::SETCC &&
15811 Cond.getOperand(1).getOpcode() == X86ISD::CMP &&
15812 isZero(Cond.getOperand(1).getOperand(1))) {
15813 SDValue Cmp = Cond.getOperand(1);
15815 unsigned CondCode = cast<ConstantSDNode>(Cond.getOperand(0))->getZExtValue();
15817 if ((isAllOnes(Op1) || isAllOnes(Op2)) &&
15818 (CondCode == X86::COND_E || CondCode == X86::COND_NE)) {
15819 SDValue Y = isAllOnes(Op2) ? Op1 : Op2;
15821 SDValue CmpOp0 = Cmp.getOperand(0);
15822 // Apply further optimizations for special cases
15823 // (select (x != 0), -1, 0) -> neg & sbb
15824 // (select (x == 0), 0, -1) -> neg & sbb
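// Editorial note: the neg & sbb trick works because NEG sets the carry flag
// exactly when x is non-zero, and SETCC_CARRY(COND_B) (an SBB of a register
// with itself) then materializes 0 or -1 from that carry, so
// (x != 0) ? -1 : 0 needs neither a branch nor a cmov.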
15825 if (ConstantSDNode *YC = dyn_cast<ConstantSDNode>(Y))
15826 if (YC->isNullValue() &&
15827 (isAllOnes(Op1) == (CondCode == X86::COND_NE))) {
15828 SDVTList VTs = DAG.getVTList(CmpOp0.getValueType(), MVT::i32);
15829 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, VTs,
15830 DAG.getConstant(0, CmpOp0.getValueType()),
15832 SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
15833 DAG.getConstant(X86::COND_B, MVT::i8),
15834 SDValue(Neg.getNode(), 1));
15838 Cmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32,
15839 CmpOp0, DAG.getConstant(1, CmpOp0.getValueType()));
15840 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
15842 SDValue Res = // Res = 0 or -1.
15843 DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
15844 DAG.getConstant(X86::COND_B, MVT::i8), Cmp);
15846 if (isAllOnes(Op1) != (CondCode == X86::COND_E))
15847 Res = DAG.getNOT(DL, Res, Res.getValueType());
15849 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(Op2);
15850 if (!N2C || !N2C->isNullValue())
15851 Res = DAG.getNode(ISD::OR, DL, Res.getValueType(), Res, Y);
15856 // Look past (and (setcc_carry (cmp ...)), 1).
15857 if (Cond.getOpcode() == ISD::AND &&
15858 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
15859 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
15860 if (C && C->getAPIntValue() == 1)
15861 Cond = Cond.getOperand(0);
15864 // If condition flag is set by a X86ISD::CMP, then use it as the condition
15865 // setting operand in place of the X86ISD::SETCC.
15866 unsigned CondOpcode = Cond.getOpcode();
15867 if (CondOpcode == X86ISD::SETCC ||
15868 CondOpcode == X86ISD::SETCC_CARRY) {
15869 CC = Cond.getOperand(0);
15871 SDValue Cmp = Cond.getOperand(1);
15872 unsigned Opc = Cmp.getOpcode();
15873 MVT VT = Op.getSimpleValueType();
15875 bool IllegalFPCMov = false;
15876 if (VT.isFloatingPoint() && !VT.isVector() &&
15877 !isScalarFPTypeInSSEReg(VT)) // FPStack?
15878 IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSExtValue());
15880 if ((isX86LogicalCmp(Cmp) && !IllegalFPCMov) ||
15881 Opc == X86ISD::BT) { // FIXME
15885 } else if (CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
15886 CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
15887 ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) &&
15888 Cond.getOperand(0).getValueType() != MVT::i8)) {
15889 SDValue LHS = Cond.getOperand(0);
15890 SDValue RHS = Cond.getOperand(1);
15891 unsigned X86Opcode;
15894 switch (CondOpcode) {
15895 case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break;
15896 case ISD::SADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break;
15897 case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break;
15898 case ISD::SSUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break;
15899 case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break;
15900 case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break;
15901 default: llvm_unreachable("unexpected overflowing operator");
15903 if (CondOpcode == ISD::UMULO)
15904 VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(),
15907 VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
15909 SDValue X86Op = DAG.getNode(X86Opcode, DL, VTs, LHS, RHS);
15911 if (CondOpcode == ISD::UMULO)
15912 Cond = X86Op.getValue(2);
15914 Cond = X86Op.getValue(1);
15916 CC = DAG.getConstant(X86Cond, MVT::i8);
15921 // Look past the truncate if the high bits are known zero.
15922 if (isTruncWithZeroHighBitsInput(Cond, DAG))
15923 Cond = Cond.getOperand(0);
15925 // We know the result of AND is compared against zero. Try to match
15926 // it to BT.
15927 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
15928 SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, DL, DAG);
15929 if (NewSetCC.getNode()) {
15930 CC = NewSetCC.getOperand(0);
15931 Cond = NewSetCC.getOperand(1);
15938 CC = DAG.getConstant(X86::COND_NE, MVT::i8);
15939 Cond = EmitTest(Cond, X86::COND_NE, DL, DAG);
15942 // a < b ? -1 : 0 -> RES = ~setcc_carry
15943 // a < b ? 0 : -1 -> RES = setcc_carry
15944 // a >= b ? -1 : 0 -> RES = setcc_carry
15945 // a >= b ? 0 : -1 -> RES = ~setcc_carry
15946 if (Cond.getOpcode() == X86ISD::SUB) {
15947 Cond = ConvertCmpIfNecessary(Cond, DAG);
15948 unsigned CondCode = cast<ConstantSDNode>(CC)->getZExtValue();
15950 if ((CondCode == X86::COND_AE || CondCode == X86::COND_B) &&
15951 (isAllOnes(Op1) || isAllOnes(Op2)) && (isZero(Op1) || isZero(Op2))) {
15952 SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
15953 DAG.getConstant(X86::COND_B, MVT::i8), Cond);
15954 if (isAllOnes(Op1) != (CondCode == X86::COND_B))
15955 return DAG.getNOT(DL, Res, Res.getValueType());
15960 // X86 doesn't have an i8 cmov. If both operands are the result of a truncate,
15961 // widen the cmov and push the truncate through. This avoids introducing a new
15962 // branch during isel and doesn't add any extensions.
15963 if (Op.getValueType() == MVT::i8 &&
15964 Op1.getOpcode() == ISD::TRUNCATE && Op2.getOpcode() == ISD::TRUNCATE) {
15965 SDValue T1 = Op1.getOperand(0), T2 = Op2.getOperand(0);
15966 if (T1.getValueType() == T2.getValueType() &&
15967 // Blacklist CopyFromReg to avoid partial register stalls.
15968 T1.getOpcode() != ISD::CopyFromReg && T2.getOpcode()!=ISD::CopyFromReg){
15969 SDVTList VTs = DAG.getVTList(T1.getValueType(), MVT::Glue);
15970 SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, VTs, T2, T1, CC, Cond);
15971 return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
15975 // X86ISD::CMOV means set the result (which is operand 1) to the RHS if
15976 // condition is true.
15977 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
15978 SDValue Ops[] = { Op2, Op1, CC, Cond };
15979 return DAG.getNode(X86ISD::CMOV, DL, VTs, Ops);
15982 static SDValue LowerSIGN_EXTEND_AVX512(SDValue Op, const X86Subtarget *Subtarget,
15983 SelectionDAG &DAG) {
15984 MVT VT = Op->getSimpleValueType(0);
15985 SDValue In = Op->getOperand(0);
15986 MVT InVT = In.getSimpleValueType();
15987 MVT VTElt = VT.getVectorElementType();
15988 MVT InVTElt = InVT.getVectorElementType();
15992 if ((InVTElt == MVT::i1) &&
15993 (((Subtarget->hasBWI() && Subtarget->hasVLX() &&
15994 VT.getSizeInBits() <= 256 && VTElt.getSizeInBits() <= 16)) ||
15996 ((Subtarget->hasBWI() && VT.is512BitVector() &&
15997 VTElt.getSizeInBits() <= 16)) ||
15999 ((Subtarget->hasDQI() && Subtarget->hasVLX() &&
16000 VT.getSizeInBits() <= 256 && VTElt.getSizeInBits() >= 32)) ||
16002 ((Subtarget->hasDQI() && VT.is512BitVector() &&
16003 VTElt.getSizeInBits() >= 32))))
16004 return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
16006 unsigned int NumElts = VT.getVectorNumElements();
16008 if (NumElts != 8 && NumElts != 16)
16011 if (VT.is512BitVector() && InVT.getVectorElementType() != MVT::i1) {
16012 if (In.getOpcode() == X86ISD::VSEXT || In.getOpcode() == X86ISD::VZEXT)
16013 return DAG.getNode(In.getOpcode(), dl, VT, In.getOperand(0));
16014 return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
16017 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
16018 assert (InVT.getVectorElementType() == MVT::i1 && "Unexpected vector type");
16020 MVT ExtVT = (NumElts == 8) ? MVT::v8i64 : MVT::v16i32;
16021 Constant *C = ConstantInt::get(*DAG.getContext(),
16022 APInt::getAllOnesValue(ExtVT.getScalarType().getSizeInBits()));
16024 SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy());
16025 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
16026 SDValue Ld = DAG.getLoad(ExtVT.getScalarType(), dl, DAG.getEntryNode(), CP,
16027 MachinePointerInfo::getConstantPool(),
16028 false, false, false, Alignment);
16029 SDValue Brcst = DAG.getNode(X86ISD::VBROADCASTM, dl, ExtVT, In, Ld);
16030 if (VT.is512BitVector())
16032 return DAG.getNode(X86ISD::VTRUNC, dl, VT, Brcst);
16035 static SDValue LowerSIGN_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
16036 SelectionDAG &DAG) {
16037 MVT VT = Op->getSimpleValueType(0);
16038 SDValue In = Op->getOperand(0);
16039 MVT InVT = In.getSimpleValueType();
16042 if (VT.is512BitVector() || InVT.getVectorElementType() == MVT::i1)
16043 return LowerSIGN_EXTEND_AVX512(Op, Subtarget, DAG);
16045 if ((VT != MVT::v4i64 || InVT != MVT::v4i32) &&
16046 (VT != MVT::v8i32 || InVT != MVT::v8i16) &&
16047 (VT != MVT::v16i16 || InVT != MVT::v16i8))
16050 if (Subtarget->hasInt256())
16051 return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
16053 // Optimize vectors in AVX mode:
16054 // Sign extend  v8i16 to v8i32 and
16055 //              v4i32 to v4i64.
16057 // Divide the input vector into two parts;
16058 // for v4i32 the shuffle mask will be { 0, 1, -1, -1} {2, 3, -1, -1};
16059 // use the vpmovsx instruction to extend v4i32 -> v2i64 and v8i16 -> v4i32;
16060 // concat the vectors back to the original VT.
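// Editorial example: for v8i16 -> v8i32 without AVX2 this builds
// ShufMask1 = {0,1,2,3,-1,-1,-1,-1} and ShufMask2 = {4,5,6,7,-1,-1,-1,-1},
// sign-extends each half with VSEXT to v4i32, and concatenates the two
// halves back into the requested v8i32.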
16062 unsigned NumElems = InVT.getVectorNumElements();
16063 SDValue Undef = DAG.getUNDEF(InVT);
16065 SmallVector<int,8> ShufMask1(NumElems, -1);
16066 for (unsigned i = 0; i != NumElems/2; ++i)
16069 SDValue OpLo = DAG.getVectorShuffle(InVT, dl, In, Undef, &ShufMask1[0]);
16071 SmallVector<int,8> ShufMask2(NumElems, -1);
16072 for (unsigned i = 0; i != NumElems/2; ++i)
16073 ShufMask2[i] = i + NumElems/2;
16075 SDValue OpHi = DAG.getVectorShuffle(InVT, dl, In, Undef, &ShufMask2[0]);
16077 MVT HalfVT = MVT::getVectorVT(VT.getScalarType(),
16078 VT.getVectorNumElements()/2);
16080 OpLo = DAG.getNode(X86ISD::VSEXT, dl, HalfVT, OpLo);
16081 OpHi = DAG.getNode(X86ISD::VSEXT, dl, HalfVT, OpHi);
16083 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
16086 // Lower vector extended loads using a shuffle. If SSSE3 is not available we
16087 // may emit an illegal shuffle but the expansion is still better than scalar
16088 // code. We generate X86ISD::VSEXT for SEXTLOADs if it's available, otherwise
16089 // we'll emit a shuffle and an arithmetic shift.
16090 // TODO: It is possible to support ZExt by zeroing the undef values during
16091 // the shuffle phase or after the shuffle.
16092 static SDValue LowerExtendedLoad(SDValue Op, const X86Subtarget *Subtarget,
16093 SelectionDAG &DAG) {
16094 MVT RegVT = Op.getSimpleValueType();
16095 assert(RegVT.isVector() && "We only custom lower vector sext loads.");
16096 assert(RegVT.isInteger() &&
16097 "We only custom lower integer vector sext loads.");
16099 // Nothing useful we can do without SSE2 shuffles.
16100 assert(Subtarget->hasSSE2() && "We only custom lower sext loads with SSE2.");
16102 LoadSDNode *Ld = cast<LoadSDNode>(Op.getNode());
16104 EVT MemVT = Ld->getMemoryVT();
16105 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
16106 unsigned RegSz = RegVT.getSizeInBits();
16108 ISD::LoadExtType Ext = Ld->getExtensionType();
16110 assert((Ext == ISD::EXTLOAD || Ext == ISD::SEXTLOAD)
16111 && "Only anyext and sext are currently implemented.");
16112 assert(MemVT != RegVT && "Cannot extend to the same type");
16113 assert(MemVT.isVector() && "Must load a vector from memory");
16115 unsigned NumElems = RegVT.getVectorNumElements();
16116 unsigned MemSz = MemVT.getSizeInBits();
16117 assert(RegSz > MemSz && "Register size must be greater than the mem size");
16119 if (Ext == ISD::SEXTLOAD && RegSz == 256 && !Subtarget->hasInt256()) {
16120 // The only way in which we have a legal 256-bit vector result but not the
16121 // integer 256-bit operations needed to directly lower a sextload is if we
16122 // have AVX1 but not AVX2. In that case, we can always emit a sextload to
16123 // a 128-bit vector and a normal sign_extend to 256-bits that should get
16124 // correctly legalized. We do this late to allow the canonical form of
16125 // sextload to persist throughout the rest of the DAG combiner -- it wants
16126 // to fold together any extensions it can, and so will fuse a sign_extend
16127 // of an sextload into a sextload targeting a wider value.
16129 if (MemSz == 128) {
16130 // Just switch this to a normal load.
16131 assert(TLI.isTypeLegal(MemVT) && "If the memory type is a 128-bit type, "
16132 "it must be a legal 128-bit vector "
16134 Load = DAG.getLoad(MemVT, dl, Ld->getChain(), Ld->getBasePtr(),
16135 Ld->getPointerInfo(), Ld->isVolatile(), Ld->isNonTemporal(),
16136 Ld->isInvariant(), Ld->getAlignment());
16138 assert(MemSz < 128 &&
16139 "Can't extend a type wider than 128 bits to a 256 bit vector!");
16140 // Do an sext load to a 128-bit vector type. We want to use the same
16141 // number of elements, but elements half as wide. This will end up being
16142 // recursively lowered by this routine, but will succeed as we definitely
16143 // have all the necessary features if we're using AVX1.
16145 EVT::getIntegerVT(*DAG.getContext(), RegVT.getScalarSizeInBits() / 2);
16146 EVT HalfVecVT = EVT::getVectorVT(*DAG.getContext(), HalfEltVT, NumElems);
16148 DAG.getExtLoad(Ext, dl, HalfVecVT, Ld->getChain(), Ld->getBasePtr(),
16149 Ld->getPointerInfo(), MemVT, Ld->isVolatile(),
16150 Ld->isNonTemporal(), Ld->isInvariant(),
16151 Ld->getAlignment());
16154 // Replace chain users with the new chain.
16155 assert(Load->getNumValues() == 2 && "Loads must carry a chain!");
16156 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), Load.getValue(1));
16158 // Finally, do a normal sign-extend to the desired register.
16159 return DAG.getSExtOrTrunc(Load, dl, RegVT);
16162 // All sizes must be a power of two.
16163 assert(isPowerOf2_32(RegSz * MemSz * NumElems) &&
16164 "Non-power-of-two elements are not custom lowered!");
16166 // Attempt to load the original value using scalar loads.
16167 // Find the largest scalar type that divides the total loaded size.
16168 MVT SclrLoadTy = MVT::i8;
16169 for (MVT Tp : MVT::integer_valuetypes()) {
16170 if (TLI.isTypeLegal(Tp) && ((MemSz % Tp.getSizeInBits()) == 0)) {
16175 // On 32bit systems, we can't save 64bit integers. Try bitcasting to F64.
16176 if (TLI.isTypeLegal(MVT::f64) && SclrLoadTy.getSizeInBits() < 64 &&
16178 SclrLoadTy = MVT::f64;
16180 // Calculate the number of scalar loads that we need to perform
16181 // in order to load our vector from memory.
16182 unsigned NumLoads = MemSz / SclrLoadTy.getSizeInBits();
16184 assert((Ext != ISD::SEXTLOAD || NumLoads == 1) &&
16185 "Can only lower sext loads with a single scalar load!");
16187 unsigned loadRegZize = RegSz;
16188 if (Ext == ISD::SEXTLOAD && RegSz == 256)
16189   loadRegZize = 128;
16191 // Represent our vector as a sequence of elements which are the
16192 // largest scalar that we can load.
16193 EVT LoadUnitVecVT = EVT::getVectorVT(
16194 *DAG.getContext(), SclrLoadTy, loadRegZize / SclrLoadTy.getSizeInBits());
16196 // Represent the data using the same element type that is stored in
16197 // memory. In practice, we "widen" MemVT.
16199 EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
16200 loadRegZize / MemVT.getScalarType().getSizeInBits());
16202 assert(WideVecVT.getSizeInBits() == LoadUnitVecVT.getSizeInBits() &&
16203 "Invalid vector type");
16205 // We can't shuffle using an illegal type.
16206 assert(TLI.isTypeLegal(WideVecVT) &&
16207 "We only lower types that form legal widened vector types");
16209 SmallVector<SDValue, 8> Chains;
16210 SDValue Ptr = Ld->getBasePtr();
16211 SDValue Increment =
16212 DAG.getConstant(SclrLoadTy.getSizeInBits() / 8, TLI.getPointerTy());
16213 SDValue Res = DAG.getUNDEF(LoadUnitVecVT);
16215 for (unsigned i = 0; i < NumLoads; ++i) {
16216 // Perform a single load.
16217 SDValue ScalarLoad =
16218 DAG.getLoad(SclrLoadTy, dl, Ld->getChain(), Ptr, Ld->getPointerInfo(),
16219 Ld->isVolatile(), Ld->isNonTemporal(), Ld->isInvariant(),
16220 Ld->getAlignment());
16221 Chains.push_back(ScalarLoad.getValue(1));
16222 // Create the first element type using SCALAR_TO_VECTOR in order to avoid
16223 // another round of DAGCombining.
16225 Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LoadUnitVecVT, ScalarLoad);
16227 Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, LoadUnitVecVT, Res,
16228 ScalarLoad, DAG.getIntPtrConstant(i));
16230 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
16233 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
16235 // Bitcast the loaded value to a vector of the original element type, in
16236 // the size of the target vector type.
16237 SDValue SlicedVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Res);
16238 unsigned SizeRatio = RegSz / MemSz;
16240 if (Ext == ISD::SEXTLOAD) {
16241 // If we have SSE4.1, we can directly emit a VSEXT node.
16242 if (Subtarget->hasSSE41()) {
16243 SDValue Sext = DAG.getNode(X86ISD::VSEXT, dl, RegVT, SlicedVec);
16244 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
16248 // Otherwise we'll shuffle the small elements in the high bits of the
16249 // larger type and perform an arithmetic shift. If the shift is not legal
16250 // it's better to scalarize.
16251 assert(TLI.isOperationLegalOrCustom(ISD::SRA, RegVT) &&
16252 "We can't implement a sext load without an arithmetic right shift!");
16254 // Redistribute the loaded elements into the different locations.
16255 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
16256 for (unsigned i = 0; i != NumElems; ++i)
16257 ShuffleVec[i * SizeRatio + SizeRatio - 1] = i;
16259 SDValue Shuff = DAG.getVectorShuffle(
16260 WideVecVT, dl, SlicedVec, DAG.getUNDEF(WideVecVT), &ShuffleVec[0]);
16262 Shuff = DAG.getNode(ISD::BITCAST, dl, RegVT, Shuff);
16264 // Build the arithmetic shift.
16265 unsigned Amt = RegVT.getVectorElementType().getSizeInBits() -
16266 MemVT.getVectorElementType().getSizeInBits();
16268 DAG.getNode(ISD::SRA, dl, RegVT, Shuff, DAG.getConstant(Amt, RegVT));
16270 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
16274 // Redistribute the loaded elements into the different locations.
16275 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
16276 for (unsigned i = 0; i != NumElems; ++i)
16277 ShuffleVec[i * SizeRatio] = i;
16279 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, SlicedVec,
16280 DAG.getUNDEF(WideVecVT), &ShuffleVec[0]);
16282 // Bitcast to the requested type.
16283 Shuff = DAG.getNode(ISD::BITCAST, dl, RegVT, Shuff);
16284 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
16288 // isAndOrOfSetCCs - Return true if node is an ISD::AND or
16289 // ISD::OR of two X86ISD::SETCC nodes each of which has no other use apart
16290 // from the AND / OR.
16291 static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) {
16292 Opc = Op.getOpcode();
16293 if (Opc != ISD::OR && Opc != ISD::AND)
16295 return (Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
16296 Op.getOperand(0).hasOneUse() &&
16297 Op.getOperand(1).getOpcode() == X86ISD::SETCC &&
16298 Op.getOperand(1).hasOneUse());
16301 // isXor1OfSetCC - Return true if node is an ISD::XOR of an X86ISD::SETCC
16302 // and the constant 1, where the SETCC node has a single use.
16303 static bool isXor1OfSetCC(SDValue Op) {
16304 if (Op.getOpcode() != ISD::XOR)
16306 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
16307 if (N1C && N1C->getAPIntValue() == 1) {
16308 return Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
16309 Op.getOperand(0).hasOneUse();
16314 SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
16315 bool addTest = true;
16316 SDValue Chain = Op.getOperand(0);
16317 SDValue Cond = Op.getOperand(1);
16318 SDValue Dest = Op.getOperand(2);
16321 bool Inverted = false;
16323 if (Cond.getOpcode() == ISD::SETCC) {
16324 // Check for setcc([su]{add,sub,mul}o == 0).
16325 if (cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETEQ &&
16326 isa<ConstantSDNode>(Cond.getOperand(1)) &&
16327 cast<ConstantSDNode>(Cond.getOperand(1))->isNullValue() &&
16328 Cond.getOperand(0).getResNo() == 1 &&
16329 (Cond.getOperand(0).getOpcode() == ISD::SADDO ||
16330 Cond.getOperand(0).getOpcode() == ISD::UADDO ||
16331 Cond.getOperand(0).getOpcode() == ISD::SSUBO ||
16332 Cond.getOperand(0).getOpcode() == ISD::USUBO ||
16333 Cond.getOperand(0).getOpcode() == ISD::SMULO ||
16334 Cond.getOperand(0).getOpcode() == ISD::UMULO)) {
16336 Cond = Cond.getOperand(0);
16338 SDValue NewCond = LowerSETCC(Cond, DAG);
16339 if (NewCond.getNode())
16344 // FIXME: LowerXALUO doesn't handle these!!
16345 else if (Cond.getOpcode() == X86ISD::ADD ||
16346 Cond.getOpcode() == X86ISD::SUB ||
16347 Cond.getOpcode() == X86ISD::SMUL ||
16348 Cond.getOpcode() == X86ISD::UMUL)
16349 Cond = LowerXALUO(Cond, DAG);
16352 // Look past (and (setcc_carry (cmp ...)), 1).
16353 if (Cond.getOpcode() == ISD::AND &&
16354 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
16355 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
16356 if (C && C->getAPIntValue() == 1)
16357 Cond = Cond.getOperand(0);
16360 // If condition flag is set by a X86ISD::CMP, then use it as the condition
16361 // setting operand in place of the X86ISD::SETCC.
16362 unsigned CondOpcode = Cond.getOpcode();
16363 if (CondOpcode == X86ISD::SETCC ||
16364 CondOpcode == X86ISD::SETCC_CARRY) {
16365 CC = Cond.getOperand(0);
16367 SDValue Cmp = Cond.getOperand(1);
16368 unsigned Opc = Cmp.getOpcode();
16369 // FIXME: WHY THE SPECIAL CASING OF LogicalCmp??
16370 if (isX86LogicalCmp(Cmp) || Opc == X86ISD::BT) {
16374 switch (cast<ConstantSDNode>(CC)->getZExtValue()) {
16378 // These can only come from an arithmetic instruction with overflow,
16379 // e.g. SADDO, UADDO.
16380 Cond = Cond.getNode()->getOperand(1);
16386 CondOpcode = Cond.getOpcode();
16387 if (CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
16388 CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
16389 ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) &&
16390 Cond.getOperand(0).getValueType() != MVT::i8)) {
16391 SDValue LHS = Cond.getOperand(0);
16392 SDValue RHS = Cond.getOperand(1);
16393 unsigned X86Opcode;
16396 // Keep this in sync with LowerXALUO, otherwise we might create redundant
16397 // instructions that can't be removed afterwards (i.e. X86ISD::ADD and
16398 // X86ISD::SUB).
16399 switch (CondOpcode) {
16400 case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break;
16402 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
16404 X86Opcode = X86ISD::INC; X86Cond = X86::COND_O;
16407 X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break;
16408 case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break;
16410 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
16412 X86Opcode = X86ISD::DEC; X86Cond = X86::COND_O;
16415 X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break;
16416 case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break;
16417 case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break;
16418 default: llvm_unreachable("unexpected overflowing operator");
16421 X86Cond = X86::GetOppositeBranchCondition((X86::CondCode)X86Cond);
16422 if (CondOpcode == ISD::UMULO)
16423 VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(),
16426 VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
16428 SDValue X86Op = DAG.getNode(X86Opcode, dl, VTs, LHS, RHS);
16430 if (CondOpcode == ISD::UMULO)
16431 Cond = X86Op.getValue(2);
16433 Cond = X86Op.getValue(1);
16435 CC = DAG.getConstant(X86Cond, MVT::i8);
16439 if (Cond.hasOneUse() && isAndOrOfSetCCs(Cond, CondOpc)) {
16440 SDValue Cmp = Cond.getOperand(0).getOperand(1);
16441 if (CondOpc == ISD::OR) {
16442 // Also, recognize the pattern generated by an FCMP_UNE. We can emit
16443 // two branches instead of an explicit OR instruction with a
16444 // separate test.
16445 if (Cmp == Cond.getOperand(1).getOperand(1) &&
16446 isX86LogicalCmp(Cmp)) {
16447 CC = Cond.getOperand(0).getOperand(0);
16448 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16449 Chain, Dest, CC, Cmp);
16450 CC = Cond.getOperand(1).getOperand(0);
16454 } else { // ISD::AND
16455 // Also, recognize the pattern generated by an FCMP_OEQ. We can emit
16456 // two branches instead of an explicit AND instruction with a
16457 // separate test. However, we only do this if this block doesn't
16458 // have a fall-through edge, because this requires an explicit
16459 // jmp when the condition is false.
16460 if (Cmp == Cond.getOperand(1).getOperand(1) &&
16461 isX86LogicalCmp(Cmp) &&
16462 Op.getNode()->hasOneUse()) {
16463 X86::CondCode CCode =
16464 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
16465 CCode = X86::GetOppositeBranchCondition(CCode);
16466 CC = DAG.getConstant(CCode, MVT::i8);
16467 SDNode *User = *Op.getNode()->use_begin();
16468 // Look for an unconditional branch following this conditional branch.
16469 // We need this because we need to reverse the successors in order
16470 // to implement FCMP_OEQ.
16471 if (User->getOpcode() == ISD::BR) {
16472 SDValue FalseBB = User->getOperand(1);
16474 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
16475 assert(NewBR == User);
16479 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16480 Chain, Dest, CC, Cmp);
16481 X86::CondCode CCode =
16482 (X86::CondCode)Cond.getOperand(1).getConstantOperandVal(0);
16483 CCode = X86::GetOppositeBranchCondition(CCode);
16484 CC = DAG.getConstant(CCode, MVT::i8);
16490 } else if (Cond.hasOneUse() && isXor1OfSetCC(Cond)) {
16491 // Recognize the xorb (setcc), 1 pattern; the xor inverts the condition.
16492 // It should be transformed by the DAG combiner except when the condition
16493 // is set by an arithmetic-with-overflow node.
16494 X86::CondCode CCode =
16495 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
16496 CCode = X86::GetOppositeBranchCondition(CCode);
16497 CC = DAG.getConstant(CCode, MVT::i8);
16498 Cond = Cond.getOperand(0).getOperand(1);
16500 } else if (Cond.getOpcode() == ISD::SETCC &&
16501 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETOEQ) {
16502 // For FCMP_OEQ, we can emit
16503 // two branches instead of an explicit AND instruction with a
16504 // separate test. However, we only do this if this block doesn't
16505 // have a fall-through edge, because this requires an explicit
16506 // jmp when the condition is false.
16507 if (Op.getNode()->hasOneUse()) {
16508 SDNode *User = *Op.getNode()->use_begin();
16509 // Look for an unconditional branch following this conditional branch.
16510 // We need this because we need to reverse the successors in order
16511 // to implement FCMP_OEQ.
16512 if (User->getOpcode() == ISD::BR) {
16513 SDValue FalseBB = User->getOperand(1);
16515 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
16516 assert(NewBR == User);
16520 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
16521 Cond.getOperand(0), Cond.getOperand(1));
16522 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
16523 CC = DAG.getConstant(X86::COND_NE, MVT::i8);
16524 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16525 Chain, Dest, CC, Cmp);
16526 CC = DAG.getConstant(X86::COND_P, MVT::i8);
16531 } else if (Cond.getOpcode() == ISD::SETCC &&
16532 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETUNE) {
16533 // For FCMP_UNE, we can emit
16534 // two branches instead of an explicit AND instruction with a
16535 // separate test. However, we only do this if this block doesn't
16536 // have a fall-through edge, because this requires an explicit
16537 // jmp when the condition is false.
16538 if (Op.getNode()->hasOneUse()) {
16539 SDNode *User = *Op.getNode()->use_begin();
16540 // Look for an unconditional branch following this conditional branch.
16541 // We need this because we need to reverse the successors in order
16542 // to implement FCMP_UNE.
16543 if (User->getOpcode() == ISD::BR) {
16544 SDValue FalseBB = User->getOperand(1);
16546 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
16547 assert(NewBR == User);
16550 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
16551 Cond.getOperand(0), Cond.getOperand(1));
16552 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
16553 CC = DAG.getConstant(X86::COND_NE, MVT::i8);
16554 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16555 Chain, Dest, CC, Cmp);
16556 CC = DAG.getConstant(X86::COND_NP, MVT::i8);
16566 // Look past the truncate if the high bits are known zero.
16567 if (isTruncWithZeroHighBitsInput(Cond, DAG))
16568 Cond = Cond.getOperand(0);
16570 // We know the result of AND is compared against zero. Try to match
16572 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
16573 SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, dl, DAG);
16574 if (NewSetCC.getNode()) {
16575 CC = NewSetCC.getOperand(0);
16576 Cond = NewSetCC.getOperand(1);
16583 X86::CondCode X86Cond = Inverted ? X86::COND_E : X86::COND_NE;
16584 CC = DAG.getConstant(X86Cond, MVT::i8);
16585 Cond = EmitTest(Cond, X86Cond, dl, DAG);
16587 Cond = ConvertCmpIfNecessary(Cond, DAG);
16588 return DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16589 Chain, Dest, CC, Cond);
16592 // Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets.
16593 // Calls to _alloca are needed to probe the stack when allocating more than 4k
16594 // bytes in one go. Touching the stack at 4K increments is necessary to ensure
16595 // that the guard pages used by the OS virtual memory manager are allocated in
16596 // correct sequence.
16598 X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
16599 SelectionDAG &DAG) const {
16600 MachineFunction &MF = DAG.getMachineFunction();
16601 bool SplitStack = MF.shouldSplitStack();
16602 bool Lower = (Subtarget->isOSWindows() && !Subtarget->isTargetMachO()) ||
16607 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
16608 SDNode* Node = Op.getNode();
16610 unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();
16611 assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and"
16612 " not tell us which reg is the stack pointer!");
16613 EVT VT = Node->getValueType(0);
16614 SDValue Tmp1 = SDValue(Node, 0);
16615 SDValue Tmp2 = SDValue(Node, 1);
16616 SDValue Tmp3 = Node->getOperand(2);
16617 SDValue Chain = Tmp1.getOperand(0);
16619 // Chain the dynamic stack allocation so that it doesn't modify the stack
16620 // pointer when other instructions are using the stack.
16621 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(0, true),
16624 SDValue Size = Tmp2.getOperand(1);
16625 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
16626 Chain = SP.getValue(1);
16627 unsigned Align = cast<ConstantSDNode>(Tmp3)->getZExtValue();
16628 const TargetFrameLowering &TFI = *DAG.getSubtarget().getFrameLowering();
16629 unsigned StackAlign = TFI.getStackAlignment();
16630 Tmp1 = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
16631 if (Align > StackAlign)
16632 Tmp1 = DAG.getNode(ISD::AND, dl, VT, Tmp1,
16633 DAG.getConstant(-(uint64_t)Align, VT));
16634 Chain = DAG.getCopyToReg(Chain, dl, SPReg, Tmp1); // Output chain
16636 Tmp2 = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, true),
16637 DAG.getIntPtrConstant(0, true), SDValue(),
16640 SDValue Ops[2] = { Tmp1, Tmp2 };
16641 return DAG.getMergeValues(Ops, dl);
16645 SDValue Chain = Op.getOperand(0);
16646 SDValue Size = Op.getOperand(1);
16647 unsigned Align = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
16648 EVT VT = Op.getNode()->getValueType(0);
16650 bool Is64Bit = Subtarget->is64Bit();
16651 EVT SPTy = getPointerTy();
16654 MachineRegisterInfo &MRI = MF.getRegInfo();
16657 // The 64-bit implementation of segmented stacks needs to clobber both r10
16658 // and r11. This makes it impossible to use it along with nested parameters.
16659 const Function *F = MF.getFunction();
16661 for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
16663 if (I->hasNestAttr())
16664 report_fatal_error("Cannot use segmented stacks with functions that "
16665 "have nested arguments.");
16668 const TargetRegisterClass *AddrRegClass =
16669 getRegClassFor(getPointerTy());
16670 unsigned Vreg = MRI.createVirtualRegister(AddrRegClass);
16671 Chain = DAG.getCopyToReg(Chain, dl, Vreg, Size);
16672 SDValue Value = DAG.getNode(X86ISD::SEG_ALLOCA, dl, SPTy, Chain,
16673 DAG.getRegister(Vreg, SPTy));
16674 SDValue Ops1[2] = { Value, Chain };
16675 return DAG.getMergeValues(Ops1, dl);
16678 const unsigned Reg = (Subtarget->isTarget64BitLP64() ? X86::RAX : X86::EAX);
16680 Chain = DAG.getCopyToReg(Chain, dl, Reg, Size, Flag);
16681 Flag = Chain.getValue(1);
16682 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
16684 Chain = DAG.getNode(X86ISD::WIN_ALLOCA, dl, NodeTys, Chain, Flag);
16686 const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
16687 DAG.getSubtarget().getRegisterInfo());
16688 unsigned SPReg = RegInfo->getStackRegister();
16689 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, SPTy);
16690 Chain = SP.getValue(1);
16693 SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0),
16694 DAG.getConstant(-(uint64_t)Align, VT));
16695 Chain = DAG.getCopyToReg(Chain, dl, SPReg, SP);
16698 SDValue Ops1[2] = { SP, Chain };
16699 return DAG.getMergeValues(Ops1, dl);
16703 SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
16704 MachineFunction &MF = DAG.getMachineFunction();
16705 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
16707 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
16710 if (!Subtarget->is64Bit() || Subtarget->isTargetWin64()) {
16711 // vastart just stores the address of the VarArgsFrameIndex slot into the
16712 // memory location argument.
16713 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
16715 return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
16716 MachinePointerInfo(SV), false, false, 0);
16719 // __va_list_tag:
16720 //   gp_offset         (0 - 6 * 8)
16721 //   fp_offset         (48 - 48 + 8 * 16)
16722 //   overflow_arg_area (point to parameters coming in memory).
16723 //   reg_save_area
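// Editorial sketch (assumption, based on the x86-64 psABI and the struct
// described in LowerVACOPY below):
//   struct __va_list_tag {
//     unsigned gp_offset;         // 0 .. 48, stepping by 8
//     unsigned fp_offset;         // 48 .. 48 + 8 * 16, stepping by 16
//     void    *overflow_arg_area; // stack-passed arguments
//     void    *reg_save_area;     // spilled register arguments
//   };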
16724 SmallVector<SDValue, 8> MemOps;
16725 SDValue FIN = Op.getOperand(1);
16727 SDValue Store = DAG.getStore(Op.getOperand(0), DL,
16728 DAG.getConstant(FuncInfo->getVarArgsGPOffset(),
16730 FIN, MachinePointerInfo(SV), false, false, 0);
16731 MemOps.push_back(Store);
16734 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
16735 FIN, DAG.getIntPtrConstant(4));
16736 Store = DAG.getStore(Op.getOperand(0), DL,
16737 DAG.getConstant(FuncInfo->getVarArgsFPOffset(),
16739 FIN, MachinePointerInfo(SV, 4), false, false, 0);
16740 MemOps.push_back(Store);
16742 // Store ptr to overflow_arg_area
16743 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
16744 FIN, DAG.getIntPtrConstant(4));
16745 SDValue OVFIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
16747 Store = DAG.getStore(Op.getOperand(0), DL, OVFIN, FIN,
16748 MachinePointerInfo(SV, 8),
16750 MemOps.push_back(Store);
16752 // Store ptr to reg_save_area.
16753 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
16754 FIN, DAG.getIntPtrConstant(8));
16755 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
16757 Store = DAG.getStore(Op.getOperand(0), DL, RSFIN, FIN,
16758 MachinePointerInfo(SV, 16), false, false, 0);
16759 MemOps.push_back(Store);
16760 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
16763 SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
16764 assert(Subtarget->is64Bit() &&
16765 "LowerVAARG only handles 64-bit va_arg!");
16766 assert((Subtarget->isTargetLinux() ||
16767 Subtarget->isTargetDarwin()) &&
16768 "Unhandled target in LowerVAARG");
16769 assert(Op.getNode()->getNumOperands() == 4);
16770 SDValue Chain = Op.getOperand(0);
16771 SDValue SrcPtr = Op.getOperand(1);
16772 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
16773 unsigned Align = Op.getConstantOperandVal(3);
16776 EVT ArgVT = Op.getNode()->getValueType(0);
16777 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
16778 uint32_t ArgSize = getDataLayout()->getTypeAllocSize(ArgTy);
16781 // Decide which area this value should be read from.
16782 // TODO: Implement the AMD64 ABI in its entirety. This simple
16783 // selection mechanism works only for the basic types.
16784 if (ArgVT == MVT::f80) {
16785 llvm_unreachable("va_arg for f80 not yet implemented");
16786 } else if (ArgVT.isFloatingPoint() && ArgSize <= 16 /*bytes*/) {
16787 ArgMode = 2; // Argument passed in XMM register. Use fp_offset.
16788 } else if (ArgVT.isInteger() && ArgSize <= 32 /*bytes*/) {
16789 ArgMode = 1; // Argument passed in GPR64 register(s). Use gp_offset.
16791 llvm_unreachable("Unhandled argument type in LowerVAARG");
16794 if (ArgMode == 2) {
16795 // Sanity Check: Make sure using fp_offset makes sense.
16796 assert(!DAG.getTarget().Options.UseSoftFloat &&
16797 !(DAG.getMachineFunction()
16798 .getFunction()->getAttributes()
16799 .hasAttribute(AttributeSet::FunctionIndex,
16800 Attribute::NoImplicitFloat)) &&
16801 Subtarget->hasSSE1());
16804 // Insert VAARG_64 node into the DAG
16805 // VAARG_64 returns two values: Variable Argument Address, Chain
16806 SmallVector<SDValue, 11> InstOps;
16807 InstOps.push_back(Chain);
16808 InstOps.push_back(SrcPtr);
16809 InstOps.push_back(DAG.getConstant(ArgSize, MVT::i32));
16810 InstOps.push_back(DAG.getConstant(ArgMode, MVT::i8));
16811 InstOps.push_back(DAG.getConstant(Align, MVT::i32));
16812 SDVTList VTs = DAG.getVTList(getPointerTy(), MVT::Other);
16813 SDValue VAARG = DAG.getMemIntrinsicNode(X86ISD::VAARG_64, dl,
16814 VTs, InstOps, MVT::i64,
16815 MachinePointerInfo(SV),
16817 /*Volatile=*/false,
16819 /*WriteMem=*/true);
16820 Chain = VAARG.getValue(1);
16822 // Load the next argument and return it
16823 return DAG.getLoad(ArgVT, dl,
16826 MachinePointerInfo(),
16827 false, false, false, 0);
16830 static SDValue LowerVACOPY(SDValue Op, const X86Subtarget *Subtarget,
16831 SelectionDAG &DAG) {
16832 // X86-64 va_list is a struct { i32, i32, i8*, i8* }.
16833 assert(Subtarget->is64Bit() && "This code only handles 64-bit va_copy!");
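// Editorial note: the 24-byte memcpy below copies the whole va_list: the two
// i32 offsets (8 bytes) plus the two 8-byte pointers (16 bytes) on x86-64.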
16834 SDValue Chain = Op.getOperand(0);
16835 SDValue DstPtr = Op.getOperand(1);
16836 SDValue SrcPtr = Op.getOperand(2);
16837 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
16838 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
16841 return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr,
16842 DAG.getIntPtrConstant(24), 8, /*isVolatile*/false,
16844 MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
16847 // getTargetVShiftByConstNode - Handle vector element shifts where the shift
16848 // amount is a constant. Takes immediate version of shift as input.
16849 static SDValue getTargetVShiftByConstNode(unsigned Opc, SDLoc dl, MVT VT,
16850 SDValue SrcOp, uint64_t ShiftAmt,
16851 SelectionDAG &DAG) {
16852 MVT ElementType = VT.getVectorElementType();
16854 // Fold this packed shift into its first operand if ShiftAmt is 0.
16858 // Check for ShiftAmt >= element width
16859 if (ShiftAmt >= ElementType.getSizeInBits()) {
16860 if (Opc == X86ISD::VSRAI)
16861 ShiftAmt = ElementType.getSizeInBits() - 1;
16863 return DAG.getConstant(0, VT);
16866 assert((Opc == X86ISD::VSHLI || Opc == X86ISD::VSRLI || Opc == X86ISD::VSRAI)
16867 && "Unknown target vector shift-by-constant node");
16869 // Fold this packed vector shift into a build vector if SrcOp is a
16870 // vector of Constants or UNDEFs, and SrcOp valuetype is the same as VT.
16871 if (VT == SrcOp.getSimpleValueType() &&
16872 ISD::isBuildVectorOfConstantSDNodes(SrcOp.getNode())) {
16873 SmallVector<SDValue, 8> Elts;
16874 unsigned NumElts = SrcOp->getNumOperands();
16875 ConstantSDNode *ND;
16878 default: llvm_unreachable(nullptr);
16879 case X86ISD::VSHLI:
16880 for (unsigned i=0; i!=NumElts; ++i) {
16881 SDValue CurrentOp = SrcOp->getOperand(i);
16882 if (CurrentOp->getOpcode() == ISD::UNDEF) {
16883 Elts.push_back(CurrentOp);
16886 ND = cast<ConstantSDNode>(CurrentOp);
16887 const APInt &C = ND->getAPIntValue();
16888 Elts.push_back(DAG.getConstant(C.shl(ShiftAmt), ElementType));
16891 case X86ISD::VSRLI:
16892 for (unsigned i=0; i!=NumElts; ++i) {
16893 SDValue CurrentOp = SrcOp->getOperand(i);
16894 if (CurrentOp->getOpcode() == ISD::UNDEF) {
16895 Elts.push_back(CurrentOp);
16898 ND = cast<ConstantSDNode>(CurrentOp);
16899 const APInt &C = ND->getAPIntValue();
16900 Elts.push_back(DAG.getConstant(C.lshr(ShiftAmt), ElementType));
16903 case X86ISD::VSRAI:
16904 for (unsigned i=0; i!=NumElts; ++i) {
16905 SDValue CurrentOp = SrcOp->getOperand(i);
16906 if (CurrentOp->getOpcode() == ISD::UNDEF) {
16907 Elts.push_back(CurrentOp);
16910 ND = cast<ConstantSDNode>(CurrentOp);
16911 const APInt &C = ND->getAPIntValue();
16912 Elts.push_back(DAG.getConstant(C.ashr(ShiftAmt), ElementType));
16917 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Elts);
16920 return DAG.getNode(Opc, dl, VT, SrcOp, DAG.getConstant(ShiftAmt, MVT::i8));
16923 // getTargetVShiftNode - Handle vector element shifts where the shift amount
16924 // may or may not be a constant. Takes immediate version of shift as input.
16925 static SDValue getTargetVShiftNode(unsigned Opc, SDLoc dl, MVT VT,
16926 SDValue SrcOp, SDValue ShAmt,
16927 SelectionDAG &DAG) {
16928 MVT SVT = ShAmt.getSimpleValueType();
16929 assert((SVT == MVT::i32 || SVT == MVT::i64) && "Unexpected value type!");
16931 // Catch shift-by-constant.
16932 if (ConstantSDNode *CShAmt = dyn_cast<ConstantSDNode>(ShAmt))
16933 return getTargetVShiftByConstNode(Opc, dl, VT, SrcOp,
16934 CShAmt->getZExtValue(), DAG);
16936 // Change opcode to non-immediate version
16938 default: llvm_unreachable("Unknown target vector shift node");
16939 case X86ISD::VSHLI: Opc = X86ISD::VSHL; break;
16940 case X86ISD::VSRLI: Opc = X86ISD::VSRL; break;
16941 case X86ISD::VSRAI: Opc = X86ISD::VSRA; break;
16944 const X86Subtarget &Subtarget =
16945 DAG.getTarget().getSubtarget<X86Subtarget>();
16946 if (Subtarget.hasSSE41() && ShAmt.getOpcode() == ISD::ZERO_EXTEND &&
16947 ShAmt.getOperand(0).getSimpleValueType() == MVT::i16) {
16948 // Let the shuffle legalizer expand this shift amount node.
16949 SDValue Op0 = ShAmt.getOperand(0);
16950 Op0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(Op0), MVT::v8i16, Op0);
16951 ShAmt = getShuffleVectorZeroOrUndef(Op0, 0, true, &Subtarget, DAG);
16953 // Need to build a vector containing shift amount.
16954 // SSE/AVX packed shifts only use the lower 64-bit of the shift count.
16955 SmallVector<SDValue, 4> ShOps;
16956 ShOps.push_back(ShAmt);
16957 if (SVT == MVT::i32) {
16958 ShOps.push_back(DAG.getConstant(0, SVT));
16959 ShOps.push_back(DAG.getUNDEF(SVT));
16961 ShOps.push_back(DAG.getUNDEF(SVT));
16963 MVT BVT = SVT == MVT::i32 ? MVT::v4i32 : MVT::v2i64;
16964 ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, BVT, ShOps);
16967 // The return type has to be a 128-bit type with the same element
16968 // type as the input type.
16969 MVT EltVT = VT.getVectorElementType();
16970 EVT ShVT = MVT::getVectorVT(EltVT, 128/EltVT.getSizeInBits());
16972 ShAmt = DAG.getNode(ISD::BITCAST, dl, ShVT, ShAmt);
16973 return DAG.getNode(Opc, dl, VT, SrcOp, ShAmt);
16976 /// \brief Return (and \p Op, \p Mask) for compare instructions or
16977 /// (vselect \p Mask, \p Op, \p PreservedSrc) for others along with the
16978 /// necessary casting for \p Mask when lowering masking intrinsics.
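/// Editorial example: a masked packed-add intrinsic becomes
/// (vselect Mask, (add A, B), PreservedSrc), whereas the compare intrinsics
/// listed in the switch below are simply AND-ed with the mask.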
16979 static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
16980 SDValue PreservedSrc,
16981 const X86Subtarget *Subtarget,
16982 SelectionDAG &DAG) {
16983 EVT VT = Op.getValueType();
16984 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(),
16985 MVT::i1, VT.getVectorNumElements());
16986 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
16987 Mask.getValueType().getSizeInBits());
16990 assert(MaskVT.isSimple() && "invalid mask type");
16992 if (isAllOnes(Mask))
16995 // In the case where MaskVT equals v2i1 or v4i1, the low 2 or 4 elements
16996 // are extracted by EXTRACT_SUBVECTOR.
16997 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
16998 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
16999 DAG.getIntPtrConstant(0));
17001 switch (Op.getOpcode()) {
17003 case X86ISD::PCMPEQM:
17004 case X86ISD::PCMPGTM:
17006 case X86ISD::CMPMU:
17007 return DAG.getNode(ISD::AND, dl, VT, Op, VMask);
17009 if (PreservedSrc.getOpcode() == ISD::UNDEF)
17010 PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
17011 return DAG.getNode(ISD::VSELECT, dl, VT, VMask, Op, PreservedSrc);
17014 /// \brief Creates an SDNode for a predicated scalar operation.
17015 /// \returns (X86vselect \p Mask, \p Op, \p PreservedSrc).
17016 /// The mask is coming in as MVT::i8 and it should be truncated
17017 /// to MVT::i1 while lowering masking intrinsics.
17018 /// The main difference between ScalarMaskingNode and VectorMaskingNode is
17019 /// that the former uses "X86select" instead of "vselect"; we simply cannot
17020 /// create a "vselect" node for a scalar instruction.
17021 static SDValue getScalarMaskingNode(SDValue Op, SDValue Mask,
17022 SDValue PreservedSrc,
17023 const X86Subtarget *Subtarget,
17024 SelectionDAG &DAG) {
17025 if (isAllOnes(Mask))
17028 EVT VT = Op.getValueType();
17030 // The mask should be of type MVT::i1
17031 SDValue IMask = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Mask);
17033 if (PreservedSrc.getOpcode() == ISD::UNDEF)
17034 PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
17035 return DAG.getNode(X86ISD::SELECT, dl, VT, IMask, Op, PreservedSrc);
17038 static unsigned getOpcodeForFMAIntrinsic(unsigned IntNo) {
17040 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
17041 case Intrinsic::x86_fma_vfmadd_ps:
17042 case Intrinsic::x86_fma_vfmadd_pd:
17043 case Intrinsic::x86_fma_vfmadd_ps_256:
17044 case Intrinsic::x86_fma_vfmadd_pd_256:
17045 case Intrinsic::x86_fma_mask_vfmadd_ps_512:
17046 case Intrinsic::x86_fma_mask_vfmadd_pd_512:
17047 return X86ISD::FMADD;
17048 case Intrinsic::x86_fma_vfmsub_ps:
17049 case Intrinsic::x86_fma_vfmsub_pd:
17050 case Intrinsic::x86_fma_vfmsub_ps_256:
17051 case Intrinsic::x86_fma_vfmsub_pd_256:
17052 case Intrinsic::x86_fma_mask_vfmsub_ps_512:
17053 case Intrinsic::x86_fma_mask_vfmsub_pd_512:
17054 return X86ISD::FMSUB;
17055 case Intrinsic::x86_fma_vfnmadd_ps:
17056 case Intrinsic::x86_fma_vfnmadd_pd:
17057 case Intrinsic::x86_fma_vfnmadd_ps_256:
17058 case Intrinsic::x86_fma_vfnmadd_pd_256:
17059 case Intrinsic::x86_fma_mask_vfnmadd_ps_512:
17060 case Intrinsic::x86_fma_mask_vfnmadd_pd_512:
17061 return X86ISD::FNMADD;
17062 case Intrinsic::x86_fma_vfnmsub_ps:
17063 case Intrinsic::x86_fma_vfnmsub_pd:
17064 case Intrinsic::x86_fma_vfnmsub_ps_256:
17065 case Intrinsic::x86_fma_vfnmsub_pd_256:
17066 case Intrinsic::x86_fma_mask_vfnmsub_ps_512:
17067 case Intrinsic::x86_fma_mask_vfnmsub_pd_512:
17068 return X86ISD::FNMSUB;
17069 case Intrinsic::x86_fma_vfmaddsub_ps:
17070 case Intrinsic::x86_fma_vfmaddsub_pd:
17071 case Intrinsic::x86_fma_vfmaddsub_ps_256:
17072 case Intrinsic::x86_fma_vfmaddsub_pd_256:
17073 case Intrinsic::x86_fma_mask_vfmaddsub_ps_512:
17074 case Intrinsic::x86_fma_mask_vfmaddsub_pd_512:
17075 return X86ISD::FMADDSUB;
17076 case Intrinsic::x86_fma_vfmsubadd_ps:
17077 case Intrinsic::x86_fma_vfmsubadd_pd:
17078 case Intrinsic::x86_fma_vfmsubadd_ps_256:
17079 case Intrinsic::x86_fma_vfmsubadd_pd_256:
17080 case Intrinsic::x86_fma_mask_vfmsubadd_ps_512:
17081 case Intrinsic::x86_fma_mask_vfmsubadd_pd_512:
17082 return X86ISD::FMSUBADD;
17086 static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
17087 SelectionDAG &DAG) {
17089 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
17090 EVT VT = Op.getValueType();
17091 const IntrinsicData* IntrData = getIntrinsicWithoutChain(IntNo);
17093 switch(IntrData->Type) {
17094 case INTR_TYPE_1OP:
17095 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1));
17096 case INTR_TYPE_2OP:
17097 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1),
17099 case INTR_TYPE_3OP:
17100 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1),
17101 Op.getOperand(2), Op.getOperand(3));
17102 case INTR_TYPE_1OP_MASK_RM: {
17103 SDValue Src = Op.getOperand(1);
17104 SDValue Src0 = Op.getOperand(2);
17105 SDValue Mask = Op.getOperand(3);
17106 SDValue RoundingMode = Op.getOperand(4);
17107 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src,
17109 Mask, Src0, Subtarget, DAG);
17111 case INTR_TYPE_SCALAR_MASK_RM: {
17112 SDValue Src1 = Op.getOperand(1);
17113 SDValue Src2 = Op.getOperand(2);
17114 SDValue Src0 = Op.getOperand(3);
17115 SDValue Mask = Op.getOperand(4);
17116 SDValue RoundingMode = Op.getOperand(5);
17117 return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2,
17119 Mask, Src0, Subtarget, DAG);
17121 case INTR_TYPE_2OP_MASK: {
17122 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Op.getOperand(1),
17124 Op.getOperand(4), Op.getOperand(3), Subtarget, DAG);
17127 case CMP_MASK_CC: {
17128 // Comparison intrinsics with masks.
17129 // Example of transformation:
17130 // (i8 (int_x86_avx512_mask_pcmpeq_q_128
17131 // (v2i64 %a), (v2i64 %b), (i8 %mask))) ->
// (i8 (bitcast
17133 // (v8i1 (insert_subvector undef,
17134 // (v2i1 (and (PCMPEQM %a, %b),
17135 // (extract_subvector
17136 // (v8i1 (bitcast %mask)), 0))), 0))))
17137 EVT VT = Op.getOperand(1).getValueType();
17138 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17139 VT.getVectorNumElements());
17140 SDValue Mask = Op.getOperand((IntrData->Type == CMP_MASK_CC) ? 4 : 3);
17141 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17142 Mask.getValueType().getSizeInBits());
17144 if (IntrData->Type == CMP_MASK_CC) {
17145 Cmp = DAG.getNode(IntrData->Opc0, dl, MaskVT, Op.getOperand(1),
17146 Op.getOperand(2), Op.getOperand(3));
17148 assert(IntrData->Type == CMP_MASK && "Unexpected intrinsic type!");
17149 Cmp = DAG.getNode(IntrData->Opc0, dl, MaskVT, Op.getOperand(1),
17152 SDValue CmpMask = getVectorMaskingNode(Cmp, Mask,
17153 DAG.getTargetConstant(0, MaskVT),
17155 SDValue Res = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, BitcastVT,
17156 DAG.getUNDEF(BitcastVT), CmpMask,
17157 DAG.getIntPtrConstant(0));
17158 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
17160 case COMI: { // Comparison intrinsics
17161 ISD::CondCode CC = (ISD::CondCode)IntrData->Opc1;
17162 SDValue LHS = Op.getOperand(1);
17163 SDValue RHS = Op.getOperand(2);
17164 unsigned X86CC = TranslateX86CC(CC, true, LHS, RHS, DAG);
17165 assert(X86CC != X86::COND_INVALID && "Unexpected illegal condition!");
17166 SDValue Cond = DAG.getNode(IntrData->Opc0, dl, MVT::i32, LHS, RHS);
17167 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
17168 DAG.getConstant(X86CC, MVT::i8), Cond);
17169 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
17172 return getTargetVShiftNode(IntrData->Opc0, dl, Op.getSimpleValueType(),
17173 Op.getOperand(1), Op.getOperand(2), DAG);
17175 return getVectorMaskingNode(getTargetVShiftNode(IntrData->Opc0, dl,
17176 Op.getSimpleValueType(),
17178 Op.getOperand(2), DAG),
17179 Op.getOperand(4), Op.getOperand(3), Subtarget,
17181 case COMPRESS_EXPAND_IN_REG: {
17182 SDValue Mask = Op.getOperand(3);
17183 SDValue DataToCompress = Op.getOperand(1);
17184 SDValue PassThru = Op.getOperand(2);
17185 if (isAllOnes(Mask)) // return data as is
17186 return Op.getOperand(1);
17187 EVT VT = Op.getValueType();
17188 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17189 VT.getVectorNumElements());
17190 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17191 Mask.getValueType().getSizeInBits());
17193 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
17194 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
17195 DAG.getIntPtrConstant(0));
17197 return DAG.getNode(IntrData->Opc0, dl, VT, VMask, DataToCompress,
17201 SDValue Mask = Op.getOperand(3);
17202 EVT VT = Op.getValueType();
17203 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17204 VT.getVectorNumElements());
17205 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17206 Mask.getValueType().getSizeInBits());
17208 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
17209 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
17210 DAG.getIntPtrConstant(0));
17211 return DAG.getNode(IntrData->Opc0, dl, VT, VMask, Op.getOperand(1),
17216 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0,
17217 dl, Op.getValueType(),
17221 Op.getOperand(4), Op.getOperand(1),
17230 default: return SDValue(); // Don't custom lower most intrinsics.
17232 case Intrinsic::x86_avx512_mask_valign_q_512:
17233 case Intrinsic::x86_avx512_mask_valign_d_512:
17234 // Vector source operands are swapped.
17235 return getVectorMaskingNode(DAG.getNode(X86ISD::VALIGN, dl,
17236 Op.getValueType(), Op.getOperand(2),
17239 Op.getOperand(5), Op.getOperand(4),
17242 // ptest and testp intrinsics. The intrinsics these come from are designed to
17243 // return an integer value, not just an instruction, so lower them to the
17244 // ptest or testp pattern and a setcc for the result.
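// For example (an illustrative lowering, assuming SSE4.1):
//   %r = call i32 @llvm.x86.sse41.ptestz(<2 x i64> %a, <2 x i64> %b)
// is lowered to (zext (X86ISD::SETCC COND_E, (X86ISD::PTEST %a, %b)) to i32),
// i.e. a ptest followed by sete and a zero extend of the flag.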
17245 case Intrinsic::x86_sse41_ptestz:
17246 case Intrinsic::x86_sse41_ptestc:
17247 case Intrinsic::x86_sse41_ptestnzc:
17248 case Intrinsic::x86_avx_ptestz_256:
17249 case Intrinsic::x86_avx_ptestc_256:
17250 case Intrinsic::x86_avx_ptestnzc_256:
17251 case Intrinsic::x86_avx_vtestz_ps:
17252 case Intrinsic::x86_avx_vtestc_ps:
17253 case Intrinsic::x86_avx_vtestnzc_ps:
17254 case Intrinsic::x86_avx_vtestz_pd:
17255 case Intrinsic::x86_avx_vtestc_pd:
17256 case Intrinsic::x86_avx_vtestnzc_pd:
17257 case Intrinsic::x86_avx_vtestz_ps_256:
17258 case Intrinsic::x86_avx_vtestc_ps_256:
17259 case Intrinsic::x86_avx_vtestnzc_ps_256:
17260 case Intrinsic::x86_avx_vtestz_pd_256:
17261 case Intrinsic::x86_avx_vtestc_pd_256:
17262 case Intrinsic::x86_avx_vtestnzc_pd_256: {
17263 bool IsTestPacked = false;
17266 default: llvm_unreachable("Bad fallthrough in Intrinsic lowering.");
17267 case Intrinsic::x86_avx_vtestz_ps:
17268 case Intrinsic::x86_avx_vtestz_pd:
17269 case Intrinsic::x86_avx_vtestz_ps_256:
17270 case Intrinsic::x86_avx_vtestz_pd_256:
17271 IsTestPacked = true; // Fallthrough
17272 case Intrinsic::x86_sse41_ptestz:
17273 case Intrinsic::x86_avx_ptestz_256:
17275 X86CC = X86::COND_E;
17277 case Intrinsic::x86_avx_vtestc_ps:
17278 case Intrinsic::x86_avx_vtestc_pd:
17279 case Intrinsic::x86_avx_vtestc_ps_256:
17280 case Intrinsic::x86_avx_vtestc_pd_256:
17281 IsTestPacked = true; // Fallthrough
17282 case Intrinsic::x86_sse41_ptestc:
17283 case Intrinsic::x86_avx_ptestc_256:
17285 X86CC = X86::COND_B;
17287 case Intrinsic::x86_avx_vtestnzc_ps:
17288 case Intrinsic::x86_avx_vtestnzc_pd:
17289 case Intrinsic::x86_avx_vtestnzc_ps_256:
17290 case Intrinsic::x86_avx_vtestnzc_pd_256:
17291 IsTestPacked = true; // Fallthrough
17292 case Intrinsic::x86_sse41_ptestnzc:
17293 case Intrinsic::x86_avx_ptestnzc_256:
17295 X86CC = X86::COND_A;
17299 SDValue LHS = Op.getOperand(1);
17300 SDValue RHS = Op.getOperand(2);
17301 unsigned TestOpc = IsTestPacked ? X86ISD::TESTP : X86ISD::PTEST;
17302 SDValue Test = DAG.getNode(TestOpc, dl, MVT::i32, LHS, RHS);
17303 SDValue CC = DAG.getConstant(X86CC, MVT::i8);
17304 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, CC, Test);
17305 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
17307 case Intrinsic::x86_avx512_kortestz_w:
17308 case Intrinsic::x86_avx512_kortestc_w: {
17309 unsigned X86CC = (IntNo == Intrinsic::x86_avx512_kortestz_w)? X86::COND_E: X86::COND_B;
17310 SDValue LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1, Op.getOperand(1));
17311 SDValue RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1, Op.getOperand(2));
17312 SDValue CC = DAG.getConstant(X86CC, MVT::i8);
17313 SDValue Test = DAG.getNode(X86ISD::KORTEST, dl, MVT::i32, LHS, RHS);
17314 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i1, CC, Test);
17315 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
17318 case Intrinsic::x86_sse42_pcmpistria128:
17319 case Intrinsic::x86_sse42_pcmpestria128:
17320 case Intrinsic::x86_sse42_pcmpistric128:
17321 case Intrinsic::x86_sse42_pcmpestric128:
17322 case Intrinsic::x86_sse42_pcmpistrio128:
17323 case Intrinsic::x86_sse42_pcmpestrio128:
17324 case Intrinsic::x86_sse42_pcmpistris128:
17325 case Intrinsic::x86_sse42_pcmpestris128:
17326 case Intrinsic::x86_sse42_pcmpistriz128:
17327 case Intrinsic::x86_sse42_pcmpestriz128: {
17331 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
17332 case Intrinsic::x86_sse42_pcmpistria128:
17333 Opcode = X86ISD::PCMPISTRI;
17334 X86CC = X86::COND_A;
17336 case Intrinsic::x86_sse42_pcmpestria128:
17337 Opcode = X86ISD::PCMPESTRI;
17338 X86CC = X86::COND_A;
17340 case Intrinsic::x86_sse42_pcmpistric128:
17341 Opcode = X86ISD::PCMPISTRI;
17342 X86CC = X86::COND_B;
17344 case Intrinsic::x86_sse42_pcmpestric128:
17345 Opcode = X86ISD::PCMPESTRI;
17346 X86CC = X86::COND_B;
17348 case Intrinsic::x86_sse42_pcmpistrio128:
17349 Opcode = X86ISD::PCMPISTRI;
17350 X86CC = X86::COND_O;
17352 case Intrinsic::x86_sse42_pcmpestrio128:
17353 Opcode = X86ISD::PCMPESTRI;
17354 X86CC = X86::COND_O;
17356 case Intrinsic::x86_sse42_pcmpistris128:
17357 Opcode = X86ISD::PCMPISTRI;
17358 X86CC = X86::COND_S;
17360 case Intrinsic::x86_sse42_pcmpestris128:
17361 Opcode = X86ISD::PCMPESTRI;
17362 X86CC = X86::COND_S;
17364 case Intrinsic::x86_sse42_pcmpistriz128:
17365 Opcode = X86ISD::PCMPISTRI;
17366 X86CC = X86::COND_E;
17368 case Intrinsic::x86_sse42_pcmpestriz128:
17369 Opcode = X86ISD::PCMPESTRI;
17370 X86CC = X86::COND_E;
17373 SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
17374 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
17375 SDValue PCMP = DAG.getNode(Opcode, dl, VTs, NewOps);
17376 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
17377 DAG.getConstant(X86CC, MVT::i8),
17378 SDValue(PCMP.getNode(), 1));
17379 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
17382 case Intrinsic::x86_sse42_pcmpistri128:
17383 case Intrinsic::x86_sse42_pcmpestri128: {
17385 if (IntNo == Intrinsic::x86_sse42_pcmpistri128)
17386 Opcode = X86ISD::PCMPISTRI;
17388 Opcode = X86ISD::PCMPESTRI;
17390 SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
17391 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
17392 return DAG.getNode(Opcode, dl, VTs, NewOps);
17395 case Intrinsic::x86_fma_mask_vfmadd_ps_512:
17396 case Intrinsic::x86_fma_mask_vfmadd_pd_512:
17397 case Intrinsic::x86_fma_mask_vfmsub_ps_512:
17398 case Intrinsic::x86_fma_mask_vfmsub_pd_512:
17399 case Intrinsic::x86_fma_mask_vfnmadd_ps_512:
17400 case Intrinsic::x86_fma_mask_vfnmadd_pd_512:
17401 case Intrinsic::x86_fma_mask_vfnmsub_ps_512:
17402 case Intrinsic::x86_fma_mask_vfnmsub_pd_512:
17403 case Intrinsic::x86_fma_mask_vfmaddsub_ps_512:
17404 case Intrinsic::x86_fma_mask_vfmaddsub_pd_512:
17405 case Intrinsic::x86_fma_mask_vfmsubadd_ps_512:
17406 case Intrinsic::x86_fma_mask_vfmsubadd_pd_512: {
17407 auto *SAE = cast<ConstantSDNode>(Op.getOperand(5));
17408 if (SAE->getZExtValue() == X86::STATIC_ROUNDING::CUR_DIRECTION)
17409 return getVectorMaskingNode(DAG.getNode(getOpcodeForFMAIntrinsic(IntNo),
17410 dl, Op.getValueType(),
17414 Op.getOperand(4), Op.getOperand(1),
17420 case Intrinsic::x86_fma_vfmadd_ps:
17421 case Intrinsic::x86_fma_vfmadd_pd:
17422 case Intrinsic::x86_fma_vfmsub_ps:
17423 case Intrinsic::x86_fma_vfmsub_pd:
17424 case Intrinsic::x86_fma_vfnmadd_ps:
17425 case Intrinsic::x86_fma_vfnmadd_pd:
17426 case Intrinsic::x86_fma_vfnmsub_ps:
17427 case Intrinsic::x86_fma_vfnmsub_pd:
17428 case Intrinsic::x86_fma_vfmaddsub_ps:
17429 case Intrinsic::x86_fma_vfmaddsub_pd:
17430 case Intrinsic::x86_fma_vfmsubadd_ps:
17431 case Intrinsic::x86_fma_vfmsubadd_pd:
17432 case Intrinsic::x86_fma_vfmadd_ps_256:
17433 case Intrinsic::x86_fma_vfmadd_pd_256:
17434 case Intrinsic::x86_fma_vfmsub_ps_256:
17435 case Intrinsic::x86_fma_vfmsub_pd_256:
17436 case Intrinsic::x86_fma_vfnmadd_ps_256:
17437 case Intrinsic::x86_fma_vfnmadd_pd_256:
17438 case Intrinsic::x86_fma_vfnmsub_ps_256:
17439 case Intrinsic::x86_fma_vfnmsub_pd_256:
17440 case Intrinsic::x86_fma_vfmaddsub_ps_256:
17441 case Intrinsic::x86_fma_vfmaddsub_pd_256:
17442 case Intrinsic::x86_fma_vfmsubadd_ps_256:
17443 case Intrinsic::x86_fma_vfmsubadd_pd_256:
17444 return DAG.getNode(getOpcodeForFMAIntrinsic(IntNo), dl, Op.getValueType(),
17445 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
17449 static SDValue getGatherNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
17450 SDValue Src, SDValue Mask, SDValue Base,
17451 SDValue Index, SDValue ScaleOp, SDValue Chain,
17452 const X86Subtarget * Subtarget) {
17454 ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp);
17455 assert(C && "Invalid scale type");
17456 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8);
17457 EVT MaskVT = MVT::getVectorVT(MVT::i1,
17458 Index.getSimpleValueType().getVectorNumElements());
17460 ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
17462 MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), MaskVT);
17464 MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask);
17465 SDVTList VTs = DAG.getVTList(Op.getValueType(), MaskVT, MVT::Other);
17466 SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
17467 SDValue Segment = DAG.getRegister(0, MVT::i32);
17468 if (Src.getOpcode() == ISD::UNDEF)
17469 Src = getZeroVector(Op.getValueType(), Subtarget, DAG, dl);
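// Note on the operand list below: it follows the X86 memory-operand layout
// used by the gather pseudo-instructions, so the per-lane effective address
// is Base + Index * Scale + Disp (Segment is the zero register, i.e. unused),
// and a lane is only loaded when its mask bit is set.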
17470 SDValue Ops[] = {Src, MaskInReg, Base, Scale, Index, Disp, Segment, Chain};
17471 SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
17472 SDValue RetOps[] = { SDValue(Res, 0), SDValue(Res, 2) };
17473 return DAG.getMergeValues(RetOps, dl);
17476 static SDValue getScatterNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
17477 SDValue Src, SDValue Mask, SDValue Base,
17478 SDValue Index, SDValue ScaleOp, SDValue Chain) {
17480 ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp);
17481 assert(C && "Invalid scale type");
17482 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8);
17483 SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
17484 SDValue Segment = DAG.getRegister(0, MVT::i32);
17485 EVT MaskVT = MVT::getVectorVT(MVT::i1,
17486 Index.getSimpleValueType().getVectorNumElements());
17488 ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
17490 MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), MaskVT);
17492 MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask);
17493 SDVTList VTs = DAG.getVTList(MaskVT, MVT::Other);
17494 SDValue Ops[] = {Base, Scale, Index, Disp, Segment, MaskInReg, Src, Chain};
17495 SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
17496 return SDValue(Res, 1);
17499 static SDValue getPrefetchNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
17500 SDValue Mask, SDValue Base, SDValue Index,
17501 SDValue ScaleOp, SDValue Chain) {
17503 ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp);
17504 assert(C && "Invalid scale type");
17505 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8);
17506 SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
17507 SDValue Segment = DAG.getRegister(0, MVT::i32);
17509 MVT::getVectorVT(MVT::i1, Index.getSimpleValueType().getVectorNumElements());
17511 ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
17513 MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), MaskVT);
17515 MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask);
17516 //SDVTList VTs = DAG.getVTList(MVT::Other);
17517 SDValue Ops[] = {MaskInReg, Base, Scale, Index, Disp, Segment, Chain};
17518 SDNode *Res = DAG.getMachineNode(Opc, dl, MVT::Other, Ops);
17519 return SDValue(Res, 0);
17522 // getReadPerformanceCounter - Handles the lowering of builtin intrinsics that
17523 // read performance monitor counters (x86_rdpmc).
17524 static void getReadPerformanceCounter(SDNode *N, SDLoc DL,
17525 SelectionDAG &DAG, const X86Subtarget *Subtarget,
17526 SmallVectorImpl<SDValue> &Results) {
17527 assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
17528 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
SDValue LO, HI;

17531 // The ECX register is used to select the index of the performance counter
// to read.
17533 SDValue Chain = DAG.getCopyToReg(N->getOperand(0), DL, X86::ECX,
N->getOperand(2));
17535 SDValue rd = DAG.getNode(X86ISD::RDPMC_DAG, DL, Tys, Chain);
17537 // Reads the content of a 64-bit performance counter and returns it in the
17538 // registers EDX:EAX.
17539 if (Subtarget->is64Bit()) {
17540 LO = DAG.getCopyFromReg(rd, DL, X86::RAX, MVT::i64, rd.getValue(1));
17541 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
17544 LO = DAG.getCopyFromReg(rd, DL, X86::EAX, MVT::i32, rd.getValue(1));
17545 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
17548 Chain = HI.getValue(1);
17550 if (Subtarget->is64Bit()) {
17551 // The EAX register is loaded with the low-order 32 bits. The EDX register
17552 // is loaded with the supported high-order bits of the counter.
17553 SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
17554 DAG.getConstant(32, MVT::i8));
17555 Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
17556 Results.push_back(Chain);
17560 // Use a buildpair to merge the two 32-bit values into a 64-bit one.
17561 SDValue Ops[] = { LO, HI };
17562 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
17563 Results.push_back(Pair);
17564 Results.push_back(Chain);
17567 // getReadTimeStampCounter - Handles the lowering of builtin intrinsics that
17568 // read the time stamp counter (x86_rdtsc and x86_rdtscp). This function is
17569 // also used to custom lower READCYCLECOUNTER nodes.
17570 static void getReadTimeStampCounter(SDNode *N, SDLoc DL, unsigned Opcode,
17571 SelectionDAG &DAG, const X86Subtarget *Subtarget,
17572 SmallVectorImpl<SDValue> &Results) {
17573 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
17574 SDValue rd = DAG.getNode(Opcode, DL, Tys, N->getOperand(0));
SDValue LO, HI;

17577 // The processor's time-stamp counter (a 64-bit MSR) is stored into the
17578 // EDX:EAX registers. EDX is loaded with the high-order 32 bits of the MSR
17579 // and the EAX register is loaded with the low-order 32 bits.
17580 if (Subtarget->is64Bit()) {
17581 LO = DAG.getCopyFromReg(rd, DL, X86::RAX, MVT::i64, rd.getValue(1));
17582 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
17585 LO = DAG.getCopyFromReg(rd, DL, X86::EAX, MVT::i32, rd.getValue(1));
17586 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
17589 SDValue Chain = HI.getValue(1);
17591 if (Opcode == X86ISD::RDTSCP_DAG) {
17592 assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
17594 // Instruction RDTSCP loads the IA32:TSC_AUX_MSR (address C000_0103H) into
17595 // the ECX register. Add 'ecx' explicitly to the chain.
17596 SDValue ecx = DAG.getCopyFromReg(Chain, DL, X86::ECX, MVT::i32,
17598 // Explicitly store the content of ECX at the location passed as the
17599 // pointer argument to the 'rdtscp' intrinsic.
17600 Chain = DAG.getStore(ecx.getValue(1), DL, ecx, N->getOperand(2),
17601 MachinePointerInfo(), false, false, 0);
17604 if (Subtarget->is64Bit()) {
17605 // The EDX register is loaded with the high-order 32 bits of the MSR, and
17606 // the EAX register is loaded with the low-order 32 bits.
17607 SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
17608 DAG.getConstant(32, MVT::i8));
17609 Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
17610 Results.push_back(Chain);
17614 // Use a buildpair to merge the two 32-bit values into a 64-bit one.
17615 SDValue Ops[] = { LO, HI };
17616 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
17617 Results.push_back(Pair);
17618 Results.push_back(Chain);
17621 static SDValue LowerREADCYCLECOUNTER(SDValue Op, const X86Subtarget *Subtarget,
17622 SelectionDAG &DAG) {
17623 SmallVector<SDValue, 2> Results;
17625 getReadTimeStampCounter(Op.getNode(), DL, X86ISD::RDTSC_DAG, DAG, Subtarget,
17627 return DAG.getMergeValues(Results, DL);
17631 static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
17632 SelectionDAG &DAG) {
17633 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
17635 const IntrinsicData* IntrData = getIntrinsicWithChain(IntNo);
if (!IntrData)
return SDValue();

SDLoc dl(Op);
17640 switch(IntrData->Type) {
17642 llvm_unreachable("Unknown Intrinsic Type");
17646 // Emit the node with the right value type.
17647 SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Glue, MVT::Other);
17648 SDValue Result = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
17650 // If the value returned by RDRAND/RDSEED was valid (CF=1), return 1.
17651 // Otherwise return the value from Rand, which is always 0, cast to i32.
17652 SDValue Ops[] = { DAG.getZExtOrTrunc(Result, dl, Op->getValueType(1)),
17653 DAG.getConstant(1, Op->getValueType(1)),
17654 DAG.getConstant(X86::COND_B, MVT::i32),
17655 SDValue(Result.getNode(), 1) };
17656 SDValue isValid = DAG.getNode(X86ISD::CMOV, dl,
17657 DAG.getVTList(Op->getValueType(1), MVT::Glue),
17660 // Return { result, isValid, chain }.
17661 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result, isValid,
17662 SDValue(Result.getNode(), 2));
17665 //gather(v1, mask, index, base, scale);
17666 SDValue Chain = Op.getOperand(0);
17667 SDValue Src = Op.getOperand(2);
17668 SDValue Base = Op.getOperand(3);
17669 SDValue Index = Op.getOperand(4);
17670 SDValue Mask = Op.getOperand(5);
17671 SDValue Scale = Op.getOperand(6);
17672 return getGatherNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index, Scale, Chain,
17676 //scatter(base, mask, index, v1, scale);
17677 SDValue Chain = Op.getOperand(0);
17678 SDValue Base = Op.getOperand(2);
17679 SDValue Mask = Op.getOperand(3);
17680 SDValue Index = Op.getOperand(4);
17681 SDValue Src = Op.getOperand(5);
17682 SDValue Scale = Op.getOperand(6);
17683 return getScatterNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index, Scale, Chain);
17686 SDValue Hint = Op.getOperand(6);
unsigned HintVal;
17688 if (dyn_cast<ConstantSDNode> (Hint) == nullptr ||
17689 (HintVal = dyn_cast<ConstantSDNode> (Hint)->getZExtValue()) > 1)
17690 llvm_unreachable("Wrong prefetch hint in intrinsic: should be 0 or 1");
17691 unsigned Opcode = (HintVal ? IntrData->Opc1 : IntrData->Opc0);
17692 SDValue Chain = Op.getOperand(0);
17693 SDValue Mask = Op.getOperand(2);
17694 SDValue Index = Op.getOperand(3);
17695 SDValue Base = Op.getOperand(4);
17696 SDValue Scale = Op.getOperand(5);
17697 return getPrefetchNode(Opcode, Op, DAG, Mask, Base, Index, Scale, Chain);
17699 // Read Time Stamp Counter (RDTSC) and Processor ID (RDTSCP).
17701 SmallVector<SDValue, 2> Results;
17702 getReadTimeStampCounter(Op.getNode(), dl, IntrData->Opc0, DAG, Subtarget, Results);
17703 return DAG.getMergeValues(Results, dl);
17705 // Read Performance Monitoring Counters.
17707 SmallVector<SDValue, 2> Results;
17708 getReadPerformanceCounter(Op.getNode(), dl, DAG, Subtarget, Results);
17709 return DAG.getMergeValues(Results, dl);
17711 // XTEST intrinsics.
17713 SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
17714 SDValue InTrans = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
17715 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
17716 DAG.getConstant(X86::COND_NE, MVT::i8),
17718 SDValue Ret = DAG.getNode(ISD::ZERO_EXTEND, dl, Op->getValueType(0), SetCC);
17719 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(),
17720 Ret, SDValue(InTrans.getNode(), 1));
17724 SmallVector<SDValue, 2> Results;
17725 SDVTList CFVTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
17726 SDVTList VTs = DAG.getVTList(Op.getOperand(3)->getValueType(0), MVT::Other);
17727 SDValue GenCF = DAG.getNode(X86ISD::ADD, dl, CFVTs, Op.getOperand(2),
17728 DAG.getConstant(-1, MVT::i8));
17729 SDValue Res = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(3),
17730 Op.getOperand(4), GenCF.getValue(1));
17731 SDValue Store = DAG.getStore(Op.getOperand(0), dl, Res.getValue(0),
17732 Op.getOperand(5), MachinePointerInfo(),
17734 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
17735 DAG.getConstant(X86::COND_B, MVT::i8),
17737 Results.push_back(SetCC);
17738 Results.push_back(Store);
17739 return DAG.getMergeValues(Results, dl);
17741 case COMPRESS_TO_MEM: {
17743 SDValue Mask = Op.getOperand(4);
17744 SDValue DataToCompress = Op.getOperand(3);
17745 SDValue Addr = Op.getOperand(2);
17746 SDValue Chain = Op.getOperand(0);
17748 if (isAllOnes(Mask)) // return just a store
17749 return DAG.getStore(Chain, dl, DataToCompress, Addr,
17750 MachinePointerInfo(), false, false, 0);
17752 EVT VT = DataToCompress.getValueType();
17753 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17754 VT.getVectorNumElements());
17755 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17756 Mask.getValueType().getSizeInBits());
17757 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
17758 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
17759 DAG.getIntPtrConstant(0));
17761 SDValue Compressed = DAG.getNode(IntrData->Opc0, dl, VT, VMask,
17762 DataToCompress, DAG.getUNDEF(VT));
17763 return DAG.getStore(Chain, dl, Compressed, Addr,
17764 MachinePointerInfo(), false, false, 0);
17766 case EXPAND_FROM_MEM: {
17768 SDValue Mask = Op.getOperand(4);
17769 SDValue PathThru = Op.getOperand(3);
17770 SDValue Addr = Op.getOperand(2);
17771 SDValue Chain = Op.getOperand(0);
17772 EVT VT = Op.getValueType();
17774 if (isAllOnes(Mask)) // return just a load
17775 return DAG.getLoad(VT, dl, Chain, Addr, MachinePointerInfo(), false, false,
17777 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17778 VT.getVectorNumElements());
17779 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17780 Mask.getValueType().getSizeInBits());
17781 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
17782 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
17783 DAG.getIntPtrConstant(0));
17785 SDValue DataToExpand = DAG.getLoad(VT, dl, Chain, Addr, MachinePointerInfo(),
17786 false, false, false, 0);
17788 SmallVector<SDValue, 2> Results;
17789 Results.push_back(DAG.getNode(IntrData->Opc0, dl, VT, VMask, DataToExpand,
17791 Results.push_back(Chain);
17792 return DAG.getMergeValues(Results, dl);
17797 SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op,
17798 SelectionDAG &DAG) const {
17799 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
17800 MFI->setReturnAddressIsTaken(true);
17802 if (verifyReturnAddressArgumentIsConstant(Op, DAG))
17805 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
SDLoc dl(Op);
17807 EVT PtrVT = getPointerTy();

if (Depth > 0) {
17810 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
17811 const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
17812 DAG.getSubtarget().getRegisterInfo());
17813 SDValue Offset = DAG.getConstant(RegInfo->getSlotSize(), PtrVT);
17814 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
17815 DAG.getNode(ISD::ADD, dl, PtrVT,
17816 FrameAddr, Offset),
17817 MachinePointerInfo(), false, false, false, 0);
}

17820 // Just load the return address.
17821 SDValue RetAddrFI = getReturnAddressFrameIndex(DAG);
17822 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
17823 RetAddrFI, MachinePointerInfo(), false, false, false, 0);
17826 SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
17827 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
17828 MFI->setFrameAddressIsTaken(true);
17830 EVT VT = Op.getValueType();
17831 SDLoc dl(Op); // FIXME probably not meaningful
17832 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
17833 const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
17834 DAG.getSubtarget().getRegisterInfo());
17835 unsigned FrameReg = RegInfo->getPtrSizedFrameRegister(
17836 DAG.getMachineFunction());
17837 assert(((FrameReg == X86::RBP && VT == MVT::i64) ||
17838 (FrameReg == X86::EBP && VT == MVT::i32)) &&
17839 "Invalid Frame Register!");
17840 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
while (Depth--)
17842 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
17843 MachinePointerInfo(),
17844 false, false, false, 0);
return FrameAddr;
}
17848 // FIXME? Maybe this could be a TableGen attribute on some registers and
17849 // this table could be generated automatically from RegInfo.
17850 unsigned X86TargetLowering::getRegisterByName(const char* RegName,
17852 unsigned Reg = StringSwitch<unsigned>(RegName)
17853 .Case("esp", X86::ESP)
17854 .Case("rsp", X86::RSP)
17858 report_fatal_error("Invalid register name global variable");
17861 SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op,
17862 SelectionDAG &DAG) const {
17863 const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
17864 DAG.getSubtarget().getRegisterInfo());
17865 return DAG.getIntPtrConstant(2 * RegInfo->getSlotSize());
17868 SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
17869 SDValue Chain = Op.getOperand(0);
17870 SDValue Offset = Op.getOperand(1);
17871 SDValue Handler = Op.getOperand(2);
17874 EVT PtrVT = getPointerTy();
17875 const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
17876 DAG.getSubtarget().getRegisterInfo());
17877 unsigned FrameReg = RegInfo->getFrameRegister(DAG.getMachineFunction());
17878 assert(((FrameReg == X86::RBP && PtrVT == MVT::i64) ||
17879 (FrameReg == X86::EBP && PtrVT == MVT::i32)) &&
17880 "Invalid Frame Register!");
17881 SDValue Frame = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, PtrVT);
17882 unsigned StoreAddrReg = (PtrVT == MVT::i64) ? X86::RCX : X86::ECX;
17884 SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, Frame,
17885 DAG.getIntPtrConstant(RegInfo->getSlotSize()));
17886 StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, StoreAddr, Offset);
17887 Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo(),
17889 Chain = DAG.getCopyToReg(Chain, dl, StoreAddrReg, StoreAddr);
17891 return DAG.getNode(X86ISD::EH_RETURN, dl, MVT::Other, Chain,
17892 DAG.getRegister(StoreAddrReg, PtrVT));
17895 SDValue X86TargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
17896 SelectionDAG &DAG) const {
17898 return DAG.getNode(X86ISD::EH_SJLJ_SETJMP, DL,
17899 DAG.getVTList(MVT::i32, MVT::Other),
17900 Op.getOperand(0), Op.getOperand(1));
17903 SDValue X86TargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
17904 SelectionDAG &DAG) const {
17906 return DAG.getNode(X86ISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
17907 Op.getOperand(0), Op.getOperand(1));
17910 static SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) {
17911 return Op.getOperand(0);
17914 SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
17915 SelectionDAG &DAG) const {
17916 SDValue Root = Op.getOperand(0);
17917 SDValue Trmp = Op.getOperand(1); // trampoline
17918 SDValue FPtr = Op.getOperand(2); // nested function
17919 SDValue Nest = Op.getOperand(3); // 'nest' parameter value
17922 const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
17923 const TargetRegisterInfo *TRI = DAG.getSubtarget().getRegisterInfo();
17925 if (Subtarget->is64Bit()) {
17926 SDValue OutChains[6];
17928 // Large code-model.
17929 const unsigned char JMP64r = 0xFF; // 64-bit jmp through register opcode.
17930 const unsigned char MOV64ri = 0xB8; // X86::MOV64ri opcode.
17932 const unsigned char N86R10 = TRI->getEncodingValue(X86::R10) & 0x7;
17933 const unsigned char N86R11 = TRI->getEncodingValue(X86::R11) & 0x7;
17935 const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix
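// Taken together, the stores below lay out this 23-byte stub (a sketch of
// the encoding, using the standard R10/R11 register numbers):
//   offset  0: 49 BB <imm64>   movabsq $FPtr, %r11
//   offset 10: 49 BA <imm64>   movabsq $Nest, %r10
//   offset 20: 49 FF E3        jmpq   *%r11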
17937 // Load the pointer to the nested function into R11.
17938 unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11
17939 SDValue Addr = Trmp;
17940 OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
17941 Addr, MachinePointerInfo(TrmpAddr),
17944 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
17945 DAG.getConstant(2, MVT::i64));
17946 OutChains[1] = DAG.getStore(Root, dl, FPtr, Addr,
17947 MachinePointerInfo(TrmpAddr, 2),
17950 // Load the 'nest' parameter value into R10.
17951 // R10 is specified in X86CallingConv.td
17952 OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10
17953 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
17954 DAG.getConstant(10, MVT::i64));
17955 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
17956 Addr, MachinePointerInfo(TrmpAddr, 10),
17959 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
17960 DAG.getConstant(12, MVT::i64));
17961 OutChains[3] = DAG.getStore(Root, dl, Nest, Addr,
17962 MachinePointerInfo(TrmpAddr, 12),
17965 // Jump to the nested function.
17966 OpCode = (JMP64r << 8) | REX_WB; // jmpq *...
17967 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
17968 DAG.getConstant(20, MVT::i64));
17969 OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
17970 Addr, MachinePointerInfo(TrmpAddr, 20),
17973 unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11
17974 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
17975 DAG.getConstant(22, MVT::i64));
17976 OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, MVT::i8), Addr,
17977 MachinePointerInfo(TrmpAddr, 22),
17980 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
17982 const Function *Func =
17983 cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue());
17984 CallingConv::ID CC = Func->getCallingConv();
17989 llvm_unreachable("Unsupported calling convention");
17990 case CallingConv::C:
17991 case CallingConv::X86_StdCall: {
17992 // Pass 'nest' parameter in ECX.
17993 // Must be kept in sync with X86CallingConv.td
17994 NestReg = X86::ECX;
17996 // Check that ECX wasn't needed by an 'inreg' parameter.
17997 FunctionType *FTy = Func->getFunctionType();
17998 const AttributeSet &Attrs = Func->getAttributes();
18000 if (!Attrs.isEmpty() && !Func->isVarArg()) {
18001 unsigned InRegCount = 0;
18004 for (FunctionType::param_iterator I = FTy->param_begin(),
18005 E = FTy->param_end(); I != E; ++I, ++Idx)
18006 if (Attrs.hasAttribute(Idx, Attribute::InReg))
18007 // FIXME: should only count parameters that are lowered to integers.
18008 InRegCount += (TD->getTypeSizeInBits(*I) + 31) / 32;
18010 if (InRegCount > 2) {
18011 report_fatal_error("Nest register in use - reduce number of inreg"
18017 case CallingConv::X86_FastCall:
18018 case CallingConv::X86_ThisCall:
18019 case CallingConv::Fast:
18020 // Pass 'nest' parameter in EAX.
18021 // Must be kept in sync with X86CallingConv.td
18022 NestReg = X86::EAX;
18026 SDValue OutChains[4];
18027 SDValue Addr, Disp;
18029 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
18030 DAG.getConstant(10, MVT::i32));
18031 Disp = DAG.getNode(ISD::SUB, dl, MVT::i32, FPtr, Addr);
18033 // This is storing the opcode for MOV32ri.
18034 const unsigned char MOV32ri = 0xB8; // X86::MOV32ri's opcode byte.
18035 const unsigned char N86Reg = TRI->getEncodingValue(NestReg) & 0x7;
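// The four stores below emit this 10-byte stub (layout sketch):
//   offset 0: B8+r <imm32>   movl $Nest, %<nestreg>   (ECX or EAX, see above)
//   offset 5: E9 <rel32>     jmp  FPtr
// where <rel32> is Disp = FPtr - (Trmp + 10), computed above.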
18036 OutChains[0] = DAG.getStore(Root, dl,
18037 DAG.getConstant(MOV32ri|N86Reg, MVT::i8),
18038 Trmp, MachinePointerInfo(TrmpAddr),
18041 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
18042 DAG.getConstant(1, MVT::i32));
18043 OutChains[1] = DAG.getStore(Root, dl, Nest, Addr,
18044 MachinePointerInfo(TrmpAddr, 1),
18047 const unsigned char JMP = 0xE9; // jmp <32bit dst> opcode.
18048 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
18049 DAG.getConstant(5, MVT::i32));
18050 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(JMP, MVT::i8), Addr,
18051 MachinePointerInfo(TrmpAddr, 5),
18054 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
18055 DAG.getConstant(6, MVT::i32));
18056 OutChains[3] = DAG.getStore(Root, dl, Disp, Addr,
18057 MachinePointerInfo(TrmpAddr, 6),
18060 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
18064 SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
18065 SelectionDAG &DAG) const {
18067 The rounding mode is in bits 11:10 of FPSR, and has the following
settings:
18069 00 Round to nearest
01 Round to -inf
10 Round to +inf
11 Round to 0

18074 FLT_ROUNDS, on the other hand, expects the following:
-1 Undefined
 0 Round to 0
 1 Round to nearest
 2 Round to +inf
 3 Round to -inf

18081 To perform the conversion, we do:
18082 (((((FPSR & 0x800) >> 11) | ((FPSR & 0x400) >> 9)) + 1) & 3)
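As a worked example (illustrative only): if FPSR[11:10] = 10 (round to +inf),
then (FPSR & 0x800) >> 11 = 1 and (FPSR & 0x400) >> 9 = 0, so the expression
evaluates to ((1 | 0) + 1) & 3 = 2, which is FLT_ROUNDS' encoding for round
to +inf.
*/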
18085 MachineFunction &MF = DAG.getMachineFunction();
18086 const TargetMachine &TM = MF.getTarget();
18087 const TargetFrameLowering &TFI = *TM.getSubtargetImpl()->getFrameLowering();
18088 unsigned StackAlignment = TFI.getStackAlignment();
18089 MVT VT = Op.getSimpleValueType();
18092 // Save FP Control Word to stack slot
18093 int SSFI = MF.getFrameInfo()->CreateStackObject(2, StackAlignment, false);
18094 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
18096 MachineMemOperand *MMO =
18097 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
18098 MachineMemOperand::MOStore, 2, 2);
18100 SDValue Ops[] = { DAG.getEntryNode(), StackSlot };
18101 SDValue Chain = DAG.getMemIntrinsicNode(X86ISD::FNSTCW16m, DL,
18102 DAG.getVTList(MVT::Other),
18103 Ops, MVT::i16, MMO);
18105 // Load FP Control Word from stack slot
18106 SDValue CWD = DAG.getLoad(MVT::i16, DL, Chain, StackSlot,
18107 MachinePointerInfo(), false, false, false, 0);
18109 // Transform as necessary
18111 DAG.getNode(ISD::SRL, DL, MVT::i16,
18112 DAG.getNode(ISD::AND, DL, MVT::i16,
18113 CWD, DAG.getConstant(0x800, MVT::i16)),
18114 DAG.getConstant(11, MVT::i8));
18116 DAG.getNode(ISD::SRL, DL, MVT::i16,
18117 DAG.getNode(ISD::AND, DL, MVT::i16,
18118 CWD, DAG.getConstant(0x400, MVT::i16)),
18119 DAG.getConstant(9, MVT::i8));
18122 DAG.getNode(ISD::AND, DL, MVT::i16,
18123 DAG.getNode(ISD::ADD, DL, MVT::i16,
18124 DAG.getNode(ISD::OR, DL, MVT::i16, CWD1, CWD2),
18125 DAG.getConstant(1, MVT::i16)),
18126 DAG.getConstant(3, MVT::i16));
18128 return DAG.getNode((VT.getSizeInBits() < 16 ?
18129 ISD::TRUNCATE : ISD::ZERO_EXTEND), DL, VT, RetVal);
18132 static SDValue LowerCTLZ(SDValue Op, SelectionDAG &DAG) {
18133 MVT VT = Op.getSimpleValueType();
18135 unsigned NumBits = VT.getSizeInBits();
18138 Op = Op.getOperand(0);
18139 if (VT == MVT::i8) {
18140 // Zero extend to i32 since there is no i8 bsr.
18142 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
18145 // Issue a bsr (scan bits in reverse) which also sets EFLAGS.
18146 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
18147 Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);
18149 // If src is zero (i.e. bsr sets ZF), returns NumBits.
18152 DAG.getConstant(NumBits+NumBits-1, OpVT),
18153 DAG.getConstant(X86::COND_E, MVT::i8),
18156 Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops);
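// Note: the 2*NumBits-1 constant chosen on the zero path becomes NumBits
// after the xor with NumBits-1 below, which is the value CTLZ defines for a
// zero input (e.g. for i32: 63 ^ 31 == 32).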
18158 // Finally xor with NumBits-1.
18159 Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT));
18162 Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
18166 static SDValue LowerCTLZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) {
18167 MVT VT = Op.getSimpleValueType();
18169 unsigned NumBits = VT.getSizeInBits();
18172 Op = Op.getOperand(0);
18173 if (VT == MVT::i8) {
18174 // Zero extend to i32 since there is no i8 bsr.
18176 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
18179 // Issue a bsr (scan bits in reverse).
18180 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
18181 Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);
18183 // And xor with NumBits-1.
18184 Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT));
18187 Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
18191 static SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) {
18192 MVT VT = Op.getSimpleValueType();
18193 unsigned NumBits = VT.getSizeInBits();
18195 Op = Op.getOperand(0);
18197 // Issue a bsf (scan bits forward) which also sets EFLAGS.
18198 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
18199 Op = DAG.getNode(X86ISD::BSF, dl, VTs, Op);
18201 // If src is zero (i.e. bsf sets ZF), returns NumBits.
18204 DAG.getConstant(NumBits, VT),
18205 DAG.getConstant(X86::COND_E, MVT::i8),
18208 return DAG.getNode(X86ISD::CMOV, dl, VT, Ops);
18211 // Lower256IntArith - Break a 256-bit integer operation into two new 128-bit
18212 // ones, and then concatenate the result back.
18213 static SDValue Lower256IntArith(SDValue Op, SelectionDAG &DAG) {
18214 MVT VT = Op.getSimpleValueType();
18216 assert(VT.is256BitVector() && VT.isInteger() &&
18217 "Unsupported value type for operation");
18219 unsigned NumElems = VT.getVectorNumElements();
18222 // Extract the LHS vectors
18223 SDValue LHS = Op.getOperand(0);
18224 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
18225 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);
18227 // Extract the RHS vectors
18228 SDValue RHS = Op.getOperand(1);
18229 SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, dl);
18230 SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, dl);
18232 MVT EltVT = VT.getVectorElementType();
18233 MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
18235 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
18236 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1),
18237 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2));
18240 static SDValue LowerADD(SDValue Op, SelectionDAG &DAG) {
18241 assert(Op.getSimpleValueType().is256BitVector() &&
18242 Op.getSimpleValueType().isInteger() &&
18243 "Only handle AVX 256-bit vector integer operation");
18244 return Lower256IntArith(Op, DAG);
18247 static SDValue LowerSUB(SDValue Op, SelectionDAG &DAG) {
18248 assert(Op.getSimpleValueType().is256BitVector() &&
18249 Op.getSimpleValueType().isInteger() &&
18250 "Only handle AVX 256-bit vector integer operation");
18251 return Lower256IntArith(Op, DAG);
18254 static SDValue LowerMUL(SDValue Op, const X86Subtarget *Subtarget,
18255 SelectionDAG &DAG) {
18257 MVT VT = Op.getSimpleValueType();
18259 // Decompose 256-bit ops into smaller 128-bit ops.
18260 if (VT.is256BitVector() && !Subtarget->hasInt256())
18261 return Lower256IntArith(Op, DAG);
18263 SDValue A = Op.getOperand(0);
18264 SDValue B = Op.getOperand(1);
18266 // Lower v4i32 mul as 2x shuffle, 2x pmuludq, 2x shuffle.
18267 if (VT == MVT::v4i32) {
18268 assert(Subtarget->hasSSE2() && !Subtarget->hasSSE41() &&
18269 "Should not custom lower when pmuldq is available!");
18271 // Extract the odd parts.
18272 static const int UnpackMask[] = { 1, -1, 3, -1 };
18273 SDValue Aodds = DAG.getVectorShuffle(VT, dl, A, A, UnpackMask);
18274 SDValue Bodds = DAG.getVectorShuffle(VT, dl, B, B, UnpackMask);
18276 // Multiply the even parts.
18277 SDValue Evens = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64, A, B);
18278 // Now multiply odd parts.
18279 SDValue Odds = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64, Aodds, Bodds);
18281 Evens = DAG.getNode(ISD::BITCAST, dl, VT, Evens);
18282 Odds = DAG.getNode(ISD::BITCAST, dl, VT, Odds);
18284 // Merge the two vectors back together with a shuffle. This expands into 2
18286 static const int ShufMask[] = { 0, 4, 2, 6 };
18287 return DAG.getVectorShuffle(VT, dl, Evens, Odds, ShufMask);
18290 assert((VT == MVT::v2i64 || VT == MVT::v4i64 || VT == MVT::v8i64) &&
18291 "Only know how to lower V2I64/V4I64/V8I64 multiply");
18293 // Ahi = psrlqi(a, 32);
18294 // Bhi = psrlqi(b, 32);
18296 // AloBlo = pmuludq(a, b);
18297 // AloBhi = pmuludq(a, Bhi);
18298 // AhiBlo = pmuludq(Ahi, b);
18300 // AloBhi = psllqi(AloBhi, 32);
18301 // AhiBlo = psllqi(AhiBlo, 32);
18302 // return AloBlo + AloBhi + AhiBlo;
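//
// Why this works (a short derivation): writing a = Alo + 2^32*Ahi and
// b = Blo + 2^32*Bhi, the product is
//   a*b = Alo*Blo + 2^32*(Alo*Bhi + Ahi*Blo) + 2^64*Ahi*Bhi,
// and the last term vanishes modulo 2^64, so three pmuludq (32x32->64)
// multiplies, two 32-bit left shifts and two adds reconstruct the low 64
// bits of the product.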
18304 SDValue Ahi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, A, 32, DAG);
18305 SDValue Bhi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, B, 32, DAG);
18307 // Bit cast to 32-bit vectors for MULUDQ
18308 EVT MulVT = (VT == MVT::v2i64) ? MVT::v4i32 :
18309 (VT == MVT::v4i64) ? MVT::v8i32 : MVT::v16i32;
18310 A = DAG.getNode(ISD::BITCAST, dl, MulVT, A);
18311 B = DAG.getNode(ISD::BITCAST, dl, MulVT, B);
18312 Ahi = DAG.getNode(ISD::BITCAST, dl, MulVT, Ahi);
18313 Bhi = DAG.getNode(ISD::BITCAST, dl, MulVT, Bhi);
18315 SDValue AloBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, B);
18316 SDValue AloBhi = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, Bhi);
18317 SDValue AhiBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, Ahi, B);
18319 AloBhi = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, AloBhi, 32, DAG);
18320 AhiBlo = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, AhiBlo, 32, DAG);
18322 SDValue Res = DAG.getNode(ISD::ADD, dl, VT, AloBlo, AloBhi);
18323 return DAG.getNode(ISD::ADD, dl, VT, Res, AhiBlo);
18326 SDValue X86TargetLowering::LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) const {
18327 assert(Subtarget->isTargetWin64() && "Unexpected target");
18328 EVT VT = Op.getValueType();
18329 assert(VT.isInteger() && VT.getSizeInBits() == 128 &&
18330 "Unexpected return type for lowering");
18334 switch (Op->getOpcode()) {
18335 default: llvm_unreachable("Unexpected request for libcall!");
18336 case ISD::SDIV: isSigned = true; LC = RTLIB::SDIV_I128; break;
18337 case ISD::UDIV: isSigned = false; LC = RTLIB::UDIV_I128; break;
18338 case ISD::SREM: isSigned = true; LC = RTLIB::SREM_I128; break;
18339 case ISD::UREM: isSigned = false; LC = RTLIB::UREM_I128; break;
18340 case ISD::SDIVREM: isSigned = true; LC = RTLIB::SDIVREM_I128; break;
18341 case ISD::UDIVREM: isSigned = false; LC = RTLIB::UDIVREM_I128; break;
18345 SDValue InChain = DAG.getEntryNode();
18347 TargetLowering::ArgListTy Args;
18348 TargetLowering::ArgListEntry Entry;
18349 for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i) {
18350 EVT ArgVT = Op->getOperand(i).getValueType();
18351 assert(ArgVT.isInteger() && ArgVT.getSizeInBits() == 128 &&
18352 "Unexpected argument type for lowering");
18353 SDValue StackPtr = DAG.CreateStackTemporary(ArgVT, 16);
18354 Entry.Node = StackPtr;
18355 InChain = DAG.getStore(InChain, dl, Op->getOperand(i), StackPtr, MachinePointerInfo(),
18357 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
18358 Entry.Ty = PointerType::get(ArgTy,0);
18359 Entry.isSExt = false;
18360 Entry.isZExt = false;
18361 Args.push_back(Entry);
18364 SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
18367 TargetLowering::CallLoweringInfo CLI(DAG);
18368 CLI.setDebugLoc(dl).setChain(InChain)
18369 .setCallee(getLibcallCallingConv(LC),
18370 static_cast<EVT>(MVT::v2i64).getTypeForEVT(*DAG.getContext()),
18371 Callee, std::move(Args), 0)
18372 .setInRegister().setSExtResult(isSigned).setZExtResult(!isSigned);
18374 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
18375 return DAG.getNode(ISD::BITCAST, dl, VT, CallInfo.first);
18378 static SDValue LowerMUL_LOHI(SDValue Op, const X86Subtarget *Subtarget,
18379 SelectionDAG &DAG) {
18380 SDValue Op0 = Op.getOperand(0), Op1 = Op.getOperand(1);
18381 EVT VT = Op0.getValueType();
18384 assert((VT == MVT::v4i32 && Subtarget->hasSSE2()) ||
18385 (VT == MVT::v8i32 && Subtarget->hasInt256()));
18387 // PMULxD operations multiply each even value (starting at 0) of LHS with
18388 // the related value of RHS and produce a widened result.
18389 // E.g., PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
18390 // => <2 x i64> <ae|cg>
18392 // In other words, to have all the results, we need to perform two PMULxD:
18393 // 1. one with the even values.
18394 // 2. one with the odd values.
18395 // To achieve #2, we need to place the odd values at an even position.
18397 // Place the odd value at an even position (basically, shift all values 1
18398 // step to the left):
18399 const int Mask[] = {1, -1, 3, -1, 5, -1, 7, -1};
18400 // <a|b|c|d> => <b|undef|d|undef>
18401 SDValue Odd0 = DAG.getVectorShuffle(VT, dl, Op0, Op0, Mask);
18402 // <e|f|g|h> => <f|undef|h|undef>
18403 SDValue Odd1 = DAG.getVectorShuffle(VT, dl, Op1, Op1, Mask);
18405 // Emit two multiplies, one for the lower 2 ints and one for the higher 2
18407 MVT MulVT = VT == MVT::v4i32 ? MVT::v2i64 : MVT::v4i64;
18408 bool IsSigned = Op->getOpcode() == ISD::SMUL_LOHI;
18410 (!IsSigned || !Subtarget->hasSSE41()) ? X86ISD::PMULUDQ : X86ISD::PMULDQ;
18411 // PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
18412 // => <2 x i64> <ae|cg>
18413 SDValue Mul1 = DAG.getNode(ISD::BITCAST, dl, VT,
18414 DAG.getNode(Opcode, dl, MulVT, Op0, Op1));
18415 // PMULUDQ <4 x i32> <b|undef|d|undef>, <4 x i32> <f|undef|h|undef>
18416 // => <2 x i64> <bf|dh>
18417 SDValue Mul2 = DAG.getNode(ISD::BITCAST, dl, VT,
18418 DAG.getNode(Opcode, dl, MulVT, Odd0, Odd1));
18420 // Shuffle it back into the right order.
18421 SDValue Highs, Lows;
18422 if (VT == MVT::v8i32) {
18423 const int HighMask[] = {1, 9, 3, 11, 5, 13, 7, 15};
18424 Highs = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, HighMask);
18425 const int LowMask[] = {0, 8, 2, 10, 4, 12, 6, 14};
18426 Lows = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, LowMask);
18428 const int HighMask[] = {1, 5, 3, 7};
18429 Highs = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, HighMask);
18430 const int LowMask[] = {0, 4, 2, 6};
18431 Lows = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, LowMask);
18434 // If we have a signed multiply but no PMULDQ, fix up the high parts of an
18435 // unsigned multiply.
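// The correction uses the standard identity (per 32-bit lane):
//   mulhs(a, b) = mulhu(a, b) - (a < 0 ? b : 0) - (b < 0 ? a : 0)
// where (a < 0 ? b : 0) is computed below as (a >>s 31) & b.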
18436 if (IsSigned && !Subtarget->hasSSE41()) {
18438 DAG.getConstant(31, DAG.getTargetLoweringInfo().getShiftAmountTy(VT));
18439 SDValue T1 = DAG.getNode(ISD::AND, dl, VT,
18440 DAG.getNode(ISD::SRA, dl, VT, Op0, ShAmt), Op1);
18441 SDValue T2 = DAG.getNode(ISD::AND, dl, VT,
18442 DAG.getNode(ISD::SRA, dl, VT, Op1, ShAmt), Op0);
18444 SDValue Fixup = DAG.getNode(ISD::ADD, dl, VT, T1, T2);
18445 Highs = DAG.getNode(ISD::SUB, dl, VT, Highs, Fixup);
18448 // The first result of MUL_LOHI is actually the low value, followed by the
18450 SDValue Ops[] = {Lows, Highs};
18451 return DAG.getMergeValues(Ops, dl);
18454 static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
18455 const X86Subtarget *Subtarget) {
18456 MVT VT = Op.getSimpleValueType();
18458 SDValue R = Op.getOperand(0);
18459 SDValue Amt = Op.getOperand(1);
18461 // Optimize shl/srl/sra with constant shift amount.
18462 if (auto *BVAmt = dyn_cast<BuildVectorSDNode>(Amt)) {
18463 if (auto *ShiftConst = BVAmt->getConstantSplatNode()) {
18464 uint64_t ShiftAmt = ShiftConst->getZExtValue();
18466 if (VT == MVT::v2i64 || VT == MVT::v4i32 || VT == MVT::v8i16 ||
18467 (Subtarget->hasInt256() &&
18468 (VT == MVT::v4i64 || VT == MVT::v8i32 || VT == MVT::v16i16)) ||
18469 (Subtarget->hasAVX512() &&
18470 (VT == MVT::v8i64 || VT == MVT::v16i32))) {
18471 if (Op.getOpcode() == ISD::SHL)
18472 return getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, R, ShiftAmt,
18474 if (Op.getOpcode() == ISD::SRL)
18475 return getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, R, ShiftAmt,
18477 if (Op.getOpcode() == ISD::SRA && VT != MVT::v2i64 && VT != MVT::v4i64)
18478 return getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, R, ShiftAmt,
18482 if (VT == MVT::v16i8) {
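// There is no per-byte vector shift instruction, so the approach below is to
// shift the vector as 16-bit lanes and then mask away the bits that crossed
// over from the neighbouring byte (hence the AND masks that follow).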
18483 if (Op.getOpcode() == ISD::SHL) {
18484 // Make a large shift.
18485 SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl,
18486 MVT::v8i16, R, ShiftAmt,
18488 SHL = DAG.getNode(ISD::BITCAST, dl, VT, SHL);
18489 // Zero out the rightmost bits.
18490 SmallVector<SDValue, 16> V(16,
18491 DAG.getConstant(uint8_t(-1U << ShiftAmt),
18493 return DAG.getNode(ISD::AND, dl, VT, SHL,
18494 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
18496 if (Op.getOpcode() == ISD::SRL) {
18497 // Make a large shift.
18498 SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, dl,
18499 MVT::v8i16, R, ShiftAmt,
18501 SRL = DAG.getNode(ISD::BITCAST, dl, VT, SRL);
18502 // Zero out the leftmost bits.
18503 SmallVector<SDValue, 16> V(16,
18504 DAG.getConstant(uint8_t(-1U) >> ShiftAmt,
18506 return DAG.getNode(ISD::AND, dl, VT, SRL,
18507 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
18509 if (Op.getOpcode() == ISD::SRA) {
18510 if (ShiftAmt == 7) {
18511 // R s>> 7 === R s< 0
18512 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
18513 return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
18516 // R s>> a === ((R u>> a) ^ m) - m
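// (Explanatory note: here m = 128 >> a has its single set bit where the sign
// bit lands after the logical shift; xor-ing with m and then subtracting m
// propagates that bit through the upper bits, i.e. sign-extends the result.)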
18517 SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
18518 SmallVector<SDValue, 16> V(16, DAG.getConstant(128 >> ShiftAmt,
18520 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V);
18521 Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
18522 Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
18525 llvm_unreachable("Unknown shift opcode.");
18528 if (Subtarget->hasInt256() && VT == MVT::v32i8) {
18529 if (Op.getOpcode() == ISD::SHL) {
18530 // Make a large shift.
18531 SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl,
18532 MVT::v16i16, R, ShiftAmt,
18534 SHL = DAG.getNode(ISD::BITCAST, dl, VT, SHL);
18535 // Zero out the rightmost bits.
18536 SmallVector<SDValue, 32> V(32,
18537 DAG.getConstant(uint8_t(-1U << ShiftAmt),
18539 return DAG.getNode(ISD::AND, dl, VT, SHL,
18540 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
18542 if (Op.getOpcode() == ISD::SRL) {
18543 // Make a large shift.
18544 SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, dl,
18545 MVT::v16i16, R, ShiftAmt,
18547 SRL = DAG.getNode(ISD::BITCAST, dl, VT, SRL);
18548 // Zero out the leftmost bits.
18549 SmallVector<SDValue, 32> V(32,
18550 DAG.getConstant(uint8_t(-1U) >> ShiftAmt,
18552 return DAG.getNode(ISD::AND, dl, VT, SRL,
18553 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
18555 if (Op.getOpcode() == ISD::SRA) {
18556 if (ShiftAmt == 7) {
18557 // R s>> 7 === R s< 0
18558 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
18559 return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
18562 // R s>> a === ((R u>> a) ^ m) - m
18563 SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
18564 SmallVector<SDValue, 32> V(32, DAG.getConstant(128 >> ShiftAmt,
18566 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V);
18567 Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
18568 Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
18571 llvm_unreachable("Unknown shift opcode.");
18576 // Special case in 32-bit mode, where i64 is expanded into high and low parts.
18577 if (!Subtarget->is64Bit() &&
18578 (VT == MVT::v2i64 || (Subtarget->hasInt256() && VT == MVT::v4i64)) &&
18579 Amt.getOpcode() == ISD::BITCAST &&
18580 Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
18581 Amt = Amt.getOperand(0);
18582 unsigned Ratio = Amt.getSimpleValueType().getVectorNumElements() /
18583 VT.getVectorNumElements();
18584 unsigned RatioInLog2 = Log2_32_Ceil(Ratio);
18585 uint64_t ShiftAmt = 0;
18586 for (unsigned i = 0; i != Ratio; ++i) {
18587 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Amt.getOperand(i));
18591 ShiftAmt |= C->getZExtValue() << (i * (1 << (6 - RatioInLog2)));
18593 // Check remaining shift amounts.
18594 for (unsigned i = Ratio; i != Amt.getNumOperands(); i += Ratio) {
18595 uint64_t ShAmt = 0;
18596 for (unsigned j = 0; j != Ratio; ++j) {
18597 ConstantSDNode *C =
18598 dyn_cast<ConstantSDNode>(Amt.getOperand(i + j));
18602 ShAmt |= C->getZExtValue() << (j * (1 << (6 - RatioInLog2)));
18604 if (ShAmt != ShiftAmt)
18607 switch (Op.getOpcode()) {
18609 llvm_unreachable("Unknown shift opcode!");
18611 return getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, R, ShiftAmt,
18614 return getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, R, ShiftAmt,
18617 return getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, R, ShiftAmt,
18625 static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG,
18626 const X86Subtarget* Subtarget) {
18627 MVT VT = Op.getSimpleValueType();
18629 SDValue R = Op.getOperand(0);
18630 SDValue Amt = Op.getOperand(1);
18632 if ((VT == MVT::v2i64 && Op.getOpcode() != ISD::SRA) ||
18633 VT == MVT::v4i32 || VT == MVT::v8i16 ||
18634 (Subtarget->hasInt256() &&
18635 ((VT == MVT::v4i64 && Op.getOpcode() != ISD::SRA) ||
18636 VT == MVT::v8i32 || VT == MVT::v16i16)) ||
18637 (Subtarget->hasAVX512() && (VT == MVT::v8i64 || VT == MVT::v16i32))) {
18639 EVT EltVT = VT.getVectorElementType();
18641 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Amt)) {
18642 // Check if this build_vector node is doing a splat.
18643 // If so, then set BaseShAmt equal to the splat value.
18644 BaseShAmt = BV->getSplatValue();
18645 if (BaseShAmt && BaseShAmt.getOpcode() == ISD::UNDEF)
18646 BaseShAmt = SDValue();
18648 if (Amt.getOpcode() == ISD::EXTRACT_SUBVECTOR)
18649 Amt = Amt.getOperand(0);
18651 ShuffleVectorSDNode *SVN = dyn_cast<ShuffleVectorSDNode>(Amt);
18652 if (SVN && SVN->isSplat()) {
18653 unsigned SplatIdx = (unsigned)SVN->getSplatIndex();
18654 SDValue InVec = Amt.getOperand(0);
18655 if (InVec.getOpcode() == ISD::BUILD_VECTOR) {
18656 assert((SplatIdx < InVec.getValueType().getVectorNumElements()) &&
18657 "Unexpected shuffle index found!");
18658 BaseShAmt = InVec.getOperand(SplatIdx);
18659 } else if (InVec.getOpcode() == ISD::INSERT_VECTOR_ELT) {
18660 if (ConstantSDNode *C =
18661 dyn_cast<ConstantSDNode>(InVec.getOperand(2))) {
18662 if (C->getZExtValue() == SplatIdx)
18663 BaseShAmt = InVec.getOperand(1);
18668 // Avoid introducing an extract element from a shuffle.
18669 BaseShAmt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, InVec,
18670 DAG.getIntPtrConstant(SplatIdx));
18674 if (BaseShAmt.getNode()) {
18675 assert(EltVT.bitsLE(MVT::i64) && "Unexpected element type!");
18676 if (EltVT != MVT::i64 && EltVT.bitsGT(MVT::i32))
18677 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, BaseShAmt);
18678 else if (EltVT.bitsLT(MVT::i32))
18679 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, BaseShAmt);
18681 switch (Op.getOpcode()) {
18682 default:
18683 llvm_unreachable("Unknown shift opcode!");
18684 case ISD::SHL:
18685 switch (VT.SimpleTy) {
18686 default: return SDValue();
18687 case MVT::v2i64:
18688 case MVT::v4i32:
18689 case MVT::v8i16:
18690 case MVT::v4i64:
18691 case MVT::v8i32:
18692 case MVT::v16i16:
18693 case MVT::v16i32:
18694 case MVT::v8i64:
18695 return getTargetVShiftNode(X86ISD::VSHLI, dl, VT, R, BaseShAmt, DAG);
18696 }
18697 case ISD::SRA:
18698 switch (VT.SimpleTy) {
18699 default: return SDValue();
18700 case MVT::v4i32:
18701 case MVT::v8i16:
18702 case MVT::v8i32:
18703 case MVT::v16i16:
18704 case MVT::v16i32:
18705 case MVT::v8i64:
18706 return getTargetVShiftNode(X86ISD::VSRAI, dl, VT, R, BaseShAmt, DAG);
18707 }
18708 case ISD::SRL:
18709 switch (VT.SimpleTy) {
18710 default: return SDValue();
18711 case MVT::v2i64:
18712 case MVT::v4i32:
18713 case MVT::v8i16:
18714 case MVT::v4i64:
18715 case MVT::v8i32:
18716 case MVT::v16i16:
18717 case MVT::v16i32:
18718 case MVT::v8i64:
18719 return getTargetVShiftNode(X86ISD::VSRLI, dl, VT, R, BaseShAmt, DAG);
18720 }
18721 }
18722 }
18723 }
18725 // Special case in 32-bit mode, where i64 is expanded into high and low parts.
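// The i64 amounts then appear as an i32 BUILD_VECTOR bitcast to the 64-bit
// vector type, so every group of Ratio consecutive i32 operands must describe
// the same value before the node can be forwarded as a plain VSHL/VSRL/VSRA.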
18726 if (!Subtarget->is64Bit() &&
18727 (VT == MVT::v2i64 || (Subtarget->hasInt256() && VT == MVT::v4i64) ||
18728 (Subtarget->hasAVX512() && VT == MVT::v8i64)) &&
18729 Amt.getOpcode() == ISD::BITCAST &&
18730 Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
18731 Amt = Amt.getOperand(0);
18732 unsigned Ratio = Amt.getSimpleValueType().getVectorNumElements() /
18733 VT.getVectorNumElements();
18734 std::vector<SDValue> Vals(Ratio);
18735 for (unsigned i = 0; i != Ratio; ++i)
18736 Vals[i] = Amt.getOperand(i);
18737 for (unsigned i = Ratio; i != Amt.getNumOperands(); i += Ratio) {
18738 for (unsigned j = 0; j != Ratio; ++j)
18739 if (Vals[j] != Amt.getOperand(i + j))
18740 return SDValue();
18741 }
18742 switch (Op.getOpcode()) {
18743 default:
18744 llvm_unreachable("Unknown shift opcode!");
18745 case ISD::SHL:
18746 return DAG.getNode(X86ISD::VSHL, dl, VT, R, Op.getOperand(1));
18747 case ISD::SRL:
18748 return DAG.getNode(X86ISD::VSRL, dl, VT, R, Op.getOperand(1));
18749 case ISD::SRA:
18750 return DAG.getNode(X86ISD::VSRA, dl, VT, R, Op.getOperand(1));
18751 }
18752 }
18754 return SDValue();
18755 }
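// Top-level custom lowering for vector shifts: try the constant-immediate and
// splatted-scalar paths above, then AVX-512/AVX2 native variable shifts, a
// multiply-based SHL, two-shift MOVSS/MOVSD tricks, the v16i8 select sequence,
// widening for v8i16 on AVX2, and finally 256-bit decomposition into halves.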
18757 static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
18758 SelectionDAG &DAG) {
18759 MVT VT = Op.getSimpleValueType();
18760 SDLoc dl(Op);
18761 SDValue R = Op.getOperand(0);
18762 SDValue Amt = Op.getOperand(1);
18765 assert(VT.isVector() && "Custom lowering only for vector shifts!");
18766 assert(Subtarget->hasSSE2() && "Only custom lower when we have SSE2!");
18767 SDValue V;
18768 V = LowerScalarImmediateShift(Op, DAG, Subtarget);
18769 if (V.getNode())
18770 return V;
18772 V = LowerScalarVariableShift(Op, DAG, Subtarget);
18773 if (V.getNode())
18774 return V;
18776 if (Subtarget->hasAVX512() && (VT == MVT::v16i32 || VT == MVT::v8i64))
18777 return Op;
18778 // AVX2 has VPSLLV/VPSRAV/VPSRLV.
18779 if (Subtarget->hasInt256()) {
18780 if (Op.getOpcode() == ISD::SRL &&
18781 (VT == MVT::v2i64 || VT == MVT::v4i32 ||
18782 VT == MVT::v4i64 || VT == MVT::v8i32))
18783 return Op;
18784 if (Op.getOpcode() == ISD::SHL &&
18785 (VT == MVT::v2i64 || VT == MVT::v4i32 ||
18786 VT == MVT::v4i64 || VT == MVT::v8i32))
18787 return Op;
18788 if (Op.getOpcode() == ISD::SRA && (VT == MVT::v4i32 || VT == MVT::v8i32))
18789 return Op;
18790 }
18792 // If possible, lower this packed shift into a vector multiply instead of
18793 // expanding it into a sequence of scalar shifts.
18794 // Do this only if the vector shift count is a constant build_vector.
18795 if (Op.getOpcode() == ISD::SHL &&
18796 (VT == MVT::v8i16 || VT == MVT::v4i32 ||
18797 (Subtarget->hasInt256() && VT == MVT::v16i16)) &&
18798 ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
18799 SmallVector<SDValue, 8> Elts;
18800 EVT SVT = VT.getScalarType();
18801 unsigned SVTBits = SVT.getSizeInBits();
18802 const APInt &One = APInt(SVTBits, 1);
18803 unsigned NumElems = VT.getVectorNumElements();
18805 for (unsigned i=0; i !=NumElems; ++i) {
18806 SDValue Op = Amt->getOperand(i);
18807 if (Op->getOpcode() == ISD::UNDEF) {
18808 Elts.push_back(Op);
18809 continue;
18810 }
18812 ConstantSDNode *ND = cast<ConstantSDNode>(Op);
18813 const APInt &C = APInt(SVTBits, ND->getAPIntValue().getZExtValue());
18814 uint64_t ShAmt = C.getZExtValue();
18815 if (ShAmt >= SVTBits) {
18816 Elts.push_back(DAG.getUNDEF(SVT));
18817 continue;
18818 }
18819 Elts.push_back(DAG.getConstant(One.shl(ShAmt), SVT));
18820 }
18821 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Elts);
18822 return DAG.getNode(ISD::MUL, dl, VT, R, BV);
18825 // Lower SHL with variable shift amount.
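// For v4i32 the trick below shifts each amount into the f32 exponent field
// (<< 23), adds the exponent bias 0x3f800000 (1.0f) so every lane becomes the
// float 2^amt, converts back to integer to obtain 1 << amt, and multiplies.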
18826 if (VT == MVT::v4i32 && Op->getOpcode() == ISD::SHL) {
18827 Op = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(23, VT));
18829 Op = DAG.getNode(ISD::ADD, dl, VT, Op, DAG.getConstant(0x3f800000U, VT));
18830 Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, Op);
18831 Op = DAG.getNode(ISD::FP_TO_SINT, dl, VT, Op);
18832 return DAG.getNode(ISD::MUL, dl, VT, Op, R);
18835 // If possible, lower this shift as a sequence of two shifts by
18836 // constant plus a MOVSS/MOVSD instead of scalarizing it.
18837 // Example:
18838 // (v4i32 (srl A, (build_vector < X, Y, Y, Y>)))
18839 //
18840 // Could be rewritten as:
18841 // (v4i32 (MOVSS (srl A, <Y,Y,Y,Y>), (srl A, <X,X,X,X>)))
18842 //
18843 // The advantage is that the two shifts from the example would be
18844 // lowered as X86ISD::VSRLI nodes. This would be cheaper than scalarizing
18845 // the vector shift into four scalar shifts plus four pairs of vector
18846 // insert/extract.
18847 if ((VT == MVT::v8i16 || VT == MVT::v4i32) &&
18848 ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
18849 unsigned TargetOpcode = X86ISD::MOVSS;
18850 bool CanBeSimplified;
18851 // The splat value for the first packed shift (the 'X' from the example).
18852 SDValue Amt1 = Amt->getOperand(0);
18853 // The splat value for the second packed shift (the 'Y' from the example).
18854 SDValue Amt2 = (VT == MVT::v4i32) ? Amt->getOperand(1) :
18855 Amt->getOperand(2);
18857 // See if it is possible to replace this node with a sequence of
18858 // two shifts followed by a MOVSS/MOVSD
18859 if (VT == MVT::v4i32) {
18860 // Check if it is legal to use a MOVSS.
18861 CanBeSimplified = Amt2 == Amt->getOperand(2) &&
18862 Amt2 == Amt->getOperand(3);
18863 if (!CanBeSimplified) {
18864 // Otherwise, check if we can still simplify this node using a MOVSD.
18865 CanBeSimplified = Amt1 == Amt->getOperand(1) &&
18866 Amt->getOperand(2) == Amt->getOperand(3);
18867 TargetOpcode = X86ISD::MOVSD;
18868 Amt2 = Amt->getOperand(2);
18871 // Do similar checks for the case where the machine value type
18872 // is MVT::v8i16.
18873 CanBeSimplified = Amt1 == Amt->getOperand(1);
18874 for (unsigned i=3; i != 8 && CanBeSimplified; ++i)
18875 CanBeSimplified = Amt2 == Amt->getOperand(i);
18877 if (!CanBeSimplified) {
18878 TargetOpcode = X86ISD::MOVSD;
18879 CanBeSimplified = true;
18880 Amt2 = Amt->getOperand(4);
18881 for (unsigned i=0; i != 4 && CanBeSimplified; ++i)
18882 CanBeSimplified = Amt1 == Amt->getOperand(i);
18883 for (unsigned j=4; j != 8 && CanBeSimplified; ++j)
18884 CanBeSimplified = Amt2 == Amt->getOperand(j);
18888 if (CanBeSimplified && isa<ConstantSDNode>(Amt1) &&
18889 isa<ConstantSDNode>(Amt2)) {
18890 // Replace this node with two shifts followed by a MOVSS/MOVSD.
18891 EVT CastVT = MVT::v4i32;
18893 DAG.getConstant(cast<ConstantSDNode>(Amt1)->getAPIntValue(), VT);
18894 SDValue Shift1 = DAG.getNode(Op->getOpcode(), dl, VT, R, Splat1);
18896 DAG.getConstant(cast<ConstantSDNode>(Amt2)->getAPIntValue(), VT);
18897 SDValue Shift2 = DAG.getNode(Op->getOpcode(), dl, VT, R, Splat2);
18898 if (TargetOpcode == X86ISD::MOVSD)
18899 CastVT = MVT::v2i64;
18900 SDValue BitCast1 = DAG.getNode(ISD::BITCAST, dl, CastVT, Shift1);
18901 SDValue BitCast2 = DAG.getNode(ISD::BITCAST, dl, CastVT, Shift2);
18902 SDValue Result = getTargetShuffleNode(TargetOpcode, dl, CastVT, BitCast2,
18903 BitCast1, DAG);
18904 return DAG.getNode(ISD::BITCAST, dl, VT, Result);
18908 if (VT == MVT::v16i8 && Op->getOpcode() == ISD::SHL) {
18909 assert(Subtarget->hasSSE2() && "Need SSE2 for pslli/pcmpeq.");
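// Variable v16i8 SHL has no direct instruction; the amount is shifted left by 5
// so bit 2 of each byte becomes its MSB, a PCMPEQ against 0x80 turns that bit
// into a per-byte select mask, and doubling the amount each round tests the
// next lower bit to apply conditional shifts by 4, 2 and then 1 (via r+r).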
18912 Op = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(5, VT));
18913 Op = DAG.getNode(ISD::BITCAST, dl, VT, Op);
18915 // Turn 'a' into a mask suitable for VSELECT
18916 SDValue VSelM = DAG.getConstant(0x80, VT);
18917 SDValue OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
18918 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);
18920 SDValue CM1 = DAG.getConstant(0x0f, VT);
18921 SDValue CM2 = DAG.getConstant(0x3f, VT);
18923 // r = VSELECT(r, psllw(r & (char16)15, 4), a);
18924 SDValue M = DAG.getNode(ISD::AND, dl, VT, R, CM1);
18925 M = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, MVT::v8i16, M, 4, DAG);
18926 M = DAG.getNode(ISD::BITCAST, dl, VT, M);
18927 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R);
18930 Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op);
18931 OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
18932 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);
18934 // r = VSELECT(r, psllw(r & (char16)63, 2), a);
18935 M = DAG.getNode(ISD::AND, dl, VT, R, CM2);
18936 M = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, MVT::v8i16, M, 2, DAG);
18937 M = DAG.getNode(ISD::BITCAST, dl, VT, M);
18938 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R);
18941 Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op);
18942 OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
18943 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);
18945 // return VSELECT(r, r+r, a);
18946 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel,
18947 DAG.getNode(ISD::ADD, dl, VT, R, R), R);
18948 return R;
18949 }
18951 // It's worth extending once and using the v8i32 shifts for 16-bit types, but
18952 // the extra overheads to get from v16i8 to v8i32 make the existing SSE
18953 // solution better.
18954 if (Subtarget->hasInt256() && VT == MVT::v8i16) {
18955 MVT NewVT = VT == MVT::v8i16 ? MVT::v8i32 : MVT::v16i16;
18956 unsigned ExtOpc =
18957 Op.getOpcode() == ISD::SRA ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
18958 R = DAG.getNode(ExtOpc, dl, NewVT, R);
18959 Amt = DAG.getNode(ISD::ANY_EXTEND, dl, NewVT, Amt);
18960 return DAG.getNode(ISD::TRUNCATE, dl, VT,
18961 DAG.getNode(Op.getOpcode(), dl, NewVT, R, Amt));
18964 // Decompose 256-bit shifts into smaller 128-bit shifts.
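// e.g. a v8i32 shift becomes two v4i32 shifts of the low and high halves of
// both the value and the amount, and the halves are concatenated back together.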
18965 if (VT.is256BitVector()) {
18966 unsigned NumElems = VT.getVectorNumElements();
18967 MVT EltVT = VT.getVectorElementType();
18968 EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
18970 // Extract the two vectors
18971 SDValue V1 = Extract128BitVector(R, 0, DAG, dl);
18972 SDValue V2 = Extract128BitVector(R, NumElems/2, DAG, dl);
18974 // Recreate the shift amount vectors
18975 SDValue Amt1, Amt2;
18976 if (Amt.getOpcode() == ISD::BUILD_VECTOR) {
18977 // Constant shift amount
18978 SmallVector<SDValue, 4> Amt1Csts;
18979 SmallVector<SDValue, 4> Amt2Csts;
18980 for (unsigned i = 0; i != NumElems/2; ++i)
18981 Amt1Csts.push_back(Amt->getOperand(i));
18982 for (unsigned i = NumElems/2; i != NumElems; ++i)
18983 Amt2Csts.push_back(Amt->getOperand(i));
18985 Amt1 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Amt1Csts);
18986 Amt2 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Amt2Csts);
18987 } else {
18988 // Variable shift amount
18989 Amt1 = Extract128BitVector(Amt, 0, DAG, dl);
18990 Amt2 = Extract128BitVector(Amt, NumElems/2, DAG, dl);
18993 // Issue new vector shifts for the smaller types
18994 V1 = DAG.getNode(Op.getOpcode(), dl, NewVT, V1, Amt1);
18995 V2 = DAG.getNode(Op.getOpcode(), dl, NewVT, V2, Amt2);
18997 // Concatenate the result back
18998 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, V1, V2);
18999 }
19001 return SDValue();
19002 }
19004 static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
19005 // Lower the "add/sub/mul with overflow" instruction into a regular ins plus
19006 // a "setcc" instruction that checks the overflow flag. The "brcond" lowering
19007 // looks for this combo and may remove the "setcc" instruction if the "setcc"
19008 // has only one use.
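// e.g. (i32, i1) = saddo a, b is lowered to (i32, i32) = X86ISD::ADD a, b
// followed by an X86ISD::SETCC of X86::COND_O on the flags result.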
19009 SDNode *N = Op.getNode();
19010 SDValue LHS = N->getOperand(0);
19011 SDValue RHS = N->getOperand(1);
19012 unsigned BaseOp = 0;
19013 unsigned Cond = 0;
19014 SDLoc DL(Op);
19015 switch (Op.getOpcode()) {
19016 default: llvm_unreachable("Unknown ovf instruction!");
19017 case ISD::SADDO:
19018 // An add of one will be selected as an INC. Note that INC doesn't
19019 // set CF, so we can't do this for UADDO.
19020 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
19021 if (C->isOne()) {
19022 BaseOp = X86ISD::INC;
19023 Cond = X86::COND_O;
19024 break;
19025 }
19026 BaseOp = X86ISD::ADD;
19027 Cond = X86::COND_O;
19028 break;
19029 case ISD::UADDO:
19030 BaseOp = X86ISD::ADD;
19031 Cond = X86::COND_B;
19032 break;
19033 case ISD::SSUBO:
19034 // A subtract of one will be selected as a DEC. Note that DEC doesn't
19035 // set CF, so we can't do this for USUBO.
19036 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
19037 if (C->isOne()) {
19038 BaseOp = X86ISD::DEC;
19039 Cond = X86::COND_O;
19040 break;
19041 }
19042 BaseOp = X86ISD::SUB;
19043 Cond = X86::COND_O;
19044 break;
19045 case ISD::USUBO:
19046 BaseOp = X86ISD::SUB;
19047 Cond = X86::COND_B;
19048 break;
19049 case ISD::SMULO:
19050 BaseOp = N->getValueType(0) == MVT::i8 ? X86ISD::SMUL8 : X86ISD::SMUL;
19051 Cond = X86::COND_O;
19052 break;
19053 case ISD::UMULO: { // i64, i8 = umulo lhs, rhs --> i64, i64, i32 umul lhs,rhs
19054 if (N->getValueType(0) == MVT::i8) {
19055 BaseOp = X86ISD::UMUL8;
19056 Cond = X86::COND_O;
19057 break;
19058 }
19059 SDVTList VTs = DAG.getVTList(N->getValueType(0), N->getValueType(0),
19060 MVT::i32);
19061 SDValue Sum = DAG.getNode(X86ISD::UMUL, DL, VTs, LHS, RHS);
19063 SDValue SetCC =
19064 DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
19065 DAG.getConstant(X86::COND_O, MVT::i32),
19066 SDValue(Sum.getNode(), 2));
19068 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
19069 }
19070 }
19072 // Also sets EFLAGS.
19073 SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::i32);
19074 SDValue Sum = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);
19076 SDValue SetCC =
19077 DAG.getNode(X86ISD::SETCC, DL, N->getValueType(1),
19078 DAG.getConstant(Cond, MVT::i32),
19079 SDValue(Sum.getNode(), 1));
19081 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
19084 // Sign extension of the low part of vector elements. This may be used either
19085 // when sign extend instructions are not available or if the vector element
19086 // sizes already match the sign-extended size. If the vector elements are in
19087 // their pre-extended size and sign extend instructions are available, that will
19088 // be handled by LowerSIGN_EXTEND.
19089 SDValue X86TargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
19090 SelectionDAG &DAG) const {
19092 EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
19093 MVT VT = Op.getSimpleValueType();
19095 if (!Subtarget->hasSSE2() || !VT.isVector())
19098 unsigned BitsDiff = VT.getScalarType().getSizeInBits() -
19099 ExtraVT.getScalarType().getSizeInBits();
19101 switch (VT.SimpleTy) {
19102 default: return SDValue();
19105 if (!Subtarget->hasFp256())
19107 if (!Subtarget->hasInt256()) {
19108 // needs to be split
19109 unsigned NumElems = VT.getVectorNumElements();
19111 // Extract the LHS vectors
19112 SDValue LHS = Op.getOperand(0);
19113 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
19114 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);
19116 MVT EltVT = VT.getVectorElementType();
19117 EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
19119 EVT ExtraEltVT = ExtraVT.getVectorElementType();
19120 unsigned ExtraNumElems = ExtraVT.getVectorNumElements();
19121 ExtraVT = EVT::getVectorVT(*DAG.getContext(), ExtraEltVT,
19123 SDValue Extra = DAG.getValueType(ExtraVT);
19125 LHS1 = DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, Extra);
19126 LHS2 = DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, Extra);
19128 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, LHS1, LHS2);
19133 SDValue Op0 = Op.getOperand(0);
19135 // This is a sign extension of some low part of vector elements without
19136 // changing the size of the vector elements themselves:
19137 // Shift-Left + Shift-Right-Algebraic.
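// e.g. sign-extending the low 16 bits of every i32 lane gives BitsDiff == 16,
// so this emits (VSRAI (VSHLI x, 16), 16).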
19138 SDValue Shl = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Op0,
19140 return getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, Shl, BitsDiff,
19146 /// Returns true if the operand type is exactly twice the native width, and
19147 /// the corresponding cmpxchg8b or cmpxchg16b instruction is available.
19148 /// Used to know whether to use cmpxchg8/16b when expanding atomic operations
19149 /// (otherwise we leave them alone to become __sync_fetch_and_... calls).
19150 bool X86TargetLowering::needsCmpXchgNb(const Type *MemType) const {
19151 const X86Subtarget &Subtarget =
19152 getTargetMachine().getSubtarget<X86Subtarget>();
19153 unsigned OpWidth = MemType->getPrimitiveSizeInBits();
19155 if (OpWidth == 64)
19156 return !Subtarget.is64Bit(); // FIXME this should be Subtarget.hasCmpxchg8b
19157 else if (OpWidth == 128)
19158 return Subtarget.hasCmpxchg16b();
19160 return false;
19161 }
19163 bool X86TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
19164 return needsCmpXchgNb(SI->getValueOperand()->getType());
19167 // Note: this turns large loads into lock cmpxchg8b/16b.
19168 // FIXME: On 32 bits x86, fild/movq might be faster than lock cmpxchg8b.
19169 bool X86TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
19170 auto PTy = cast<PointerType>(LI->getPointerOperand()->getType());
19171 return needsCmpXchgNb(PTy->getElementType());
19174 bool X86TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
19175 const X86Subtarget &Subtarget =
19176 getTargetMachine().getSubtarget<X86Subtarget>();
19177 unsigned NativeWidth = Subtarget.is64Bit() ? 64 : 32;
19178 const Type *MemType = AI->getType();
19180 // If the operand is too big, we must see if cmpxchg8/16b is available
19181 // and default to library calls otherwise.
19182 if (MemType->getPrimitiveSizeInBits() > NativeWidth)
19183 return needsCmpXchgNb(MemType);
19185 AtomicRMWInst::BinOp Op = AI->getOperation();
19186 switch (Op) {
19187 default:
19188 llvm_unreachable("Unknown atomic operation");
19189 case AtomicRMWInst::Xchg:
19190 case AtomicRMWInst::Add:
19191 case AtomicRMWInst::Sub:
19192 // It's better to use xadd, xsub or xchg for these in all cases.
19193 return true;
19194 case AtomicRMWInst::Or:
19195 case AtomicRMWInst::And:
19196 case AtomicRMWInst::Xor:
19197 // If the atomicrmw's result isn't actually used, we can just add a "lock"
19198 // prefix to a normal instruction for these operations.
19199 return !AI->use_empty();
19200 case AtomicRMWInst::Nand:
19201 case AtomicRMWInst::Max:
19202 case AtomicRMWInst::Min:
19203 case AtomicRMWInst::UMax:
19204 case AtomicRMWInst::UMin:
19205 // These always require a non-trivial set of data operations on x86. We must
19206 // use a cmpxchg loop.
19207 return true;
19208 }
19209 }
19211 static bool hasMFENCE(const X86Subtarget& Subtarget) {
19212 // Use mfence if we have SSE2 or we're on x86-64 (even if we asked for
19213 // no-sse2). There isn't any reason to disable it if the target processor
19214 // supports it.
19215 return Subtarget.hasSSE2() || Subtarget.is64Bit();
19219 X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
19220 const X86Subtarget &Subtarget =
19221 getTargetMachine().getSubtarget<X86Subtarget>();
19222 unsigned NativeWidth = Subtarget.is64Bit() ? 64 : 32;
19223 const Type *MemType = AI->getType();
19224 // Accesses larger than the native width are turned into cmpxchg/libcalls, so
19225 // there is no benefit in turning such RMWs into loads, and it is actually
19226 // harmful as it introduces a mfence.
19227 if (MemType->getPrimitiveSizeInBits() > NativeWidth)
19230 auto Builder = IRBuilder<>(AI);
19231 Module *M = Builder.GetInsertBlock()->getParent()->getParent();
19232 auto SynchScope = AI->getSynchScope();
19233 // We must restrict the ordering to avoid generating loads with Release or
19234 // ReleaseAcquire orderings.
19235 auto Order = AtomicCmpXchgInst::getStrongestFailureOrdering(AI->getOrdering());
19236 auto Ptr = AI->getPointerOperand();
19238 // Before the load we need a fence. Here is an example lifted from
19239 // http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf showing why a fence
19240 // is required:
19241 // Thread 0:
19242 // x.store(1, relaxed);
19243 // r1 = y.fetch_add(0, release);
19244 // Thread 1:
19245 // y.fetch_add(42, acquire);
19246 // r2 = x.load(relaxed);
19246 // r2 = x.load(relaxed);
19247 // r1 = r2 = 0 is impossible, but becomes possible if the idempotent rmw is
19248 // lowered to just a load without a fence. An mfence flushes the store buffer,
19249 // making the optimization clearly correct.
19250 // FIXME: it is required if isAtLeastRelease(Order), but it is not clear
19251 // otherwise; we might be able to be more aggressive on relaxed idempotent
19252 // rmw. In practice, they do not look useful, so we don't try to be
19253 // especially clever.
19254 if (SynchScope == SingleThread) {
19255 // FIXME: we could just insert an X86ISD::MEMBARRIER here, except we are at
19256 // the IR level, so we must wrap it in an intrinsic.
19257 return nullptr;
19258 } else if (hasMFENCE(Subtarget)) {
19259 Function *MFence = llvm::Intrinsic::getDeclaration(M,
19260 Intrinsic::x86_sse2_mfence);
19261 Builder.CreateCall(MFence);
19262 } else {
19263 // FIXME: it might make sense to use a locked operation here but on a
19264 // different cache-line to prevent cache-line bouncing. In practice it
19265 // is probably a small win, and x86 processors without mfence are rare
19266 // enough that we do not bother.
19267 return nullptr;
19268 }
19270 // Finally we can emit the atomic load.
19271 LoadInst *Loaded = Builder.CreateAlignedLoad(Ptr,
19272 AI->getType()->getPrimitiveSizeInBits());
19273 Loaded->setAtomic(Order, SynchScope);
19274 AI->replaceAllUsesWith(Loaded);
19275 AI->eraseFromParent();
19277 return Loaded;
19278 }
19279 static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget *Subtarget,
19280 SelectionDAG &DAG) {
19281 SDLoc dl(Op);
19282 AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>(
19283 cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue());
19284 SynchronizationScope FenceScope = static_cast<SynchronizationScope>(
19285 cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());
19287 // The only fence that needs an instruction is a sequentially-consistent
19288 // cross-thread fence.
19289 if (FenceOrdering == SequentiallyConsistent && FenceScope == CrossThread) {
19290 if (hasMFENCE(*Subtarget))
19291 return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0));
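// Without MFENCE available, a LOCK'ed OR of zero into the top of the stack
// (emitted below) acts as a full memory barrier, since locked instructions
// serialize prior loads and stores on x86.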
19293 SDValue Chain = Op.getOperand(0);
19294 SDValue Zero = DAG.getConstant(0, MVT::i32);
19295 SDValue Ops[] = {
19296 DAG.getRegister(X86::ESP, MVT::i32), // Base
19297 DAG.getTargetConstant(1, MVT::i8), // Scale
19298 DAG.getRegister(0, MVT::i32), // Index
19299 DAG.getTargetConstant(0, MVT::i32), // Disp
19300 DAG.getRegister(0, MVT::i32), // Segment.
19301 Zero,
19302 Chain
19303 };
19304 SDNode *Res = DAG.getMachineNode(X86::OR32mrLocked, dl, MVT::Other, Ops);
19305 return SDValue(Res, 0);
19308 // MEMBARRIER is a compiler barrier; it codegens to a no-op.
19309 return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0));
19312 static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget *Subtarget,
19313 SelectionDAG &DAG) {
19314 MVT T = Op.getSimpleValueType();
19315 SDLoc DL(Op);
19316 unsigned Reg = 0;
19317 unsigned size = 0;
19318 switch(T.SimpleTy) {
19319 default: llvm_unreachable("Invalid value type!");
19320 case MVT::i8: Reg = X86::AL; size = 1; break;
19321 case MVT::i16: Reg = X86::AX; size = 2; break;
19322 case MVT::i32: Reg = X86::EAX; size = 4; break;
19323 case MVT::i64:
19324 assert(Subtarget->is64Bit() && "Node not type legal!");
19325 Reg = X86::RAX; size = 8;
19326 break;
19327 }
19328 SDValue cpIn = DAG.getCopyToReg(Op.getOperand(0), DL, Reg,
19329 Op.getOperand(2), SDValue());
19330 SDValue Ops[] = { cpIn.getValue(0),
19331 Op.getOperand(1),
19332 Op.getOperand(3),
19333 DAG.getTargetConstant(size, MVT::i8),
19334 cpIn.getValue(1) };
19334 cpIn.getValue(1) };
19335 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
19336 MachineMemOperand *MMO = cast<AtomicSDNode>(Op)->getMemOperand();
19337 SDValue Result = DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG_DAG, DL, Tys,
19338 Ops, T, MMO);
19340 SDValue cpOut =
19341 DAG.getCopyFromReg(Result.getValue(0), DL, Reg, T, Result.getValue(1));
19342 SDValue EFLAGS = DAG.getCopyFromReg(cpOut.getValue(1), DL, X86::EFLAGS,
19343 MVT::i32, cpOut.getValue(2));
19344 SDValue Success = DAG.getNode(X86ISD::SETCC, DL, Op->getValueType(1),
19345 DAG.getConstant(X86::COND_E, MVT::i8), EFLAGS);
19347 DAG.ReplaceAllUsesOfValueWith(Op.getValue(0), cpOut);
19348 DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), Success);
19349 DAG.ReplaceAllUsesOfValueWith(Op.getValue(2), EFLAGS.getValue(1));
19350 return SDValue();
19351 }
19353 static SDValue LowerBITCAST(SDValue Op, const X86Subtarget *Subtarget,
19354 SelectionDAG &DAG) {
19355 MVT SrcVT = Op.getOperand(0).getSimpleValueType();
19356 MVT DstVT = Op.getSimpleValueType();
19358 if (SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8) {
19359 assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
19360 if (DstVT != MVT::f64)
19361 // This conversion needs to be expanded.
19362 return SDValue();
19364 SDValue InVec = Op->getOperand(0);
19366 unsigned NumElts = SrcVT.getVectorNumElements();
19367 EVT SVT = SrcVT.getVectorElementType();
19369 // Widen the input vector in the case of MVT::v2i32.
19370 // Example: from MVT::v2i32 to MVT::v4i32.
19371 SmallVector<SDValue, 16> Elts;
19372 for (unsigned i = 0, e = NumElts; i != e; ++i)
19373 Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SVT, InVec,
19374 DAG.getIntPtrConstant(i)));
19376 // Explicitly mark the extra elements as Undef.
19377 SDValue Undef = DAG.getUNDEF(SVT);
19378 for (unsigned i = NumElts, e = NumElts * 2; i != e; ++i)
19379 Elts.push_back(Undef);
19381 EVT NewVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumElts * 2);
19382 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Elts);
19383 SDValue ToV2F64 = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, BV);
19384 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, ToV2F64,
19385 DAG.getIntPtrConstant(0));
19388 assert(Subtarget->is64Bit() && !Subtarget->hasSSE2() &&
19389 Subtarget->hasMMX() && "Unexpected custom BITCAST");
19390 assert((DstVT == MVT::i64 ||
19391 (DstVT.isVector() && DstVT.getSizeInBits()==64)) &&
19392 "Unexpected custom BITCAST");
19393 // i64 <=> MMX conversions are Legal.
19394 if (SrcVT==MVT::i64 && DstVT.isVector())
19396 if (DstVT==MVT::i64 && SrcVT.isVector())
19398 // MMX <=> MMX conversions are Legal.
19399 if (SrcVT.isVector() && DstVT.isVector())
19401 // All other conversions need to be expanded.
19405 static SDValue LowerCTPOP(SDValue Op, const X86Subtarget *Subtarget,
19406 SelectionDAG &DAG) {
19407 SDNode *Node = Op.getNode();
19410 Op = Op.getOperand(0);
19411 EVT VT = Op.getValueType();
19412 assert((VT.is128BitVector() || VT.is256BitVector()) &&
19413 "CTPOP lowering only implemented for 128/256-bit wide vector types");
19415 unsigned NumElts = VT.getVectorNumElements();
19416 EVT EltVT = VT.getVectorElementType();
19417 unsigned Len = EltVT.getSizeInBits();
19419 // This is the vectorized version of the "best" algorithm from
19420 // http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
19421 // with a minor tweak to use a series of adds + shifts instead of vector
19422 // multiplications. Implemented for the v2i64, v4i64, v4i32, v8i32 types:
19424 // v2i64, v4i64, v4i32 => Only profitable w/ popcnt disabled
19425 // v8i32 => Always profitable
19427 // FIXME: There are a couple of possible improvements:
19429 // 1) Support for i8 and i16 vectors (needs measurements if popcnt enabled).
19430 // 2) Use strategies from http://wm.ite.pl/articles/sse-popcount.html
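// Scalar form of the algorithm vectorized below, for one 32-bit element v:
//   v = v - ((v >> 1) & 0x55555555);
//   v = (v & 0x33333333) + ((v >> 2) & 0x33333333);
//   v = (v + (v >> 4)) & 0x0F0F0F0F;
//   count = (v * 0x01010101) >> 24;  // the multiply is replaced by adds+shifts here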
19432 assert(EltVT.isInteger() && (Len == 32 || Len == 64) && Len % 8 == 0 &&
19433 "CTPOP not implemented for this vector element type.");
19435 // X86 canonicalizes ANDs to vXi64; generate the appropriate bitcasts to avoid
19436 // extra legalization.
19437 bool NeedsBitcast = EltVT == MVT::i32;
19438 MVT BitcastVT = VT.is256BitVector() ? MVT::v4i64 : MVT::v2i64;
19440 SDValue Cst55 = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x55)), EltVT);
19441 SDValue Cst33 = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x33)), EltVT);
19442 SDValue Cst0F = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x0F)), EltVT);
19444 // v = v - ((v >> 1) & 0x55555555...)
19445 SmallVector<SDValue, 8> Ones(NumElts, DAG.getConstant(1, EltVT));
19446 SDValue OnesV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ones);
19447 SDValue Srl = DAG.getNode(ISD::SRL, dl, VT, Op, OnesV);
19448 if (NeedsBitcast)
19449 Srl = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Srl);
19451 SmallVector<SDValue, 8> Mask55(NumElts, Cst55);
19452 SDValue M55 = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Mask55);
19454 M55 = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M55);
19456 SDValue And = DAG.getNode(ISD::AND, dl, Srl.getValueType(), Srl, M55);
19457 if (VT != And.getValueType())
19458 And = DAG.getNode(ISD::BITCAST, dl, VT, And);
19459 SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, Op, And);
19461 // v = (v & 0x33333333...) + ((v >> 2) & 0x33333333...)
19462 SmallVector<SDValue, 8> Mask33(NumElts, Cst33);
19463 SDValue M33 = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Mask33);
19464 SmallVector<SDValue, 8> Twos(NumElts, DAG.getConstant(2, EltVT));
19465 SDValue TwosV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Twos);
19467 Srl = DAG.getNode(ISD::SRL, dl, VT, Sub, TwosV);
19468 if (NeedsBitcast) {
19469 Srl = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Srl);
19470 M33 = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M33);
19471 Sub = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Sub);
19474 SDValue AndRHS = DAG.getNode(ISD::AND, dl, M33.getValueType(), Srl, M33);
19475 SDValue AndLHS = DAG.getNode(ISD::AND, dl, M33.getValueType(), Sub, M33);
19476 if (VT != AndRHS.getValueType()) {
19477 AndRHS = DAG.getNode(ISD::BITCAST, dl, VT, AndRHS);
19478 AndLHS = DAG.getNode(ISD::BITCAST, dl, VT, AndLHS);
19480 SDValue Add = DAG.getNode(ISD::ADD, dl, VT, AndLHS, AndRHS);
19482 // v = (v + (v >> 4)) & 0x0F0F0F0F...
19483 SmallVector<SDValue, 8> Fours(NumElts, DAG.getConstant(4, EltVT));
19484 SDValue FoursV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Fours);
19485 Srl = DAG.getNode(ISD::SRL, dl, VT, Add, FoursV);
19486 Add = DAG.getNode(ISD::ADD, dl, VT, Add, Srl);
19488 SmallVector<SDValue, 8> Mask0F(NumElts, Cst0F);
19489 SDValue M0F = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Mask0F);
19490 if (NeedsBitcast) {
19491 Add = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Add);
19492 M0F = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M0F);
19494 And = DAG.getNode(ISD::AND, dl, M0F.getValueType(), Add, M0F);
19495 if (VT != And.getValueType())
19496 And = DAG.getNode(ISD::BITCAST, dl, VT, And);
19498 // The algorithm mentioned above uses:
19499 // v = (v * 0x01010101...) >> (Len - 8)
19501 // Change it to use vector adds + vector shifts which yield faster results on
19502 // Haswell than using vector integer multiplication.
19504 // For i32 elements:
19505 // v = v + (v >> 8)
19506 // v = v + (v >> 16)
19508 // For i64 elements:
19509 // v = v + (v >> 8)
19510 // v = v + (v >> 16)
19511 // v = v + (v >> 32)
19513 Add = And;
19514 SmallVector<SDValue, 8> Csts;
19515 for (unsigned i = 8; i <= Len/2; i *= 2) {
19516 Csts.assign(NumElts, DAG.getConstant(i, EltVT));
19517 SDValue CstsV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Csts);
19518 Srl = DAG.getNode(ISD::SRL, dl, VT, Add, CstsV);
19519 Add = DAG.getNode(ISD::ADD, dl, VT, Add, Srl);
19523 // The result is in the least significant 6 bits for i32 and 7 bits for i64.
19524 SDValue Cst3F = DAG.getConstant(APInt(Len, Len == 32 ? 0x3F : 0x7F), EltVT);
19525 SmallVector<SDValue, 8> Cst3FV(NumElts, Cst3F);
19526 SDValue M3F = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Cst3FV);
19527 if (NeedsBitcast) {
19528 Add = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Add);
19529 M3F = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M3F);
19531 And = DAG.getNode(ISD::AND, dl, M3F.getValueType(), Add, M3F);
19532 if (VT != And.getValueType())
19533 And = DAG.getNode(ISD::BITCAST, dl, VT, And);
19535 return And;
19536 }
19538 static SDValue LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) {
19539 SDNode *Node = Op.getNode();
19541 EVT T = Node->getValueType(0);
19542 SDValue negOp = DAG.getNode(ISD::SUB, dl, T,
19543 DAG.getConstant(0, T), Node->getOperand(2));
19544 return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, dl,
19545 cast<AtomicSDNode>(Node)->getMemoryVT(),
19546 Node->getOperand(0),
19547 Node->getOperand(1), negOp,
19548 cast<AtomicSDNode>(Node)->getMemOperand(),
19549 cast<AtomicSDNode>(Node)->getOrdering(),
19550 cast<AtomicSDNode>(Node)->getSynchScope());
19553 static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) {
19554 SDNode *Node = Op.getNode();
19556 EVT VT = cast<AtomicSDNode>(Node)->getMemoryVT();
19558 // Convert seq_cst store -> xchg
19559 // Convert wide store -> swap (-> cmpxchg8b/cmpxchg16b)
19560 // FIXME: On 32-bit, store -> fist or movq would be more efficient
19561 // (The only way to get a 16-byte store is cmpxchg16b)
19562 // FIXME: 16-byte ATOMIC_SWAP isn't actually hooked up at the moment.
19563 if (cast<AtomicSDNode>(Node)->getOrdering() == SequentiallyConsistent ||
19564 !DAG.getTargetLoweringInfo().isTypeLegal(VT)) {
19565 SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl,
19566 cast<AtomicSDNode>(Node)->getMemoryVT(),
19567 Node->getOperand(0),
19568 Node->getOperand(1), Node->getOperand(2),
19569 cast<AtomicSDNode>(Node)->getMemOperand(),
19570 cast<AtomicSDNode>(Node)->getOrdering(),
19571 cast<AtomicSDNode>(Node)->getSynchScope());
19572 return Swap.getValue(1);
19574 // Other atomic stores have a simple pattern.
19578 static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {
19579 EVT VT = Op.getNode()->getSimpleValueType(0);
19581 // Let legalize expand this if it isn't a legal type yet.
19582 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
19585 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
19588 bool ExtraOp = false;
19589 switch (Op.getOpcode()) {
19590 default: llvm_unreachable("Invalid code");
19591 case ISD::ADDC: Opc = X86ISD::ADD; break;
19592 case ISD::ADDE: Opc = X86ISD::ADC; ExtraOp = true; break;
19593 case ISD::SUBC: Opc = X86ISD::SUB; break;
19594 case ISD::SUBE: Opc = X86ISD::SBB; ExtraOp = true; break;
19595 }
19597 if (!ExtraOp)
19598 return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0),
19599 Op.getOperand(1));
19600 return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0),
19601 Op.getOperand(1), Op.getOperand(2));
19604 static SDValue LowerFSINCOS(SDValue Op, const X86Subtarget *Subtarget,
19605 SelectionDAG &DAG) {
19606 assert(Subtarget->isTargetDarwin() && Subtarget->is64Bit());
19608 // For MacOSX, we want to call an alternative entry point: __sincos_stret,
19609 // which returns the values as { float, float } (in XMM0) or
19610 // { double, double } (which is returned in XMM0, XMM1).
19612 SDValue Arg = Op.getOperand(0);
19613 EVT ArgVT = Arg.getValueType();
19614 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
19616 TargetLowering::ArgListTy Args;
19617 TargetLowering::ArgListEntry Entry;
19621 Entry.isSExt = false;
19622 Entry.isZExt = false;
19623 Args.push_back(Entry);
19625 bool isF64 = ArgVT == MVT::f64;
19626 // Only optimize x86_64 for now. i386 is a bit messy. For f32,
19627 // the small struct {f32, f32} is returned in (eax, edx). For f64,
19628 // the results are returned via SRet in memory.
19629 const char *LibcallName = isF64 ? "__sincos_stret" : "__sincosf_stret";
19630 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
19631 SDValue Callee = DAG.getExternalSymbol(LibcallName, TLI.getPointerTy());
19633 Type *RetTy = isF64
19634 ? (Type*)StructType::get(ArgTy, ArgTy, nullptr)
19635 : (Type*)VectorType::get(ArgTy, 4);
19637 TargetLowering::CallLoweringInfo CLI(DAG);
19638 CLI.setDebugLoc(dl).setChain(DAG.getEntryNode())
19639 .setCallee(CallingConv::C, RetTy, Callee, std::move(Args), 0);
19641 std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
19643 if (isF64)
19644 // Returned in xmm0 and xmm1.
19645 return CallResult.first;
19647 // Returned in bits 0:31 and 32:63 of xmm0.
19648 SDValue SinVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
19649 CallResult.first, DAG.getIntPtrConstant(0));
19650 SDValue CosVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
19651 CallResult.first, DAG.getIntPtrConstant(1));
19652 SDVTList Tys = DAG.getVTList(ArgVT, ArgVT);
19653 return DAG.getNode(ISD::MERGE_VALUES, dl, Tys, SinVal, CosVal);
19656 /// LowerOperation - Provide custom lowering hooks for some operations.
19658 SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
19659 switch (Op.getOpcode()) {
19660 default: llvm_unreachable("Should not custom lower this!");
19661 case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op,DAG);
19662 case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, Subtarget, DAG);
19663 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
19664 return LowerCMP_SWAP(Op, Subtarget, DAG);
19665 case ISD::CTPOP: return LowerCTPOP(Op, Subtarget, DAG);
19666 case ISD::ATOMIC_LOAD_SUB: return LowerLOAD_SUB(Op,DAG);
19667 case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op,DAG);
19668 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
19669 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
19670 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
19671 case ISD::VSELECT: return LowerVSELECT(Op, DAG);
19672 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
19673 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
19674 case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op,Subtarget,DAG);
19675 case ISD::INSERT_SUBVECTOR: return LowerINSERT_SUBVECTOR(Op, Subtarget,DAG);
19676 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG);
19677 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
19678 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
19679 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
19680 case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG);
19681 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
19682 case ISD::SHL_PARTS:
19683 case ISD::SRA_PARTS:
19684 case ISD::SRL_PARTS: return LowerShiftParts(Op, DAG);
19685 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
19686 case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
19687 case ISD::TRUNCATE: return LowerTRUNCATE(Op, DAG);
19688 case ISD::ZERO_EXTEND: return LowerZERO_EXTEND(Op, Subtarget, DAG);
19689 case ISD::SIGN_EXTEND: return LowerSIGN_EXTEND(Op, Subtarget, DAG);
19690 case ISD::ANY_EXTEND: return LowerANY_EXTEND(Op, Subtarget, DAG);
19691 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG);
19692 case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG);
19693 case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
19694 case ISD::LOAD: return LowerExtendedLoad(Op, Subtarget, DAG);
19695 case ISD::FABS:
19696 case ISD::FNEG: return LowerFABSorFNEG(Op, DAG);
19697 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG);
19698 case ISD::FGETSIGN: return LowerFGETSIGN(Op, DAG);
19699 case ISD::SETCC: return LowerSETCC(Op, DAG);
19700 case ISD::SELECT: return LowerSELECT(Op, DAG);
19701 case ISD::BRCOND: return LowerBRCOND(Op, DAG);
19702 case ISD::JumpTable: return LowerJumpTable(Op, DAG);
19703 case ISD::VASTART: return LowerVASTART(Op, DAG);
19704 case ISD::VAARG: return LowerVAARG(Op, DAG);
19705 case ISD::VACOPY: return LowerVACOPY(Op, Subtarget, DAG);
19706 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, Subtarget, DAG);
19707 case ISD::INTRINSIC_VOID:
19708 case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, Subtarget, DAG);
19709 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
19710 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
19711 case ISD::FRAME_TO_ARGS_OFFSET:
19712 return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
19713 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
19714 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
19715 case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG);
19716 case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG);
19717 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
19718 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
19719 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
19720 case ISD::CTLZ: return LowerCTLZ(Op, DAG);
19721 case ISD::CTLZ_ZERO_UNDEF: return LowerCTLZ_ZERO_UNDEF(Op, DAG);
19722 case ISD::CTTZ: return LowerCTTZ(Op, DAG);
19723 case ISD::MUL: return LowerMUL(Op, Subtarget, DAG);
19724 case ISD::UMUL_LOHI:
19725 case ISD::SMUL_LOHI: return LowerMUL_LOHI(Op, Subtarget, DAG);
19726 case ISD::SRA:
19727 case ISD::SRL:
19728 case ISD::SHL: return LowerShift(Op, Subtarget, DAG);
19729 case ISD::SADDO:
19730 case ISD::UADDO:
19731 case ISD::SSUBO:
19732 case ISD::USUBO:
19733 case ISD::SMULO:
19734 case ISD::UMULO: return LowerXALUO(Op, DAG);
19735 case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, Subtarget,DAG);
19736 case ISD::BITCAST: return LowerBITCAST(Op, Subtarget, DAG);
19737 case ISD::ADDC:
19738 case ISD::ADDE:
19739 case ISD::SUBC:
19740 case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
19741 case ISD::ADD: return LowerADD(Op, DAG);
19742 case ISD::SUB: return LowerSUB(Op, DAG);
19743 case ISD::FSINCOS: return LowerFSINCOS(Op, Subtarget, DAG);
19747 /// ReplaceNodeResults - Replace a node with an illegal result type
19748 /// with a new node built out of custom code.
19749 void X86TargetLowering::ReplaceNodeResults(SDNode *N,
19750 SmallVectorImpl<SDValue>&Results,
19751 SelectionDAG &DAG) const {
19753 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
19754 switch (N->getOpcode()) {
19755 default:
19756 llvm_unreachable("Do not know how to custom type legalize this operation!");
19757 // We might have generated v2f32 FMIN/FMAX operations. Widen them to v4f32.
19758 case X86ISD::FMINC:
19760 case X86ISD::FMAXC:
19761 case X86ISD::FMAX: {
19762 EVT VT = N->getValueType(0);
19763 if (VT != MVT::v2f32)
19764 llvm_unreachable("Unexpected type (!= v2f32) on FMIN/FMAX.");
19765 SDValue UNDEF = DAG.getUNDEF(VT);
19766 SDValue LHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
19767 N->getOperand(0), UNDEF);
19768 SDValue RHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
19769 N->getOperand(1), UNDEF);
19770 Results.push_back(DAG.getNode(N->getOpcode(), dl, MVT::v4f32, LHS, RHS));
19773 case ISD::SIGN_EXTEND_INREG:
19774 case ISD::ADDC:
19775 case ISD::ADDE:
19776 case ISD::SUBC:
19777 case ISD::SUBE:
19778 // We don't want to expand or promote these.
19779 return;
19780 case ISD::SDIV:
19781 case ISD::UDIV:
19782 case ISD::SREM:
19783 case ISD::UREM:
19784 case ISD::SDIVREM:
19785 case ISD::UDIVREM: {
19786 SDValue V = LowerWin64_i128OP(SDValue(N,0), DAG);
19787 Results.push_back(V);
19788 return;
19789 }
19790 case ISD::FP_TO_SINT:
19791 case ISD::FP_TO_UINT: {
19792 bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT;
19794 if (!IsSigned && !isIntegerTypeFTOL(SDValue(N, 0).getValueType()))
19795 return;
19797 std::pair<SDValue,SDValue> Vals =
19798 FP_TO_INTHelper(SDValue(N, 0), DAG, IsSigned, /*IsReplace=*/ true);
19799 SDValue FIST = Vals.first, StackSlot = Vals.second;
19800 if (FIST.getNode()) {
19801 EVT VT = N->getValueType(0);
19802 // Return a load from the stack slot.
19803 if (StackSlot.getNode())
19804 Results.push_back(DAG.getLoad(VT, dl, FIST, StackSlot,
19805 MachinePointerInfo(),
19806 false, false, false, 0));
19807 else
19808 Results.push_back(FIST);
19809 }
19810 return;
19811 }
19812 case ISD::UINT_TO_FP: {
19813 assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
19814 if (N->getOperand(0).getValueType() != MVT::v2i32 ||
19815 N->getValueType(0) != MVT::v2f32)
19817 SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v2i64,
19819 SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL),
19821 SDValue VBias = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2f64, Bias, Bias);
19822 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64, ZExtIn,
19823 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, VBias));
19824 Or = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Or);
19825 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, Or, VBias);
19826 Results.push_back(DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, Sub));
19829 case ISD::FP_ROUND: {
19830 if (!TLI.isTypeLegal(N->getOperand(0).getValueType()))
19832 SDValue V = DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, N->getOperand(0));
19833 Results.push_back(V);
19836 case ISD::INTRINSIC_W_CHAIN: {
19837 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
19839 default : llvm_unreachable("Do not know how to custom type "
19840 "legalize this intrinsic operation!");
19841 case Intrinsic::x86_rdtsc:
19842 return getReadTimeStampCounter(N, dl, X86ISD::RDTSC_DAG, DAG, Subtarget,
19843 Results);
19844 case Intrinsic::x86_rdtscp:
19845 return getReadTimeStampCounter(N, dl, X86ISD::RDTSCP_DAG, DAG, Subtarget,
19846 Results);
19847 case Intrinsic::x86_rdpmc:
19848 return getReadPerformanceCounter(N, dl, DAG, Subtarget, Results);
19849 }
19850 }
19851 case ISD::READCYCLECOUNTER: {
19852 return getReadTimeStampCounter(N, dl, X86ISD::RDTSC_DAG, DAG, Subtarget,
19853 Results);
19854 }
19855 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: {
19856 EVT T = N->getValueType(0);
19857 assert((T == MVT::i64 || T == MVT::i128) && "can only expand cmpxchg pair");
19858 bool Regs64bit = T == MVT::i128;
19859 EVT HalfT = Regs64bit ? MVT::i64 : MVT::i32;
19860 SDValue cpInL, cpInH;
19861 cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
19862 DAG.getConstant(0, HalfT));
19863 cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
19864 DAG.getConstant(1, HalfT));
19865 cpInL = DAG.getCopyToReg(N->getOperand(0), dl,
19866 Regs64bit ? X86::RAX : X86::EAX,
19868 cpInH = DAG.getCopyToReg(cpInL.getValue(0), dl,
19869 Regs64bit ? X86::RDX : X86::EDX,
19870 cpInH, cpInL.getValue(1));
19871 SDValue swapInL, swapInH;
19872 swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
19873 DAG.getConstant(0, HalfT));
19874 swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
19875 DAG.getConstant(1, HalfT));
19876 swapInL = DAG.getCopyToReg(cpInH.getValue(0), dl,
19877 Regs64bit ? X86::RBX : X86::EBX,
19878 swapInL, cpInH.getValue(1));
19879 swapInH = DAG.getCopyToReg(swapInL.getValue(0), dl,
19880 Regs64bit ? X86::RCX : X86::ECX,
19881 swapInH, swapInL.getValue(1));
19882 SDValue Ops[] = { swapInH.getValue(0),
19884 swapInH.getValue(1) };
19885 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
19886 MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
19887 unsigned Opcode = Regs64bit ? X86ISD::LCMPXCHG16_DAG :
19888 X86ISD::LCMPXCHG8_DAG;
19889 SDValue Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, Ops, T, MMO);
19890 SDValue cpOutL = DAG.getCopyFromReg(Result.getValue(0), dl,
19891 Regs64bit ? X86::RAX : X86::EAX,
19892 HalfT, Result.getValue(1));
19893 SDValue cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), dl,
19894 Regs64bit ? X86::RDX : X86::EDX,
19895 HalfT, cpOutL.getValue(2));
19896 SDValue OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0)};
19898 SDValue EFLAGS = DAG.getCopyFromReg(cpOutH.getValue(1), dl, X86::EFLAGS,
19899 MVT::i32, cpOutH.getValue(2));
19901 DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
19902 DAG.getConstant(X86::COND_E, MVT::i8), EFLAGS);
19903 Success = DAG.getZExtOrTrunc(Success, dl, N->getValueType(1));
19905 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, T, OpsF));
19906 Results.push_back(Success);
19907 Results.push_back(EFLAGS.getValue(1));
19910 case ISD::ATOMIC_SWAP:
19911 case ISD::ATOMIC_LOAD_ADD:
19912 case ISD::ATOMIC_LOAD_SUB:
19913 case ISD::ATOMIC_LOAD_AND:
19914 case ISD::ATOMIC_LOAD_OR:
19915 case ISD::ATOMIC_LOAD_XOR:
19916 case ISD::ATOMIC_LOAD_NAND:
19917 case ISD::ATOMIC_LOAD_MIN:
19918 case ISD::ATOMIC_LOAD_MAX:
19919 case ISD::ATOMIC_LOAD_UMIN:
19920 case ISD::ATOMIC_LOAD_UMAX:
19921 case ISD::ATOMIC_LOAD: {
19922 // Delegate to generic TypeLegalization. Situations we can really handle
19923 // should have already been dealt with by AtomicExpandPass.cpp.
19926 case ISD::BITCAST: {
19927 assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
19928 EVT DstVT = N->getValueType(0);
19929 EVT SrcVT = N->getOperand(0)->getValueType(0);
19931 if (SrcVT != MVT::f64 ||
19932 (DstVT != MVT::v2i32 && DstVT != MVT::v4i16 && DstVT != MVT::v8i8))
19935 unsigned NumElts = DstVT.getVectorNumElements();
19936 EVT SVT = DstVT.getVectorElementType();
19937 EVT WiderVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumElts * 2);
19938 SDValue Expanded = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
19939 MVT::v2f64, N->getOperand(0));
19940 SDValue ToVecInt = DAG.getNode(ISD::BITCAST, dl, WiderVT, Expanded);
19942 if (ExperimentalVectorWideningLegalization) {
19943 // If we are legalizing vectors by widening, we already have the desired
19944 // legal vector type, just return it.
19945 Results.push_back(ToVecInt);
19949 SmallVector<SDValue, 8> Elts;
19950 for (unsigned i = 0, e = NumElts; i != e; ++i)
19951 Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SVT,
19952 ToVecInt, DAG.getIntPtrConstant(i)));
19954 Results.push_back(DAG.getNode(ISD::BUILD_VECTOR, dl, DstVT, Elts));
19959 const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
19960 switch (Opcode) {
19961 default: return nullptr;
19962 case X86ISD::BSF: return "X86ISD::BSF";
19963 case X86ISD::BSR: return "X86ISD::BSR";
19964 case X86ISD::SHLD: return "X86ISD::SHLD";
19965 case X86ISD::SHRD: return "X86ISD::SHRD";
19966 case X86ISD::FAND: return "X86ISD::FAND";
19967 case X86ISD::FANDN: return "X86ISD::FANDN";
19968 case X86ISD::FOR: return "X86ISD::FOR";
19969 case X86ISD::FXOR: return "X86ISD::FXOR";
19970 case X86ISD::FSRL: return "X86ISD::FSRL";
19971 case X86ISD::FILD: return "X86ISD::FILD";
19972 case X86ISD::FILD_FLAG: return "X86ISD::FILD_FLAG";
19973 case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM";
19974 case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM";
19975 case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM";
19976 case X86ISD::FLD: return "X86ISD::FLD";
19977 case X86ISD::FST: return "X86ISD::FST";
19978 case X86ISD::CALL: return "X86ISD::CALL";
19979 case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG";
19980 case X86ISD::RDTSCP_DAG: return "X86ISD::RDTSCP_DAG";
19981 case X86ISD::RDPMC_DAG: return "X86ISD::RDPMC_DAG";
19982 case X86ISD::BT: return "X86ISD::BT";
19983 case X86ISD::CMP: return "X86ISD::CMP";
19984 case X86ISD::COMI: return "X86ISD::COMI";
19985 case X86ISD::UCOMI: return "X86ISD::UCOMI";
19986 case X86ISD::CMPM: return "X86ISD::CMPM";
19987 case X86ISD::CMPMU: return "X86ISD::CMPMU";
19988 case X86ISD::SETCC: return "X86ISD::SETCC";
19989 case X86ISD::SETCC_CARRY: return "X86ISD::SETCC_CARRY";
19990 case X86ISD::FSETCC: return "X86ISD::FSETCC";
19991 case X86ISD::CMOV: return "X86ISD::CMOV";
19992 case X86ISD::BRCOND: return "X86ISD::BRCOND";
19993 case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG";
19994 case X86ISD::REP_STOS: return "X86ISD::REP_STOS";
19995 case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS";
19996 case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg";
19997 case X86ISD::Wrapper: return "X86ISD::Wrapper";
19998 case X86ISD::WrapperRIP: return "X86ISD::WrapperRIP";
19999 case X86ISD::PEXTRB: return "X86ISD::PEXTRB";
20000 case X86ISD::PEXTRW: return "X86ISD::PEXTRW";
20001 case X86ISD::INSERTPS: return "X86ISD::INSERTPS";
20002 case X86ISD::PINSRB: return "X86ISD::PINSRB";
20003 case X86ISD::PINSRW: return "X86ISD::PINSRW";
20004 case X86ISD::PSHUFB: return "X86ISD::PSHUFB";
20005 case X86ISD::ANDNP: return "X86ISD::ANDNP";
20006 case X86ISD::PSIGN: return "X86ISD::PSIGN";
20007 case X86ISD::BLENDI: return "X86ISD::BLENDI";
20008 case X86ISD::SHRUNKBLEND: return "X86ISD::SHRUNKBLEND";
20009 case X86ISD::SUBUS: return "X86ISD::SUBUS";
20010 case X86ISD::HADD: return "X86ISD::HADD";
20011 case X86ISD::HSUB: return "X86ISD::HSUB";
20012 case X86ISD::FHADD: return "X86ISD::FHADD";
20013 case X86ISD::FHSUB: return "X86ISD::FHSUB";
20014 case X86ISD::UMAX: return "X86ISD::UMAX";
20015 case X86ISD::UMIN: return "X86ISD::UMIN";
20016 case X86ISD::SMAX: return "X86ISD::SMAX";
20017 case X86ISD::SMIN: return "X86ISD::SMIN";
20018 case X86ISD::FMAX: return "X86ISD::FMAX";
20019 case X86ISD::FMIN: return "X86ISD::FMIN";
20020 case X86ISD::FMAXC: return "X86ISD::FMAXC";
20021 case X86ISD::FMINC: return "X86ISD::FMINC";
20022 case X86ISD::FRSQRT: return "X86ISD::FRSQRT";
20023 case X86ISD::FRCP: return "X86ISD::FRCP";
20024 case X86ISD::TLSADDR: return "X86ISD::TLSADDR";
20025 case X86ISD::TLSBASEADDR: return "X86ISD::TLSBASEADDR";
20026 case X86ISD::TLSCALL: return "X86ISD::TLSCALL";
20027 case X86ISD::EH_SJLJ_SETJMP: return "X86ISD::EH_SJLJ_SETJMP";
20028 case X86ISD::EH_SJLJ_LONGJMP: return "X86ISD::EH_SJLJ_LONGJMP";
20029 case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN";
20030 case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN";
20031 case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m";
20032 case X86ISD::FNSTSW16r: return "X86ISD::FNSTSW16r";
20033 case X86ISD::LCMPXCHG_DAG: return "X86ISD::LCMPXCHG_DAG";
20034 case X86ISD::LCMPXCHG8_DAG: return "X86ISD::LCMPXCHG8_DAG";
20035 case X86ISD::LCMPXCHG16_DAG: return "X86ISD::LCMPXCHG16_DAG";
20036 case X86ISD::VZEXT_MOVL: return "X86ISD::VZEXT_MOVL";
20037 case X86ISD::VZEXT_LOAD: return "X86ISD::VZEXT_LOAD";
20038 case X86ISD::VZEXT: return "X86ISD::VZEXT";
20039 case X86ISD::VSEXT: return "X86ISD::VSEXT";
20040 case X86ISD::VTRUNC: return "X86ISD::VTRUNC";
20041 case X86ISD::VTRUNCM: return "X86ISD::VTRUNCM";
20042 case X86ISD::VINSERT: return "X86ISD::VINSERT";
20043 case X86ISD::VFPEXT: return "X86ISD::VFPEXT";
20044 case X86ISD::VFPROUND: return "X86ISD::VFPROUND";
20045 case X86ISD::VSHLDQ: return "X86ISD::VSHLDQ";
20046 case X86ISD::VSRLDQ: return "X86ISD::VSRLDQ";
20047 case X86ISD::VSHL: return "X86ISD::VSHL";
20048 case X86ISD::VSRL: return "X86ISD::VSRL";
20049 case X86ISD::VSRA: return "X86ISD::VSRA";
20050 case X86ISD::VSHLI: return "X86ISD::VSHLI";
20051 case X86ISD::VSRLI: return "X86ISD::VSRLI";
20052 case X86ISD::VSRAI: return "X86ISD::VSRAI";
20053 case X86ISD::CMPP: return "X86ISD::CMPP";
20054 case X86ISD::PCMPEQ: return "X86ISD::PCMPEQ";
20055 case X86ISD::PCMPGT: return "X86ISD::PCMPGT";
20056 case X86ISD::PCMPEQM: return "X86ISD::PCMPEQM";
20057 case X86ISD::PCMPGTM: return "X86ISD::PCMPGTM";
20058 case X86ISD::ADD: return "X86ISD::ADD";
20059 case X86ISD::SUB: return "X86ISD::SUB";
20060 case X86ISD::ADC: return "X86ISD::ADC";
20061 case X86ISD::SBB: return "X86ISD::SBB";
20062 case X86ISD::SMUL: return "X86ISD::SMUL";
20063 case X86ISD::UMUL: return "X86ISD::UMUL";
20064 case X86ISD::SMUL8: return "X86ISD::SMUL8";
20065 case X86ISD::UMUL8: return "X86ISD::UMUL8";
20066 case X86ISD::SDIVREM8_SEXT_HREG: return "X86ISD::SDIVREM8_SEXT_HREG";
20067 case X86ISD::UDIVREM8_ZEXT_HREG: return "X86ISD::UDIVREM8_ZEXT_HREG";
20068 case X86ISD::INC: return "X86ISD::INC";
20069 case X86ISD::DEC: return "X86ISD::DEC";
20070 case X86ISD::OR: return "X86ISD::OR";
20071 case X86ISD::XOR: return "X86ISD::XOR";
20072 case X86ISD::AND: return "X86ISD::AND";
20073 case X86ISD::BEXTR: return "X86ISD::BEXTR";
20074 case X86ISD::MUL_IMM: return "X86ISD::MUL_IMM";
20075 case X86ISD::PTEST: return "X86ISD::PTEST";
20076 case X86ISD::TESTP: return "X86ISD::TESTP";
20077 case X86ISD::TESTM: return "X86ISD::TESTM";
20078 case X86ISD::TESTNM: return "X86ISD::TESTNM";
20079 case X86ISD::KORTEST: return "X86ISD::KORTEST";
20080 case X86ISD::PACKSS: return "X86ISD::PACKSS";
20081 case X86ISD::PACKUS: return "X86ISD::PACKUS";
20082 case X86ISD::PALIGNR: return "X86ISD::PALIGNR";
20083 case X86ISD::VALIGN: return "X86ISD::VALIGN";
20084 case X86ISD::PSHUFD: return "X86ISD::PSHUFD";
20085 case X86ISD::PSHUFHW: return "X86ISD::PSHUFHW";
20086 case X86ISD::PSHUFLW: return "X86ISD::PSHUFLW";
20087 case X86ISD::SHUFP: return "X86ISD::SHUFP";
20088 case X86ISD::MOVLHPS: return "X86ISD::MOVLHPS";
20089 case X86ISD::MOVLHPD: return "X86ISD::MOVLHPD";
20090 case X86ISD::MOVHLPS: return "X86ISD::MOVHLPS";
20091 case X86ISD::MOVLPS: return "X86ISD::MOVLPS";
20092 case X86ISD::MOVLPD: return "X86ISD::MOVLPD";
20093 case X86ISD::MOVDDUP: return "X86ISD::MOVDDUP";
20094 case X86ISD::MOVSHDUP: return "X86ISD::MOVSHDUP";
20095 case X86ISD::MOVSLDUP: return "X86ISD::MOVSLDUP";
20096 case X86ISD::MOVSD: return "X86ISD::MOVSD";
20097 case X86ISD::MOVSS: return "X86ISD::MOVSS";
20098 case X86ISD::UNPCKL: return "X86ISD::UNPCKL";
20099 case X86ISD::UNPCKH: return "X86ISD::UNPCKH";
20100 case X86ISD::VBROADCAST: return "X86ISD::VBROADCAST";
20101 case X86ISD::VBROADCASTM: return "X86ISD::VBROADCASTM";
20102 case X86ISD::VEXTRACT: return "X86ISD::VEXTRACT";
20103 case X86ISD::VPERMILPI: return "X86ISD::VPERMILPI";
20104 case X86ISD::VPERM2X128: return "X86ISD::VPERM2X128";
20105 case X86ISD::VPERMV: return "X86ISD::VPERMV";
20106 case X86ISD::VPERMV3: return "X86ISD::VPERMV3";
20107 case X86ISD::VPERMIV3: return "X86ISD::VPERMIV3";
20108 case X86ISD::VPERMI: return "X86ISD::VPERMI";
20109 case X86ISD::PMULUDQ: return "X86ISD::PMULUDQ";
20110 case X86ISD::PMULDQ: return "X86ISD::PMULDQ";
20111 case X86ISD::VASTART_SAVE_XMM_REGS: return "X86ISD::VASTART_SAVE_XMM_REGS";
20112 case X86ISD::VAARG_64: return "X86ISD::VAARG_64";
20113 case X86ISD::WIN_ALLOCA: return "X86ISD::WIN_ALLOCA";
20114 case X86ISD::MEMBARRIER: return "X86ISD::MEMBARRIER";
20115 case X86ISD::SEG_ALLOCA: return "X86ISD::SEG_ALLOCA";
20116 case X86ISD::WIN_FTOL: return "X86ISD::WIN_FTOL";
20117 case X86ISD::SAHF: return "X86ISD::SAHF";
20118 case X86ISD::RDRAND: return "X86ISD::RDRAND";
20119 case X86ISD::RDSEED: return "X86ISD::RDSEED";
20120 case X86ISD::FMADD: return "X86ISD::FMADD";
20121 case X86ISD::FMSUB: return "X86ISD::FMSUB";
20122 case X86ISD::FNMADD: return "X86ISD::FNMADD";
20123 case X86ISD::FNMSUB: return "X86ISD::FNMSUB";
20124 case X86ISD::FMADDSUB: return "X86ISD::FMADDSUB";
20125 case X86ISD::FMSUBADD: return "X86ISD::FMSUBADD";
20126 case X86ISD::PCMPESTRI: return "X86ISD::PCMPESTRI";
20127 case X86ISD::PCMPISTRI: return "X86ISD::PCMPISTRI";
20128 case X86ISD::XTEST: return "X86ISD::XTEST";
20129 case X86ISD::COMPRESS: return "X86ISD::COMPRESS";
20130 case X86ISD::EXPAND: return "X86ISD::EXPAND";
20131 case X86ISD::SELECT: return "X86ISD::SELECT";
20135 // isLegalAddressingMode - Return true if the addressing mode represented
20136 // by AM is legal for this target, for a load/store of the specified type.
20137 bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM,
20138 Type *Ty) const {
20139 // X86 supports extremely general addressing modes.
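// That is, [base + scale*index + disp32], optionally with a GlobalValue base.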
20140 CodeModel::Model M = getTargetMachine().getCodeModel();
20141 Reloc::Model R = getTargetMachine().getRelocationModel();
20143 // X86 allows a sign-extended 32-bit immediate field as a displacement.
20144 if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != nullptr))
20145 return false;
20147 if (AM.BaseGV) {
20148 unsigned GVFlags =
20149 Subtarget->ClassifyGlobalReference(AM.BaseGV, getTargetMachine());
20151 // If a reference to this global requires an extra load, we can't fold it.
20152 if (isGlobalStubReference(GVFlags))
20153 return false;
20155 // If BaseGV requires a register for the PIC base, we cannot also have a
20156 // BaseReg specified.
20157 if (AM.HasBaseReg && isGlobalRelativeToPICBase(GVFlags))
20160 // If lower 4G is not available, then we must use rip-relative addressing.
20161 if ((M != CodeModel::Small || R != Reloc::Static) &&
20162 Subtarget->is64Bit() && (AM.BaseOffs || AM.Scale > 1))
20166 switch (AM.Scale) {
20172 // These scales always work.
// These scales are formed with basereg+scalereg. Only accept if there is
// no basereg yet.
20182 default: // Other stuff never works.
20189 bool X86TargetLowering::isVectorShiftByScalarCheap(Type *Ty) const {
20190 unsigned Bits = Ty->getScalarSizeInBits();
// 8-bit shifts are always expensive, and versions with a scalar amount aren't
// appreciably cheaper than those without.
20197 // On AVX2 there are new vpsllv[dq] instructions (and other shifts), that make
20198 // variable shifts just as cheap as scalar ones.
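// For example (illustrative), a per-element variable shift on AVX2:
//   vpsllvd %ymm1, %ymm0, %ymm0   ; each i32 lane shifted by its own amount
// versus the pre-AVX2 situation, where only a single scalar count is cheap:
//   pslld   %xmm1, %xmm0          ; all lanes shifted by the same amount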
20199 if (Subtarget->hasInt256() && (Bits == 32 || Bits == 64))
20202 // Otherwise, it's significantly cheaper to shift by a scalar amount than by a
20203 // fully general vector.
20207 bool X86TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
20208 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
20210 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
20211 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
20212 return NumBits1 > NumBits2;
20215 bool X86TargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
20216 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
20219 if (!isTypeLegal(EVT::getEVT(Ty1)))
20222 assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop");
20224 // Assuming the caller doesn't have a zeroext or signext return parameter,
20225 // truncation all the way down to i1 is valid.
20229 bool X86TargetLowering::isLegalICmpImmediate(int64_t Imm) const {
20230 return isInt<32>(Imm);
20233 bool X86TargetLowering::isLegalAddImmediate(int64_t Imm) const {
20234 // Can also use sub to handle negated immediates.
20235 return isInt<32>(Imm);
20238 bool X86TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
20239 if (!VT1.isInteger() || !VT2.isInteger())
20241 unsigned NumBits1 = VT1.getSizeInBits();
20242 unsigned NumBits2 = VT2.getSizeInBits();
20243 return NumBits1 > NumBits2;
20246 bool X86TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
20247 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
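// For example, "movl %edi, %eax" already clears bits 63:32 of %rax, so a
// separate zext instruction would be redundant (illustrative).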
20248 return Ty1->isIntegerTy(32) && Ty2->isIntegerTy(64) && Subtarget->is64Bit();
20251 bool X86TargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
20252 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
20253 return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget->is64Bit();
20256 bool X86TargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
20257 EVT VT1 = Val.getValueType();
20258 if (isZExtFree(VT1, VT2))
20261 if (Val.getOpcode() != ISD::LOAD)
20264 if (!VT1.isSimple() || !VT1.isInteger() ||
20265 !VT2.isSimple() || !VT2.isInteger())
20268 switch (VT1.getSimpleVT().SimpleTy) {
20273 // X86 has 8, 16, and 32-bit zero-extending loads.
20281 X86TargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
20282 if (!(Subtarget->hasFMA() || Subtarget->hasFMA4()))
20285 VT = VT.getScalarType();
20287 if (!VT.isSimple())
20290 switch (VT.getSimpleVT().SimpleTy) {
20301 bool X86TargetLowering::isNarrowingProfitable(EVT VT1, EVT VT2) const {
20302 // i16 instructions are longer (0x66 prefix) and potentially slower.
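// For example, "addw $1, %ax" needs a 0x66 operand-size prefix that the
// 32-bit form "addl $1, %eax" does not (illustrative).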
20303 return !(VT1 == MVT::i32 && VT2 == MVT::i16);
20306 /// isShuffleMaskLegal - Targets can use this to indicate that they only
20307 /// support *some* VECTOR_SHUFFLE operations, those with specific masks.
20308 /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
20309 /// are assumed to be legal.
20311 X86TargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M,
20313 if (!VT.isSimple())
20316 MVT SVT = VT.getSimpleVT();
20318 // Very little shuffling can be done for 64-bit vectors right now.
20319 if (VT.getSizeInBits() == 64)
20322 // This is an experimental legality test that is tailored to match the
20323 // legality test of the experimental lowering more closely. They are gated
20324 // separately to ease testing of performance differences.
20325 if (ExperimentalVectorShuffleLegality)
20326 // We only care that the types being shuffled are legal. The lowering can
20327 // handle any possible shuffle mask that results.
20328 return isTypeLegal(SVT);
// If this is a single-input shuffle with no 128-bit lane crossings we can
20331 // lower it into pshufb.
20332 if ((SVT.is128BitVector() && Subtarget->hasSSSE3()) ||
20333 (SVT.is256BitVector() && Subtarget->hasInt256())) {
20334 bool isLegal = true;
20335 for (unsigned I = 0, E = M.size(); I != E; ++I) {
20336 if (M[I] >= (int)SVT.getVectorNumElements() ||
20337 ShuffleCrosses128bitLane(SVT, I, M[I])) {
20346 // FIXME: blends, shifts.
20347 return (SVT.getVectorNumElements() == 2 ||
20348 ShuffleVectorSDNode::isSplatMask(&M[0], VT) ||
20349 isMOVLMask(M, SVT) ||
20350 isCommutedMOVLMask(M, SVT) ||
20351 isMOVHLPSMask(M, SVT) ||
20352 isSHUFPMask(M, SVT) ||
20353 isSHUFPMask(M, SVT, /* Commuted */ true) ||
20354 isPSHUFDMask(M, SVT) ||
20355 isPSHUFDMask(M, SVT, /* SecondOperand */ true) ||
20356 isPSHUFHWMask(M, SVT, Subtarget->hasInt256()) ||
20357 isPSHUFLWMask(M, SVT, Subtarget->hasInt256()) ||
20358 isPALIGNRMask(M, SVT, Subtarget) ||
20359 isUNPCKLMask(M, SVT, Subtarget->hasInt256()) ||
20360 isUNPCKHMask(M, SVT, Subtarget->hasInt256()) ||
20361 isUNPCKL_v_undef_Mask(M, SVT, Subtarget->hasInt256()) ||
20362 isUNPCKH_v_undef_Mask(M, SVT, Subtarget->hasInt256()) ||
20363 isBlendMask(M, SVT, Subtarget->hasSSE41(), Subtarget->hasInt256()) ||
20364 (Subtarget->hasSSE41() && isINSERTPSMask(M, SVT)));
20368 X86TargetLowering::isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask,
20370 if (!VT.isSimple())
20373 MVT SVT = VT.getSimpleVT();
20375 // This is an experimental legality test that is tailored to match the
20376 // legality test of the experimental lowering more closely. They are gated
20377 // separately to ease testing of performance differences.
20378 if (ExperimentalVectorShuffleLegality)
20379 // The new vector shuffle lowering is very good at managing zero-inputs.
20380 return isShuffleMaskLegal(Mask, VT);
20382 unsigned NumElts = SVT.getVectorNumElements();
20383 // FIXME: This collection of masks seems suspect.
20386 if (NumElts == 4 && SVT.is128BitVector()) {
20387 return (isMOVLMask(Mask, SVT) ||
20388 isCommutedMOVLMask(Mask, SVT, true) ||
20389 isSHUFPMask(Mask, SVT) ||
20390 isSHUFPMask(Mask, SVT, /* Commuted */ true) ||
20391 isBlendMask(Mask, SVT, Subtarget->hasSSE41(),
20392 Subtarget->hasInt256()));
20397 //===----------------------------------------------------------------------===//
20398 // X86 Scheduler Hooks
20399 //===----------------------------------------------------------------------===//
20401 /// Utility function to emit xbegin specifying the start of an RTM region.
20402 static MachineBasicBlock *EmitXBegin(MachineInstr *MI, MachineBasicBlock *MBB,
20403 const TargetInstrInfo *TII) {
20404 DebugLoc DL = MI->getDebugLoc();
20406 const BasicBlock *BB = MBB->getBasicBlock();
20407 MachineFunction::iterator I = MBB;
// For the v = xbegin() pseudo, we generate:
//
//  thisMBB:
//    xbegin sinkMBB
//
//  mainMBB:
//    eax = -1
//
//  sinkMBB:
//    v = eax
20421 MachineBasicBlock *thisMBB = MBB;
20422 MachineFunction *MF = MBB->getParent();
20423 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
20424 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
20425 MF->insert(I, mainMBB);
20426 MF->insert(I, sinkMBB);
20428 // Transfer the remainder of BB and its successor edges to sinkMBB.
20429 sinkMBB->splice(sinkMBB->begin(), MBB,
20430 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
20431 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
// thisMBB:
//  xbegin sinkMBB
//  # fallthrough to mainMBB
//  # abort path to sinkMBB
20437 BuildMI(thisMBB, DL, TII->get(X86::XBEGIN_4)).addMBB(sinkMBB);
20438 thisMBB->addSuccessor(mainMBB);
20439 thisMBB->addSuccessor(sinkMBB);
20443 BuildMI(mainMBB, DL, TII->get(X86::MOV32ri), X86::EAX).addImm(-1);
20444 mainMBB->addSuccessor(sinkMBB);
20447 // EAX is live into the sinkMBB
20448 sinkMBB->addLiveIn(X86::EAX);
20449 BuildMI(*sinkMBB, sinkMBB->begin(), DL,
20450 TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
20453 MI->eraseFromParent();
// FIXME: When we get size-specific XMM0 registers, i.e. XMM0_V16I8 or
// XMM0_V32I8 in AVX, all of this code can be replaced with that in the
// .td file.
20460 static MachineBasicBlock *EmitPCMPSTRM(MachineInstr *MI, MachineBasicBlock *BB,
20461 const TargetInstrInfo *TII) {
20463 switch (MI->getOpcode()) {
20464 default: llvm_unreachable("illegal opcode!");
20465 case X86::PCMPISTRM128REG: Opc = X86::PCMPISTRM128rr; break;
20466 case X86::VPCMPISTRM128REG: Opc = X86::VPCMPISTRM128rr; break;
20467 case X86::PCMPISTRM128MEM: Opc = X86::PCMPISTRM128rm; break;
20468 case X86::VPCMPISTRM128MEM: Opc = X86::VPCMPISTRM128rm; break;
20469 case X86::PCMPESTRM128REG: Opc = X86::PCMPESTRM128rr; break;
20470 case X86::VPCMPESTRM128REG: Opc = X86::VPCMPESTRM128rr; break;
20471 case X86::PCMPESTRM128MEM: Opc = X86::PCMPESTRM128rm; break;
20472 case X86::VPCMPESTRM128MEM: Opc = X86::VPCMPESTRM128rm; break;
20475 DebugLoc dl = MI->getDebugLoc();
20476 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc));
20478 unsigned NumArgs = MI->getNumOperands();
20479 for (unsigned i = 1; i < NumArgs; ++i) {
20480 MachineOperand &Op = MI->getOperand(i);
20481 if (!(Op.isReg() && Op.isImplicit()))
20482 MIB.addOperand(Op);
20484 if (MI->hasOneMemOperand())
20485 MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
20487 BuildMI(*BB, MI, dl,
20488 TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
20489 .addReg(X86::XMM0);
20491 MI->eraseFromParent();
20495 // FIXME: Custom handling because TableGen doesn't support multiple implicit
20496 // defs in an instruction pattern
20497 static MachineBasicBlock *EmitPCMPSTRI(MachineInstr *MI, MachineBasicBlock *BB,
20498 const TargetInstrInfo *TII) {
20500 switch (MI->getOpcode()) {
20501 default: llvm_unreachable("illegal opcode!");
20502 case X86::PCMPISTRIREG: Opc = X86::PCMPISTRIrr; break;
20503 case X86::VPCMPISTRIREG: Opc = X86::VPCMPISTRIrr; break;
20504 case X86::PCMPISTRIMEM: Opc = X86::PCMPISTRIrm; break;
20505 case X86::VPCMPISTRIMEM: Opc = X86::VPCMPISTRIrm; break;
20506 case X86::PCMPESTRIREG: Opc = X86::PCMPESTRIrr; break;
20507 case X86::VPCMPESTRIREG: Opc = X86::VPCMPESTRIrr; break;
20508 case X86::PCMPESTRIMEM: Opc = X86::PCMPESTRIrm; break;
20509 case X86::VPCMPESTRIMEM: Opc = X86::VPCMPESTRIrm; break;
20512 DebugLoc dl = MI->getDebugLoc();
20513 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc));
unsigned NumArgs = MI->getNumOperands(); // operand 0 (the result) and implicit operands are skipped below
20516 for (unsigned i = 1; i < NumArgs; ++i) {
20517 MachineOperand &Op = MI->getOperand(i);
20518 if (!(Op.isReg() && Op.isImplicit()))
20519 MIB.addOperand(Op);
20521 if (MI->hasOneMemOperand())
20522 MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
20524 BuildMI(*BB, MI, dl,
20525 TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
20528 MI->eraseFromParent();
20532 static MachineBasicBlock * EmitMonitor(MachineInstr *MI, MachineBasicBlock *BB,
20533 const TargetInstrInfo *TII,
20534 const X86Subtarget* Subtarget) {
20535 DebugLoc dl = MI->getDebugLoc();
20537 // Address into RAX/EAX, other two args into ECX, EDX.
20538 unsigned MemOpc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r;
20539 unsigned MemReg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
20540 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(MemOpc), MemReg);
20541 for (int i = 0; i < X86::AddrNumOperands; ++i)
20542 MIB.addOperand(MI->getOperand(i));
20544 unsigned ValOps = X86::AddrNumOperands;
20545 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::ECX)
20546 .addReg(MI->getOperand(ValOps).getReg());
20547 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::EDX)
20548 .addReg(MI->getOperand(ValOps+1).getReg());
// MONITOR itself takes no explicit operands; it reads RAX/EAX, ECX and EDX
// implicitly.
20551 BuildMI(*BB, MI, dl, TII->get(X86::MONITORrrr));
20553 MI->eraseFromParent(); // The pseudo is gone now.
20557 MachineBasicBlock *
20558 X86TargetLowering::EmitVAARG64WithCustomInserter(
20560 MachineBasicBlock *MBB) const {
20561 // Emit va_arg instruction on X86-64.
20563 // Operands to this pseudo-instruction:
20564 // 0 ) Output : destination address (reg)
20565 // 1-5) Input : va_list address (addr, i64mem)
20566 // 6 ) ArgSize : Size (in bytes) of vararg type
20567 // 7 ) ArgMode : 0=overflow only, 1=use gp_offset, 2=use fp_offset
20568 // 8 ) Align : Alignment of type
20569 // 9 ) EFLAGS (implicit-def)
20571 assert(MI->getNumOperands() == 10 && "VAARG_64 should have 10 operands!");
20572 assert(X86::AddrNumOperands == 5 && "VAARG_64 assumes 5 address operands");
20574 unsigned DestReg = MI->getOperand(0).getReg();
20575 MachineOperand &Base = MI->getOperand(1);
20576 MachineOperand &Scale = MI->getOperand(2);
20577 MachineOperand &Index = MI->getOperand(3);
20578 MachineOperand &Disp = MI->getOperand(4);
20579 MachineOperand &Segment = MI->getOperand(5);
20580 unsigned ArgSize = MI->getOperand(6).getImm();
20581 unsigned ArgMode = MI->getOperand(7).getImm();
20582 unsigned Align = MI->getOperand(8).getImm();
20584 // Memory Reference
20585 assert(MI->hasOneMemOperand() && "Expected VAARG_64 to have one memoperand");
20586 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
20587 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
20589 // Machine Information
20590 const TargetInstrInfo *TII = MBB->getParent()->getSubtarget().getInstrInfo();
20591 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
20592 const TargetRegisterClass *AddrRegClass = getRegClassFor(MVT::i64);
20593 const TargetRegisterClass *OffsetRegClass = getRegClassFor(MVT::i32);
20594 DebugLoc DL = MI->getDebugLoc();
// struct va_list {
//   i32   gp_offset
//   i32   fp_offset
//   i64   overflow_area (address)
//   i64   reg_save_area (address)
// }
// sizeof(va_list) = 24
// alignment(va_list) = 8
20605 unsigned TotalNumIntRegs = 6;
20606 unsigned TotalNumXMMRegs = 8;
20607 bool UseGPOffset = (ArgMode == 1);
20608 bool UseFPOffset = (ArgMode == 2);
20609 unsigned MaxOffset = TotalNumIntRegs * 8 +
20610 (UseFPOffset ? TotalNumXMMRegs * 16 : 0);
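// Worked example (illustrative): for an FP argument (ArgMode == 2) this gives
// MaxOffset = 6*8 + 8*16 = 176, the size of the whole register save area;
// for a GP argument it is just 6*8 = 48.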
// Align ArgSize to a multiple of 8.
20613 unsigned ArgSizeA8 = (ArgSize + 7) & ~7;
20614 bool NeedsAlign = (Align > 8);
20616 MachineBasicBlock *thisMBB = MBB;
20617 MachineBasicBlock *overflowMBB;
20618 MachineBasicBlock *offsetMBB;
20619 MachineBasicBlock *endMBB;
20621 unsigned OffsetDestReg = 0; // Argument address computed by offsetMBB
20622 unsigned OverflowDestReg = 0; // Argument address computed by overflowMBB
20623 unsigned OffsetReg = 0;
20625 if (!UseGPOffset && !UseFPOffset) {
20626 // If we only pull from the overflow region, we don't create a branch.
20627 // We don't need to alter control flow.
20628 OffsetDestReg = 0; // unused
20629 OverflowDestReg = DestReg;
20631 offsetMBB = nullptr;
20632 overflowMBB = thisMBB;
// First emit code to check if gp_offset (or fp_offset) is below the bound.
// If so, pull the argument from reg_save_area. (branch to offsetMBB)
// If not, pull from overflow_area. (branch to overflowMBB)
//
//       thisMBB
//       /     \
//  offsetMBB  overflowMBB
//       \     /
//        endMBB
20647 // Registers for the PHI in endMBB
20648 OffsetDestReg = MRI.createVirtualRegister(AddrRegClass);
20649 OverflowDestReg = MRI.createVirtualRegister(AddrRegClass);
20651 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
20652 MachineFunction *MF = MBB->getParent();
20653 overflowMBB = MF->CreateMachineBasicBlock(LLVM_BB);
20654 offsetMBB = MF->CreateMachineBasicBlock(LLVM_BB);
20655 endMBB = MF->CreateMachineBasicBlock(LLVM_BB);
20657 MachineFunction::iterator MBBIter = MBB;
20660 // Insert the new basic blocks
20661 MF->insert(MBBIter, offsetMBB);
20662 MF->insert(MBBIter, overflowMBB);
20663 MF->insert(MBBIter, endMBB);
20665 // Transfer the remainder of MBB and its successor edges to endMBB.
20666 endMBB->splice(endMBB->begin(), thisMBB,
20667 std::next(MachineBasicBlock::iterator(MI)), thisMBB->end());
20668 endMBB->transferSuccessorsAndUpdatePHIs(thisMBB);
20670 // Make offsetMBB and overflowMBB successors of thisMBB
20671 thisMBB->addSuccessor(offsetMBB);
20672 thisMBB->addSuccessor(overflowMBB);
20674 // endMBB is a successor of both offsetMBB and overflowMBB
20675 offsetMBB->addSuccessor(endMBB);
20676 overflowMBB->addSuccessor(endMBB);
20678 // Load the offset value into a register
20679 OffsetReg = MRI.createVirtualRegister(OffsetRegClass);
20680 BuildMI(thisMBB, DL, TII->get(X86::MOV32rm), OffsetReg)
20684 .addDisp(Disp, UseFPOffset ? 4 : 0)
20685 .addOperand(Segment)
20686 .setMemRefs(MMOBegin, MMOEnd);
20688 // Check if there is enough room left to pull this argument.
20689 BuildMI(thisMBB, DL, TII->get(X86::CMP32ri))
20691 .addImm(MaxOffset + 8 - ArgSizeA8);
20693 // Branch to "overflowMBB" if offset >= max
20694 // Fall through to "offsetMBB" otherwise
20695 BuildMI(thisMBB, DL, TII->get(X86::GetCondBranchFromCond(X86::COND_AE)))
20696 .addMBB(overflowMBB);
20699 // In offsetMBB, emit code to use the reg_save_area.
20701 assert(OffsetReg != 0);
20703 // Read the reg_save_area address.
20704 unsigned RegSaveReg = MRI.createVirtualRegister(AddrRegClass);
20705 BuildMI(offsetMBB, DL, TII->get(X86::MOV64rm), RegSaveReg)
20710 .addOperand(Segment)
20711 .setMemRefs(MMOBegin, MMOEnd);
20713 // Zero-extend the offset
20714 unsigned OffsetReg64 = MRI.createVirtualRegister(AddrRegClass);
20715 BuildMI(offsetMBB, DL, TII->get(X86::SUBREG_TO_REG), OffsetReg64)
20718 .addImm(X86::sub_32bit);
20720 // Add the offset to the reg_save_area to get the final address.
20721 BuildMI(offsetMBB, DL, TII->get(X86::ADD64rr), OffsetDestReg)
20722 .addReg(OffsetReg64)
20723 .addReg(RegSaveReg);
20725 // Compute the offset for the next argument
20726 unsigned NextOffsetReg = MRI.createVirtualRegister(OffsetRegClass);
20727 BuildMI(offsetMBB, DL, TII->get(X86::ADD32ri), NextOffsetReg)
20729 .addImm(UseFPOffset ? 16 : 8);
20731 // Store it back into the va_list.
20732 BuildMI(offsetMBB, DL, TII->get(X86::MOV32mr))
20736 .addDisp(Disp, UseFPOffset ? 4 : 0)
20737 .addOperand(Segment)
20738 .addReg(NextOffsetReg)
20739 .setMemRefs(MMOBegin, MMOEnd);
20742 BuildMI(offsetMBB, DL, TII->get(X86::JMP_1))
20747 // Emit code to use overflow area
20750 // Load the overflow_area address into a register.
20751 unsigned OverflowAddrReg = MRI.createVirtualRegister(AddrRegClass);
20752 BuildMI(overflowMBB, DL, TII->get(X86::MOV64rm), OverflowAddrReg)
20757 .addOperand(Segment)
20758 .setMemRefs(MMOBegin, MMOEnd);
20760 // If we need to align it, do so. Otherwise, just copy the address
20761 // to OverflowDestReg.
20763 // Align the overflow address
20764 assert((Align & (Align-1)) == 0 && "Alignment must be a power of 2");
20765 unsigned TmpReg = MRI.createVirtualRegister(AddrRegClass);
20767 // aligned_addr = (addr + (align-1)) & ~(align-1)
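// e.g. addr = 0x1004, align = 16: (0x1004 + 15) & ~15 = 0x1010 (illustrative).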
20768 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), TmpReg)
20769 .addReg(OverflowAddrReg)
20772 BuildMI(overflowMBB, DL, TII->get(X86::AND64ri32), OverflowDestReg)
20774 .addImm(~(uint64_t)(Align-1));
20776 BuildMI(overflowMBB, DL, TII->get(TargetOpcode::COPY), OverflowDestReg)
20777 .addReg(OverflowAddrReg);
20780 // Compute the next overflow address after this argument.
20781 // (the overflow address should be kept 8-byte aligned)
20782 unsigned NextAddrReg = MRI.createVirtualRegister(AddrRegClass);
20783 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), NextAddrReg)
20784 .addReg(OverflowDestReg)
20785 .addImm(ArgSizeA8);
20787 // Store the new overflow address.
20788 BuildMI(overflowMBB, DL, TII->get(X86::MOV64mr))
20793 .addOperand(Segment)
20794 .addReg(NextAddrReg)
20795 .setMemRefs(MMOBegin, MMOEnd);
20797 // If we branched, emit the PHI to the front of endMBB.
20799 BuildMI(*endMBB, endMBB->begin(), DL,
20800 TII->get(X86::PHI), DestReg)
20801 .addReg(OffsetDestReg).addMBB(offsetMBB)
20802 .addReg(OverflowDestReg).addMBB(overflowMBB);
20805 // Erase the pseudo instruction
20806 MI->eraseFromParent();
20811 MachineBasicBlock *
20812 X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter(
20814 MachineBasicBlock *MBB) const {
// Emit code to save XMM argument registers to the stack. The ABI passes the
// number of XMM registers actually used in %al, so in principle an indirect
// jump could skip the unused stores. This code takes a simpler approach and
// executes all of the stores whenever %al is non-zero: it is less code, it is
// easier on the hardware branch predictor, and the stores are cheap anyway.
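// A rough sketch of what is emitted here (assuming a non-Win64 target and a
// typical VarArgsFPOffset of 48; register and label names are illustrative):
//
//   testb %al, %al
//   je    .LEndMBB
//   movaps %xmm0,  48(frame)     # really RegSaveFrameIndex + Offset
//   movaps %xmm1,  64(frame)
//   ...                          # one store per XMM argument register
// .LEndMBB: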
20823 // Create the new basic blocks. One block contains all the XMM stores,
20824 // and one block is the final destination regardless of whether any
20825 // stores were performed.
20826 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
20827 MachineFunction *F = MBB->getParent();
20828 MachineFunction::iterator MBBIter = MBB;
20830 MachineBasicBlock *XMMSaveMBB = F->CreateMachineBasicBlock(LLVM_BB);
20831 MachineBasicBlock *EndMBB = F->CreateMachineBasicBlock(LLVM_BB);
20832 F->insert(MBBIter, XMMSaveMBB);
20833 F->insert(MBBIter, EndMBB);
20835 // Transfer the remainder of MBB and its successor edges to EndMBB.
20836 EndMBB->splice(EndMBB->begin(), MBB,
20837 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
20838 EndMBB->transferSuccessorsAndUpdatePHIs(MBB);
20840 // The original block will now fall through to the XMM save block.
20841 MBB->addSuccessor(XMMSaveMBB);
20842 // The XMMSaveMBB will fall through to the end block.
20843 XMMSaveMBB->addSuccessor(EndMBB);
20845 // Now add the instructions.
20846 const TargetInstrInfo *TII = MBB->getParent()->getSubtarget().getInstrInfo();
20847 DebugLoc DL = MI->getDebugLoc();
20849 unsigned CountReg = MI->getOperand(0).getReg();
20850 int64_t RegSaveFrameIndex = MI->getOperand(1).getImm();
20851 int64_t VarArgsFPOffset = MI->getOperand(2).getImm();
20853 if (!Subtarget->isTargetWin64()) {
20854 // If %al is 0, branch around the XMM save block.
20855 BuildMI(MBB, DL, TII->get(X86::TEST8rr)).addReg(CountReg).addReg(CountReg);
20856 BuildMI(MBB, DL, TII->get(X86::JE_1)).addMBB(EndMBB);
20857 MBB->addSuccessor(EndMBB);
20860 // Make sure the last operand is EFLAGS, which gets clobbered by the branch
20861 // that was just emitted, but clearly shouldn't be "saved".
20862 assert((MI->getNumOperands() <= 3 ||
20863 !MI->getOperand(MI->getNumOperands() - 1).isReg() ||
20864 MI->getOperand(MI->getNumOperands() - 1).getReg() == X86::EFLAGS)
20865 && "Expected last argument to be EFLAGS");
20866 unsigned MOVOpc = Subtarget->hasFp256() ? X86::VMOVAPSmr : X86::MOVAPSmr;
20867 // In the XMM save block, save all the XMM argument registers.
20868 for (int i = 3, e = MI->getNumOperands() - 1; i != e; ++i) {
20869 int64_t Offset = (i - 3) * 16 + VarArgsFPOffset;
20870 MachineMemOperand *MMO =
20871 F->getMachineMemOperand(
20872 MachinePointerInfo::getFixedStack(RegSaveFrameIndex, Offset),
20873 MachineMemOperand::MOStore,
20874 /*Size=*/16, /*Align=*/16);
20875 BuildMI(XMMSaveMBB, DL, TII->get(MOVOpc))
20876 .addFrameIndex(RegSaveFrameIndex)
20877 .addImm(/*Scale=*/1)
20878 .addReg(/*IndexReg=*/0)
20879 .addImm(/*Disp=*/Offset)
20880 .addReg(/*Segment=*/0)
20881 .addReg(MI->getOperand(i).getReg())
20882 .addMemOperand(MMO);
20885 MI->eraseFromParent(); // The pseudo instruction is gone now.
20890 // The EFLAGS operand of SelectItr might be missing a kill marker
20891 // because there were multiple uses of EFLAGS, and ISel didn't know
// which to mark. Figure out whether SelectItr should have had a kill
// marker, and set it if it should. Returns the correct kill marker value.
20895 static bool checkAndUpdateEFLAGSKill(MachineBasicBlock::iterator SelectItr,
20896 MachineBasicBlock* BB,
20897 const TargetRegisterInfo* TRI) {
20898 // Scan forward through BB for a use/def of EFLAGS.
20899 MachineBasicBlock::iterator miI(std::next(SelectItr));
20900 for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) {
20901 const MachineInstr& mi = *miI;
20902 if (mi.readsRegister(X86::EFLAGS))
20904 if (mi.definesRegister(X86::EFLAGS))
20905 break; // Should have kill-flag - update below.
// If we hit the end of the block, check whether EFLAGS is live into a
// successor.
20910 if (miI == BB->end()) {
20911 for (MachineBasicBlock::succ_iterator sItr = BB->succ_begin(),
20912 sEnd = BB->succ_end();
20913 sItr != sEnd; ++sItr) {
20914 MachineBasicBlock* succ = *sItr;
20915 if (succ->isLiveIn(X86::EFLAGS))
20920 // We found a def, or hit the end of the basic block and EFLAGS wasn't live
20921 // out. SelectMI should have a kill flag on EFLAGS.
20922 SelectItr->addRegisterKilled(X86::EFLAGS, TRI);
20926 MachineBasicBlock *
20927 X86TargetLowering::EmitLoweredSelect(MachineInstr *MI,
20928 MachineBasicBlock *BB) const {
20929 const TargetInstrInfo *TII = BB->getParent()->getSubtarget().getInstrInfo();
20930 DebugLoc DL = MI->getDebugLoc();
20932 // To "insert" a SELECT_CC instruction, we actually have to insert the
20933 // diamond control-flow pattern. The incoming instruction knows the
20934 // destination vreg to set, the condition code register to branch on, the
20935 // true/false values to select between, and a branch opcode to use.
20936 const BasicBlock *LLVM_BB = BB->getBasicBlock();
20937 MachineFunction::iterator It = BB;
//  thisMBB:
//   ...
//   TrueVal = ...
//   cmpTY ccX, r1, r2
//   jCC sinkMBB
//   fallthrough --> copy0MBB
20946 MachineBasicBlock *thisMBB = BB;
20947 MachineFunction *F = BB->getParent();
20948 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
20949 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
20950 F->insert(It, copy0MBB);
20951 F->insert(It, sinkMBB);
20953 // If the EFLAGS register isn't dead in the terminator, then claim that it's
20954 // live into the sink and copy blocks.
20955 const TargetRegisterInfo *TRI =
20956 BB->getParent()->getSubtarget().getRegisterInfo();
20957 if (!MI->killsRegister(X86::EFLAGS) &&
20958 !checkAndUpdateEFLAGSKill(MI, BB, TRI)) {
20959 copy0MBB->addLiveIn(X86::EFLAGS);
20960 sinkMBB->addLiveIn(X86::EFLAGS);
20963 // Transfer the remainder of BB and its successor edges to sinkMBB.
20964 sinkMBB->splice(sinkMBB->begin(), BB,
20965 std::next(MachineBasicBlock::iterator(MI)), BB->end());
20966 sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
20968 // Add the true and fallthrough blocks as its successors.
20969 BB->addSuccessor(copy0MBB);
20970 BB->addSuccessor(sinkMBB);
20972 // Create the conditional branch instruction.
20974 X86::GetCondBranchFromCond((X86::CondCode)MI->getOperand(3).getImm());
20975 BuildMI(BB, DL, TII->get(Opc)).addMBB(sinkMBB);
// copy0MBB:
//  %FalseValue = ...
//  # fallthrough to sinkMBB
20980 copy0MBB->addSuccessor(sinkMBB);
// sinkMBB:
//  %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
20985 BuildMI(*sinkMBB, sinkMBB->begin(), DL,
20986 TII->get(X86::PHI), MI->getOperand(0).getReg())
20987 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB)
20988 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
20990 MI->eraseFromParent(); // The pseudo instruction is gone now.
20994 MachineBasicBlock *
20995 X86TargetLowering::EmitLoweredSegAlloca(MachineInstr *MI,
20996 MachineBasicBlock *BB) const {
20997 MachineFunction *MF = BB->getParent();
20998 const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
20999 DebugLoc DL = MI->getDebugLoc();
21000 const BasicBlock *LLVM_BB = BB->getBasicBlock();
21002 assert(MF->shouldSplitStack());
21004 const bool Is64Bit = Subtarget->is64Bit();
21005 const bool IsLP64 = Subtarget->isTarget64BitLP64();
21007 const unsigned TlsReg = Is64Bit ? X86::FS : X86::GS;
21008 const unsigned TlsOffset = IsLP64 ? 0x70 : Is64Bit ? 0x40 : 0x30;
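// These are the TLS slots holding the thread's split-stack limit
// (%fs:0x70 for LP64, %fs:0x40 for x32, %gs:0x30 for 32-bit); the compare
// emitted below checks the prospective SP against that limit.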
// BB:
//  ... [Till the alloca]
//  If stacklet is not large enough, jump to mallocMBB
//
// bumpMBB:
//  Allocate by subtracting from RSP
//  Jump to continueMBB
//
// mallocMBB:
//  Allocate by call to runtime
//
// continueMBB:
//  [rest of original BB]
21026 MachineBasicBlock *mallocMBB = MF->CreateMachineBasicBlock(LLVM_BB);
21027 MachineBasicBlock *bumpMBB = MF->CreateMachineBasicBlock(LLVM_BB);
21028 MachineBasicBlock *continueMBB = MF->CreateMachineBasicBlock(LLVM_BB);
21030 MachineRegisterInfo &MRI = MF->getRegInfo();
21031 const TargetRegisterClass *AddrRegClass =
21032 getRegClassFor(getPointerTy());
21034 unsigned mallocPtrVReg = MRI.createVirtualRegister(AddrRegClass),
21035 bumpSPPtrVReg = MRI.createVirtualRegister(AddrRegClass),
21036 tmpSPVReg = MRI.createVirtualRegister(AddrRegClass),
21037 SPLimitVReg = MRI.createVirtualRegister(AddrRegClass),
21038 sizeVReg = MI->getOperand(1).getReg(),
21039 physSPReg = IsLP64 || Subtarget->isTargetNaCl64() ? X86::RSP : X86::ESP;
21041 MachineFunction::iterator MBBIter = BB;
21044 MF->insert(MBBIter, bumpMBB);
21045 MF->insert(MBBIter, mallocMBB);
21046 MF->insert(MBBIter, continueMBB);
21048 continueMBB->splice(continueMBB->begin(), BB,
21049 std::next(MachineBasicBlock::iterator(MI)), BB->end());
21050 continueMBB->transferSuccessorsAndUpdatePHIs(BB);
21052 // Add code to the main basic block to check if the stack limit has been hit,
21053 // and if so, jump to mallocMBB otherwise to bumpMBB.
21054 BuildMI(BB, DL, TII->get(TargetOpcode::COPY), tmpSPVReg).addReg(physSPReg);
21055 BuildMI(BB, DL, TII->get(IsLP64 ? X86::SUB64rr:X86::SUB32rr), SPLimitVReg)
21056 .addReg(tmpSPVReg).addReg(sizeVReg);
21057 BuildMI(BB, DL, TII->get(IsLP64 ? X86::CMP64mr:X86::CMP32mr))
21058 .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg)
21059 .addReg(SPLimitVReg);
21060 BuildMI(BB, DL, TII->get(X86::JG_1)).addMBB(mallocMBB);
21062 // bumpMBB simply decreases the stack pointer, since we know the current
21063 // stacklet has enough space.
21064 BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), physSPReg)
21065 .addReg(SPLimitVReg);
21066 BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), bumpSPPtrVReg)
21067 .addReg(SPLimitVReg);
21068 BuildMI(bumpMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);
21070 // Calls into a routine in libgcc to allocate more space from the heap.
21071 const uint32_t *RegMask = MF->getTarget()
21072 .getSubtargetImpl()
21073 ->getRegisterInfo()
21074 ->getCallPreservedMask(CallingConv::C);
21076 BuildMI(mallocMBB, DL, TII->get(X86::MOV64rr), X86::RDI)
21078 BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
21079 .addExternalSymbol("__morestack_allocate_stack_space")
21080 .addRegMask(RegMask)
21081 .addReg(X86::RDI, RegState::Implicit)
21082 .addReg(X86::RAX, RegState::ImplicitDefine);
21083 } else if (Is64Bit) {
21084 BuildMI(mallocMBB, DL, TII->get(X86::MOV32rr), X86::EDI)
21086 BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
21087 .addExternalSymbol("__morestack_allocate_stack_space")
21088 .addRegMask(RegMask)
21089 .addReg(X86::EDI, RegState::Implicit)
21090 .addReg(X86::EAX, RegState::ImplicitDefine);
21092 BuildMI(mallocMBB, DL, TII->get(X86::SUB32ri), physSPReg).addReg(physSPReg)
21094 BuildMI(mallocMBB, DL, TII->get(X86::PUSH32r)).addReg(sizeVReg);
21095 BuildMI(mallocMBB, DL, TII->get(X86::CALLpcrel32))
21096 .addExternalSymbol("__morestack_allocate_stack_space")
21097 .addRegMask(RegMask)
21098 .addReg(X86::EAX, RegState::ImplicitDefine);
21102 BuildMI(mallocMBB, DL, TII->get(X86::ADD32ri), physSPReg).addReg(physSPReg)
21105 BuildMI(mallocMBB, DL, TII->get(TargetOpcode::COPY), mallocPtrVReg)
21106 .addReg(IsLP64 ? X86::RAX : X86::EAX);
21107 BuildMI(mallocMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);
21109 // Set up the CFG correctly.
21110 BB->addSuccessor(bumpMBB);
21111 BB->addSuccessor(mallocMBB);
21112 mallocMBB->addSuccessor(continueMBB);
21113 bumpMBB->addSuccessor(continueMBB);
21115 // Take care of the PHI nodes.
21116 BuildMI(*continueMBB, continueMBB->begin(), DL, TII->get(X86::PHI),
21117 MI->getOperand(0).getReg())
21118 .addReg(mallocPtrVReg).addMBB(mallocMBB)
21119 .addReg(bumpSPPtrVReg).addMBB(bumpMBB);
21121 // Delete the original pseudo instruction.
21122 MI->eraseFromParent();
21125 return continueMBB;
21128 MachineBasicBlock *
21129 X86TargetLowering::EmitLoweredWinAlloca(MachineInstr *MI,
21130 MachineBasicBlock *BB) const {
21131 const TargetInstrInfo *TII = BB->getParent()->getSubtarget().getInstrInfo();
21132 DebugLoc DL = MI->getDebugLoc();
21134 assert(!Subtarget->isTargetMachO());
21136 // The lowering is pretty easy: we're just emitting the call to _alloca. The
// non-trivial part is the implicit def of ESP.
21139 if (Subtarget->isTargetWin64()) {
21140 if (Subtarget->isTargetCygMing()) {
21141 // ___chkstk(Mingw64):
21142 // Clobbers R10, R11, RAX and EFLAGS.
21144 BuildMI(*BB, MI, DL, TII->get(X86::W64ALLOCA))
21145 .addExternalSymbol("___chkstk")
21146 .addReg(X86::RAX, RegState::Implicit)
21147 .addReg(X86::RSP, RegState::Implicit)
21148 .addReg(X86::RAX, RegState::Define | RegState::Implicit)
21149 .addReg(X86::RSP, RegState::Define | RegState::Implicit)
21150 .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit);
21152 // __chkstk(MSVCRT): does not update stack pointer.
21153 // Clobbers R10, R11 and EFLAGS.
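// The emitted sequence is roughly (illustrative; the allocation size is
// expected to already be in RAX):
//   callq __chkstk
//   subq  %rax, %rsp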
21154 BuildMI(*BB, MI, DL, TII->get(X86::W64ALLOCA))
21155 .addExternalSymbol("__chkstk")
21156 .addReg(X86::RAX, RegState::Implicit)
21157 .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit);
21158 // RAX has the offset to be subtracted from RSP.
21159 BuildMI(*BB, MI, DL, TII->get(X86::SUB64rr), X86::RSP)
21164 const char *StackProbeSymbol = (Subtarget->isTargetKnownWindowsMSVC() ||
21165 Subtarget->isTargetWindowsItanium())
21169 BuildMI(*BB, MI, DL, TII->get(X86::CALLpcrel32))
21170 .addExternalSymbol(StackProbeSymbol)
21171 .addReg(X86::EAX, RegState::Implicit)
21172 .addReg(X86::ESP, RegState::Implicit)
21173 .addReg(X86::EAX, RegState::Define | RegState::Implicit)
21174 .addReg(X86::ESP, RegState::Define | RegState::Implicit)
21175 .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit);
21178 MI->eraseFromParent(); // The pseudo instruction is gone now.
21182 MachineBasicBlock *
21183 X86TargetLowering::EmitLoweredTLSCall(MachineInstr *MI,
21184 MachineBasicBlock *BB) const {
21185 // This is pretty easy. We're taking the value that we received from
21186 // our load from the relocation, sticking it in either RDI (x86-64)
21187 // or EAX and doing an indirect call. The return value will then
21188 // be in the normal return register.
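// Roughly, the 64-bit case expands to (illustrative; the symbol stands for
// the GlobalAddress operand of the pseudo):
//   movq _sym@TLVP(%rip), %rdi
//   callq *(%rdi)
// with the result left in RAX.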
21189 MachineFunction *F = BB->getParent();
21190 const X86InstrInfo *TII =
21191 static_cast<const X86InstrInfo *>(F->getSubtarget().getInstrInfo());
21192 DebugLoc DL = MI->getDebugLoc();
21194 assert(Subtarget->isTargetDarwin() && "Darwin only instr emitted?");
21195 assert(MI->getOperand(3).isGlobal() && "This should be a global");
21197 // Get a register mask for the lowered call.
21198 // FIXME: The 32-bit calls have non-standard calling conventions. Use a
21199 // proper register mask.
21200 const uint32_t *RegMask = F->getTarget()
21201 .getSubtargetImpl()
21202 ->getRegisterInfo()
21203 ->getCallPreservedMask(CallingConv::C);
21204 if (Subtarget->is64Bit()) {
21205 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
21206 TII->get(X86::MOV64rm), X86::RDI)
21208 .addImm(0).addReg(0)
21209 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
21210 MI->getOperand(3).getTargetFlags())
21212 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL64m));
21213 addDirectMem(MIB, X86::RDI);
21214 MIB.addReg(X86::RAX, RegState::ImplicitDefine).addRegMask(RegMask);
21215 } else if (F->getTarget().getRelocationModel() != Reloc::PIC_) {
21216 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
21217 TII->get(X86::MOV32rm), X86::EAX)
21219 .addImm(0).addReg(0)
21220 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
21221 MI->getOperand(3).getTargetFlags())
21223 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
21224 addDirectMem(MIB, X86::EAX);
21225 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
21227 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
21228 TII->get(X86::MOV32rm), X86::EAX)
21229 .addReg(TII->getGlobalBaseReg(F))
21230 .addImm(0).addReg(0)
21231 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
21232 MI->getOperand(3).getTargetFlags())
21234 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
21235 addDirectMem(MIB, X86::EAX);
21236 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
21239 MI->eraseFromParent(); // The pseudo instruction is gone now.
21243 MachineBasicBlock *
21244 X86TargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
21245 MachineBasicBlock *MBB) const {
21246 DebugLoc DL = MI->getDebugLoc();
21247 MachineFunction *MF = MBB->getParent();
21248 const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
21249 MachineRegisterInfo &MRI = MF->getRegInfo();
21251 const BasicBlock *BB = MBB->getBasicBlock();
21252 MachineFunction::iterator I = MBB;
21255 // Memory Reference
21256 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
21257 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
21260 unsigned MemOpndSlot = 0;
21262 unsigned CurOp = 0;
21264 DstReg = MI->getOperand(CurOp++).getReg();
21265 const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
21266 assert(RC->hasType(MVT::i32) && "Invalid destination!");
21267 unsigned mainDstReg = MRI.createVirtualRegister(RC);
21268 unsigned restoreDstReg = MRI.createVirtualRegister(RC);
21270 MemOpndSlot = CurOp;
21272 MVT PVT = getPointerTy();
21273 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
21274 "Invalid Pointer Size!");
// For v = setjmp(buf), we generate
//
// thisMBB:
//  buf[LabelOffset] = restoreMBB <-- takes address of restoreMBB
//  SjLjSetup restoreMBB
//
// mainMBB:
//  v_main = 0
//
// sinkMBB:
//  v = phi(v_main, v_restore)
//
// restoreMBB:
//  if base pointer being used, load it from frame
//  v_restore = 1
21292 MachineBasicBlock *thisMBB = MBB;
21293 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
21294 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
21295 MachineBasicBlock *restoreMBB = MF->CreateMachineBasicBlock(BB);
21296 MF->insert(I, mainMBB);
21297 MF->insert(I, sinkMBB);
21298 MF->push_back(restoreMBB);
21300 MachineInstrBuilder MIB;
21302 // Transfer the remainder of BB and its successor edges to sinkMBB.
21303 sinkMBB->splice(sinkMBB->begin(), MBB,
21304 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
21305 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
21308 unsigned PtrStoreOpc = 0;
21309 unsigned LabelReg = 0;
21310 const int64_t LabelOffset = 1 * PVT.getStoreSize();
21311 Reloc::Model RM = MF->getTarget().getRelocationModel();
21312 bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
21313 (RM == Reloc::Static || RM == Reloc::DynamicNoPIC);
21315 // Prepare IP either in reg or imm.
21316 if (!UseImmLabel) {
21317 PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
21318 const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
21319 LabelReg = MRI.createVirtualRegister(PtrRC);
21320 if (Subtarget->is64Bit()) {
21321 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA64r), LabelReg)
21325 .addMBB(restoreMBB)
21328 const X86InstrInfo *XII = static_cast<const X86InstrInfo*>(TII);
21329 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA32r), LabelReg)
21330 .addReg(XII->getGlobalBaseReg(MF))
21333 .addMBB(restoreMBB, Subtarget->ClassifyBlockAddressReference())
21337 PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
21339 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrStoreOpc));
21340 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
21341 if (i == X86::AddrDisp)
21342 MIB.addDisp(MI->getOperand(MemOpndSlot + i), LabelOffset);
21344 MIB.addOperand(MI->getOperand(MemOpndSlot + i));
21347 MIB.addReg(LabelReg);
21349 MIB.addMBB(restoreMBB);
21350 MIB.setMemRefs(MMOBegin, MMOEnd);
21352 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::EH_SjLj_Setup))
21353 .addMBB(restoreMBB);
21355 const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
21356 MF->getSubtarget().getRegisterInfo());
21357 MIB.addRegMask(RegInfo->getNoPreservedMask());
21358 thisMBB->addSuccessor(mainMBB);
21359 thisMBB->addSuccessor(restoreMBB);
21363 BuildMI(mainMBB, DL, TII->get(X86::MOV32r0), mainDstReg);
21364 mainMBB->addSuccessor(sinkMBB);
21367 BuildMI(*sinkMBB, sinkMBB->begin(), DL,
21368 TII->get(X86::PHI), DstReg)
21369 .addReg(mainDstReg).addMBB(mainMBB)
21370 .addReg(restoreDstReg).addMBB(restoreMBB);
21373 if (RegInfo->hasBasePointer(*MF)) {
21374 const X86Subtarget &STI = MF->getTarget().getSubtarget<X86Subtarget>();
21375 const bool Uses64BitFramePtr = STI.isTarget64BitLP64() || STI.isTargetNaCl64();
21376 X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
21377 X86FI->setRestoreBasePointer(MF);
21378 unsigned FramePtr = RegInfo->getFrameRegister(*MF);
21379 unsigned BasePtr = RegInfo->getBaseRegister();
21380 unsigned Opm = Uses64BitFramePtr ? X86::MOV64rm : X86::MOV32rm;
21381 addRegOffset(BuildMI(restoreMBB, DL, TII->get(Opm), BasePtr),
21382 FramePtr, true, X86FI->getRestoreBasePointerOffset())
21383 .setMIFlag(MachineInstr::FrameSetup);
21385 BuildMI(restoreMBB, DL, TII->get(X86::MOV32ri), restoreDstReg).addImm(1);
21386 BuildMI(restoreMBB, DL, TII->get(X86::JMP_1)).addMBB(sinkMBB);
21387 restoreMBB->addSuccessor(sinkMBB);
21389 MI->eraseFromParent();
21393 MachineBasicBlock *
21394 X86TargetLowering::emitEHSjLjLongJmp(MachineInstr *MI,
21395 MachineBasicBlock *MBB) const {
21396 DebugLoc DL = MI->getDebugLoc();
21397 MachineFunction *MF = MBB->getParent();
21398 const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
21399 MachineRegisterInfo &MRI = MF->getRegInfo();
21401 // Memory Reference
21402 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
21403 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
21405 MVT PVT = getPointerTy();
21406 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
21407 "Invalid Pointer Size!");
21409 const TargetRegisterClass *RC =
21410 (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
21411 unsigned Tmp = MRI.createVirtualRegister(RC);
// Since FP is only updated here but NOT referenced, it's treated as a GPR.
21413 const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
21414 MF->getSubtarget().getRegisterInfo());
21415 unsigned FP = (PVT == MVT::i64) ? X86::RBP : X86::EBP;
21416 unsigned SP = RegInfo->getStackRegister();
21418 MachineInstrBuilder MIB;
21420 const int64_t LabelOffset = 1 * PVT.getStoreSize();
21421 const int64_t SPOffset = 2 * PVT.getStoreSize();
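// With a 64-bit pointer the jmp_buf is used as: [0] = frame pointer,
// [8] = resume label, [16] = stack pointer, so the restore sequence emitted
// below is roughly (illustrative):
//   movq  0(buf), %rbp
//   movq  8(buf), %tmp
//   movq 16(buf), %rsp
//   jmpq  *%tmp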
21423 unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
21424 unsigned IJmpOpc = (PVT == MVT::i64) ? X86::JMP64r : X86::JMP32r;
21427 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), FP);
21428 for (unsigned i = 0; i < X86::AddrNumOperands; ++i)
21429 MIB.addOperand(MI->getOperand(i));
21430 MIB.setMemRefs(MMOBegin, MMOEnd);
21432 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), Tmp);
21433 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
21434 if (i == X86::AddrDisp)
21435 MIB.addDisp(MI->getOperand(i), LabelOffset);
21437 MIB.addOperand(MI->getOperand(i));
21439 MIB.setMemRefs(MMOBegin, MMOEnd);
21441 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), SP);
21442 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
21443 if (i == X86::AddrDisp)
21444 MIB.addDisp(MI->getOperand(i), SPOffset);
21446 MIB.addOperand(MI->getOperand(i));
21448 MIB.setMemRefs(MMOBegin, MMOEnd);
21450 BuildMI(*MBB, MI, DL, TII->get(IJmpOpc)).addReg(Tmp);
21452 MI->eraseFromParent();
21456 // Replace 213-type (isel default) FMA3 instructions with 231-type for
21457 // accumulator loops. Writing back to the accumulator allows the coalescer
21458 // to remove extra copies in the loop.
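// In the 213 form the destination is tied to a multiplicand, while in the 231
// form it is tied to the addend, e.g. (illustrative):
//   FMA213 dst(tied:src1), src2, addend   ; dst = src2 * src1 + addend
//   FMA231 dst(tied:addend), src1, src2   ; dst = src1 * src2 + addend
// so writing the result back on top of the loop-carried addend avoids a copy.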
21459 MachineBasicBlock *
21460 X86TargetLowering::emitFMA3Instr(MachineInstr *MI,
21461 MachineBasicBlock *MBB) const {
21462 MachineOperand &AddendOp = MI->getOperand(3);
21464 // Bail out early if the addend isn't a register - we can't switch these.
21465 if (!AddendOp.isReg())
21468 MachineFunction &MF = *MBB->getParent();
21469 MachineRegisterInfo &MRI = MF.getRegInfo();
21471 // Check whether the addend is defined by a PHI:
21472 assert(MRI.hasOneDef(AddendOp.getReg()) && "Multiple defs in SSA?");
21473 MachineInstr &AddendDef = *MRI.def_instr_begin(AddendOp.getReg());
21474 if (!AddendDef.isPHI())
// Look for the following pattern:
//
// loop:
//   %addend = phi [%entry, 0], [%loop, %result]
//   ...
//   %result<tied1> = FMA213 %m2<tied0>, %m1, %addend
//
// and transform it into:
//
// loop:
//   %addend = phi [%entry, 0], [%loop, %result]
//   ...
//   %result<tied1> = FMA231 %addend<tied0>, %m1, %m2
21489 for (unsigned i = 1, e = AddendDef.getNumOperands(); i < e; i += 2) {
21490 assert(AddendDef.getOperand(i).isReg());
21491 MachineOperand PHISrcOp = AddendDef.getOperand(i);
21492 MachineInstr &PHISrcInst = *MRI.def_instr_begin(PHISrcOp.getReg());
21493 if (&PHISrcInst == MI) {
21494 // Found a matching instruction.
21495 unsigned NewFMAOpc = 0;
21496 switch (MI->getOpcode()) {
21497 case X86::VFMADDPDr213r: NewFMAOpc = X86::VFMADDPDr231r; break;
21498 case X86::VFMADDPSr213r: NewFMAOpc = X86::VFMADDPSr231r; break;
21499 case X86::VFMADDSDr213r: NewFMAOpc = X86::VFMADDSDr231r; break;
21500 case X86::VFMADDSSr213r: NewFMAOpc = X86::VFMADDSSr231r; break;
21501 case X86::VFMSUBPDr213r: NewFMAOpc = X86::VFMSUBPDr231r; break;
21502 case X86::VFMSUBPSr213r: NewFMAOpc = X86::VFMSUBPSr231r; break;
21503 case X86::VFMSUBSDr213r: NewFMAOpc = X86::VFMSUBSDr231r; break;
21504 case X86::VFMSUBSSr213r: NewFMAOpc = X86::VFMSUBSSr231r; break;
21505 case X86::VFNMADDPDr213r: NewFMAOpc = X86::VFNMADDPDr231r; break;
21506 case X86::VFNMADDPSr213r: NewFMAOpc = X86::VFNMADDPSr231r; break;
21507 case X86::VFNMADDSDr213r: NewFMAOpc = X86::VFNMADDSDr231r; break;
21508 case X86::VFNMADDSSr213r: NewFMAOpc = X86::VFNMADDSSr231r; break;
21509 case X86::VFNMSUBPDr213r: NewFMAOpc = X86::VFNMSUBPDr231r; break;
21510 case X86::VFNMSUBPSr213r: NewFMAOpc = X86::VFNMSUBPSr231r; break;
21511 case X86::VFNMSUBSDr213r: NewFMAOpc = X86::VFNMSUBSDr231r; break;
21512 case X86::VFNMSUBSSr213r: NewFMAOpc = X86::VFNMSUBSSr231r; break;
21513 case X86::VFMADDSUBPDr213r: NewFMAOpc = X86::VFMADDSUBPDr231r; break;
21514 case X86::VFMADDSUBPSr213r: NewFMAOpc = X86::VFMADDSUBPSr231r; break;
21515 case X86::VFMSUBADDPDr213r: NewFMAOpc = X86::VFMSUBADDPDr231r; break;
21516 case X86::VFMSUBADDPSr213r: NewFMAOpc = X86::VFMSUBADDPSr231r; break;
21518 case X86::VFMADDPDr213rY: NewFMAOpc = X86::VFMADDPDr231rY; break;
21519 case X86::VFMADDPSr213rY: NewFMAOpc = X86::VFMADDPSr231rY; break;
21520 case X86::VFMSUBPDr213rY: NewFMAOpc = X86::VFMSUBPDr231rY; break;
21521 case X86::VFMSUBPSr213rY: NewFMAOpc = X86::VFMSUBPSr231rY; break;
21522 case X86::VFNMADDPDr213rY: NewFMAOpc = X86::VFNMADDPDr231rY; break;
21523 case X86::VFNMADDPSr213rY: NewFMAOpc = X86::VFNMADDPSr231rY; break;
21524 case X86::VFNMSUBPDr213rY: NewFMAOpc = X86::VFNMSUBPDr231rY; break;
21525 case X86::VFNMSUBPSr213rY: NewFMAOpc = X86::VFNMSUBPSr231rY; break;
21526 case X86::VFMADDSUBPDr213rY: NewFMAOpc = X86::VFMADDSUBPDr231rY; break;
21527 case X86::VFMADDSUBPSr213rY: NewFMAOpc = X86::VFMADDSUBPSr231rY; break;
21528 case X86::VFMSUBADDPDr213rY: NewFMAOpc = X86::VFMSUBADDPDr231rY; break;
21529 case X86::VFMSUBADDPSr213rY: NewFMAOpc = X86::VFMSUBADDPSr231rY; break;
21530 default: llvm_unreachable("Unrecognized FMA variant.");
21533 const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
21534 MachineInstrBuilder MIB =
21535 BuildMI(MF, MI->getDebugLoc(), TII.get(NewFMAOpc))
21536 .addOperand(MI->getOperand(0))
21537 .addOperand(MI->getOperand(3))
21538 .addOperand(MI->getOperand(2))
21539 .addOperand(MI->getOperand(1));
21540 MBB->insert(MachineBasicBlock::iterator(MI), MIB);
21541 MI->eraseFromParent();
21548 MachineBasicBlock *
21549 X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
21550 MachineBasicBlock *BB) const {
21551 switch (MI->getOpcode()) {
21552 default: llvm_unreachable("Unexpected instr type to insert");
21553 case X86::TAILJMPd64:
21554 case X86::TAILJMPr64:
21555 case X86::TAILJMPm64:
21556 llvm_unreachable("TAILJMP64 would not be touched here.");
21557 case X86::TCRETURNdi64:
21558 case X86::TCRETURNri64:
21559 case X86::TCRETURNmi64:
21561 case X86::WIN_ALLOCA:
21562 return EmitLoweredWinAlloca(MI, BB);
21563 case X86::SEG_ALLOCA_32:
21564 case X86::SEG_ALLOCA_64:
21565 return EmitLoweredSegAlloca(MI, BB);
21566 case X86::TLSCall_32:
21567 case X86::TLSCall_64:
21568 return EmitLoweredTLSCall(MI, BB);
21569 case X86::CMOV_GR8:
21570 case X86::CMOV_FR32:
21571 case X86::CMOV_FR64:
21572 case X86::CMOV_V4F32:
21573 case X86::CMOV_V2F64:
21574 case X86::CMOV_V2I64:
21575 case X86::CMOV_V8F32:
21576 case X86::CMOV_V4F64:
21577 case X86::CMOV_V4I64:
21578 case X86::CMOV_V16F32:
21579 case X86::CMOV_V8F64:
21580 case X86::CMOV_V8I64:
21581 case X86::CMOV_GR16:
21582 case X86::CMOV_GR32:
21583 case X86::CMOV_RFP32:
21584 case X86::CMOV_RFP64:
21585 case X86::CMOV_RFP80:
21586 return EmitLoweredSelect(MI, BB);
21588 case X86::FP32_TO_INT16_IN_MEM:
21589 case X86::FP32_TO_INT32_IN_MEM:
21590 case X86::FP32_TO_INT64_IN_MEM:
21591 case X86::FP64_TO_INT16_IN_MEM:
21592 case X86::FP64_TO_INT32_IN_MEM:
21593 case X86::FP64_TO_INT64_IN_MEM:
21594 case X86::FP80_TO_INT16_IN_MEM:
21595 case X86::FP80_TO_INT32_IN_MEM:
21596 case X86::FP80_TO_INT64_IN_MEM: {
21597 MachineFunction *F = BB->getParent();
21598 const TargetInstrInfo *TII = F->getSubtarget().getInstrInfo();
21599 DebugLoc DL = MI->getDebugLoc();
21601 // Change the floating point control register to use "round towards zero"
21602 // mode when truncating to an integer value.
21603 int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2, false);
21604 addFrameReference(BuildMI(*BB, MI, DL,
21605 TII->get(X86::FNSTCW16m)), CWFrameIdx);
21607 // Load the old value of the high byte of the control word...
21609 F->getRegInfo().createVirtualRegister(&X86::GR16RegClass);
21610 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16rm), OldCW),
21613 // Set the high part to be round to zero...
21614 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mi)), CWFrameIdx)
21617 // Reload the modified control word now...
21618 addFrameReference(BuildMI(*BB, MI, DL,
21619 TII->get(X86::FLDCW16m)), CWFrameIdx);
21621 // Restore the memory image of control word to original value
21622 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mr)), CWFrameIdx)
21625 // Get the X86 opcode to use.
21627 switch (MI->getOpcode()) {
21628 default: llvm_unreachable("illegal opcode!");
21629 case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break;
21630 case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break;
21631 case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break;
21632 case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break;
21633 case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break;
21634 case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break;
21635 case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break;
21636 case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break;
21637 case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break;
21641 MachineOperand &Op = MI->getOperand(0);
21643 AM.BaseType = X86AddressMode::RegBase;
21644 AM.Base.Reg = Op.getReg();
21646 AM.BaseType = X86AddressMode::FrameIndexBase;
21647 AM.Base.FrameIndex = Op.getIndex();
21649 Op = MI->getOperand(1);
21651 AM.Scale = Op.getImm();
21652 Op = MI->getOperand(2);
21654 AM.IndexReg = Op.getImm();
21655 Op = MI->getOperand(3);
21656 if (Op.isGlobal()) {
21657 AM.GV = Op.getGlobal();
21659 AM.Disp = Op.getImm();
21661 addFullAddress(BuildMI(*BB, MI, DL, TII->get(Opc)), AM)
21662 .addReg(MI->getOperand(X86::AddrNumOperands).getReg());
21664 // Reload the original control word now.
21665 addFrameReference(BuildMI(*BB, MI, DL,
21666 TII->get(X86::FLDCW16m)), CWFrameIdx);
21668 MI->eraseFromParent(); // The pseudo instruction is gone now.
21671 // String/text processing lowering.
21672 case X86::PCMPISTRM128REG:
21673 case X86::VPCMPISTRM128REG:
21674 case X86::PCMPISTRM128MEM:
21675 case X86::VPCMPISTRM128MEM:
21676 case X86::PCMPESTRM128REG:
21677 case X86::VPCMPESTRM128REG:
21678 case X86::PCMPESTRM128MEM:
21679 case X86::VPCMPESTRM128MEM:
21680 assert(Subtarget->hasSSE42() &&
21681 "Target must have SSE4.2 or AVX features enabled");
21682 return EmitPCMPSTRM(MI, BB, BB->getParent()->getSubtarget().getInstrInfo());
21684 // String/text processing lowering.
21685 case X86::PCMPISTRIREG:
21686 case X86::VPCMPISTRIREG:
21687 case X86::PCMPISTRIMEM:
21688 case X86::VPCMPISTRIMEM:
21689 case X86::PCMPESTRIREG:
21690 case X86::VPCMPESTRIREG:
21691 case X86::PCMPESTRIMEM:
21692 case X86::VPCMPESTRIMEM:
21693 assert(Subtarget->hasSSE42() &&
21694 "Target must have SSE4.2 or AVX features enabled");
21695 return EmitPCMPSTRI(MI, BB, BB->getParent()->getSubtarget().getInstrInfo());
21697 // Thread synchronization.
21699 return EmitMonitor(MI, BB, BB->getParent()->getSubtarget().getInstrInfo(),
21704 return EmitXBegin(MI, BB, BB->getParent()->getSubtarget().getInstrInfo());
21706 case X86::VASTART_SAVE_XMM_REGS:
21707 return EmitVAStartSaveXMMRegsWithCustomInserter(MI, BB);
21709 case X86::VAARG_64:
21710 return EmitVAARG64WithCustomInserter(MI, BB);
21712 case X86::EH_SjLj_SetJmp32:
21713 case X86::EH_SjLj_SetJmp64:
21714 return emitEHSjLjSetJmp(MI, BB);
21716 case X86::EH_SjLj_LongJmp32:
21717 case X86::EH_SjLj_LongJmp64:
21718 return emitEHSjLjLongJmp(MI, BB);
21720 case TargetOpcode::STATEPOINT:
21721 // As an implementation detail, STATEPOINT shares the STACKMAP format at
21722 // this point in the process. We diverge later.
21723 return emitPatchPoint(MI, BB);
21725 case TargetOpcode::STACKMAP:
21726 case TargetOpcode::PATCHPOINT:
21727 return emitPatchPoint(MI, BB);
21729 case X86::VFMADDPDr213r:
21730 case X86::VFMADDPSr213r:
21731 case X86::VFMADDSDr213r:
21732 case X86::VFMADDSSr213r:
21733 case X86::VFMSUBPDr213r:
21734 case X86::VFMSUBPSr213r:
21735 case X86::VFMSUBSDr213r:
21736 case X86::VFMSUBSSr213r:
21737 case X86::VFNMADDPDr213r:
21738 case X86::VFNMADDPSr213r:
21739 case X86::VFNMADDSDr213r:
21740 case X86::VFNMADDSSr213r:
21741 case X86::VFNMSUBPDr213r:
21742 case X86::VFNMSUBPSr213r:
21743 case X86::VFNMSUBSDr213r:
21744 case X86::VFNMSUBSSr213r:
21745 case X86::VFMADDSUBPDr213r:
21746 case X86::VFMADDSUBPSr213r:
21747 case X86::VFMSUBADDPDr213r:
21748 case X86::VFMSUBADDPSr213r:
21749 case X86::VFMADDPDr213rY:
21750 case X86::VFMADDPSr213rY:
21751 case X86::VFMSUBPDr213rY:
21752 case X86::VFMSUBPSr213rY:
21753 case X86::VFNMADDPDr213rY:
21754 case X86::VFNMADDPSr213rY:
21755 case X86::VFNMSUBPDr213rY:
21756 case X86::VFNMSUBPSr213rY:
21757 case X86::VFMADDSUBPDr213rY:
21758 case X86::VFMADDSUBPSr213rY:
21759 case X86::VFMSUBADDPDr213rY:
21760 case X86::VFMSUBADDPSr213rY:
21761 return emitFMA3Instr(MI, BB);
21765 //===----------------------------------------------------------------------===//
21766 // X86 Optimization Hooks
21767 //===----------------------------------------------------------------------===//
21769 void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
21772 const SelectionDAG &DAG,
21773 unsigned Depth) const {
21774 unsigned BitWidth = KnownZero.getBitWidth();
21775 unsigned Opc = Op.getOpcode();
21776 assert((Opc >= ISD::BUILTIN_OP_END ||
21777 Opc == ISD::INTRINSIC_WO_CHAIN ||
21778 Opc == ISD::INTRINSIC_W_CHAIN ||
21779 Opc == ISD::INTRINSIC_VOID) &&
21780 "Should use MaskedValueIsZero if you don't know whether Op"
21781 " is a target node!");
21783 KnownZero = KnownOne = APInt(BitWidth, 0); // Don't know anything.
21797 // These nodes' second result is a boolean.
21798 if (Op.getResNo() == 0)
21801 case X86ISD::SETCC:
21802 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
21804 case ISD::INTRINSIC_WO_CHAIN: {
21805 unsigned IntId = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
21806 unsigned NumLoBits = 0;
21809 case Intrinsic::x86_sse_movmsk_ps:
21810 case Intrinsic::x86_avx_movmsk_ps_256:
21811 case Intrinsic::x86_sse2_movmsk_pd:
21812 case Intrinsic::x86_avx_movmsk_pd_256:
21813 case Intrinsic::x86_mmx_pmovmskb:
21814 case Intrinsic::x86_sse2_pmovmskb_128:
21815 case Intrinsic::x86_avx2_pmovmskb: {
21816 // High bits of movmskp{s|d}, pmovmskb are known zero.
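// e.g. movmskps writes only a 4-bit mask into a 32-bit GPR, so bits [31:4]
// of the result are known to be zero (illustrative).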
21818 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
21819 case Intrinsic::x86_sse_movmsk_ps: NumLoBits = 4; break;
21820 case Intrinsic::x86_avx_movmsk_ps_256: NumLoBits = 8; break;
21821 case Intrinsic::x86_sse2_movmsk_pd: NumLoBits = 2; break;
21822 case Intrinsic::x86_avx_movmsk_pd_256: NumLoBits = 4; break;
21823 case Intrinsic::x86_mmx_pmovmskb: NumLoBits = 8; break;
21824 case Intrinsic::x86_sse2_pmovmskb_128: NumLoBits = 16; break;
21825 case Intrinsic::x86_avx2_pmovmskb: NumLoBits = 32; break;
21827 KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - NumLoBits);
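// Illustrative worked example (not part of the lowering): for
// llvm.x86.sse.movmsk.ps the result is i32 but only the low 4 bits can be
// set, so with BitWidth == 32 and NumLoBits == 4 the assignment above marks
// bits [31:4] as known zero, i.e. APInt::getHighBitsSet(32, 28).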
21836 unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(
21838 const SelectionDAG &,
21839 unsigned Depth) const {
21840 // SETCC_CARRY sets the dest to ~0 for true or 0 for false.
21841 if (Op.getOpcode() == X86ISD::SETCC_CARRY)
21842 return Op.getValueType().getScalarType().getSizeInBits();
21848 /// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
21849 /// node is a GlobalAddress + offset.
21850 bool X86TargetLowering::isGAPlusOffset(SDNode *N,
21851 const GlobalValue* &GA,
21852 int64_t &Offset) const {
21853 if (N->getOpcode() == X86ISD::Wrapper) {
21854 if (isa<GlobalAddressSDNode>(N->getOperand(0))) {
21855 GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal();
21856 Offset = cast<GlobalAddressSDNode>(N->getOperand(0))->getOffset();
21860 return TargetLowering::isGAPlusOffset(N, GA, Offset);
21863 /// isShuffleHigh128VectorInsertLow - Checks whether the shuffle node is the
21864 /// same as extracting the high 128-bit part of a 256-bit vector and then
21865 /// inserting the result into the low part of a new 256-bit vector.
21866 static bool isShuffleHigh128VectorInsertLow(ShuffleVectorSDNode *SVOp) {
21867 EVT VT = SVOp->getValueType(0);
21868 unsigned NumElems = VT.getVectorNumElements();
21870 // vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u>
21871 for (unsigned i = 0, j = NumElems/2; i != NumElems/2; ++i, ++j)
21872 if (!isUndefOrEqual(SVOp->getMaskElt(i), j) ||
21873 SVOp->getMaskElt(j) >= 0)
21879 /// isShuffleLow128VectorInsertHigh - Checks whether the shuffle node is the
21880 /// same as extracting the low 128-bit part of a 256-bit vector and then
21881 /// inserting the result into the high part of a new 256-bit vector.
21882 static bool isShuffleLow128VectorInsertHigh(ShuffleVectorSDNode *SVOp) {
21883 EVT VT = SVOp->getValueType(0);
21884 unsigned NumElems = VT.getVectorNumElements();
21886 // vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1>
21887 for (unsigned i = NumElems/2, j = 0; i != NumElems; ++i, ++j)
21888 if (!isUndefOrEqual(SVOp->getMaskElt(i), j) ||
21889 SVOp->getMaskElt(j) >= 0)
21895 /// PerformShuffleCombine256 - Performs shuffle combines for 256-bit vectors.
21896 static SDValue PerformShuffleCombine256(SDNode *N, SelectionDAG &DAG,
21897 TargetLowering::DAGCombinerInfo &DCI,
21898 const X86Subtarget* Subtarget) {
21900 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
21901 SDValue V1 = SVOp->getOperand(0);
21902 SDValue V2 = SVOp->getOperand(1);
21903 EVT VT = SVOp->getValueType(0);
21904 unsigned NumElems = VT.getVectorNumElements();
21906 if (V1.getOpcode() == ISD::CONCAT_VECTORS &&
21907 V2.getOpcode() == ISD::CONCAT_VECTORS) {
21911 //   V1 = CONCAT_VECTORS(V, UNDEF)
21913 //   V2 = CONCAT_VECTORS(zero BUILD_VECTOR, UNDEF)
21916 // RESULT: V zero-extended into the wider vector type
21918 if (V2.getOperand(0).getOpcode() != ISD::BUILD_VECTOR ||
21919 V2.getOperand(1).getOpcode() != ISD::UNDEF ||
21920 V1.getOperand(1).getOpcode() != ISD::UNDEF)
21923 if (!ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode()))
21926 // To match the shuffle mask, the first half of the mask should
21927 // be exactly the first vector, and all the rest a splat with the
21928 // first element of the second one.
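// Illustrative example (assuming a v8i32 shuffle): with V1 supplying the low
// half and V2 = concat(zero build_vector, undef), the mask must look like
// <0, 1, 2, 3, 8, 8, 8, 8> (undefs allowed), i.e. the low half is the
// identity and the high half splats element NumElems, the first zero lane of
// V2.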
21929 for (unsigned i = 0; i != NumElems/2; ++i)
21930 if (!isUndefOrEqual(SVOp->getMaskElt(i), i) ||
21931 !isUndefOrEqual(SVOp->getMaskElt(i+NumElems/2), NumElems))
21934 // If V1 is coming from a vector load then just fold to a VZEXT_LOAD.
21935 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(V1.getOperand(0))) {
21936 if (Ld->hasNUsesOfValue(1, 0)) {
21937 SDVTList Tys = DAG.getVTList(MVT::v4i64, MVT::Other);
21938 SDValue Ops[] = { Ld->getChain(), Ld->getBasePtr() };
21940 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops,
21942 Ld->getPointerInfo(),
21943 Ld->getAlignment(),
21944 false/*isVolatile*/, true/*ReadMem*/,
21945 false/*WriteMem*/);
21947 // Make sure the newly-created LOAD is in the same position as Ld in
21948 // terms of dependency. We create a TokenFactor for Ld and ResNode,
21949 // and update uses of Ld's output chain to use the TokenFactor.
21950 if (Ld->hasAnyUseOfValue(1)) {
21951 SDValue NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
21952 SDValue(Ld, 1), SDValue(ResNode.getNode(), 1));
21953 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), NewChain);
21954 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(Ld, 1),
21955 SDValue(ResNode.getNode(), 1));
21958 return DAG.getNode(ISD::BITCAST, dl, VT, ResNode);
21962 // Emit a zeroed vector and insert the desired subvector into its original (low) position.
21964 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
21965 SDValue InsV = Insert128BitVector(Zeros, V1.getOperand(0), 0, DAG, dl);
21966 return DCI.CombineTo(N, InsV);
21969 //===--------------------------------------------------------------------===//
21970 // Combine some shuffles into subvector extracts and inserts:
21973 // vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u>
21974 if (isShuffleHigh128VectorInsertLow(SVOp)) {
21975 SDValue V = Extract128BitVector(V1, NumElems/2, DAG, dl);
21976 SDValue InsV = Insert128BitVector(DAG.getUNDEF(VT), V, 0, DAG, dl);
21977 return DCI.CombineTo(N, InsV);
21980 // vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1>
21981 if (isShuffleLow128VectorInsertHigh(SVOp)) {
21982 SDValue V = Extract128BitVector(V1, 0, DAG, dl);
21983 SDValue InsV = Insert128BitVector(DAG.getUNDEF(VT), V, NumElems/2, DAG, dl);
21984 return DCI.CombineTo(N, InsV);
21990 /// \brief Combine an arbitrary chain of shuffles into a single instruction if
21993 /// This is the leaf of the recursive combining below. When we have found some
21994 /// chain of single-use x86 shuffle instructions and accumulated the combined
21995 /// shuffle mask represented by them, this will try to pattern match that mask
21996 /// into either a single instruction if there is a special purpose instruction
21997 /// for this operation, or into a PSHUFB instruction which is a fully general
21998 /// instruction but should only be used to replace chains over a certain depth.
21999 static bool combineX86ShuffleChain(SDValue Op, SDValue Root, ArrayRef<int> Mask,
22000 int Depth, bool HasPSHUFB, SelectionDAG &DAG,
22001 TargetLowering::DAGCombinerInfo &DCI,
22002 const X86Subtarget *Subtarget) {
22003 assert(!Mask.empty() && "Cannot combine an empty shuffle mask!");
22005 // Find the operand that enters the chain. Note that multiple uses are OK
22006 // here; we're not going to remove the operand we find.
22007 SDValue Input = Op.getOperand(0);
22008 while (Input.getOpcode() == ISD::BITCAST)
22009 Input = Input.getOperand(0);
22011 MVT VT = Input.getSimpleValueType();
22012 MVT RootVT = Root.getSimpleValueType();
22015 // Just remove no-op shuffle masks.
22016 if (Mask.size() == 1) {
22017 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Input),
22022 // Use the float domain if the operand type is a floating point type.
22023 bool FloatDomain = VT.isFloatingPoint();
22025 // For floating point shuffles, we don't have free copies in the shuffle
22026 // instructions or the ability to load as part of the instruction, so
22027 // canonicalize their shuffles to UNPCK or MOV variants.
22029 // Note that even with AVX we prefer the PSHUFD form of shuffle for integer
22030 // vectors because it can have a load folded into it that UNPCK cannot. This
22031 // doesn't preclude something switching to the shorter encoding post-RA.
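// Illustrative examples of the canonicalization below: a v2f64 mask of
// <0, 0> becomes MOVDDUP on SSE3 (MOVLHPS otherwise), <1, 1> becomes MOVHLPS,
// and a v4f32 mask of <0, 0, 1, 1> becomes UNPCKLPS of the operand with
// itself.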
22033 if (Mask.equals(0, 0) || Mask.equals(1, 1)) {
22034 bool Lo = Mask.equals(0, 0);
22037 // Check if we have SSE3 which will let us use MOVDDUP. That instruction
22038 // is no slower than UNPCKLPD but has the option to fold the input operand
22039 // into even an unaligned memory load.
22040 if (Lo && Subtarget->hasSSE3()) {
22041 Shuffle = X86ISD::MOVDDUP;
22042 ShuffleVT = MVT::v2f64;
22044 // We have MOVLHPS and MOVHLPS throughout SSE and they encode smaller
22045 // than the UNPCK variants.
22046 Shuffle = Lo ? X86ISD::MOVLHPS : X86ISD::MOVHLPS;
22047 ShuffleVT = MVT::v4f32;
22049 if (Depth == 1 && Root->getOpcode() == Shuffle)
22050 return false; // Nothing to do!
22051 Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
22052 DCI.AddToWorklist(Op.getNode());
22053 if (Shuffle == X86ISD::MOVDDUP)
22054 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op);
22056 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op, Op);
22057 DCI.AddToWorklist(Op.getNode());
22058 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
22062 if (Subtarget->hasSSE3() &&
22063 (Mask.equals(0, 0, 2, 2) || Mask.equals(1, 1, 3, 3))) {
22064 bool Lo = Mask.equals(0, 0, 2, 2);
22065 unsigned Shuffle = Lo ? X86ISD::MOVSLDUP : X86ISD::MOVSHDUP;
22066 MVT ShuffleVT = MVT::v4f32;
22067 if (Depth == 1 && Root->getOpcode() == Shuffle)
22068 return false; // Nothing to do!
22069 Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
22070 DCI.AddToWorklist(Op.getNode());
22071 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op);
22072 DCI.AddToWorklist(Op.getNode());
22073 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
22077 if (Mask.equals(0, 0, 1, 1) || Mask.equals(2, 2, 3, 3)) {
22078 bool Lo = Mask.equals(0, 0, 1, 1);
22079 unsigned Shuffle = Lo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
22080 MVT ShuffleVT = MVT::v4f32;
22081 if (Depth == 1 && Root->getOpcode() == Shuffle)
22082 return false; // Nothing to do!
22083 Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
22084 DCI.AddToWorklist(Op.getNode());
22085 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op, Op);
22086 DCI.AddToWorklist(Op.getNode());
22087 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
22093 // We always canonicalize the 8 x i16 and 16 x i8 shuffles into their UNPCK
22094 // variants as none of these have single-instruction variants that are
22095 // superior to the UNPCK formulation.
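// For instance, a v8i16 mask of <0, 0, 1, 1, 2, 2, 3, 3> is emitted as
// PUNPCKLWD of the input with itself and <4, 4, 5, 5, 6, 6, 7, 7> as
// PUNPCKHWD; the corresponding v16i8 masks map to PUNPCKLBW/PUNPCKHBW in the
// same way.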
22096 if (!FloatDomain &&
22097 (Mask.equals(0, 0, 1, 1, 2, 2, 3, 3) ||
22098 Mask.equals(4, 4, 5, 5, 6, 6, 7, 7) ||
22099 Mask.equals(0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7) ||
22100 Mask.equals(8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15,
22102 bool Lo = Mask[0] == 0;
22103 unsigned Shuffle = Lo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
22104 if (Depth == 1 && Root->getOpcode() == Shuffle)
22105 return false; // Nothing to do!
22107 switch (Mask.size()) {
22109 ShuffleVT = MVT::v8i16;
22112 ShuffleVT = MVT::v16i8;
22115 llvm_unreachable("Impossible mask size!");
22117 Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
22118 DCI.AddToWorklist(Op.getNode());
22119 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op, Op);
22120 DCI.AddToWorklist(Op.getNode());
22121 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
22126 // Don't try to re-form single instruction chains under any circumstances now
22127 // that we've done encoding canonicalization for them.
22131 // If we have 3 or more shuffle instructions or a chain involving PSHUFB, we
22132 // can replace them with a single PSHUFB instruction profitably. Intel's
22133 // manuals suggest only using PSHUFB if doing so replaces 5 instructions, but
22134 // in practice PSHUFB tends to be *very* fast so we're more aggressive.
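// Worked example of the byte-mask expansion below (illustrative only): for a
// 4-element mask <2, 0, 1, 3> over 32-bit lanes, Ratio is 16 / 4 = 4 and the
// PSHUFB control vector becomes
//   <8, 9, 10, 11, 0, 1, 2, 3, 4, 5, 6, 7, 12, 13, 14, 15>,
// with a high-bit-set byte (which PSHUFB treats as a zeroing lane) for
// SM_SentinelZero entries and an undef byte for SM_SentinelUndef entries.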
22135 if ((Depth >= 3 || HasPSHUFB) && Subtarget->hasSSSE3()) {
22136 SmallVector<SDValue, 16> PSHUFBMask;
22137 assert(Mask.size() <= 16 && "Can't shuffle elements smaller than bytes!");
22138 int Ratio = 16 / Mask.size();
22139 for (unsigned i = 0; i < 16; ++i) {
22140 if (Mask[i / Ratio] == SM_SentinelUndef) {
22141 PSHUFBMask.push_back(DAG.getUNDEF(MVT::i8));
22144 int M = Mask[i / Ratio] != SM_SentinelZero
22145 ? Ratio * Mask[i / Ratio] + i % Ratio
22147 PSHUFBMask.push_back(DAG.getConstant(M, MVT::i8));
22149 Op = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Input);
22150 DCI.AddToWorklist(Op.getNode());
22151 SDValue PSHUFBMaskOp =
22152 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, PSHUFBMask);
22153 DCI.AddToWorklist(PSHUFBMaskOp.getNode());
22154 Op = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, Op, PSHUFBMaskOp);
22155 DCI.AddToWorklist(Op.getNode());
22156 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
22161 // Failed to find any combines.
22165 /// \brief Fully generic combining of x86 shuffle instructions.
22167 /// This should be the last combine run over the x86 shuffle instructions. Once
22168 /// they have been fully optimized, this will recursively consider all chains
22169 /// of single-use shuffle instructions, build a generic model of the cumulative
22170 /// shuffle operation, and check for simpler instructions which implement this
22171 /// operation. We use this primarily for two purposes:
22173 /// 1) Collapse generic shuffles to specialized single instructions when
22174 /// equivalent. In most cases, this is just an encoding size win, but
22175 /// sometimes we will collapse multiple generic shuffles into a single
22176 /// special-purpose shuffle.
22177 /// 2) Look for sequences of shuffle instructions with 3 or more total
22178 /// instructions, and replace them with the slightly more expensive SSSE3
22179 /// PSHUFB instruction if available. We do this as the last combining step
22180 /// to ensure we avoid using PSHUFB if we can implement the shuffle with
22181 /// a suitable short sequence of other instructions. The PSHUFB will either
22182 /// use a register or have to read from memory and so is slightly (but only
22183 /// slightly) more expensive than the other shuffle instructions.
22185 /// Because this is inherently a quadratic operation (for each shuffle in
22186 /// a chain, we recurse up the chain), the depth is limited to 8 instructions.
22187 /// This should never be an issue in practice as the shuffle lowering doesn't
22188 /// produce sequences of more than 8 instructions.
22190 /// FIXME: We will currently miss some cases where the redundant shuffling
22191 /// would simplify under the threshold for PSHUFB formation because of
22192 /// combine-ordering. To fix this, we should do the redundant instruction
22193 /// combining in this recursive walk.
22194 static bool combineX86ShufflesRecursively(SDValue Op, SDValue Root,
22195 ArrayRef<int> RootMask,
22196 int Depth, bool HasPSHUFB,
22198 TargetLowering::DAGCombinerInfo &DCI,
22199 const X86Subtarget *Subtarget) {
22200 // Bound the depth of our recursive combine because this is ultimately
22201 // quadratic in nature.
22205 // Directly rip through bitcasts to find the underlying operand.
22206 while (Op.getOpcode() == ISD::BITCAST && Op.getOperand(0).hasOneUse())
22207 Op = Op.getOperand(0);
22209 MVT VT = Op.getSimpleValueType();
22210 if (!VT.isVector())
22211 return false; // Bail if we hit a non-vector.
22212 // FIXME: This routine should be taught about 256-bit shuffles, or a 256-bit
22213 // version should be added.
22214 if (VT.getSizeInBits() != 128)
22217 assert(Root.getSimpleValueType().isVector() &&
22218 "Shuffles operate on vector types!");
22219 assert(VT.getSizeInBits() == Root.getSimpleValueType().getSizeInBits() &&
22220 "Can only combine shuffles of the same vector register size.");
22222 if (!isTargetShuffle(Op.getOpcode()))
22224 SmallVector<int, 16> OpMask;
22226 bool HaveMask = getTargetShuffleMask(Op.getNode(), VT, OpMask, IsUnary);
22227 // We can only combine unary shuffles for which we can decode the mask.
22228 if (!HaveMask || !IsUnary)
22231 assert(VT.getVectorNumElements() == OpMask.size() &&
22232 "Different mask size from vector size!");
22233 assert(((RootMask.size() > OpMask.size() &&
22234 RootMask.size() % OpMask.size() == 0) ||
22235 (OpMask.size() > RootMask.size() &&
22236 OpMask.size() % RootMask.size() == 0) ||
22237 OpMask.size() == RootMask.size()) &&
22238 "The smaller number of elements must divide the larger.");
22239 int RootRatio = std::max<int>(1, OpMask.size() / RootMask.size());
22240 int OpRatio = std::max<int>(1, RootMask.size() / OpMask.size());
22241 assert(((RootRatio == 1 && OpRatio == 1) ||
22242 (RootRatio == 1) != (OpRatio == 1)) &&
22243 "Must not have a ratio for both incoming and op masks!");
22245 SmallVector<int, 16> Mask;
22246 Mask.reserve(std::max(OpMask.size(), RootMask.size()));
22248 // Merge this shuffle operation's mask into our accumulated mask. Note that
22249 // this shuffle's mask will be the first applied to the input, followed by the
22250 // root mask to get us all the way to the root value arrangement. The reason
22251 // for this order is that we are recursing up the operation chain.
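// Worked example (illustrative): if the op feeding the root is a PSHUFLW with
// word mask OpMask = <3, 2, 1, 0, 4, 5, 6, 7> and the accumulated RootMask is
// the dword mask <2, 3, 0, 1>, then RootRatio = 2, OpRatio = 1 and this loop
// produces the combined word mask <4, 5, 6, 7, 3, 2, 1, 0>: first the word
// shuffle is applied, then the swap of the two 64-bit halves.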
22252 for (int i = 0, e = std::max(OpMask.size(), RootMask.size()); i < e; ++i) {
22253 int RootIdx = i / RootRatio;
22254 if (RootMask[RootIdx] < 0) {
22255 // This is a zero or undef lane, we're done.
22256 Mask.push_back(RootMask[RootIdx]);
22260 int RootMaskedIdx = RootMask[RootIdx] * RootRatio + i % RootRatio;
22261 int OpIdx = RootMaskedIdx / OpRatio;
22262 if (OpMask[OpIdx] < 0) {
22263 // The incoming lanes are zero or undef; it doesn't matter which ones we use.
22265 Mask.push_back(OpMask[OpIdx]);
22269 // Ok, we have non-zero lanes, map them through.
22270 Mask.push_back(OpMask[OpIdx] * OpRatio +
22271 RootMaskedIdx % OpRatio);
22274 // See if we can recurse into the operand to combine more things.
22275 switch (Op.getOpcode()) {
22276 case X86ISD::PSHUFB:
22278 case X86ISD::PSHUFD:
22279 case X86ISD::PSHUFHW:
22280 case X86ISD::PSHUFLW:
22281 if (Op.getOperand(0).hasOneUse() &&
22282 combineX86ShufflesRecursively(Op.getOperand(0), Root, Mask, Depth + 1,
22283 HasPSHUFB, DAG, DCI, Subtarget))
22287 case X86ISD::UNPCKL:
22288 case X86ISD::UNPCKH:
22289 assert(Op.getOperand(0) == Op.getOperand(1) && "We only combine unary shuffles!");
22290 // We can't check for single use; we have to check that this shuffle is the only user.
22291 if (Op->isOnlyUserOf(Op.getOperand(0).getNode()) &&
22292 combineX86ShufflesRecursively(Op.getOperand(0), Root, Mask, Depth + 1,
22293 HasPSHUFB, DAG, DCI, Subtarget))
22298 // Minor canonicalization of the accumulated shuffle mask to make it easier
22299 // to match below. All this does is detect masks with sequential pairs of
22300 // elements, and shrink them to the half-width mask. It does this in a loop
22301 // so it will reduce the size of the mask to the minimal width mask which
22302 // performs an equivalent shuffle.
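// For example (an illustrative sketch), the v8i16 mask
// <4, 5, 6, 7, 4, 5, 6, 7> widens to the v4i32 mask <2, 3, 2, 3> and then to
// the v2i64 mask <1, 1>, at which point the loop stops.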
22303 SmallVector<int, 16> WidenedMask;
22304 while (Mask.size() > 1 && canWidenShuffleElements(Mask, WidenedMask)) {
22305 Mask = std::move(WidenedMask);
22306 WidenedMask.clear();
22309 return combineX86ShuffleChain(Op, Root, Mask, Depth, HasPSHUFB, DAG, DCI,
22313 /// \brief Get the PSHUF-style mask from PSHUF node.
22315 /// This is a very minor wrapper around getTargetShuffleMask to ease forming v4
22316 /// PSHUF-style masks that can be reused with such instructions.
22317 static SmallVector<int, 4> getPSHUFShuffleMask(SDValue N) {
22318 SmallVector<int, 4> Mask;
22320 bool HaveMask = getTargetShuffleMask(N.getNode(), N.getSimpleValueType(), Mask, IsUnary);
22324 switch (N.getOpcode()) {
22325 case X86ISD::PSHUFD:
22327 case X86ISD::PSHUFLW:
22330 case X86ISD::PSHUFHW:
22331 Mask.erase(Mask.begin(), Mask.begin() + 4);
22332 for (int &M : Mask)
22336 llvm_unreachable("No valid shuffle instruction found!");
22340 /// \brief Search for a combinable shuffle across a chain ending in pshufd.
22342 /// We walk up the chain and look for a combinable shuffle, skipping over
22343 /// shuffles that we could hoist this shuffle's transformation past without
22344 /// altering anything.
22346 combineRedundantDWordShuffle(SDValue N, MutableArrayRef<int> Mask,
22348 TargetLowering::DAGCombinerInfo &DCI) {
22349 assert(N.getOpcode() == X86ISD::PSHUFD &&
22350 "Called with something other than an x86 128-bit half shuffle!");
22353 // Walk up a single-use chain looking for a combinable shuffle. Keep a stack
22354 // of the shuffles in the chain so that we can form a fresh chain to replace
22356 SmallVector<SDValue, 8> Chain;
22357 SDValue V = N.getOperand(0);
22358 for (; V.hasOneUse(); V = V.getOperand(0)) {
22359 switch (V.getOpcode()) {
22361 return SDValue(); // Nothing combined!
22364 // Skip bitcasts as we always know the type for the target specific shuffles.
22368 case X86ISD::PSHUFD:
22369 // Found another dword shuffle.
22372 case X86ISD::PSHUFLW:
22373 // Check that the low words (being shuffled) are the identity in the
22374 // dword shuffle, and the high words are self-contained.
22375 if (Mask[0] != 0 || Mask[1] != 1 ||
22376 !(Mask[2] >= 2 && Mask[2] < 4 && Mask[3] >= 2 && Mask[3] < 4))
22379 Chain.push_back(V);
22382 case X86ISD::PSHUFHW:
22383 // Check that the high words (being shuffled) are the identity in the
22384 // dword shuffle, and the low words are self-contained.
22385 if (Mask[2] != 2 || Mask[3] != 3 ||
22386 !(Mask[0] >= 0 && Mask[0] < 2 && Mask[1] >= 0 && Mask[1] < 2))
22389 Chain.push_back(V);
22392 case X86ISD::UNPCKL:
22393 case X86ISD::UNPCKH:
22394 // For either i8 -> i16 or i16 -> i32 unpacks, we can combine a dword
22395 // shuffle into a preceding word shuffle.
22396 if (V.getValueType() != MVT::v16i8 && V.getValueType() != MVT::v8i16)
22399 // Search for a half-shuffle which we can combine with.
22400 unsigned CombineOp =
22401 V.getOpcode() == X86ISD::UNPCKL ? X86ISD::PSHUFLW : X86ISD::PSHUFHW;
22402 if (V.getOperand(0) != V.getOperand(1) ||
22403 !V->isOnlyUserOf(V.getOperand(0).getNode()))
22405 Chain.push_back(V);
22406 V = V.getOperand(0);
22408 switch (V.getOpcode()) {
22410 return SDValue(); // Nothing to combine.
22412 case X86ISD::PSHUFLW:
22413 case X86ISD::PSHUFHW:
22414 if (V.getOpcode() == CombineOp)
22417 Chain.push_back(V);
22421 V = V.getOperand(0);
22425 } while (V.hasOneUse());
22428 // Break out of the loop if we break out of the switch.
22432 if (!V.hasOneUse())
22433 // We fell out of the loop without finding a viable combining instruction.
22436 // Merge this node's mask and our incoming mask.
22437 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
22438 for (int &M : Mask)
22440 V = DAG.getNode(V.getOpcode(), DL, V.getValueType(), V.getOperand(0),
22441 getV4X86ShuffleImm8ForMask(Mask, DAG));
22443 // Rebuild the chain around this new shuffle.
22444 while (!Chain.empty()) {
22445 SDValue W = Chain.pop_back_val();
22447 if (V.getValueType() != W.getOperand(0).getValueType())
22448 V = DAG.getNode(ISD::BITCAST, DL, W.getOperand(0).getValueType(), V);
22450 switch (W.getOpcode()) {
22452 llvm_unreachable("Only PSHUF and UNPCK instructions get here!");
22454 case X86ISD::UNPCKL:
22455 case X86ISD::UNPCKH:
22456 V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, V);
22459 case X86ISD::PSHUFD:
22460 case X86ISD::PSHUFLW:
22461 case X86ISD::PSHUFHW:
22462 V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, W.getOperand(1));
22466 if (V.getValueType() != N.getValueType())
22467 V = DAG.getNode(ISD::BITCAST, DL, N.getValueType(), V);
22469 // Return the new chain to replace N.
22473 /// \brief Search for a combinable shuffle across a chain ending in pshuflw or pshufhw.
22475 /// We walk up the chain, skipping shuffles of the other half and looking
22476 /// through shuffles which switch halves trying to find a shuffle of the same
22477 /// pair of dwords.
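/// For example (illustrative): a PSHUFLW whose input chain passes through a
/// PSHUFHW can be folded into an earlier PSHUFLW, because the intervening
/// PSHUFHW only rearranges the other (high) half and is a no-op as far as the
/// low half is concerned.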
22478 static bool combineRedundantHalfShuffle(SDValue N, MutableArrayRef<int> Mask,
22480 TargetLowering::DAGCombinerInfo &DCI) {
22482 (N.getOpcode() == X86ISD::PSHUFLW || N.getOpcode() == X86ISD::PSHUFHW) &&
22483 "Called with something other than an x86 128-bit half shuffle!");
22485 unsigned CombineOpcode = N.getOpcode();
22487 // Walk up a single-use chain looking for a combinable shuffle.
22488 SDValue V = N.getOperand(0);
22489 for (; V.hasOneUse(); V = V.getOperand(0)) {
22490 switch (V.getOpcode()) {
22492 return false; // Nothing combined!
22495 // Skip bitcasts as we always know the type for the target specific shuffles.
22499 case X86ISD::PSHUFLW:
22500 case X86ISD::PSHUFHW:
22501 if (V.getOpcode() == CombineOpcode)
22504 // Other-half shuffles are no-ops.
22507 // Break out of the loop if we break out of the switch.
22511 if (!V.hasOneUse())
22512 // We fell out of the loop without finding a viable combining instruction.
22515 // Combine away the bottom node as its shuffle will be accumulated into
22516 // a preceding shuffle.
22517 DCI.CombineTo(N.getNode(), N.getOperand(0), /*AddTo*/ true);
22519 // Record the old value.
22522 // Merge this node's mask and our incoming mask (adjusted to account for all
22523 // the pshufd instructions encountered).
22524 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
22525 for (int &M : Mask)
22527 V = DAG.getNode(V.getOpcode(), DL, MVT::v8i16, V.getOperand(0),
22528 getV4X86ShuffleImm8ForMask(Mask, DAG));
22530 // Check that the shuffles didn't cancel each other out. If not, we need to
22531 // combine to the new one.
22533 // Replace the combinable shuffle with the combined one, updating all users
22534 // so that we re-evaluate the chain here.
22535 DCI.CombineTo(Old.getNode(), V, /*AddTo*/ true);
22540 /// \brief Try to combine x86 target specific shuffles.
22541 static SDValue PerformTargetShuffleCombine(SDValue N, SelectionDAG &DAG,
22542 TargetLowering::DAGCombinerInfo &DCI,
22543 const X86Subtarget *Subtarget) {
22545 MVT VT = N.getSimpleValueType();
22546 SmallVector<int, 4> Mask;
22548 switch (N.getOpcode()) {
22549 case X86ISD::PSHUFD:
22550 case X86ISD::PSHUFLW:
22551 case X86ISD::PSHUFHW:
22552 Mask = getPSHUFShuffleMask(N);
22553 assert(Mask.size() == 4);
22559 // Nuke no-op shuffles that show up after combining.
22560 if (isNoopShuffleMask(Mask))
22561 return DCI.CombineTo(N.getNode(), N.getOperand(0), /*AddTo*/ true);
22563 // Look for simplifications involving one or two shuffle instructions.
22564 SDValue V = N.getOperand(0);
22565 switch (N.getOpcode()) {
22568 case X86ISD::PSHUFLW:
22569 case X86ISD::PSHUFHW:
22570 assert(VT == MVT::v8i16);
22573 if (combineRedundantHalfShuffle(N, Mask, DAG, DCI))
22574 return SDValue(); // We combined away this shuffle, so we're done.
22576 // See if this reduces to a PSHUFD which is no more expensive and can
22577 // combine with more operations. Note that it has to at least flip the
22578 // dwords as otherwise it would have been removed as a no-op.
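// For example, a PSHUFLW with mask <2, 3, 0, 1> swaps the two low dwords and
// is therefore equivalent to bitcasting to v4i32 and applying a PSHUFD with
// mask <1, 0, 2, 3>, which is what the code below constructs.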
22579 if (Mask[0] == 2 && Mask[1] == 3 && Mask[2] == 0 && Mask[3] == 1) {
22580 int DMask[] = {0, 1, 2, 3};
22581 int DOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 2;
22582 DMask[DOffset + 0] = DOffset + 1;
22583 DMask[DOffset + 1] = DOffset + 0;
22584 V = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V);
22585 DCI.AddToWorklist(V.getNode());
22586 V = DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V,
22587 getV4X86ShuffleImm8ForMask(DMask, DAG));
22588 DCI.AddToWorklist(V.getNode());
22589 return DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V);
22592 // Look for shuffle patterns which can be implemented as a single unpack.
22593 // FIXME: This doesn't handle the location of the PSHUFD generically, and
22594 // only works when we have a PSHUFD followed by two half-shuffles.
22595 if (Mask[0] == Mask[1] && Mask[2] == Mask[3] &&
22596 (V.getOpcode() == X86ISD::PSHUFLW ||
22597 V.getOpcode() == X86ISD::PSHUFHW) &&
22598 V.getOpcode() != N.getOpcode() &&
22600 SDValue D = V.getOperand(0);
22601 while (D.getOpcode() == ISD::BITCAST && D.hasOneUse())
22602 D = D.getOperand(0);
22603 if (D.getOpcode() == X86ISD::PSHUFD && D.hasOneUse()) {
22604 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
22605 SmallVector<int, 4> DMask = getPSHUFShuffleMask(D);
22606 int NOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
22607 int VOffset = V.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
22609 for (int i = 0; i < 4; ++i) {
22610 WordMask[i + NOffset] = Mask[i] + NOffset;
22611 WordMask[i + VOffset] = VMask[i] + VOffset;
22613 // Map the word mask through the DWord mask.
22615 for (int i = 0; i < 8; ++i)
22616 MappedMask[i] = 2 * DMask[WordMask[i] / 2] + WordMask[i] % 2;
22617 const int UnpackLoMask[] = {0, 0, 1, 1, 2, 2, 3, 3};
22618 const int UnpackHiMask[] = {4, 4, 5, 5, 6, 6, 7, 7};
22619 if (std::equal(std::begin(MappedMask), std::end(MappedMask),
22620 std::begin(UnpackLoMask)) ||
22621 std::equal(std::begin(MappedMask), std::end(MappedMask),
22622 std::begin(UnpackHiMask))) {
22623 // We can replace all three shuffles with an unpack.
22624 V = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, D.getOperand(0));
22625 DCI.AddToWorklist(V.getNode());
22626 return DAG.getNode(MappedMask[0] == 0 ? X86ISD::UNPCKL
22628 DL, MVT::v8i16, V, V);
22635 case X86ISD::PSHUFD:
22636 if (SDValue NewN = combineRedundantDWordShuffle(N, Mask, DAG, DCI))
22645 /// \brief Try to combine a shuffle into a target-specific add-sub node.
22647 /// We combine this directly on the abstract vector shuffle nodes so it is
22648 /// easier to generically match. We also insert dummy vector shuffle nodes for
22649 /// the operands which explicitly discard the lanes which are unused by this
22650 /// operation to try to flow through the rest of the combiner the fact that
22651 /// they're unused.
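/// For example (illustrative): for v4f32, the blend shuffle <0, 5, 2, 7> of
/// (fsub A, B) and (fadd A, B) takes the subtraction results in the even
/// lanes and the addition results in the odd lanes, exactly matching the
/// X86ISD::ADDSUB (addsubps) semantics.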
22652 static SDValue combineShuffleToAddSub(SDNode *N, SelectionDAG &DAG) {
22654 EVT VT = N->getValueType(0);
22656 // We only handle target-independent shuffles.
22657 // FIXME: It would be easy and harmless to use the target shuffle mask
22658 // extraction tool to support more.
22659 if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
22662 auto *SVN = cast<ShuffleVectorSDNode>(N);
22663 ArrayRef<int> Mask = SVN->getMask();
22664 SDValue V1 = N->getOperand(0);
22665 SDValue V2 = N->getOperand(1);
22667 // We require the first shuffle operand to be the SUB node, and the second to
22668 // be the ADD node.
22669 // FIXME: We should support the commuted patterns.
22670 if (V1->getOpcode() != ISD::FSUB || V2->getOpcode() != ISD::FADD)
22673 // If there are other uses of these operations we can't fold them.
22674 if (!V1->hasOneUse() || !V2->hasOneUse())
22677 // Ensure that both operations have the same operands. Note that we can
22678 // commute the FADD operands.
22679 SDValue LHS = V1->getOperand(0), RHS = V1->getOperand(1);
22680 if ((V2->getOperand(0) != LHS || V2->getOperand(1) != RHS) &&
22681 (V2->getOperand(0) != RHS || V2->getOperand(1) != LHS))
22684 // We're looking for blends between FADD and FSUB nodes. We insist on these
22685 // nodes being lined up in a specific expected pattern.
22686 if (!(isShuffleEquivalent(Mask, 0, 3) ||
22687 isShuffleEquivalent(Mask, 0, 5, 2, 7) ||
22688 isShuffleEquivalent(Mask, 0, 9, 2, 11, 4, 13, 6, 15)))
22691 // Only specific types are legal at this point, assert so we notice if and
22692 // when these change.
22693 assert((VT == MVT::v4f32 || VT == MVT::v2f64 || VT == MVT::v8f32 ||
22694 VT == MVT::v4f64) &&
22695 "Unknown vector type encountered!");
22697 return DAG.getNode(X86ISD::ADDSUB, DL, VT, LHS, RHS);
22700 /// PerformShuffleCombine - Performs several different shuffle combines.
22701 static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
22702 TargetLowering::DAGCombinerInfo &DCI,
22703 const X86Subtarget *Subtarget) {
22705 SDValue N0 = N->getOperand(0);
22706 SDValue N1 = N->getOperand(1);
22707 EVT VT = N->getValueType(0);
22709 // Don't create instructions with illegal types after legalize types has run.
22710 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
22711 if (!DCI.isBeforeLegalize() && !TLI.isTypeLegal(VT.getVectorElementType()))
22714 // If we have legalized the vector types, look for blends of FADD and FSUB
22715 // nodes that we can fuse into an ADDSUB node.
22716 if (TLI.isTypeLegal(VT) && Subtarget->hasSSE3())
22717 if (SDValue AddSub = combineShuffleToAddSub(N, DAG))
22720 // Combine 256-bit vector shuffles. This is only profitable when in AVX mode
22721 if (Subtarget->hasFp256() && VT.is256BitVector() &&
22722 N->getOpcode() == ISD::VECTOR_SHUFFLE)
22723 return PerformShuffleCombine256(N, DAG, DCI, Subtarget);
22725 // During Type Legalization, when promoting illegal vector types,
22726 // the backend might introduce new shuffle dag nodes and bitcasts.
22728 // This code performs the following transformation:
22729 // fold: (shuffle (bitcast (BINOP A, B)), Undef, <Mask>) ->
22730 // (shuffle (BINOP (bitcast A), (bitcast B)), Undef, <Mask>)
22732 // We do this only if both the bitcast and the BINOP dag nodes have
22733 // one use. Also, perform this transformation only if the new binary
22734 // operation is legal. This is to avoid introducing dag nodes that
22735 // potentially need to be further expanded (or custom lowered) into a
22736 // less optimal sequence of dag nodes.
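// For example (illustrative), with VT = v8i16 and SVT = v4i32, a mask of
// <0, 2, 4, 6, u, u, u, u> over (shuffle (bitcast (add v4i32 A, B)), undef)
// satisfies the checks below and is rewritten as the same shuffle over
// (add v8i16 (bitcast A), (bitcast B)).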
22737 if (!DCI.isBeforeLegalize() && DCI.isBeforeLegalizeOps() &&
22738 N1.getOpcode() == ISD::UNDEF && N0.hasOneUse() &&
22739 N0.getOpcode() == ISD::BITCAST) {
22740 SDValue BC0 = N0.getOperand(0);
22741 EVT SVT = BC0.getValueType();
22742 unsigned Opcode = BC0.getOpcode();
22743 unsigned NumElts = VT.getVectorNumElements();
22745 if (BC0.hasOneUse() && SVT.isVector() &&
22746 SVT.getVectorNumElements() * 2 == NumElts &&
22747 TLI.isOperationLegal(Opcode, VT)) {
22748 bool CanFold = false;
22760 unsigned SVTNumElts = SVT.getVectorNumElements();
22761 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
22762 for (unsigned i = 0, e = SVTNumElts; i != e && CanFold; ++i)
22763 CanFold = SVOp->getMaskElt(i) == (int)(i * 2);
22764 for (unsigned i = SVTNumElts, e = NumElts; i != e && CanFold; ++i)
22765 CanFold = SVOp->getMaskElt(i) < 0;
22768 SDValue BC00 = DAG.getNode(ISD::BITCAST, dl, VT, BC0.getOperand(0));
22769 SDValue BC01 = DAG.getNode(ISD::BITCAST, dl, VT, BC0.getOperand(1));
22770 SDValue NewBinOp = DAG.getNode(BC0.getOpcode(), dl, VT, BC00, BC01);
22771 return DAG.getVectorShuffle(VT, dl, NewBinOp, N1, &SVOp->getMask()[0]);
22776 // Only handle 128-bit wide vectors from here on.
22777 if (!VT.is128BitVector())
22780 // Combine a vector_shuffle that is equal to build_vector load1, load2, load3,
22781 // load4, <0, 1, 2, 3> into a 128-bit load if the load addresses are
22782 // consecutive, non-overlapping, and in the right order.
22783 SmallVector<SDValue, 16> Elts;
22784 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
22785 Elts.push_back(getShuffleScalarElt(N, i, DAG, 0));
22787 SDValue LD = EltsFromConsecutiveLoads(VT, Elts, dl, DAG, true);
22791 if (isTargetShuffle(N->getOpcode())) {
22793 PerformTargetShuffleCombine(SDValue(N, 0), DAG, DCI, Subtarget);
22794 if (Shuffle.getNode())
22797 // Try recursively combining arbitrary sequences of x86 shuffle
22798 // instructions into higher-order shuffles. We do this after combining
22799 // specific PSHUF instruction sequences into their minimal form so that we
22800 // can evaluate how many specialized shuffle instructions are involved in
22801 // a particular chain.
22802 SmallVector<int, 1> NonceMask; // Just a placeholder.
22803 NonceMask.push_back(0);
22804 if (combineX86ShufflesRecursively(SDValue(N, 0), SDValue(N, 0), NonceMask,
22805 /*Depth*/ 1, /*HasPSHUFB*/ false, DAG,
22807 return SDValue(); // This routine will use CombineTo to replace N.
22813 /// PerformTruncateCombine - Converts a truncate operation to
22814 /// a sequence of vector shuffle operations.
22815 /// This is possible when we truncate a 256-bit vector to a 128-bit vector.
22816 static SDValue PerformTruncateCombine(SDNode *N, SelectionDAG &DAG,
22817 TargetLowering::DAGCombinerInfo &DCI,
22818 const X86Subtarget *Subtarget) {
22822 /// XFormVExtractWithShuffleIntoLoad - Check if a vector extract from a target
22823 /// specific shuffle of a load can be folded into a single element load.
22824 /// Similar handling for VECTOR_SHUFFLE is performed by DAGCombiner, but
22825 /// shuffles have been custom lowered so we need to handle those here.
22826 static SDValue XFormVExtractWithShuffleIntoLoad(SDNode *N, SelectionDAG &DAG,
22827 TargetLowering::DAGCombinerInfo &DCI) {
22828 if (DCI.isBeforeLegalizeOps())
22831 SDValue InVec = N->getOperand(0);
22832 SDValue EltNo = N->getOperand(1);
22834 if (!isa<ConstantSDNode>(EltNo))
22837 EVT OriginalVT = InVec.getValueType();
22839 if (InVec.getOpcode() == ISD::BITCAST) {
22840 // Don't duplicate a load with other uses.
22841 if (!InVec.hasOneUse())
22843 EVT BCVT = InVec.getOperand(0).getValueType();
22844 if (BCVT.getVectorNumElements() != OriginalVT.getVectorNumElements())
22846 InVec = InVec.getOperand(0);
22849 EVT CurrentVT = InVec.getValueType();
22851 if (!isTargetShuffle(InVec.getOpcode()))
22854 // Don't duplicate a load with other uses.
22855 if (!InVec.hasOneUse())
22858 SmallVector<int, 16> ShuffleMask;
22860 if (!getTargetShuffleMask(InVec.getNode(), CurrentVT.getSimpleVT(),
22861 ShuffleMask, UnaryShuffle))
22864 // Select the input vector, guarding against an out-of-range extract index.
22865 unsigned NumElems = CurrentVT.getVectorNumElements();
22866 int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
22867 int Idx = (Elt > (int)NumElems) ? -1 : ShuffleMask[Elt];
22868 SDValue LdNode = (Idx < (int)NumElems) ? InVec.getOperand(0)
22869 : InVec.getOperand(1);
22871 // If inputs to shuffle are the same for both ops, then allow 2 uses
22872 unsigned AllowedUses = InVec.getNumOperands() > 1 &&
22873 InVec.getOperand(0) == InVec.getOperand(1) ? 2 : 1;
22875 if (LdNode.getOpcode() == ISD::BITCAST) {
22876 // Don't duplicate a load with other uses.
22877 if (!LdNode.getNode()->hasNUsesOfValue(AllowedUses, 0))
22880 AllowedUses = 1; // only allow 1 load use if we have a bitcast
22881 LdNode = LdNode.getOperand(0);
22884 if (!ISD::isNormalLoad(LdNode.getNode()))
22887 LoadSDNode *LN0 = cast<LoadSDNode>(LdNode);
22889 if (!LN0 ||!LN0->hasNUsesOfValue(AllowedUses, 0) || LN0->isVolatile())
22892 EVT EltVT = N->getValueType(0);
22893 // If there's a bitcast before the shuffle, check that the load's type and
22894 // alignment are valid.
22895 unsigned Align = LN0->getAlignment();
22896 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
22897 unsigned NewAlign = TLI.getDataLayout()->getABITypeAlignment(
22898 EltVT.getTypeForEVT(*DAG.getContext()));
22900 if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, EltVT))
22903 // All checks match so transform back to vector_shuffle so that DAG combiner
22904 // can finish the job
22907 // Create a shuffle node, taking into account the case that it's a unary shuffle.
22908 SDValue Shuffle = (UnaryShuffle) ? DAG.getUNDEF(CurrentVT)
22909 : InVec.getOperand(1);
22910 Shuffle = DAG.getVectorShuffle(CurrentVT, dl,
22911 InVec.getOperand(0), Shuffle,
22913 Shuffle = DAG.getNode(ISD::BITCAST, dl, OriginalVT, Shuffle);
22914 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, N->getValueType(0), Shuffle,
22918 /// PerformEXTRACT_VECTOR_ELTCombine - Detect vector gather/scatter index
22919 /// generation and convert it from being a bunch of shuffles and extracts
22920 /// into a somewhat faster sequence. For i686, the best sequence is apparently
22921 /// storing the value and loading scalars back, while for x64 we should
22922 /// use 64-bit extracts and shifts.
22923 static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG,
22924 TargetLowering::DAGCombinerInfo &DCI) {
22925 SDValue NewOp = XFormVExtractWithShuffleIntoLoad(N, DAG, DCI);
22926 if (NewOp.getNode())
22929 SDValue InputVector = N->getOperand(0);
22931 // Detect whether we are trying to convert from mmx to i32 and the bitcast
22932 // from mmx to v2i32 has a single usage.
22933 if (InputVector.getNode()->getOpcode() == llvm::ISD::BITCAST &&
22934 InputVector.getNode()->getOperand(0).getValueType() == MVT::x86mmx &&
22935 InputVector.hasOneUse() && N->getValueType(0) == MVT::i32)
22936 return DAG.getNode(X86ISD::MMX_MOVD2W, SDLoc(InputVector),
22937 N->getValueType(0),
22938 InputVector.getNode()->getOperand(0));
22940 // Only operate on vectors of 4 elements, where the alternative shuffling
22941 // gets to be more expensive.
22942 if (InputVector.getValueType() != MVT::v4i32)
22945 // Check whether every use of InputVector is an EXTRACT_VECTOR_ELT with a
22946 // single use which is a sign-extend or zero-extend, and all elements are used.
22948 SmallVector<SDNode *, 4> Uses;
22949 unsigned ExtractedElements = 0;
22950 for (SDNode::use_iterator UI = InputVector.getNode()->use_begin(),
22951 UE = InputVector.getNode()->use_end(); UI != UE; ++UI) {
22952 if (UI.getUse().getResNo() != InputVector.getResNo())
22955 SDNode *Extract = *UI;
22956 if (Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
22959 if (Extract->getValueType(0) != MVT::i32)
22961 if (!Extract->hasOneUse())
22963 if (Extract->use_begin()->getOpcode() != ISD::SIGN_EXTEND &&
22964 Extract->use_begin()->getOpcode() != ISD::ZERO_EXTEND)
22966 if (!isa<ConstantSDNode>(Extract->getOperand(1)))
22969 // Record which element was extracted.
22970 ExtractedElements |=
22971 1 << cast<ConstantSDNode>(Extract->getOperand(1))->getZExtValue();
22973 Uses.push_back(Extract);
22976 // If not all the elements were used, this may not be worthwhile.
22977 if (ExtractedElements != 15)
22980 // Ok, we've now decided to do the transformation.
22981 // If 64-bit shifts are legal, use the extract-shift sequence,
22982 // otherwise bounce the vector off the cache.
22983 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
22985 SDLoc dl(InputVector);
22987 if (TLI.isOperationLegal(ISD::SRA, MVT::i64)) {
22988 SDValue Cst = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, InputVector);
22989 EVT VecIdxTy = DAG.getTargetLoweringInfo().getVectorIdxTy();
22990 SDValue BottomHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Cst,
22991 DAG.getConstant(0, VecIdxTy));
22992 SDValue TopHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Cst,
22993 DAG.getConstant(1, VecIdxTy));
22995 SDValue ShAmt = DAG.getConstant(32,
22996 DAG.getTargetLoweringInfo().getShiftAmountTy(MVT::i64));
22997 Vals[0] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BottomHalf);
22998 Vals[1] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
22999 DAG.getNode(ISD::SRA, dl, MVT::i64, BottomHalf, ShAmt));
23000 Vals[2] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, TopHalf);
23001 Vals[3] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
23002 DAG.getNode(ISD::SRA, dl, MVT::i64, TopHalf, ShAmt));
23004 // Store the value to a temporary stack slot.
23005 SDValue StackPtr = DAG.CreateStackTemporary(InputVector.getValueType());
23006 SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, InputVector, StackPtr,
23007 MachinePointerInfo(), false, false, 0);
23009 EVT ElementType = InputVector.getValueType().getVectorElementType();
23010 unsigned EltSize = ElementType.getSizeInBits() / 8;
23012 // Replace each use (extract) with a load of the appropriate element.
23013 for (unsigned i = 0; i < 4; ++i) {
23014 uint64_t Offset = EltSize * i;
23015 SDValue OffsetVal = DAG.getConstant(Offset, TLI.getPointerTy());
23017 SDValue ScalarAddr = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(),
23018 StackPtr, OffsetVal);
23020 // Load the scalar.
23021 Vals[i] = DAG.getLoad(ElementType, dl, Ch,
23022 ScalarAddr, MachinePointerInfo(),
23023 false, false, false, 0);
23028 // Replace the extracts
23029 for (SmallVectorImpl<SDNode *>::iterator UI = Uses.begin(),
23030 UE = Uses.end(); UI != UE; ++UI) {
23031 SDNode *Extract = *UI;
23033 SDValue Idx = Extract->getOperand(1);
23034 uint64_t IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
23035 DAG.ReplaceAllUsesOfValueWith(SDValue(Extract, 0), Vals[IdxVal]);
23038 // The replacement was made in place; don't return anything.
23042 /// \brief Matches a VSELECT onto min/max, or returns 0 if the node doesn't match.
23043 static std::pair<unsigned, bool>
23044 matchIntegerMINMAX(SDValue Cond, EVT VT, SDValue LHS, SDValue RHS,
23045 SelectionDAG &DAG, const X86Subtarget *Subtarget) {
23046 if (!VT.isVector())
23047 return std::make_pair(0, false);
23049 bool NeedSplit = false;
23050 switch (VT.getSimpleVT().SimpleTy) {
23051 default: return std::make_pair(0, false);
23054 if (!Subtarget->hasVLX())
23055 return std::make_pair(0, false);
23059 if (!Subtarget->hasBWI())
23060 return std::make_pair(0, false);
23064 if (!Subtarget->hasAVX512())
23065 return std::make_pair(0, false);
23070 if (!Subtarget->hasAVX2())
23072 if (!Subtarget->hasAVX())
23073 return std::make_pair(0, false);
23078 if (!Subtarget->hasSSE2())
23079 return std::make_pair(0, false);
23082 // SSE2 has only a small subset of the operations.
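// Before SSE4.1 the only vector integer min/max instructions are
// PMINUB/PMAXUB (unsigned v16i8) and PMINSW/PMAXSW (signed v8i16), which is
// what the two flags below encode.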
23083 bool hasUnsigned = Subtarget->hasSSE41() ||
23084 (Subtarget->hasSSE2() && VT == MVT::v16i8);
23085 bool hasSigned = Subtarget->hasSSE41() ||
23086 (Subtarget->hasSSE2() && VT == MVT::v8i16);
23088 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
23091 // Check for x CC y ? x : y.
23092 if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
23093 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
23098 Opc = hasUnsigned ? X86ISD::UMIN : 0; break;
23101 Opc = hasUnsigned ? X86ISD::UMAX : 0; break;
23104 Opc = hasSigned ? X86ISD::SMIN : 0; break;
23107 Opc = hasSigned ? X86ISD::SMAX : 0; break;
23109 // Check for x CC y ? y : x -- a min/max with reversed arms.
23110 } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) &&
23111 DAG.isEqualTo(RHS, Cond.getOperand(0))) {
23116 Opc = hasUnsigned ? X86ISD::UMAX : 0; break;
23119 Opc = hasUnsigned ? X86ISD::UMIN : 0; break;
23122 Opc = hasSigned ? X86ISD::SMAX : 0; break;
23125 Opc = hasSigned ? X86ISD::SMIN : 0; break;
23129 return std::make_pair(Opc, NeedSplit);
23133 transformVSELECTtoBlendVECTOR_SHUFFLE(SDNode *N, SelectionDAG &DAG,
23134 const X86Subtarget *Subtarget) {
23136 SDValue Cond = N->getOperand(0);
23137 SDValue LHS = N->getOperand(1);
23138 SDValue RHS = N->getOperand(2);
23140 if (Cond.getOpcode() == ISD::SIGN_EXTEND) {
23141 SDValue CondSrc = Cond->getOperand(0);
23142 if (CondSrc->getOpcode() == ISD::SIGN_EXTEND_INREG)
23143 Cond = CondSrc->getOperand(0);
23146 if (!ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
23149 // A vselect where all conditions and data are constants can be optimized into
23150 // a single vector load by SelectionDAGLegalize::ExpandBUILD_VECTOR().
23151 if (ISD::isBuildVectorOfConstantSDNodes(LHS.getNode()) &&
23152 ISD::isBuildVectorOfConstantSDNodes(RHS.getNode()))
23155 unsigned MaskValue = 0;
23156 if (!BUILD_VECTORtoBlendMask(cast<BuildVectorSDNode>(Cond), MaskValue))
23159 MVT VT = N->getSimpleValueType(0);
23160 unsigned NumElems = VT.getVectorNumElements();
23161 SmallVector<int, 8> ShuffleMask(NumElems, -1);
23162 for (unsigned i = 0; i < NumElems; ++i) {
23163 // Be sure we emit undef where we can.
23164 if (Cond.getOperand(i)->getOpcode() == ISD::UNDEF)
23165 ShuffleMask[i] = -1;
23167 ShuffleMask[i] = i + NumElems * ((MaskValue >> i) & 1);
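// Illustrative example: with NumElems = 4 and MaskValue = 0b0110 the
// resulting shuffle mask is <0, 5, 6, 3>, i.e. lanes whose mask bit is set
// pick from the second shuffle operand.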
23170 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23171 if (!TLI.isShuffleMaskLegal(ShuffleMask, VT))
23173 return DAG.getVectorShuffle(VT, dl, LHS, RHS, &ShuffleMask[0]);
23176 /// PerformSELECTCombine - Do target-specific dag combines on SELECT and VSELECT
23178 static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
23179 TargetLowering::DAGCombinerInfo &DCI,
23180 const X86Subtarget *Subtarget) {
23182 SDValue Cond = N->getOperand(0);
23183 // Get the LHS/RHS of the select.
23184 SDValue LHS = N->getOperand(1);
23185 SDValue RHS = N->getOperand(2);
23186 EVT VT = LHS.getValueType();
23187 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23189 // If we have SSE[12] support, try to form min/max nodes. SSE min/max
23190 // instructions match the semantics of the common C idiom x<y?x:y but not
23191 // x<=y?x:y, because of how they handle negative zero (which can be
23192 // ignored in unsafe-math mode).
23193 // We also try to create v2f32 min/max nodes, which we later widen to v4f32.
23194 if (Cond.getOpcode() == ISD::SETCC && VT.isFloatingPoint() &&
23195 VT != MVT::f80 && (TLI.isTypeLegal(VT) || VT == MVT::v2f32) &&
23196 (Subtarget->hasSSE2() ||
23197 (Subtarget->hasSSE1() && VT.getScalarType() == MVT::f32))) {
23198 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
23200 unsigned Opcode = 0;
23201 // Check for x CC y ? x : y.
23202 if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
23203 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
23207 // Converting this to a min would handle NaNs incorrectly, and swapping
23208 // the operands would cause it to handle comparisons between positive
23209 // and negative zero incorrectly.
23210 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
23211 if (!DAG.getTarget().Options.UnsafeFPMath &&
23212 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
23214 std::swap(LHS, RHS);
23216 Opcode = X86ISD::FMIN;
23219 // Converting this to a min would handle comparisons between positive
23220 // and negative zero incorrectly.
23221 if (!DAG.getTarget().Options.UnsafeFPMath &&
23222 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS))
23224 Opcode = X86ISD::FMIN;
23227 // Converting this to a min would handle both negative zeros and NaNs
23228 // incorrectly, but we can swap the operands to fix both.
23229 std::swap(LHS, RHS);
23233 Opcode = X86ISD::FMIN;
23237 // Converting this to a max would handle comparisons between positive
23238 // and negative zero incorrectly.
23239 if (!DAG.getTarget().Options.UnsafeFPMath &&
23240 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS))
23242 Opcode = X86ISD::FMAX;
23245 // Converting this to a max would handle NaNs incorrectly, and swapping
23246 // the operands would cause it to handle comparisons between positive
23247 // and negative zero incorrectly.
23248 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
23249 if (!DAG.getTarget().Options.UnsafeFPMath &&
23250 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
23252 std::swap(LHS, RHS);
23254 Opcode = X86ISD::FMAX;
23257 // Converting this to a max would handle both negative zeros and NaNs
23258 // incorrectly, but we can swap the operands to fix both.
23259 std::swap(LHS, RHS);
23263 Opcode = X86ISD::FMAX;
23266 // Check for x CC y ? y : x -- a min/max with reversed arms.
23267 } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) &&
23268 DAG.isEqualTo(RHS, Cond.getOperand(0))) {
23272 // Converting this to a min would handle comparisons between positive
23273 // and negative zero incorrectly, and swapping the operands would
23274 // cause it to handle NaNs incorrectly.
23275 if (!DAG.getTarget().Options.UnsafeFPMath &&
23276 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) {
23277 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
23279 std::swap(LHS, RHS);
23281 Opcode = X86ISD::FMIN;
23284 // Converting this to a min would handle NaNs incorrectly.
23285 if (!DAG.getTarget().Options.UnsafeFPMath &&
23286 (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)))
23288 Opcode = X86ISD::FMIN;
23291 // Converting this to a min would handle both negative zeros and NaNs
23292 // incorrectly, but we can swap the operands to fix both.
23293 std::swap(LHS, RHS);
23297 Opcode = X86ISD::FMIN;
23301 // Converting this to a max would handle NaNs incorrectly.
23302 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
23304 Opcode = X86ISD::FMAX;
23307 // Converting this to a max would handle comparisons between positive
23308 // and negative zero incorrectly, and swapping the operands would
23309 // cause it to handle NaNs incorrectly.
23310 if (!DAG.getTarget().Options.UnsafeFPMath &&
23311 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS)) {
23312 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
23314 std::swap(LHS, RHS);
23316 Opcode = X86ISD::FMAX;
23319 // Converting this to a max would handle both negative zeros and NaNs
23320 // incorrectly, but we can swap the operands to fix both.
23321 std::swap(LHS, RHS);
23325 Opcode = X86ISD::FMAX;
23331 return DAG.getNode(Opcode, DL, N->getValueType(0), LHS, RHS);
23334 EVT CondVT = Cond.getValueType();
23335 if (Subtarget->hasAVX512() && VT.isVector() && CondVT.isVector() &&
23336 CondVT.getVectorElementType() == MVT::i1) {
23337 // v16i8 (select v16i1, v16i8, v16i8) does not have a proper
23338 // lowering on KNL. In this case we convert it to
23339 // v16i8 (select v16i8, v16i8, v16i8) and use an AVX instruction.
23340 // The same applies to all 128- and 256-bit vectors of i8 and i16.
23341 // Starting with SKX these selects have a proper lowering.
23342 EVT OpVT = LHS.getValueType();
23343 if ((OpVT.is128BitVector() || OpVT.is256BitVector()) &&
23344 (OpVT.getVectorElementType() == MVT::i8 ||
23345 OpVT.getVectorElementType() == MVT::i16) &&
23346 !(Subtarget->hasBWI() && Subtarget->hasVLX())) {
23347 Cond = DAG.getNode(ISD::SIGN_EXTEND, DL, OpVT, Cond);
23348 DCI.AddToWorklist(Cond.getNode());
23349 return DAG.getNode(N->getOpcode(), DL, OpVT, Cond, LHS, RHS);
23352 // If this is a select between two integer constants, try to do some optimizations.
23354 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(LHS)) {
23355 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(RHS))
23356 // Don't do this for crazy integer types.
23357 if (DAG.getTargetLoweringInfo().isTypeLegal(LHS.getValueType())) {
23358 // If this is efficiently invertible, canonicalize the LHSC/RHSC values
23359 // so that TrueC (the true value) is larger than FalseC.
23360 bool NeedsCondInvert = false;
23362 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue()) &&
23363 // Efficiently invertible.
23364 (Cond.getOpcode() == ISD::SETCC || // setcc -> invertible.
23365 (Cond.getOpcode() == ISD::XOR && // xor(X, C) -> invertible.
23366 isa<ConstantSDNode>(Cond.getOperand(1))))) {
23367 NeedsCondInvert = true;
23368 std::swap(TrueC, FalseC);
23371 // Optimize C ? 8 : 0 -> zext(C) << 3. Likewise for any pow2/0.
23372 if (FalseC->getAPIntValue() == 0 &&
23373 TrueC->getAPIntValue().isPowerOf2()) {
23374 if (NeedsCondInvert) // Invert the condition if needed.
23375 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
23376 DAG.getConstant(1, Cond.getValueType()));
23378 // Zero extend the condition if needed.
23379 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, LHS.getValueType(), Cond);
23381 unsigned ShAmt = TrueC->getAPIntValue().logBase2();
23382 return DAG.getNode(ISD::SHL, DL, LHS.getValueType(), Cond,
23383 DAG.getConstant(ShAmt, MVT::i8));
23386 // Optimize Cond ? cst+1 : cst -> zext(setcc(C)) + cst.
23387 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
23388 if (NeedsCondInvert) // Invert the condition if needed.
23389 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
23390 DAG.getConstant(1, Cond.getValueType()));
23392 // Zero extend the condition if needed.
23393 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
23394 FalseC->getValueType(0), Cond);
23395 return DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
23396 SDValue(FalseC, 0));
23399 // Optimize cases that will turn into an LEA instruction. This requires
23400 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
23401 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
23402 uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue();
23403 if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff;
23405 bool isFastMultiplier = false;
23407 switch ((unsigned char)Diff) {
23409 case 1: // result = add base, cond
23410 case 2: // result = lea base( , cond*2)
23411 case 3: // result = lea base(cond, cond*2)
23412 case 4: // result = lea base( , cond*4)
23413 case 5: // result = lea base(cond, cond*4)
23414 case 8: // result = lea base( , cond*8)
23415 case 9: // result = lea base(cond, cond*8)
23416 isFastMultiplier = true;
23421 if (isFastMultiplier) {
23422 APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue();
23423 if (NeedsCondInvert) // Invert the condition if needed.
23424 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
23425 DAG.getConstant(1, Cond.getValueType()));
23427 // Zero extend the condition if needed.
23428 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
23430 // Scale the condition by the difference.
23432 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
23433 DAG.getConstant(Diff, Cond.getValueType()));
23435 // Add the base if non-zero.
23436 if (FalseC->getAPIntValue() != 0)
23437 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
23438 SDValue(FalseC, 0));
23445 // Canonicalize max and min:
23446 // (x > y) ? x : y -> (x >= y) ? x : y
23447 // (x < y) ? x : y -> (x <= y) ? x : y
23448 // This allows use of COND_S / COND_NS (see TranslateX86CC) which eliminates
23449 // the need for an extra compare
23450 // against zero. e.g.
23451 // (x - y) > 0 ? (x - y) : 0 -> (x - y) >= 0 ? (x - y) : 0
23453 // so that a 'testl %edi, %edi' followed by
23455 // 'cmovgl %edi, %eax' can become a single
23459 // 'cmovsl %eax, %edi' that reuses the flags already set by the subtraction.
23460 if (N->getOpcode() == ISD::SELECT && Cond.getOpcode() == ISD::SETCC &&
23461 DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
23462 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
23463 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
23468 ISD::CondCode NewCC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGE;
23469 Cond = DAG.getSetCC(SDLoc(Cond), Cond.getValueType(),
23470 Cond.getOperand(0), Cond.getOperand(1), NewCC);
23471 return DAG.getNode(ISD::SELECT, DL, VT, Cond, LHS, RHS);
23476 // Early exit check
23477 if (!TLI.isTypeLegal(VT))
23480 // Match VSELECTs into subs with unsigned saturation.
23481 if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC &&
23482 // psubus is available in SSE2 and AVX2 for i8 and i16 vectors.
23483 ((Subtarget->hasSSE2() && (VT == MVT::v16i8 || VT == MVT::v8i16)) ||
23484 (Subtarget->hasAVX2() && (VT == MVT::v32i8 || VT == MVT::v16i16)))) {
23485 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
23487 // Check if one of the arms of the VSELECT is a zero vector. If it's on the
23488 // left side invert the predicate to simplify logic below.
23490 if (ISD::isBuildVectorAllZeros(LHS.getNode())) {
23492 CC = ISD::getSetCCInverse(CC, true);
23493 } else if (ISD::isBuildVectorAllZeros(RHS.getNode())) {
23497 if (Other.getNode() && Other->getNumOperands() == 2 &&
23498 DAG.isEqualTo(Other->getOperand(0), Cond.getOperand(0))) {
23499 SDValue OpLHS = Other->getOperand(0), OpRHS = Other->getOperand(1);
23500 SDValue CondRHS = Cond->getOperand(1);
23502 // Look for a general sub with unsigned saturation first.
23503 // x >= y ? x-y : 0 --> subus x, y
23504 // x > y ? x-y : 0 --> subus x, y
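// For instance (illustrative only): with v8i16 operands,
//   (vselect (setcc x, y, setuge), (sub x, y), zero)
// is matched here and emitted as one X86ISD::SUBUS node, i.e. a psubusw.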
23505 if ((CC == ISD::SETUGE || CC == ISD::SETUGT) &&
23506 Other->getOpcode() == ISD::SUB && DAG.isEqualTo(OpRHS, CondRHS))
23507 return DAG.getNode(X86ISD::SUBUS, DL, VT, OpLHS, OpRHS);
23509 if (auto *OpRHSBV = dyn_cast<BuildVectorSDNode>(OpRHS))
23510 if (auto *OpRHSConst = OpRHSBV->getConstantSplatNode()) {
23511 if (auto *CondRHSBV = dyn_cast<BuildVectorSDNode>(CondRHS))
23512 if (auto *CondRHSConst = CondRHSBV->getConstantSplatNode())
23513 // If the RHS is a constant we have to reverse the const
23514 // canonicalization.
23515 // x > C-1 ? x + (-C) : 0 --> subus x, C
23516 if (CC == ISD::SETUGT && Other->getOpcode() == ISD::ADD &&
23517 CondRHSConst->getAPIntValue() ==
23518 (-OpRHSConst->getAPIntValue() - 1))
23519 return DAG.getNode(
23520 X86ISD::SUBUS, DL, VT, OpLHS,
23521 DAG.getConstant(-OpRHSConst->getAPIntValue(), VT));
23523 // Another special case: If C was a sign bit, the sub has been
23524 // canonicalized into a xor.
23525 // FIXME: Would it be better to use computeKnownBits to determine
23526 // whether it's safe to decanonicalize the xor?
23527 // x s< 0 ? x^C : 0 --> subus x, C
23528 if (CC == ISD::SETLT && Other->getOpcode() == ISD::XOR &&
23529 ISD::isBuildVectorAllZeros(CondRHS.getNode()) &&
23530 OpRHSConst->getAPIntValue().isSignBit())
23531 // Note that we have to rebuild the RHS constant here to ensure we
23532 // don't rely on particular values of undef lanes.
23533 return DAG.getNode(
23534 X86ISD::SUBUS, DL, VT, OpLHS,
23535 DAG.getConstant(OpRHSConst->getAPIntValue(), VT));
23540 // Try to match a min/max vector operation.
23541 if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC) {
23542 std::pair<unsigned, bool> ret = matchIntegerMINMAX(Cond, VT, LHS, RHS, DAG, Subtarget);
23543 unsigned Opc = ret.first;
23544 bool NeedSplit = ret.second;
23546 if (Opc && NeedSplit) {
23547 unsigned NumElems = VT.getVectorNumElements();
23548 // Extract the LHS vectors
23549 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, DL);
23550 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, DL);
23552 // Extract the RHS vectors
23553 SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, DL);
23554 SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, DL);
23556 // Create min/max for each subvector
23557 LHS = DAG.getNode(Opc, DL, LHS1.getValueType(), LHS1, RHS1);
23558 RHS = DAG.getNode(Opc, DL, LHS2.getValueType(), LHS2, RHS2);
23560 // Merge the result
23561 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LHS, RHS);
23563 return DAG.getNode(Opc, DL, VT, LHS, RHS);
23566 // Simplify vector selection if condition value type matches vselect
23568 if (N->getOpcode() == ISD::VSELECT && CondVT == VT) {
23569 assert(Cond.getValueType().isVector() &&
23570 "vector select expects a vector selector!");
23572 bool TValIsAllOnes = ISD::isBuildVectorAllOnes(LHS.getNode());
23573 bool FValIsAllZeros = ISD::isBuildVectorAllZeros(RHS.getNode());
23575 // Try to invert the condition if the true value is not all 1s and the false value is not all 0s.
23577 if (!TValIsAllOnes && !FValIsAllZeros &&
23578 // Check if the selector will be produced by CMPP*/PCMP*
23579 Cond.getOpcode() == ISD::SETCC &&
23580 // Check if SETCC has already been promoted
23581 TLI.getSetCCResultType(*DAG.getContext(), VT) == CondVT) {
23582 bool TValIsAllZeros = ISD::isBuildVectorAllZeros(LHS.getNode());
23583 bool FValIsAllOnes = ISD::isBuildVectorAllOnes(RHS.getNode());
23585 if (TValIsAllZeros || FValIsAllOnes) {
23586 SDValue CC = Cond.getOperand(2);
23587 ISD::CondCode NewCC =
23588 ISD::getSetCCInverse(cast<CondCodeSDNode>(CC)->get(),
23589 Cond.getOperand(0).getValueType().isInteger());
23590 Cond = DAG.getSetCC(DL, CondVT, Cond.getOperand(0), Cond.getOperand(1), NewCC);
23591 std::swap(LHS, RHS);
23592 TValIsAllOnes = FValIsAllOnes;
23593 FValIsAllZeros = TValIsAllZeros;
23597 if (TValIsAllOnes || FValIsAllZeros) {
23600 if (TValIsAllOnes && FValIsAllZeros)
23602 else if (TValIsAllOnes)
23603 Ret = DAG.getNode(ISD::OR, DL, CondVT, Cond,
23604 DAG.getNode(ISD::BITCAST, DL, CondVT, RHS));
23605 else if (FValIsAllZeros)
23606 Ret = DAG.getNode(ISD::AND, DL, CondVT, Cond,
23607 DAG.getNode(ISD::BITCAST, DL, CondVT, LHS));
23609 return DAG.getNode(ISD::BITCAST, DL, VT, Ret);
23613 // If we know that this node is legal then we know that it is going to be
23614 // matched by one of the SSE/AVX BLEND instructions. These instructions only
23615 // depend on the highest bit in each word. Try to use SimplifyDemandedBits
23616 // to simplify previous instructions.
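// Rough illustration (not a guaranteed fold): if Cond was computed as
// (and (setcc ...), splat(sign-bit)) on v4i32, only the top bit of each lane
// is demanded by the blend, so the redundant AND can be stripped and the
// users rewritten as X86ISD::SHRUNKBLEND nodes below.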
23617 if (N->getOpcode() == ISD::VSELECT && DCI.isBeforeLegalizeOps() &&
23618 !DCI.isBeforeLegalize() &&
23619 // We explicitly check against v8i16 and v16i16 because, although
23620 // they're marked as Custom, they might only be legal when Cond is a
23621 // build_vector of constants. This will be taken care of in a later phase.
23623 (TLI.isOperationLegalOrCustom(ISD::VSELECT, VT) && VT != MVT::v16i16 &&
23624 VT != MVT::v8i16) &&
23625 // Don't optimize vector of constants. Those are handled by
23626 // the generic code and all the bits must be properly set for
23627 // the generic optimizer.
23628 !ISD::isBuildVectorOfConstantSDNodes(Cond.getNode())) {
23629 unsigned BitWidth = Cond.getValueType().getScalarType().getSizeInBits();
23631 // Don't optimize vector selects that map to mask-registers.
23635 assert(BitWidth >= 8 && BitWidth <= 64 && "Invalid mask size");
23636 APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 1);
23638 APInt KnownZero, KnownOne;
23639 TargetLowering::TargetLoweringOpt TLO(DAG, DCI.isBeforeLegalize(),
23640 DCI.isBeforeLegalizeOps());
23641 if (TLO.ShrinkDemandedConstant(Cond, DemandedMask) ||
23642 TLI.SimplifyDemandedBits(Cond, DemandedMask, KnownZero, KnownOne,
23644 // If we changed the computation somewhere in the DAG, this change
23645 // will affect all users of Cond.
23646 // Make sure it is fine and update all the nodes so that we do not
23647 // use the generic VSELECT anymore. Otherwise, we may perform
23648 // wrong optimizations as we messed up with the actual expectation
23649 // for the vector boolean values.
23650 if (Cond != TLO.Old) {
23651 // Check all uses of that condition operand to check whether it will be
23652 // consumed by non-BLEND instructions, which may depend on all of the bits being set correctly.
23654 for (SDNode::use_iterator I = Cond->use_begin(), E = Cond->use_end();
23656 if (I->getOpcode() != ISD::VSELECT)
23657 // TODO: Add other opcodes eventually lowered into BLEND.
23660 // Update all the users of the condition, before committing the change,
23661 // so that the VSELECT optimizations that expect the correct vector
23662 // boolean value will not be triggered.
23663 for (SDNode::use_iterator I = Cond->use_begin(), E = Cond->use_end();
23665 DAG.ReplaceAllUsesOfValueWith(
23667 DAG.getNode(X86ISD::SHRUNKBLEND, SDLoc(*I), I->getValueType(0),
23668 Cond, I->getOperand(1), I->getOperand(2)));
23669 DCI.CommitTargetLoweringOpt(TLO);
23672 // At this point, only Cond is changed. Change the condition
23673 // just for N to keep the opportunity to optimize all other
23674 // users their own way.
23675 DAG.ReplaceAllUsesOfValueWith(
23677 DAG.getNode(X86ISD::SHRUNKBLEND, SDLoc(N), N->getValueType(0),
23678 TLO.New, N->getOperand(1), N->getOperand(2)));
23683 // We should generate an X86ISD::BLENDI from a vselect if its argument
23684 // is a sign_extend_inreg of an any_extend of a BUILD_VECTOR of
23685 // constants. This specific pattern gets generated when we split a
23686 // selector for a 512 bit vector in a machine without AVX512 (but with
23687 // 256-bit vectors), during legalization:
23689 // (vselect (sign_extend (any_extend (BUILD_VECTOR)) i1) LHS RHS)
23691 // Iff we find this pattern and the build_vectors are built from
23692 // constants, we translate the vselect into a shuffle_vector that we
23693 // know will be matched by LowerVECTOR_SHUFFLEtoBlend.
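// Hedged sketch of the idea: with a constant selector such as
//   (vselect (build_vector -1, 0, -1, 0), LHS, RHS)
// (possibly wrapped in the sign_extend_inreg/any_extend pair above), the node
// can be rewritten as shuffle_vector(LHS, RHS, <0, 5, 2, 7>), which the blend
// lowering matches directly.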
23694 if ((N->getOpcode() == ISD::VSELECT ||
23695 N->getOpcode() == X86ISD::SHRUNKBLEND) &&
23696 !DCI.isBeforeLegalize()) {
23697 SDValue Shuffle = transformVSELECTtoBlendVECTOR_SHUFFLE(N, DAG, Subtarget);
23698 if (Shuffle.getNode())
23705 // Check whether a boolean test is testing a boolean value generated by
23706 // X86ISD::SETCC. If so, return the operand of that SETCC and the proper condition flag.
23709 // Simplify the following patterns:
23710 // (Op (CMP (SETCC Cond EFLAGS) 1) EQ) or
23711 // (Op (CMP (SETCC Cond EFLAGS) 0) NEQ)
23712 // to (Op EFLAGS Cond)
23714 // (Op (CMP (SETCC Cond EFLAGS) 0) EQ) or
23715 // (Op (CMP (SETCC Cond EFLAGS) 1) NEQ)
23716 // to (Op EFLAGS !Cond)
23718 // where Op could be BRCOND or CMOV.
23720 static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) {
23721 // Quit unless this is a CMP, or a SUB whose value result is unused.
23722 if (Cmp.getOpcode() != X86ISD::CMP &&
23723 (Cmp.getOpcode() != X86ISD::SUB || Cmp.getNode()->hasAnyUseOfValue(0)))
23726 // Quit if not used as a boolean value.
23727 if (CC != X86::COND_E && CC != X86::COND_NE)
23730 // Check CMP operands. One of them should be 0 or 1 and the other should be
23731 // a SETCC or extended from it.
23732 SDValue Op1 = Cmp.getOperand(0);
23733 SDValue Op2 = Cmp.getOperand(1);
23736 const ConstantSDNode* C = nullptr;
23737 bool needOppositeCond = (CC == X86::COND_E);
23738 bool checkAgainstTrue = false; // Is it a comparison against 1?
23740 if ((C = dyn_cast<ConstantSDNode>(Op1)))
23742 else if ((C = dyn_cast<ConstantSDNode>(Op2)))
23744 else // Quit if neither operand is a constant.
23747 if (C->getZExtValue() == 1) {
23748 needOppositeCond = !needOppositeCond;
23749 checkAgainstTrue = true;
23750 } else if (C->getZExtValue() != 0)
23751 // Quit if the constant is neither 0 nor 1.
23754 bool truncatedToBoolWithAnd = false;
23755 // Skip (zext $x), (trunc $x), or (and $x, 1) node.
23756 while (SetCC.getOpcode() == ISD::ZERO_EXTEND ||
23757 SetCC.getOpcode() == ISD::TRUNCATE ||
23758 SetCC.getOpcode() == ISD::AND) {
23759 if (SetCC.getOpcode() == ISD::AND) {
23761 ConstantSDNode *CS;
23762 if ((CS = dyn_cast<ConstantSDNode>(SetCC.getOperand(0))) &&
23763 CS->getZExtValue() == 1)
23765 if ((CS = dyn_cast<ConstantSDNode>(SetCC.getOperand(1))) &&
23766 CS->getZExtValue() == 1)
23770 SetCC = SetCC.getOperand(OpIdx);
23771 truncatedToBoolWithAnd = true;
23773 SetCC = SetCC.getOperand(0);
23776 switch (SetCC.getOpcode()) {
23777 case X86ISD::SETCC_CARRY:
23778 // Since SETCC_CARRY gives output based on R = CF ? ~0 : 0, it's unsafe to
23779 // simplify it if the result of SETCC_CARRY is not canonicalized to 0 or 1,
23780 // i.e. it's a comparison against true but the result of SETCC_CARRY is not
23781 // truncated to i1 using 'and'.
23782 if (checkAgainstTrue && !truncatedToBoolWithAnd)
23784 assert(X86::CondCode(SetCC.getConstantOperandVal(0)) == X86::COND_B &&
23785 "Invalid use of SETCC_CARRY!");
23787 case X86ISD::SETCC:
23788 // Set the condition code or opposite one if necessary.
23789 CC = X86::CondCode(SetCC.getConstantOperandVal(0));
23790 if (needOppositeCond)
23791 CC = X86::GetOppositeBranchCondition(CC);
23792 return SetCC.getOperand(1);
23793 case X86ISD::CMOV: {
23794 // Check whether false/true value has canonical one, i.e. 0 or 1.
23795 ConstantSDNode *FVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(0));
23796 ConstantSDNode *TVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(1));
23797 // Quit if true value is not a constant.
23800 // Quit if false value is not a constant.
23802 SDValue Op = SetCC.getOperand(0);
23803 // Skip 'zext' or 'trunc' node.
23804 if (Op.getOpcode() == ISD::ZERO_EXTEND ||
23805 Op.getOpcode() == ISD::TRUNCATE)
23806 Op = Op.getOperand(0);
23807 // A special case for rdrand/rdseed, where 0 is set if the false cond is found.
23809 if ((Op.getOpcode() != X86ISD::RDRAND &&
23810 Op.getOpcode() != X86ISD::RDSEED) || Op.getResNo() != 0)
23813 // Quit if false value is not the constant 0 or 1.
23814 bool FValIsFalse = true;
23815 if (FVal && FVal->getZExtValue() != 0) {
23816 if (FVal->getZExtValue() != 1)
23818 // If FVal is 1, opposite cond is needed.
23819 needOppositeCond = !needOppositeCond;
23820 FValIsFalse = false;
23822 // Quit if TVal is not the constant opposite of FVal.
23823 if (FValIsFalse && TVal->getZExtValue() != 1)
23825 if (!FValIsFalse && TVal->getZExtValue() != 0)
23827 CC = X86::CondCode(SetCC.getConstantOperandVal(2));
23828 if (needOppositeCond)
23829 CC = X86::GetOppositeBranchCondition(CC);
23830 return SetCC.getOperand(3);
23837 /// Optimize X86ISD::CMOV [LHS, RHS, CONDCODE (e.g. X86::COND_NE), CONDVAL]
23838 static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG,
23839 TargetLowering::DAGCombinerInfo &DCI,
23840 const X86Subtarget *Subtarget) {
23843 // If the flag operand isn't dead, don't touch this CMOV.
23844 if (N->getNumValues() == 2 && !SDValue(N, 1).use_empty())
23847 SDValue FalseOp = N->getOperand(0);
23848 SDValue TrueOp = N->getOperand(1);
23849 X86::CondCode CC = (X86::CondCode)N->getConstantOperandVal(2);
23850 SDValue Cond = N->getOperand(3);
23852 if (CC == X86::COND_E || CC == X86::COND_NE) {
23853 switch (Cond.getOpcode()) {
23857 // If operand of BSR / BSF are proven never zero, then ZF cannot be set.
23858 if (DAG.isKnownNeverZero(Cond.getOperand(0)))
23859 return (CC == X86::COND_E) ? FalseOp : TrueOp;
23865 Flags = checkBoolTestSetCCCombine(Cond, CC);
23866 if (Flags.getNode() &&
23867 // Extra check as FCMOV only supports a subset of X86 cond.
23868 (FalseOp.getValueType() != MVT::f80 || hasFPCMov(CC))) {
23869 SDValue Ops[] = { FalseOp, TrueOp,
23870 DAG.getConstant(CC, MVT::i8), Flags };
23871 return DAG.getNode(X86ISD::CMOV, DL, N->getVTList(), Ops);
23874 // If this is a select between two integer constants, try to do some
23875 // optimizations. Note that the operands are ordered the opposite of SELECT
23877 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(TrueOp)) {
23878 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(FalseOp)) {
23879 // Canonicalize the TrueC/FalseC values so that TrueC (the true value) is
23880 // larger than FalseC (the false value).
23881 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue())) {
23882 CC = X86::GetOppositeBranchCondition(CC);
23883 std::swap(TrueC, FalseC);
23884 std::swap(TrueOp, FalseOp);
23887 // Optimize C ? 8 : 0 -> zext(setcc(C)) << 3. Likewise for any pow2/0.
23888 // This is efficient for any integer data type (including i8/i16) and any shift amount.
23890 if (FalseC->getAPIntValue() == 0 && TrueC->getAPIntValue().isPowerOf2()) {
23891 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
23892 DAG.getConstant(CC, MVT::i8), Cond);
23894 // Zero extend the condition if needed.
23895 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, TrueC->getValueType(0), Cond);
23897 unsigned ShAmt = TrueC->getAPIntValue().logBase2();
23898 Cond = DAG.getNode(ISD::SHL, DL, Cond.getValueType(), Cond,
23899 DAG.getConstant(ShAmt, MVT::i8));
23900 if (N->getNumValues() == 2) // Dead flag value?
23901 return DCI.CombineTo(N, Cond, SDValue());
23905 // Optimize Cond ? cst+1 : cst -> zext(setcc(C)) + cst. This is efficient
23906 // for any integer data type, including i8/i16.
23907 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
23908 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
23909 DAG.getConstant(CC, MVT::i8), Cond);
23911 // Zero extend the condition if needed.
23912 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
23913 FalseC->getValueType(0), Cond);
23914 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
23915 SDValue(FalseC, 0));
23917 if (N->getNumValues() == 2) // Dead flag value?
23918 return DCI.CombineTo(N, Cond, SDValue());
23922 // Optimize cases that will turn into an LEA instruction. This requires
23923 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
23924 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
23925 uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue();
23926 if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff;
23928 bool isFastMultiplier = false;
23930 switch ((unsigned char)Diff) {
23932 case 1: // result = add base, cond
23933 case 2: // result = lea base( , cond*2)
23934 case 3: // result = lea base(cond, cond*2)
23935 case 4: // result = lea base( , cond*4)
23936 case 5: // result = lea base(cond, cond*4)
23937 case 8: // result = lea base( , cond*8)
23938 case 9: // result = lea base(cond, cond*8)
23939 isFastMultiplier = true;
23944 if (isFastMultiplier) {
23945 APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue();
23946 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
23947 DAG.getConstant(CC, MVT::i8), Cond);
23948 // Zero extend the condition if needed.
23949 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
23951 // Scale the condition by the difference.
23953 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
23954 DAG.getConstant(Diff, Cond.getValueType()));
23956 // Add the base if non-zero.
23957 if (FalseC->getAPIntValue() != 0)
23958 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
23959 SDValue(FalseC, 0));
23960 if (N->getNumValues() == 2) // Dead flag value?
23961 return DCI.CombineTo(N, Cond, SDValue());
23968 // Handle these cases:
23969 // (select (x != c), e, c) -> (select (x != c), e, x),
23970 // (select (x == c), c, e) -> (select (x == c), x, e)
23971 // where c is an integer constant, and the "select" is the combination
23972 // of CMOV and CMP.
23974 // The rationale for this change is that the conditional-move from a constant
23975 // needs two instructions, however, conditional-move from a register needs
23976 // only one instruction.
23978 // CAVEAT: By replacing a constant with a symbolic value, it may obscure
23979 // some instruction-combining opportunities. This opt needs to be
23980 // postponed as late as possible.
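// Concrete illustration: with c == 5,
//   (select (x == 5), 5, e)  -->  (select (x == 5), x, e)
// so the constant no longer needs its own mov-immediate; the cmov simply
// reads x from a register.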
23982 if (!DCI.isBeforeLegalize() && !DCI.isBeforeLegalizeOps()) {
23983 // the DCI.xxxx conditions are provided to postpone the optimization as
23984 // late as possible.
23986 ConstantSDNode *CmpAgainst = nullptr;
23987 if ((Cond.getOpcode() == X86ISD::CMP || Cond.getOpcode() == X86ISD::SUB) &&
23988 (CmpAgainst = dyn_cast<ConstantSDNode>(Cond.getOperand(1))) &&
23989 !isa<ConstantSDNode>(Cond.getOperand(0))) {
23991 if (CC == X86::COND_NE &&
23992 CmpAgainst == dyn_cast<ConstantSDNode>(FalseOp)) {
23993 CC = X86::GetOppositeBranchCondition(CC);
23994 std::swap(TrueOp, FalseOp);
23997 if (CC == X86::COND_E &&
23998 CmpAgainst == dyn_cast<ConstantSDNode>(TrueOp)) {
23999 SDValue Ops[] = { FalseOp, Cond.getOperand(0),
24000 DAG.getConstant(CC, MVT::i8), Cond };
24001 return DAG.getNode(X86ISD::CMOV, DL, N->getVTList(), Ops);
24009 static SDValue PerformINTRINSIC_WO_CHAINCombine(SDNode *N, SelectionDAG &DAG,
24010 const X86Subtarget *Subtarget) {
24011 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
24013 default: return SDValue();
24014 // SSE/AVX/AVX2 blend intrinsics.
24015 case Intrinsic::x86_avx2_pblendvb:
24016 case Intrinsic::x86_avx2_pblendw:
24017 case Intrinsic::x86_avx2_pblendd_128:
24018 case Intrinsic::x86_avx2_pblendd_256:
24019 // Don't try to simplify this intrinsic if we don't have AVX2.
24020 if (!Subtarget->hasAVX2())
24023 case Intrinsic::x86_avx_blend_pd_256:
24024 case Intrinsic::x86_avx_blend_ps_256:
24025 case Intrinsic::x86_avx_blendv_pd_256:
24026 case Intrinsic::x86_avx_blendv_ps_256:
24027 // Don't try to simplify this intrinsic if we don't have AVX.
24028 if (!Subtarget->hasAVX())
24031 case Intrinsic::x86_sse41_pblendw:
24032 case Intrinsic::x86_sse41_blendpd:
24033 case Intrinsic::x86_sse41_blendps:
24034 case Intrinsic::x86_sse41_blendvps:
24035 case Intrinsic::x86_sse41_blendvpd:
24036 case Intrinsic::x86_sse41_pblendvb: {
24037 SDValue Op0 = N->getOperand(1);
24038 SDValue Op1 = N->getOperand(2);
24039 SDValue Mask = N->getOperand(3);
24041 // Don't try to simplify this intrinsic if we don't have SSE4.1.
24042 if (!Subtarget->hasSSE41())
24045 // fold (blend A, A, Mask) -> A
24048 // fold (blend A, B, allZeros) -> A
24049 if (ISD::isBuildVectorAllZeros(Mask.getNode()))
24051 // fold (blend A, B, allOnes) -> B
24052 if (ISD::isBuildVectorAllOnes(Mask.getNode()))
24055 // Simplify the case where the mask is a constant i32 value.
24056 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Mask)) {
24057 if (C->isNullValue())
24059 if (C->isAllOnesValue())
24066 // Packed SSE2/AVX2 arithmetic shift immediate intrinsics.
24067 case Intrinsic::x86_sse2_psrai_w:
24068 case Intrinsic::x86_sse2_psrai_d:
24069 case Intrinsic::x86_avx2_psrai_w:
24070 case Intrinsic::x86_avx2_psrai_d:
24071 case Intrinsic::x86_sse2_psra_w:
24072 case Intrinsic::x86_sse2_psra_d:
24073 case Intrinsic::x86_avx2_psra_w:
24074 case Intrinsic::x86_avx2_psra_d: {
24075 SDValue Op0 = N->getOperand(1);
24076 SDValue Op1 = N->getOperand(2);
24077 EVT VT = Op0.getValueType();
24078 assert(VT.isVector() && "Expected a vector type!");
24080 if (isa<BuildVectorSDNode>(Op1))
24081 Op1 = Op1.getOperand(0);
24083 if (!isa<ConstantSDNode>(Op1))
24086 EVT SVT = VT.getVectorElementType();
24087 unsigned SVTBits = SVT.getSizeInBits();
24089 ConstantSDNode *CND = cast<ConstantSDNode>(Op1);
24090 APInt C(SVTBits, CND->getAPIntValue().getZExtValue());
24091 uint64_t ShAmt = C.getZExtValue();
24093 // Don't try to convert this shift into a ISD::SRA if the shift
24094 // count is bigger than or equal to the element size.
24095 if (ShAmt >= SVTBits)
24098 // Trivial case: if the shift count is zero, then fold this
24099 // into the first operand.
24103 // Replace this packed shift intrinsic with a target independent
24105 SDValue Splat = DAG.getConstant(C, VT);
24106 return DAG.getNode(ISD::SRA, SDLoc(N), VT, Op0, Splat);
24111 /// PerformMulCombine - Optimize a single multiply with constant into two
24112 /// in order to implement it with two cheaper instructions, e.g.
24113 /// LEA + SHL, LEA + LEA.
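/// For example (a hedged sketch): x*45 can be emitted as t = x*9 followed by
/// t*5 (two LEAs), and x*40 as x*5 followed by a shift left by 3.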
24114 static SDValue PerformMulCombine(SDNode *N, SelectionDAG &DAG,
24115 TargetLowering::DAGCombinerInfo &DCI) {
24116 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
24119 EVT VT = N->getValueType(0);
24120 if (VT != MVT::i64)
24123 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
24126 uint64_t MulAmt = C->getZExtValue();
24127 if (isPowerOf2_64(MulAmt) || MulAmt == 3 || MulAmt == 5 || MulAmt == 9)
24130 uint64_t MulAmt1 = 0;
24131 uint64_t MulAmt2 = 0;
24132 if ((MulAmt % 9) == 0) {
24134 MulAmt2 = MulAmt / 9;
24135 } else if ((MulAmt % 5) == 0) {
24137 MulAmt2 = MulAmt / 5;
24138 } else if ((MulAmt % 3) == 0) {
24140 MulAmt2 = MulAmt / 3;
24143 (isPowerOf2_64(MulAmt2) || MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9)){
24146 if (isPowerOf2_64(MulAmt2) &&
24147 !(N->hasOneUse() && N->use_begin()->getOpcode() == ISD::ADD))
24148 // If the second multiplier is pow2, issue it first. We want the multiply by
24149 // 3, 5, or 9 to be folded into the addressing mode unless the lone use is an add.
24151 std::swap(MulAmt1, MulAmt2);
24154 if (isPowerOf2_64(MulAmt1))
24155 NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
24156 DAG.getConstant(Log2_64(MulAmt1), MVT::i8));
24158 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
24159 DAG.getConstant(MulAmt1, VT));
24161 if (isPowerOf2_64(MulAmt2))
24162 NewMul = DAG.getNode(ISD::SHL, DL, VT, NewMul,
24163 DAG.getConstant(Log2_64(MulAmt2), MVT::i8));
24165 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, NewMul,
24166 DAG.getConstant(MulAmt2, VT));
24168 // Do not add new nodes to DAG combiner worklist.
24169 DCI.CombineTo(N, NewMul, false);
24174 static SDValue PerformSHLCombine(SDNode *N, SelectionDAG &DAG) {
24175 SDValue N0 = N->getOperand(0);
24176 SDValue N1 = N->getOperand(1);
24177 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
24178 EVT VT = N0.getValueType();
24180 // fold (shl (and (setcc_c), c1), c2) -> (and setcc_c, (c1 << c2))
24181 // since the result of setcc_c is all zeros or all ones.
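// Worked example (illustrative): because setcc_c is either 0 or all-ones,
//   (shl (and (setcc_c), 1), 4)  -->  (and setcc_c, 16)
// i.e. the mask is shifted instead of the value and the shl disappears.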
24182 if (VT.isInteger() && !VT.isVector() &&
24183 N1C && N0.getOpcode() == ISD::AND &&
24184 N0.getOperand(1).getOpcode() == ISD::Constant) {
24185 SDValue N00 = N0.getOperand(0);
24186 if (N00.getOpcode() == X86ISD::SETCC_CARRY ||
24187 ((N00.getOpcode() == ISD::ANY_EXTEND ||
24188 N00.getOpcode() == ISD::ZERO_EXTEND) &&
24189 N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY)) {
24190 APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
24191 APInt ShAmt = N1C->getAPIntValue();
24192 Mask = Mask.shl(ShAmt);
24194 return DAG.getNode(ISD::AND, SDLoc(N), VT,
24195 N00, DAG.getConstant(Mask, VT));
24199 // Hardware support for vector shifts is sparse which makes us scalarize the
24200 // vector operations in many cases. Also, on Sandy Bridge ADD is faster than SHL.
24202 // (shl V, 1) -> add V,V
24203 if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
24204 if (auto *N1SplatC = N1BV->getConstantSplatNode()) {
24205 assert(N0.getValueType().isVector() && "Invalid vector shift type");
24206 // We shift all of the values by one. In many cases we do not have
24207 // hardware support for this operation. This is better expressed as an ADD of the value to itself.
24209 if (N1SplatC->getZExtValue() == 1)
24210 return DAG.getNode(ISD::ADD, SDLoc(N), VT, N0, N0);
24216 /// \brief Returns a vector of 0s if the node in input is a vector logical
24217 /// shift by a constant amount which is known to be bigger than or equal
24218 /// to the vector element size in bits.
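/// For example (sketch): a v4i32 logical shift whose splatted constant amount
/// is 32 or more is replaced with a zero vector here, matching the SSE2/AVX2
/// behaviour for out-of-range shift counts.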
24219 static SDValue performShiftToAllZeros(SDNode *N, SelectionDAG &DAG,
24220 const X86Subtarget *Subtarget) {
24221 EVT VT = N->getValueType(0);
24223 if (VT != MVT::v2i64 && VT != MVT::v4i32 && VT != MVT::v8i16 &&
24224 (!Subtarget->hasInt256() ||
24225 (VT != MVT::v4i64 && VT != MVT::v8i32 && VT != MVT::v16i16)))
24228 SDValue Amt = N->getOperand(1);
24230 if (auto *AmtBV = dyn_cast<BuildVectorSDNode>(Amt))
24231 if (auto *AmtSplat = AmtBV->getConstantSplatNode()) {
24232 APInt ShiftAmt = AmtSplat->getAPIntValue();
24233 unsigned MaxAmount = VT.getVectorElementType().getSizeInBits();
24235 // SSE2/AVX2 logical shifts always return a vector of 0s
24236 // if the shift amount is bigger than or equal to
24237 // the element size. The constant shift amount will be
24238 // encoded as an 8-bit immediate.
24239 if (ShiftAmt.trunc(8).uge(MaxAmount))
24240 return getZeroVector(VT, Subtarget, DAG, DL);
24246 /// PerformShiftCombine - Combine shifts.
24247 static SDValue PerformShiftCombine(SDNode* N, SelectionDAG &DAG,
24248 TargetLowering::DAGCombinerInfo &DCI,
24249 const X86Subtarget *Subtarget) {
24250 if (N->getOpcode() == ISD::SHL) {
24251 SDValue V = PerformSHLCombine(N, DAG);
24252 if (V.getNode()) return V;
24255 if (N->getOpcode() != ISD::SRA) {
24256 // Try to fold this logical shift into a zero vector.
24257 SDValue V = performShiftToAllZeros(N, DAG, Subtarget);
24258 if (V.getNode()) return V;
24264 // CMPEQCombine - Recognize the distinctive (AND (setcc ...) (setcc ..))
24265 // where both setccs reference the same FP CMP, and rewrite for CMPEQSS
24266 // and friends. Likewise for OR -> CMPNEQSS.
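// Illustrative sketch: for scalar floats, the ordered-equal idiom
//   (and (setcc_e (cmp a, b)), (setcc_np (cmp a, b)))
// is rewritten below as a cmpeqss of a and b whose all-ones/all-zeros result
// is moved to a GPR and masked down to a single truth bit.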
24267 static SDValue CMPEQCombine(SDNode *N, SelectionDAG &DAG,
24268 TargetLowering::DAGCombinerInfo &DCI,
24269 const X86Subtarget *Subtarget) {
24272 // SSE1 supports CMP{eq|ne}SS, and SSE2 added CMP{eq|ne}SD, but
24273 // we're requiring SSE2 for both.
24274 if (Subtarget->hasSSE2() && isAndOrOfSetCCs(SDValue(N, 0U), opcode)) {
24275 SDValue N0 = N->getOperand(0);
24276 SDValue N1 = N->getOperand(1);
24277 SDValue CMP0 = N0->getOperand(1);
24278 SDValue CMP1 = N1->getOperand(1);
24281 // The SETCCs should both refer to the same CMP.
24282 if (CMP0.getOpcode() != X86ISD::CMP || CMP0 != CMP1)
24285 SDValue CMP00 = CMP0->getOperand(0);
24286 SDValue CMP01 = CMP0->getOperand(1);
24287 EVT VT = CMP00.getValueType();
24289 if (VT == MVT::f32 || VT == MVT::f64) {
24290 bool ExpectingFlags = false;
24291 // Check for any users that want flags:
24292 for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
24293 !ExpectingFlags && UI != UE; ++UI)
24294 switch (UI->getOpcode()) {
24299 ExpectingFlags = true;
24301 case ISD::CopyToReg:
24302 case ISD::SIGN_EXTEND:
24303 case ISD::ZERO_EXTEND:
24304 case ISD::ANY_EXTEND:
24308 if (!ExpectingFlags) {
24309 enum X86::CondCode cc0 = (enum X86::CondCode)N0.getConstantOperandVal(0);
24310 enum X86::CondCode cc1 = (enum X86::CondCode)N1.getConstantOperandVal(0);
24312 if (cc1 == X86::COND_E || cc1 == X86::COND_NE) {
24313 X86::CondCode tmp = cc0;
24318 if ((cc0 == X86::COND_E && cc1 == X86::COND_NP) ||
24319 (cc0 == X86::COND_NE && cc1 == X86::COND_P)) {
24320 // FIXME: need symbolic constants for these magic numbers.
24321 // See X86ATTInstPrinter.cpp:printSSECC().
24322 unsigned x86cc = (cc0 == X86::COND_E) ? 0 : 4;
24323 if (Subtarget->hasAVX512()) {
24324 SDValue FSetCC = DAG.getNode(X86ISD::FSETCC, DL, MVT::i1, CMP00,
24325 CMP01, DAG.getConstant(x86cc, MVT::i8));
24326 if (N->getValueType(0) != MVT::i1)
24327 return DAG.getNode(ISD::ZERO_EXTEND, DL, N->getValueType(0),
24331 SDValue OnesOrZeroesF = DAG.getNode(X86ISD::FSETCC, DL,
24332 CMP00.getValueType(), CMP00, CMP01,
24333 DAG.getConstant(x86cc, MVT::i8));
24335 bool is64BitFP = (CMP00.getValueType() == MVT::f64);
24336 MVT IntVT = is64BitFP ? MVT::i64 : MVT::i32;
24338 if (is64BitFP && !Subtarget->is64Bit()) {
24339 // On a 32-bit target, we cannot bitcast the 64-bit float to a
24340 // 64-bit integer, since that's not a legal type. Since
24341 // OnesOrZeroesF is all ones or all zeroes, we don't need all the
24342 // bits, but can do this little dance to extract the lowest 32 bits
24343 // and work with those going forward.
24344 SDValue Vector64 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64,
24346 SDValue Vector32 = DAG.getNode(ISD::BITCAST, DL, MVT::v4f32,
24348 OnesOrZeroesF = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32,
24349 Vector32, DAG.getIntPtrConstant(0));
24353 SDValue OnesOrZeroesI = DAG.getNode(ISD::BITCAST, DL, IntVT, OnesOrZeroesF);
24354 SDValue ANDed = DAG.getNode(ISD::AND, DL, IntVT, OnesOrZeroesI,
24355 DAG.getConstant(1, IntVT));
24356 SDValue OneBitOfTruth = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, ANDed);
24357 return OneBitOfTruth;
24365 /// CanFoldXORWithAllOnes - Test whether the XOR operand is an AllOnes vector
24366 /// so it can be folded inside ANDNP.
24367 static bool CanFoldXORWithAllOnes(const SDNode *N) {
24368 EVT VT = N->getValueType(0);
24370 // Match direct AllOnes for 128 and 256-bit vectors
24371 if (ISD::isBuildVectorAllOnes(N))
24374 // Look through a bit convert.
24375 if (N->getOpcode() == ISD::BITCAST)
24376 N = N->getOperand(0).getNode();
24378 // Sometimes the operand may come from an insert_subvector building a 256-bit AllOnes vector.
24380 if (VT.is256BitVector() &&
24381 N->getOpcode() == ISD::INSERT_SUBVECTOR) {
24382 SDValue V1 = N->getOperand(0);
24383 SDValue V2 = N->getOperand(1);
24385 if (V1.getOpcode() == ISD::INSERT_SUBVECTOR &&
24386 V1.getOperand(0).getOpcode() == ISD::UNDEF &&
24387 ISD::isBuildVectorAllOnes(V1.getOperand(1).getNode()) &&
24388 ISD::isBuildVectorAllOnes(V2.getNode()))
24395 // On AVX/AVX2 the type v8i1 is legalized to v8i16, which is an XMM sized
24396 // register. In most cases we actually compare or select YMM-sized registers
24397 // and mixing the two types creates horrible code. This method optimizes
24398 // some of the transition sequences.
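// Sketch of a typical case (illustrative): a pattern such as
//   (sign_extend (and (trunc A), (trunc B)))   with A and B of type v8i32
// is rewritten to perform the AND directly on the wide v8i32 values and then
// sign_extend_inreg the result, avoiding the narrow XMM round trip.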
24399 static SDValue WidenMaskArithmetic(SDNode *N, SelectionDAG &DAG,
24400 TargetLowering::DAGCombinerInfo &DCI,
24401 const X86Subtarget *Subtarget) {
24402 EVT VT = N->getValueType(0);
24403 if (!VT.is256BitVector())
24406 assert((N->getOpcode() == ISD::ANY_EXTEND ||
24407 N->getOpcode() == ISD::ZERO_EXTEND ||
24408 N->getOpcode() == ISD::SIGN_EXTEND) && "Invalid Node");
24410 SDValue Narrow = N->getOperand(0);
24411 EVT NarrowVT = Narrow->getValueType(0);
24412 if (!NarrowVT.is128BitVector())
24415 if (Narrow->getOpcode() != ISD::XOR &&
24416 Narrow->getOpcode() != ISD::AND &&
24417 Narrow->getOpcode() != ISD::OR)
24420 SDValue N0 = Narrow->getOperand(0);
24421 SDValue N1 = Narrow->getOperand(1);
24424 // The Left side has to be a trunc.
24425 if (N0.getOpcode() != ISD::TRUNCATE)
24428 // The type of the truncated inputs.
24429 EVT WideVT = N0->getOperand(0)->getValueType(0);
24433 // The right side has to be a 'trunc' or a constant vector.
24434 bool RHSTrunc = N1.getOpcode() == ISD::TRUNCATE;
24435 ConstantSDNode *RHSConstSplat = nullptr;
24436 if (auto *RHSBV = dyn_cast<BuildVectorSDNode>(N1))
24437 RHSConstSplat = RHSBV->getConstantSplatNode();
24438 if (!RHSTrunc && !RHSConstSplat)
24441 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
24443 if (!TLI.isOperationLegalOrPromote(Narrow->getOpcode(), WideVT))
24446 // Set N0 and N1 to hold the inputs to the new wide operation.
24447 N0 = N0->getOperand(0);
24448 if (RHSConstSplat) {
24449 N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT.getScalarType(),
24450 SDValue(RHSConstSplat, 0));
24451 SmallVector<SDValue, 8> C(WideVT.getVectorNumElements(), N1);
24452 N1 = DAG.getNode(ISD::BUILD_VECTOR, DL, WideVT, C);
24453 } else if (RHSTrunc) {
24454 N1 = N1->getOperand(0);
24457 // Generate the wide operation.
24458 SDValue Op = DAG.getNode(Narrow->getOpcode(), DL, WideVT, N0, N1);
24459 unsigned Opcode = N->getOpcode();
24461 case ISD::ANY_EXTEND:
24463 case ISD::ZERO_EXTEND: {
24464 unsigned InBits = NarrowVT.getScalarType().getSizeInBits();
24465 APInt Mask = APInt::getAllOnesValue(InBits);
24466 Mask = Mask.zext(VT.getScalarType().getSizeInBits());
24467 return DAG.getNode(ISD::AND, DL, VT,
24468 Op, DAG.getConstant(Mask, VT));
24470 case ISD::SIGN_EXTEND:
24471 return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT,
24472 Op, DAG.getValueType(NarrowVT));
24474 llvm_unreachable("Unexpected opcode");
24478 static SDValue PerformAndCombine(SDNode *N, SelectionDAG &DAG,
24479 TargetLowering::DAGCombinerInfo &DCI,
24480 const X86Subtarget *Subtarget) {
24481 EVT VT = N->getValueType(0);
24482 if (DCI.isBeforeLegalizeOps())
24485 SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget);
24489 // Create BEXTR instructions
24490 // BEXTR is ((X >> imm) & (2**size-1))
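// Hypothetical example (values chosen for illustration): for
//   (and (srl X, 4), 0xFFF)
// the mask is 12 contiguous bits, so this becomes (X86ISD::BEXTR X, 0x0C04),
// packing the length (12) into bits 15:8 and the start bit (4) into bits 7:0
// of the control operand.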
24491 if (VT == MVT::i32 || VT == MVT::i64) {
24492 SDValue N0 = N->getOperand(0);
24493 SDValue N1 = N->getOperand(1);
24496 // Check for BEXTR.
24497 if ((Subtarget->hasBMI() || Subtarget->hasTBM()) &&
24498 (N0.getOpcode() == ISD::SRA || N0.getOpcode() == ISD::SRL)) {
24499 ConstantSDNode *MaskNode = dyn_cast<ConstantSDNode>(N1);
24500 ConstantSDNode *ShiftNode = dyn_cast<ConstantSDNode>(N0.getOperand(1));
24501 if (MaskNode && ShiftNode) {
24502 uint64_t Mask = MaskNode->getZExtValue();
24503 uint64_t Shift = ShiftNode->getZExtValue();
24504 if (isMask_64(Mask)) {
24505 uint64_t MaskSize = CountPopulation_64(Mask);
24506 if (Shift + MaskSize <= VT.getSizeInBits())
24507 return DAG.getNode(X86ISD::BEXTR, DL, VT, N0.getOperand(0),
24508 DAG.getConstant(Shift | (MaskSize << 8), VT));
24516 // Want to form ANDNP nodes:
24517 // 1) In the hopes of then easily combining them with OR and AND nodes
24518 // to form PBLEND/PSIGN.
24519 // 2) To match ANDN packed intrinsics
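// For example (illustrative): on v2i64,
//   (and (xor X, all-ones), Y)  -->  (X86ISD::ANDNP X, Y)
// which maps directly onto pandn.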
24520 if (VT != MVT::v2i64 && VT != MVT::v4i64)
24523 SDValue N0 = N->getOperand(0);
24524 SDValue N1 = N->getOperand(1);
24527 // Check LHS for vnot
24528 if (N0.getOpcode() == ISD::XOR &&
24529 //ISD::isBuildVectorAllOnes(N0.getOperand(1).getNode()))
24530 CanFoldXORWithAllOnes(N0.getOperand(1).getNode()))
24531 return DAG.getNode(X86ISD::ANDNP, DL, VT, N0.getOperand(0), N1);
24533 // Check RHS for vnot
24534 if (N1.getOpcode() == ISD::XOR &&
24535 //ISD::isBuildVectorAllOnes(N1.getOperand(1).getNode()))
24536 CanFoldXORWithAllOnes(N1.getOperand(1).getNode()))
24537 return DAG.getNode(X86ISD::ANDNP, DL, VT, N1.getOperand(0), N0);
24542 static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG,
24543 TargetLowering::DAGCombinerInfo &DCI,
24544 const X86Subtarget *Subtarget) {
24545 if (DCI.isBeforeLegalizeOps())
24548 SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget);
24552 SDValue N0 = N->getOperand(0);
24553 SDValue N1 = N->getOperand(1);
24554 EVT VT = N->getValueType(0);
24556 // look for psign/blend
24557 if (VT == MVT::v2i64 || VT == MVT::v4i64) {
24558 if (!Subtarget->hasSSSE3() ||
24559 (VT == MVT::v4i64 && !Subtarget->hasInt256()))
24562 // Canonicalize pandn to RHS
24563 if (N0.getOpcode() == X86ISD::ANDNP)
24565 // or (and (m, y), (pandn m, x))
24566 if (N0.getOpcode() == ISD::AND && N1.getOpcode() == X86ISD::ANDNP) {
24567 SDValue Mask = N1.getOperand(0);
24568 SDValue X = N1.getOperand(1);
24570 if (N0.getOperand(0) == Mask)
24571 Y = N0.getOperand(1);
24572 if (N0.getOperand(1) == Mask)
24573 Y = N0.getOperand(0);
24575 // Check to see if the mask appeared in both the AND and ANDNP and
24579 // Validate that X, Y, and Mask are BIT_CONVERTS, and see through them.
24580 // Look through mask bitcast.
24581 if (Mask.getOpcode() == ISD::BITCAST)
24582 Mask = Mask.getOperand(0);
24583 if (X.getOpcode() == ISD::BITCAST)
24584 X = X.getOperand(0);
24585 if (Y.getOpcode() == ISD::BITCAST)
24586 Y = Y.getOperand(0);
24588 EVT MaskVT = Mask.getValueType();
24590 // Validate that the Mask operand is a vector sra node.
24591 // FIXME: what to do for bytes, since there is a psignb/pblendvb, but
24592 // there is no psrai.b
24593 unsigned EltBits = MaskVT.getVectorElementType().getSizeInBits();
24594 unsigned SraAmt = ~0;
24595 if (Mask.getOpcode() == ISD::SRA) {
24596 if (auto *AmtBV = dyn_cast<BuildVectorSDNode>(Mask.getOperand(1)))
24597 if (auto *AmtConst = AmtBV->getConstantSplatNode())
24598 SraAmt = AmtConst->getZExtValue();
24599 } else if (Mask.getOpcode() == X86ISD::VSRAI) {
24600 SDValue SraC = Mask.getOperand(1);
24601 SraAmt = cast<ConstantSDNode>(SraC)->getZExtValue();
24603 if ((SraAmt + 1) != EltBits)
24608 // Now we know we at least have a pblendvb with the mask val. See if
24609 // we can form a psignb/w/d.
24610 // psign = x.type == y.type == mask.type && y = sub(0, x);
24611 if (Y.getOpcode() == ISD::SUB && Y.getOperand(1) == X &&
24612 ISD::isBuildVectorAllZeros(Y.getOperand(0).getNode()) &&
24613 X.getValueType() == MaskVT && Y.getValueType() == MaskVT) {
24614 assert((EltBits == 8 || EltBits == 16 || EltBits == 32) &&
24615 "Unsupported VT for PSIGN");
24616 Mask = DAG.getNode(X86ISD::PSIGN, DL, MaskVT, X, Mask.getOperand(0));
24617 return DAG.getNode(ISD::BITCAST, DL, VT, Mask);
24619 // PBLENDVB is only available on SSE 4.1.
24620 if (!Subtarget->hasSSE41())
24623 EVT BlendVT = (VT == MVT::v4i64) ? MVT::v32i8 : MVT::v16i8;
24625 X = DAG.getNode(ISD::BITCAST, DL, BlendVT, X);
24626 Y = DAG.getNode(ISD::BITCAST, DL, BlendVT, Y);
24627 Mask = DAG.getNode(ISD::BITCAST, DL, BlendVT, Mask);
24628 Mask = DAG.getNode(ISD::VSELECT, DL, BlendVT, Mask, Y, X);
24629 return DAG.getNode(ISD::BITCAST, DL, VT, Mask);
24633 if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64)
24636 // fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
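// Concrete sketch (illustrative): for i64 values,
//   (or (shl x, 11), (srl y, 53))
// is recognised because 11 + 53 == 64 and becomes an X86ISD::SHLD of x and y
// with a count of 11, i.e. a single shld instruction.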
24637 MachineFunction &MF = DAG.getMachineFunction();
24638 bool OptForSize = MF.getFunction()->getAttributes().
24639 hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
24641 // SHLD/SHRD instructions have lower register pressure, but on some
24642 // platforms they have higher latency than the equivalent
24643 // series of shifts/or that would otherwise be generated.
24644 // Don't fold (or (x << c) | (y >> (64 - c))) if SHLD/SHRD instructions
24645 // have higher latencies and we are not optimizing for size.
24646 if (!OptForSize && Subtarget->isSHLDSlow())
24649 if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL)
24651 if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL)
24653 if (!N0.hasOneUse() || !N1.hasOneUse())
24656 SDValue ShAmt0 = N0.getOperand(1);
24657 if (ShAmt0.getValueType() != MVT::i8)
24659 SDValue ShAmt1 = N1.getOperand(1);
24660 if (ShAmt1.getValueType() != MVT::i8)
24662 if (ShAmt0.getOpcode() == ISD::TRUNCATE)
24663 ShAmt0 = ShAmt0.getOperand(0);
24664 if (ShAmt1.getOpcode() == ISD::TRUNCATE)
24665 ShAmt1 = ShAmt1.getOperand(0);
24668 unsigned Opc = X86ISD::SHLD;
24669 SDValue Op0 = N0.getOperand(0);
24670 SDValue Op1 = N1.getOperand(0);
24671 if (ShAmt0.getOpcode() == ISD::SUB) {
24672 Opc = X86ISD::SHRD;
24673 std::swap(Op0, Op1);
24674 std::swap(ShAmt0, ShAmt1);
24677 unsigned Bits = VT.getSizeInBits();
24678 if (ShAmt1.getOpcode() == ISD::SUB) {
24679 SDValue Sum = ShAmt1.getOperand(0);
24680 if (ConstantSDNode *SumC = dyn_cast<ConstantSDNode>(Sum)) {
24681 SDValue ShAmt1Op1 = ShAmt1.getOperand(1);
24682 if (ShAmt1Op1.getNode()->getOpcode() == ISD::TRUNCATE)
24683 ShAmt1Op1 = ShAmt1Op1.getOperand(0);
24684 if (SumC->getSExtValue() == Bits && ShAmt1Op1 == ShAmt0)
24685 return DAG.getNode(Opc, DL, VT,
24687 DAG.getNode(ISD::TRUNCATE, DL,
24690 } else if (ConstantSDNode *ShAmt1C = dyn_cast<ConstantSDNode>(ShAmt1)) {
24691 ConstantSDNode *ShAmt0C = dyn_cast<ConstantSDNode>(ShAmt0);
24693 ShAmt0C->getSExtValue() + ShAmt1C->getSExtValue() == Bits)
24694 return DAG.getNode(Opc, DL, VT,
24695 N0.getOperand(0), N1.getOperand(0),
24696 DAG.getNode(ISD::TRUNCATE, DL,
24703 // Generate NEG and CMOV for integer abs.
24704 static SDValue performIntegerAbsCombine(SDNode *N, SelectionDAG &DAG) {
24705 EVT VT = N->getValueType(0);
24707 // Since X86 does not have CMOV for 8-bit integer, we don't convert
24708 // 8-bit integer abs to NEG and CMOV.
24709 if (VT.isInteger() && VT.getSizeInBits() == 8)
24712 SDValue N0 = N->getOperand(0);
24713 SDValue N1 = N->getOperand(1);
24716 // Check pattern of XOR(ADD(X,Y), Y) where Y is SRA(X, size(X)-1)
24717 // and change it to SUB and CMOV.
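// For illustration: the classic branchless abs,
//   y = x >> 31;  abs = (x + y) ^ y      (for i32)
// is matched here and re-emitted as sub 0, x (to set the flags) plus a CMOV
// on COND_GE that picks between x and the negated value.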
24718 if (VT.isInteger() && N->getOpcode() == ISD::XOR &&
24719 N0.getOpcode() == ISD::ADD &&
24720 N0.getOperand(1) == N1 &&
24721 N1.getOpcode() == ISD::SRA &&
24722 N1.getOperand(0) == N0.getOperand(0))
24723 if (ConstantSDNode *Y1C = dyn_cast<ConstantSDNode>(N1.getOperand(1)))
24724 if (Y1C->getAPIntValue() == VT.getSizeInBits()-1) {
24725 // Generate SUB & CMOV.
24726 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, DAG.getVTList(VT, MVT::i32),
24727 DAG.getConstant(0, VT), N0.getOperand(0));
24729 SDValue Ops[] = { N0.getOperand(0), Neg,
24730 DAG.getConstant(X86::COND_GE, MVT::i8),
24731 SDValue(Neg.getNode(), 1) };
24732 return DAG.getNode(X86ISD::CMOV, DL, DAG.getVTList(VT, MVT::Glue), Ops);
24737 // PerformXorCombine - Attempts to turn XOR nodes into BLSMSK nodes
24738 static SDValue PerformXorCombine(SDNode *N, SelectionDAG &DAG,
24739 TargetLowering::DAGCombinerInfo &DCI,
24740 const X86Subtarget *Subtarget) {
24741 if (DCI.isBeforeLegalizeOps())
24744 if (Subtarget->hasCMov()) {
24745 SDValue RV = performIntegerAbsCombine(N, DAG);
24753 /// PerformLOADCombine - Do target-specific dag combines on LOAD nodes.
24754 static SDValue PerformLOADCombine(SDNode *N, SelectionDAG &DAG,
24755 TargetLowering::DAGCombinerInfo &DCI,
24756 const X86Subtarget *Subtarget) {
24757 LoadSDNode *Ld = cast<LoadSDNode>(N);
24758 EVT RegVT = Ld->getValueType(0);
24759 EVT MemVT = Ld->getMemoryVT();
24760 SDValue Ptr = Ld->getBasePtr();
24761 SDValue Chain = Ld->getChain();
24763 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
24765 // For chips with slow 32-byte unaligned loads, break the 32-byte operation
24766 // into two 16-byte operations.
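// Sketch of the effect (illustrative): an unaligned v8f32 load on such a chip
// becomes two 16-byte loads whose halves are reassembled into the full
// 256-bit value, with a TokenFactor joining the two load chains.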
24767 ISD::LoadExtType Ext = Ld->getExtensionType();
24768 unsigned Alignment = Ld->getAlignment();
24769 bool IsAligned = Alignment == 0 || Alignment >= MemVT.getSizeInBits()/8;
24770 if (RegVT.is256BitVector() && Subtarget->isUnalignedMem32Slow() &&
24771 !DCI.isBeforeLegalizeOps() && !IsAligned && Ext == ISD::NON_EXTLOAD) {
24772 unsigned NumElems = RegVT.getVectorNumElements();
24776 SDValue Ptr = Ld->getBasePtr();
24777 SDValue Increment = DAG.getConstant(16, TLI.getPointerTy());
24779 EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
24781 SDValue Load1 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr,
24782 Ld->getPointerInfo(), Ld->isVolatile(),
24783 Ld->isNonTemporal(), Ld->isInvariant(),
24785 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
24786 SDValue Load2 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr,
24787 Ld->getPointerInfo(), Ld->isVolatile(),
24788 Ld->isNonTemporal(), Ld->isInvariant(),
24789 std::min(16U, Alignment));
24790 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
24792 Load2.getValue(1));
24794 SDValue NewVec = DAG.getUNDEF(RegVT);
24795 NewVec = Insert128BitVector(NewVec, Load1, 0, DAG, dl);
24796 NewVec = Insert128BitVector(NewVec, Load2, NumElems/2, DAG, dl);
24797 return DCI.CombineTo(N, NewVec, TF, true);
24800 // Conversion from x86mmx/i64 to v2i64 types is often done via stack
24801 // store/load. Under certain conditions we can bypass the memory access and
24802 // combine this load to use a scalar_to_vector instead. This leads to
24803 // a reduction in stack use, avoids redundant emission of shuffles, and creates
24804 // isel matching candidates for movq2dq instructions.
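// Illustrative pattern: a non-truncating store of an i64 (or x86mmx) value
// followed by a vector reload of the same slot as v2i64 can skip the stack
// round trip and become a scalar_to_vector of the stored value, which isel
// can then match as movq2dq / movq.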
24805 if (RegVT == MVT::v2i64 && Subtarget->hasSSE2() && Ext == ISD::EXTLOAD &&
24806 !Ld->isVolatile() && ISD::isNON_TRUNCStore(Chain.getNode())) {
24808 // If this load is directly stored, get the original source value.
24809 StoreSDNode *PrevST = cast<StoreSDNode>(Chain);
24810 EVT SrcTy = PrevST->getValue().getValueType();
24811 if (PrevST->getBasePtr() != Ptr ||
24812 !(SrcTy == MVT::i64 || SrcTy == MVT::x86mmx))
24814 SDValue SrcVal = Chain.getOperand(1);
24816 // On 32-bit systems, we can't store 64-bit integers; use f64 instead.
24817 bool Usef64 = TLI.isTypeLegal(MVT::f64) && !Subtarget->is64Bit();
24819 SrcVal = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SrcVal);
24820 SrcVal = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, Usef64 ? MVT::v2f64 : RegVT,
24823 return DCI.CombineTo(N, Usef64 ?
24824 DAG.getNode(ISD::BITCAST, dl, RegVT, SrcVal) : SrcVal, Chain);
24830 /// PerformMLOADCombine - Resolve extending loads
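/// For example (a rough sketch): a sign-extending masked load of v8i16 into
/// v8i32 is widened here to a v16i16 masked load with a widened mask and a
/// shuffled src0, followed by an X86ISD::VSEXT back to v8i32.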
24831 static SDValue PerformMLOADCombine(SDNode *N, SelectionDAG &DAG,
24832 TargetLowering::DAGCombinerInfo &DCI,
24833 const X86Subtarget *Subtarget) {
24834 MaskedLoadSDNode *Mld = cast<MaskedLoadSDNode>(N);
24835 if (Mld->getExtensionType() != ISD::SEXTLOAD)
24838 EVT VT = Mld->getValueType(0);
24839 unsigned NumElems = VT.getVectorNumElements();
24840 EVT LdVT = Mld->getMemoryVT();
24843 assert(LdVT != VT && "Cannot extend to the same type");
24844 unsigned ToSz = VT.getVectorElementType().getSizeInBits();
24845 unsigned FromSz = LdVT.getVectorElementType().getSizeInBits();
24846 // From, To sizes and ElemCount must be pow of two
24847 assert (isPowerOf2_32(NumElems * FromSz * ToSz) &&
24848 "Unexpected size for extending masked load");
24850 unsigned SizeRatio = ToSz / FromSz;
24851 assert(SizeRatio * NumElems * FromSz == VT.getSizeInBits());
24853 // Create a type on which we perform the shuffle
24854 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
24855 LdVT.getScalarType(), NumElems*SizeRatio);
24856 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
24858 // Convert Src0 value
24859 SDValue WideSrc0 = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mld->getSrc0());
24860 if (Mld->getSrc0().getOpcode() != ISD::UNDEF) {
24861 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
24862 for (unsigned i = 0; i != NumElems; ++i)
24863 ShuffleVec[i] = i * SizeRatio;
24865 // Can't shuffle using an illegal type.
24866 assert (DAG.getTargetLoweringInfo().isTypeLegal(WideVecVT)
24867 && "WideVecVT should be legal");
24868 WideSrc0 = DAG.getVectorShuffle(WideVecVT, dl, WideSrc0,
24869 DAG.getUNDEF(WideVecVT), &ShuffleVec[0]);
24871 // Prepare the new mask
24873 SDValue Mask = Mld->getMask();
24874 if (Mask.getValueType() == VT) {
24875 // Mask and original value have the same type
24876 NewMask = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mask);
24877 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
24878 for (unsigned i = 0; i != NumElems; ++i)
24879 ShuffleVec[i] = i * SizeRatio;
24880 for (unsigned i = NumElems; i != NumElems*SizeRatio; ++i)
24881 ShuffleVec[i] = NumElems*SizeRatio;
24882 NewMask = DAG.getVectorShuffle(WideVecVT, dl, NewMask,
24883 DAG.getConstant(0, WideVecVT),
24887 assert(Mask.getValueType().getVectorElementType() == MVT::i1);
24888 unsigned WidenNumElts = NumElems*SizeRatio;
24889 unsigned MaskNumElts = VT.getVectorNumElements();
24890 EVT NewMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
24893 unsigned NumConcat = WidenNumElts / MaskNumElts;
24894 SmallVector<SDValue, 16> Ops(NumConcat);
24895 SDValue ZeroVal = DAG.getConstant(0, Mask.getValueType());
24897 for (unsigned i = 1; i != NumConcat; ++i)
24900 NewMask = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewMaskVT, Ops);
24903 SDValue WideLd = DAG.getMaskedLoad(WideVecVT, dl, Mld->getChain(),
24904 Mld->getBasePtr(), NewMask, WideSrc0,
24905 Mld->getMemoryVT(), Mld->getMemOperand(),
24907 SDValue NewVec = DAG.getNode(X86ISD::VSEXT, dl, VT, WideLd);
24908 return DCI.CombineTo(N, NewVec, WideLd.getValue(1), true);
24911 /// PerformMSTORECombine - Resolve truncating stores
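/// For example (sketch): a truncating masked store of a v8i32 value as v8i16
/// is rewritten as a shuffle that packs the low halves into a v16i16 value,
/// which is then stored through a widened mask as a normal masked store.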
24912 static SDValue PerformMSTORECombine(SDNode *N, SelectionDAG &DAG,
24913 const X86Subtarget *Subtarget) {
24914 MaskedStoreSDNode *Mst = cast<MaskedStoreSDNode>(N);
24915 if (!Mst->isTruncatingStore())
24918 EVT VT = Mst->getValue().getValueType();
24919 unsigned NumElems = VT.getVectorNumElements();
24920 EVT StVT = Mst->getMemoryVT();
24923 assert(StVT != VT && "Cannot truncate to the same type");
24924 unsigned FromSz = VT.getVectorElementType().getSizeInBits();
24925 unsigned ToSz = StVT.getVectorElementType().getSizeInBits();
24927 // From, To sizes and ElemCount must be pow of two
24928 assert (isPowerOf2_32(NumElems * FromSz * ToSz) &&
24929 "Unexpected size for truncating masked store");
24930 // We are going to use the original vector elt for storing.
24931 // Accumulated smaller vector elements must be a multiple of the store size.
24932 assert (((NumElems * FromSz) % ToSz) == 0 &&
24933 "Unexpected ratio for truncating masked store");
24935 unsigned SizeRatio = FromSz / ToSz;
24936 assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits());
24938 // Create a type on which we perform the shuffle
24939 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
24940 StVT.getScalarType(), NumElems*SizeRatio);
24942 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
24944 SDValue WideVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mst->getValue());
24945 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
24946 for (unsigned i = 0; i != NumElems; ++i)
24947 ShuffleVec[i] = i * SizeRatio;
24949 // Can't shuffle using an illegal type.
24950 assert (DAG.getTargetLoweringInfo().isTypeLegal(WideVecVT)
24951 && "WideVecVT should be legal");
24953 SDValue TruncatedVal = DAG.getVectorShuffle(WideVecVT, dl, WideVec,
24954 DAG.getUNDEF(WideVecVT),
24958 SDValue Mask = Mst->getMask();
24959 if (Mask.getValueType() == VT) {
24960 // Mask and original value have the same type
24961 NewMask = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mask);
24962 for (unsigned i = 0; i != NumElems; ++i)
24963 ShuffleVec[i] = i * SizeRatio;
24964 for (unsigned i = NumElems; i != NumElems*SizeRatio; ++i)
24965 ShuffleVec[i] = NumElems*SizeRatio;
24966 NewMask = DAG.getVectorShuffle(WideVecVT, dl, NewMask,
24967 DAG.getConstant(0, WideVecVT),
24971 assert(Mask.getValueType().getVectorElementType() == MVT::i1);
24972 unsigned WidenNumElts = NumElems*SizeRatio;
24973 unsigned MaskNumElts = VT.getVectorNumElements();
24974 EVT NewMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
24977 unsigned NumConcat = WidenNumElts / MaskNumElts;
24978 SmallVector<SDValue, 16> Ops(NumConcat);
24979 SDValue ZeroVal = DAG.getConstant(0, Mask.getValueType());
24981 for (unsigned i = 1; i != NumConcat; ++i)
24984 NewMask = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewMaskVT, Ops);
24987 return DAG.getMaskedStore(Mst->getChain(), dl, TruncatedVal, Mst->getBasePtr(),
24988 NewMask, StVT, Mst->getMemOperand(), false);
24990 /// PerformSTORECombine - Do target-specific dag combines on STORE nodes.
24991 static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
24992 const X86Subtarget *Subtarget) {
24993 StoreSDNode *St = cast<StoreSDNode>(N);
24994 EVT VT = St->getValue().getValueType();
24995 EVT StVT = St->getMemoryVT();
24997 SDValue StoredVal = St->getOperand(1);
24998 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25000 // If we are saving a concatenation of two XMM registers and 32-byte stores
25001 // are slow, such as on Sandy Bridge, perform two 16-byte stores.
25002 unsigned Alignment = St->getAlignment();
25003 bool IsAligned = Alignment == 0 || Alignment >= VT.getSizeInBits()/8;
25004 if (VT.is256BitVector() && Subtarget->isUnalignedMem32Slow() &&
25005 StVT == VT && !IsAligned) {
25006 unsigned NumElems = VT.getVectorNumElements();
25010 SDValue Value0 = Extract128BitVector(StoredVal, 0, DAG, dl);
25011 SDValue Value1 = Extract128BitVector(StoredVal, NumElems/2, DAG, dl);
25013 SDValue Stride = DAG.getConstant(16, TLI.getPointerTy());
25014 SDValue Ptr0 = St->getBasePtr();
25015 SDValue Ptr1 = DAG.getNode(ISD::ADD, dl, Ptr0.getValueType(), Ptr0, Stride);
25017 SDValue Ch0 = DAG.getStore(St->getChain(), dl, Value0, Ptr0,
25018 St->getPointerInfo(), St->isVolatile(),
25019 St->isNonTemporal(), Alignment);
25020 SDValue Ch1 = DAG.getStore(St->getChain(), dl, Value1, Ptr1,
25021 St->getPointerInfo(), St->isVolatile(),
25022 St->isNonTemporal(),
25023 std::min(16U, Alignment));
25024 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ch0, Ch1);
25027 // Optimize trunc store (of multiple scalars) to shuffle and store.
25028 // First, pack all of the elements in one place. Next, store to memory
25029 // in fewer chunks.
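// Rough example (hedged): a truncating store of v4i32 as v4i8 is rewritten to
// shuffle the four low bytes to the bottom of the register and then emit a
// single 32-bit store instead of four separate byte stores.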
25030 if (St->isTruncatingStore() && VT.isVector()) {
25031 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25032 unsigned NumElems = VT.getVectorNumElements();
25033 assert(StVT != VT && "Cannot truncate to the same type");
25034 unsigned FromSz = VT.getVectorElementType().getSizeInBits();
25035 unsigned ToSz = StVT.getVectorElementType().getSizeInBits();
25037 // From, To sizes and ElemCount must be pow of two
25038 if (!isPowerOf2_32(NumElems * FromSz * ToSz)) return SDValue();
25039 // We are going to use the original vector elt for storing.
25040 // Accumulated smaller vector elements must be a multiple of the store size.
25041 if (0 != (NumElems * FromSz) % ToSz) return SDValue();
25043 unsigned SizeRatio = FromSz / ToSz;
25045 assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits());
25047 // Create a type on which we perform the shuffle
25048 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
25049 StVT.getScalarType(), NumElems*SizeRatio);
25051 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
25053 SDValue WideVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, St->getValue());
25054 SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1);
25055 for (unsigned i = 0; i != NumElems; ++i)
25056 ShuffleVec[i] = i * SizeRatio;
25058 // Can't shuffle using an illegal type.
25059 if (!TLI.isTypeLegal(WideVecVT))
25062 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, WideVec,
25063 DAG.getUNDEF(WideVecVT),
25065 // At this point all of the data is stored at the bottom of the
25066 // register. We now need to save it to mem.
25068 // Find the largest store unit
25069 MVT StoreType = MVT::i8;
25070 for (MVT Tp : MVT::integer_valuetypes()) {
25071 if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToSz)
25075 // On 32-bit systems, we can't store 64-bit integers. Try bitcasting to f64.
25076 if (TLI.isTypeLegal(MVT::f64) && StoreType.getSizeInBits() < 64 &&
25077 (64 <= NumElems * ToSz))
25078 StoreType = MVT::f64;
25080 // Bitcast the original vector into a vector of store-size units
25081 EVT StoreVecVT = EVT::getVectorVT(*DAG.getContext(),
25082 StoreType, VT.getSizeInBits()/StoreType.getSizeInBits());
25083 assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits());
25084 SDValue ShuffWide = DAG.getNode(ISD::BITCAST, dl, StoreVecVT, Shuff);
25085 SmallVector<SDValue, 8> Chains;
25086 SDValue Increment = DAG.getConstant(StoreType.getSizeInBits()/8,
25087 TLI.getPointerTy());
25088 SDValue Ptr = St->getBasePtr();
25090 // Perform one or more big stores into memory.
25091 for (unsigned i=0, e=(ToSz*NumElems)/StoreType.getSizeInBits(); i!=e; ++i) {
25092 SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
25093 StoreType, ShuffWide,
25094 DAG.getIntPtrConstant(i));
25095 SDValue Ch = DAG.getStore(St->getChain(), dl, SubVec, Ptr,
25096 St->getPointerInfo(), St->isVolatile(),
25097 St->isNonTemporal(), St->getAlignment());
25098 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
25099 Chains.push_back(Ch);
25102 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
25105 // Turn load->store of MMX types into GPR load/stores. This avoids clobbering
25106 // the FP state in cases where an emms may be missing.
25107 // A preferable solution to the general problem is to figure out the right
25108 // places to insert EMMS. This qualifies as a quick hack.
25110 // Similarly, turn load->store of i64 into double load/stores in 32-bit mode.
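// e.g. on a 32-bit target with SSE2 an i64 load feeding an i64 store becomes
// a single f64 (movsd-style) load/store pair; without SSE2 it is split into
// two i32 load/store pairs at offsets 0 and 4 (see the fallback path below).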
25111 if (VT.getSizeInBits() != 64)
25114 const Function *F = DAG.getMachineFunction().getFunction();
25115 bool NoImplicitFloatOps = F->getAttributes().
25116 hasAttribute(AttributeSet::FunctionIndex, Attribute::NoImplicitFloat);
25117 bool F64IsLegal = !DAG.getTarget().Options.UseSoftFloat && !NoImplicitFloatOps
25118 && Subtarget->hasSSE2();
25119 if ((VT.isVector() ||
25120 (VT == MVT::i64 && F64IsLegal && !Subtarget->is64Bit())) &&
25121 isa<LoadSDNode>(St->getValue()) &&
25122 !cast<LoadSDNode>(St->getValue())->isVolatile() &&
25123 St->getChain().hasOneUse() && !St->isVolatile()) {
25124 SDNode* LdVal = St->getValue().getNode();
25125 LoadSDNode *Ld = nullptr;
25126 int TokenFactorIndex = -1;
25127 SmallVector<SDValue, 8> Ops;
25128 SDNode* ChainVal = St->getChain().getNode();
25129 // Must be a store of a load. We currently handle two cases: the load
25130 // is a direct child, and it's under an intervening TokenFactor. It is
25131 // possible to dig deeper under nested TokenFactors.
25132 if (ChainVal == LdVal)
25133 Ld = cast<LoadSDNode>(St->getChain());
25134 else if (St->getValue().hasOneUse() &&
25135 ChainVal->getOpcode() == ISD::TokenFactor) {
25136 for (unsigned i = 0, e = ChainVal->getNumOperands(); i != e; ++i) {
25137 if (ChainVal->getOperand(i).getNode() == LdVal) {
25138 TokenFactorIndex = i;
25139 Ld = cast<LoadSDNode>(St->getValue());
25141 Ops.push_back(ChainVal->getOperand(i));
25145 if (!Ld || !ISD::isNormalLoad(Ld))
25148 // If this is not the MMX case, i.e. we are just turning i64 load/store
25149 // into f64 load/store, avoid the transformation if there are multiple
25150 // uses of the loaded value.
25151 if (!VT.isVector() && !Ld->hasNUsesOfValue(1, 0))
return SDValue();

SDLoc LdDL(Ld), StDL(N);
25156 // If we are a 64-bit capable x86, lower to a single movq load/store pair.
25157 // Otherwise, if it's legal to use f64 SSE instructions, use an f64 load/store
// pair.
25159 if (Subtarget->is64Bit() || F64IsLegal) {
25160 EVT LdVT = Subtarget->is64Bit() ? MVT::i64 : MVT::f64;
25161 SDValue NewLd = DAG.getLoad(LdVT, LdDL, Ld->getChain(), Ld->getBasePtr(),
25162 Ld->getPointerInfo(), Ld->isVolatile(),
25163 Ld->isNonTemporal(), Ld->isInvariant(),
25164 Ld->getAlignment());
25165 SDValue NewChain = NewLd.getValue(1);
25166 if (TokenFactorIndex != -1) {
25167 Ops.push_back(NewChain);
25168 NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, Ops);
25170 return DAG.getStore(NewChain, StDL, NewLd, St->getBasePtr(),
25171 St->getPointerInfo(),
25172 St->isVolatile(), St->isNonTemporal(),
25173 St->getAlignment());
25176 // Otherwise, lower to two pairs of 32-bit loads / stores.
25177 SDValue LoAddr = Ld->getBasePtr();
25178 SDValue HiAddr = DAG.getNode(ISD::ADD, LdDL, MVT::i32, LoAddr,
25179 DAG.getConstant(4, MVT::i32));
25181 SDValue LoLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), LoAddr,
25182 Ld->getPointerInfo(),
25183 Ld->isVolatile(), Ld->isNonTemporal(),
25184 Ld->isInvariant(), Ld->getAlignment());
25185 SDValue HiLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), HiAddr,
25186 Ld->getPointerInfo().getWithOffset(4),
25187 Ld->isVolatile(), Ld->isNonTemporal(),
25189 MinAlign(Ld->getAlignment(), 4));
25191 SDValue NewChain = LoLd.getValue(1);
25192 if (TokenFactorIndex != -1) {
25193 Ops.push_back(LoLd);
25194 Ops.push_back(HiLd);
25195 NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, Ops);
25198 LoAddr = St->getBasePtr();
25199 HiAddr = DAG.getNode(ISD::ADD, StDL, MVT::i32, LoAddr,
25200 DAG.getConstant(4, MVT::i32));
25202 SDValue LoSt = DAG.getStore(NewChain, StDL, LoLd, LoAddr,
25203 St->getPointerInfo(),
25204 St->isVolatile(), St->isNonTemporal(),
25205 St->getAlignment());
25206 SDValue HiSt = DAG.getStore(NewChain, StDL, HiLd, HiAddr,
25207 St->getPointerInfo().getWithOffset(4),
25209 St->isNonTemporal(),
25210 MinAlign(St->getAlignment(), 4));
25211 return DAG.getNode(ISD::TokenFactor, StDL, MVT::Other, LoSt, HiSt);
25216 /// Return 'true' if this vector operation is "horizontal"
25217 /// and return the operands for the horizontal operation in LHS and RHS. A
25218 /// horizontal operation performs the binary operation on successive elements
25219 /// of its first operand, then on successive elements of its second operand,
25220 /// returning the resulting values in a vector. For example, if
25221 /// A = < float a0, float a1, float a2, float a3 >
25223 /// B = < float b0, float b1, float b2, float b3 >
25224 /// then the result of doing a horizontal operation on A and B is
25225 /// A horizontal-op B = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >.
25226 /// In short, LHS and RHS are inspected to see if LHS op RHS is of the form
25227 /// A horizontal-op B, for some already available A and B, and if so then LHS is
25228 /// set to A, RHS to B, and the routine returns 'true'.
25229 /// Note that the binary operation should have the property that if one of the
25230 /// operands is UNDEF then the result is UNDEF.
25231 static bool isHorizontalBinOp(SDValue &LHS, SDValue &RHS, bool IsCommutative) {
25232 // Look for the following pattern: if
25233 // A = < float a0, float a1, float a2, float a3 >
25234 // B = < float b0, float b1, float b2, float b3 >
25236 // LHS = VECTOR_SHUFFLE A, B, <0, 2, 4, 6>
25237 // RHS = VECTOR_SHUFFLE A, B, <1, 3, 5, 7>
25238 // then LHS op RHS = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >
25239 // which is A horizontal-op B.
25241 // At least one of the operands should be a vector shuffle.
25242 if (LHS.getOpcode() != ISD::VECTOR_SHUFFLE &&
25243 RHS.getOpcode() != ISD::VECTOR_SHUFFLE)
25246 MVT VT = LHS.getSimpleValueType();
25248 assert((VT.is128BitVector() || VT.is256BitVector()) &&
25249 "Unsupported vector type for horizontal add/sub");
25251 // Handle 128 and 256-bit vector lengths. AVX defines horizontal add/sub to
25252 // operate independently on 128-bit lanes.
25253 unsigned NumElts = VT.getVectorNumElements();
25254 unsigned NumLanes = VT.getSizeInBits()/128;
25255 unsigned NumLaneElts = NumElts / NumLanes;
25256 assert((NumLaneElts % 2 == 0) &&
25257 "Vector type should have an even number of elements in each lane");
25258 unsigned HalfLaneElts = NumLaneElts/2;
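// e.g. for v8f32 there are 2 lanes of 4 elements; within each 128-bit lane the
// low HalfLaneElts results come from the first source and the high ones from
// the second source, matching how HADDPS/HSUBPS write their results.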
25260 // View LHS in the form
25261 // LHS = VECTOR_SHUFFLE A, B, LMask
25262 // If LHS is not a shuffle then pretend it is the shuffle
25263 // LHS = VECTOR_SHUFFLE LHS, undef, <0, 1, ..., N-1>
25264 // NOTE: in what follows a default initialized SDValue represents an UNDEF of
// type VT.
SDValue A, B;
25267 SmallVector<int, 16> LMask(NumElts);
25268 if (LHS.getOpcode() == ISD::VECTOR_SHUFFLE) {
25269 if (LHS.getOperand(0).getOpcode() != ISD::UNDEF)
25270 A = LHS.getOperand(0);
25271 if (LHS.getOperand(1).getOpcode() != ISD::UNDEF)
25272 B = LHS.getOperand(1);
25273 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(LHS.getNode())->getMask();
25274 std::copy(Mask.begin(), Mask.end(), LMask.begin());
25276 if (LHS.getOpcode() != ISD::UNDEF)
25278 for (unsigned i = 0; i != NumElts; ++i)
25282 // Likewise, view RHS in the form
25283 // RHS = VECTOR_SHUFFLE C, D, RMask
SDValue C, D;
25285 SmallVector<int, 16> RMask(NumElts);
25286 if (RHS.getOpcode() == ISD::VECTOR_SHUFFLE) {
25287 if (RHS.getOperand(0).getOpcode() != ISD::UNDEF)
25288 C = RHS.getOperand(0);
25289 if (RHS.getOperand(1).getOpcode() != ISD::UNDEF)
25290 D = RHS.getOperand(1);
25291 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(RHS.getNode())->getMask();
25292 std::copy(Mask.begin(), Mask.end(), RMask.begin());
25294 if (RHS.getOpcode() != ISD::UNDEF)
25296 for (unsigned i = 0; i != NumElts; ++i)
25300 // Check that the shuffles are both shuffling the same vectors.
25301 if (!(A == C && B == D) && !(A == D && B == C))
25304 // If everything is UNDEF then bail out: it would be better to fold to UNDEF.
25305 if (!A.getNode() && !B.getNode())
25308 // If A and B occur in reverse order in RHS, then "swap" them (which means
25309 // rewriting the mask).
25311 CommuteVectorShuffleMask(RMask, NumElts);
25313 // At this point LHS and RHS are equivalent to
25314 // LHS = VECTOR_SHUFFLE A, B, LMask
25315 // RHS = VECTOR_SHUFFLE A, B, RMask
25316 // Check that the masks correspond to performing a horizontal operation.
25317 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
25318 for (unsigned i = 0; i != NumLaneElts; ++i) {
25319 int LIdx = LMask[i+l], RIdx = RMask[i+l];
25321 // Ignore any UNDEF components.
25322 if (LIdx < 0 || RIdx < 0 ||
25323 (!A.getNode() && (LIdx < (int)NumElts || RIdx < (int)NumElts)) ||
25324 (!B.getNode() && (LIdx >= (int)NumElts || RIdx >= (int)NumElts)))
25327 // Check that successive elements are being operated on. If not, this is
25328 // not a horizontal operation.
25329 unsigned Src = (i/HalfLaneElts); // each lane is split between srcs
25330 int Index = 2*(i%HalfLaneElts) + NumElts*Src + l;
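// e.g. for v4f32 (one lane, HalfLaneElts == 2): i == 0 expects LIdx == 0 and
// RIdx == 1, while i == 2 expects LIdx == 4 and RIdx == 5 (the first pair
// taken from the second source), which is exactly the HADDPS element order.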
25331 if (!(LIdx == Index && RIdx == Index + 1) &&
25332 !(IsCommutative && LIdx == Index + 1 && RIdx == Index))
25337 LHS = A.getNode() ? A : B; // If A is 'UNDEF', use B for it.
25338 RHS = B.getNode() ? B : A; // If B is 'UNDEF', use A for it.
25342 /// Do target-specific dag combines on floating point adds.
25343 static SDValue PerformFADDCombine(SDNode *N, SelectionDAG &DAG,
25344 const X86Subtarget *Subtarget) {
25345 EVT VT = N->getValueType(0);
25346 SDValue LHS = N->getOperand(0);
25347 SDValue RHS = N->getOperand(1);
25349 // Try to synthesize horizontal adds from adds of shuffles.
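// e.g. (fadd (shuffle A, B, <0,2,4,6>), (shuffle A, B, <1,3,5,7>)) on v4f32
// becomes a single X86ISD::FHADD A, B (illustrative; isHorizontalBinOp checks
// the exact mask pattern).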
25350 if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
25351 (Subtarget->hasFp256() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
25352 isHorizontalBinOp(LHS, RHS, true))
25353 return DAG.getNode(X86ISD::FHADD, SDLoc(N), VT, LHS, RHS);
25357 /// Do target-specific dag combines on floating point subs.
25358 static SDValue PerformFSUBCombine(SDNode *N, SelectionDAG &DAG,
25359 const X86Subtarget *Subtarget) {
25360 EVT VT = N->getValueType(0);
25361 SDValue LHS = N->getOperand(0);
25362 SDValue RHS = N->getOperand(1);
25364 // Try to synthesize horizontal subs from subs of shuffles.
25365 if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
25366 (Subtarget->hasFp256() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
25367 isHorizontalBinOp(LHS, RHS, false))
25368 return DAG.getNode(X86ISD::FHSUB, SDLoc(N), VT, LHS, RHS);
25372 /// Do target-specific dag combines on X86ISD::FOR and X86ISD::FXOR nodes.
25373 static SDValue PerformFORCombine(SDNode *N, SelectionDAG &DAG) {
25374 assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR);
25375 // F[X]OR(0.0, x) -> x
25376 // F[X]OR(x, 0.0) -> x
25377 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
25378 if (C->getValueAPF().isPosZero())
25379 return N->getOperand(1);
25380 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
25381 if (C->getValueAPF().isPosZero())
25382 return N->getOperand(0);
25386 /// Do target-specific dag combines on X86ISD::FMIN and X86ISD::FMAX nodes.
25387 static SDValue PerformFMinFMaxCombine(SDNode *N, SelectionDAG &DAG) {
25388 assert(N->getOpcode() == X86ISD::FMIN || N->getOpcode() == X86ISD::FMAX);
25390 // Only perform optimizations if UnsafeMath is used.
25391 if (!DAG.getTarget().Options.UnsafeFPMath)
25394 // If we run in unsafe-math mode, then convert the FMAX and FMIN nodes
25395 // into FMINC and FMAXC, which are Commutative operations.
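// (MINPS/MAXPS return the second source operand when the inputs are unordered
// or are zeros of opposite sign, so FMIN/FMAX are not commutative in general;
// under unsafe-math we may ignore those cases and use the commutable forms,
// which gives isel and the register allocator more freedom.)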
25396 unsigned NewOp = 0;
25397 switch (N->getOpcode()) {
25398 default: llvm_unreachable("unknown opcode");
25399 case X86ISD::FMIN: NewOp = X86ISD::FMINC; break;
25400 case X86ISD::FMAX: NewOp = X86ISD::FMAXC; break;
25403 return DAG.getNode(NewOp, SDLoc(N), N->getValueType(0),
25404 N->getOperand(0), N->getOperand(1));
25407 /// Do target-specific dag combines on X86ISD::FAND nodes.
25408 static SDValue PerformFANDCombine(SDNode *N, SelectionDAG &DAG) {
25409 // FAND(0.0, x) -> 0.0
25410 // FAND(x, 0.0) -> 0.0
25411 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
25412 if (C->getValueAPF().isPosZero())
25413 return N->getOperand(0);
25414 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
25415 if (C->getValueAPF().isPosZero())
25416 return N->getOperand(1);
25420 /// Do target-specific dag combines on X86ISD::FANDN nodes
25421 static SDValue PerformFANDNCombine(SDNode *N, SelectionDAG &DAG) {
25422 // FANDN(x, 0.0) -> 0.0
25423 // FANDN(0.0, x) -> x
25424 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
25425 if (C->getValueAPF().isPosZero())
25426 return N->getOperand(1);
25427 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
25428 if (C->getValueAPF().isPosZero())
25429 return N->getOperand(1);
25433 static SDValue PerformBTCombine(SDNode *N,
SelectionDAG &DAG,
25435 TargetLowering::DAGCombinerInfo &DCI) {
25436 // BT ignores high bits in the bit index operand.
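// e.g. a 32-bit BT only uses the low 5 bits of a register bit index (the index
// is taken modulo the operand width), so we can tell SimplifyDemandedBits that
// only Log2_32(BitWidth) low bits are demanded and let it shrink the operand.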
25437 SDValue Op1 = N->getOperand(1);
25438 if (Op1.hasOneUse()) {
25439 unsigned BitWidth = Op1.getValueSizeInBits();
25440 APInt DemandedMask = APInt::getLowBitsSet(BitWidth, Log2_32(BitWidth));
25441 APInt KnownZero, KnownOne;
25442 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
25443 !DCI.isBeforeLegalizeOps());
25444 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25445 if (TLO.ShrinkDemandedConstant(Op1, DemandedMask) ||
25446 TLI.SimplifyDemandedBits(Op1, DemandedMask, KnownZero, KnownOne, TLO))
25447 DCI.CommitTargetLoweringOpt(TLO);
25452 static SDValue PerformVZEXT_MOVLCombine(SDNode *N, SelectionDAG &DAG) {
25453 SDValue Op = N->getOperand(0);
25454 if (Op.getOpcode() == ISD::BITCAST)
25455 Op = Op.getOperand(0);
25456 EVT VT = N->getValueType(0), OpVT = Op.getValueType();
25457 if (Op.getOpcode() == X86ISD::VZEXT_LOAD &&
25458 VT.getVectorElementType().getSizeInBits() ==
25459 OpVT.getVectorElementType().getSizeInBits()) {
25460 return DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op);
25465 static SDValue PerformSIGN_EXTEND_INREGCombine(SDNode *N, SelectionDAG &DAG,
25466 const X86Subtarget *Subtarget) {
25467 EVT VT = N->getValueType(0);
25468 if (!VT.isVector())
25471 SDValue N0 = N->getOperand(0);
25472 SDValue N1 = N->getOperand(1);
25473 EVT ExtraVT = cast<VTSDNode>(N1)->getVT();
25476 // The SIGN_EXTEND_INREG to v4i64 is an expensive operation on both SSE
25477 // and AVX2, since there is no sign-extended shift right operation on a
25478 // vector with 64-bit elements.
25479 //(sext_in_reg (v4i64 anyext (v4i32 x )), ExtraVT) ->
25480 // (v4i64 sext (v4i32 sext_in_reg (v4i32 x , ExtraVT)))
25481 if (VT == MVT::v4i64 && (N0.getOpcode() == ISD::ANY_EXTEND ||
25482 N0.getOpcode() == ISD::SIGN_EXTEND)) {
25483 SDValue N00 = N0.getOperand(0);
25485 // EXTLOAD has a better solution on AVX2,
25486 // it may be replaced with X86ISD::VSEXT node.
25487 if (N00.getOpcode() == ISD::LOAD && Subtarget->hasInt256())
25488 if (!ISD::isNormalLoad(N00.getNode()))
25491 if (N00.getValueType() == MVT::v4i32 && ExtraVT.getSizeInBits() < 128) {
25492 SDValue Tmp = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32,
25494 return DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i64, Tmp);
25500 static SDValue PerformSExtCombine(SDNode *N, SelectionDAG &DAG,
25501 TargetLowering::DAGCombinerInfo &DCI,
25502 const X86Subtarget *Subtarget) {
25503 SDValue N0 = N->getOperand(0);
25504 EVT VT = N->getValueType(0);
25506 // (i8,i32 sext (sdivrem (i8 x, i8 y)) ->
25507 // (i8,i32 (sdivrem_sext_hreg (i8 x, i8 y)
25508 // This exposes the sext to the sdivrem lowering, so that it directly extends
25509 // from AH (which we otherwise need to do contortions to access).
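// e.g. (i32 (sext (i8 (srem x, y)))) becomes the second result of an
// X86ISD::SDIVREM8_SEXT_HREG node, whose selection can sign-extend the
// remainder straight out of AH (a sketch of the intent, not the exact
// selection pattern).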
25510 if (N0.getOpcode() == ISD::SDIVREM && N0.getResNo() == 1 &&
25511 N0.getValueType() == MVT::i8 && VT == MVT::i32) {
25513 SDVTList NodeTys = DAG.getVTList(MVT::i8, VT);
25514 SDValue R = DAG.getNode(X86ISD::SDIVREM8_SEXT_HREG, dl, NodeTys,
25515 N0.getOperand(0), N0.getOperand(1));
25516 DAG.ReplaceAllUsesOfValueWith(N0.getValue(0), R.getValue(0));
25517 return R.getValue(1);
25520 if (!DCI.isBeforeLegalizeOps())
25523 if (!Subtarget->hasFp256())
25526 if (VT.isVector() && VT.getSizeInBits() == 256) {
25527 SDValue R = WidenMaskArithmetic(N, DAG, DCI, Subtarget);
25535 static SDValue PerformFMACombine(SDNode *N, SelectionDAG &DAG,
25536 const X86Subtarget* Subtarget) {
25538 EVT VT = N->getValueType(0);
25540 // Let legalize expand this if it isn't a legal type yet.
25541 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
25544 EVT ScalarVT = VT.getScalarType();
25545 if ((ScalarVT != MVT::f32 && ScalarVT != MVT::f64) ||
25546 (!Subtarget->hasFMA() && !Subtarget->hasFMA4()))
25549 SDValue A = N->getOperand(0);
25550 SDValue B = N->getOperand(1);
25551 SDValue C = N->getOperand(2);
25553 bool NegA = (A.getOpcode() == ISD::FNEG);
25554 bool NegB = (B.getOpcode() == ISD::FNEG);
25555 bool NegC = (C.getOpcode() == ISD::FNEG);
25557 // The multiplication is negated when exactly one of A and B is negated (NegA xor NegB).
25558 bool NegMul = (NegA != NegB);
if (NegA) A = A.getOperand(0);
if (NegB) B = B.getOperand(0);
if (NegC) C = C.getOperand(0);

unsigned Opcode;
if (!NegMul)
Opcode = (!NegC) ? X86ISD::FMADD : X86ISD::FMSUB;
else
Opcode = (!NegC) ? X86ISD::FNMADD : X86ISD::FNMSUB;
25572 return DAG.getNode(Opcode, dl, VT, A, B, C);
25575 static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG,
25576 TargetLowering::DAGCombinerInfo &DCI,
25577 const X86Subtarget *Subtarget) {
25578 // (i32 zext (and (i8 x86isd::setcc_carry), 1)) ->
25579 // (and (i32 x86isd::setcc_carry), 1)
25580 // This eliminates the zext. This transformation is necessary because
25581 // ISD::SETCC is always legalized to i8.
25583 SDValue N0 = N->getOperand(0);
25584 EVT VT = N->getValueType(0);
25586 if (N0.getOpcode() == ISD::AND &&
25588 N0.getOperand(0).hasOneUse()) {
25589 SDValue N00 = N0.getOperand(0);
25590 if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
25591 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
25592 if (!C || C->getZExtValue() != 1)
25594 return DAG.getNode(ISD::AND, dl, VT,
25595 DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
25596 N00.getOperand(0), N00.getOperand(1)),
25597 DAG.getConstant(1, VT));
25601 if (N0.getOpcode() == ISD::TRUNCATE &&
25603 N0.getOperand(0).hasOneUse()) {
25604 SDValue N00 = N0.getOperand(0);
25605 if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
25606 return DAG.getNode(ISD::AND, dl, VT,
25607 DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
25608 N00.getOperand(0), N00.getOperand(1)),
25609 DAG.getConstant(1, VT));
25612 if (VT.is256BitVector()) {
25613 SDValue R = WidenMaskArithmetic(N, DAG, DCI, Subtarget);
25618 // (i8,i32 zext (udivrem (i8 x, i8 y)) ->
25619 // (i8,i32 (udivrem_zext_hreg (i8 x, i8 y)
25620 // This exposes the zext to the udivrem lowering, so that it directly extends
25621 // from AH (which we otherwise need to do contortions to access).
25622 if (N0.getOpcode() == ISD::UDIVREM &&
25623 N0.getResNo() == 1 && N0.getValueType() == MVT::i8 &&
25624 (VT == MVT::i32 || VT == MVT::i64)) {
25625 SDVTList NodeTys = DAG.getVTList(MVT::i8, VT);
25626 SDValue R = DAG.getNode(X86ISD::UDIVREM8_ZEXT_HREG, dl, NodeTys,
25627 N0.getOperand(0), N0.getOperand(1));
25628 DAG.ReplaceAllUsesOfValueWith(N0.getValue(0), R.getValue(0));
25629 return R.getValue(1);
25635 // Optimize x == -y --> x+y == 0
25636 // x != -y --> x+y != 0
25637 static SDValue PerformISDSETCCCombine(SDNode *N, SelectionDAG &DAG,
25638 const X86Subtarget* Subtarget) {
25639 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
25640 SDValue LHS = N->getOperand(0);
25641 SDValue RHS = N->getOperand(1);
25642 EVT VT = N->getValueType(0);
25645 if ((CC == ISD::SETNE || CC == ISD::SETEQ) && LHS.getOpcode() == ISD::SUB)
25646 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(LHS.getOperand(0)))
25647 if (C->getAPIntValue() == 0 && LHS.hasOneUse()) {
25648 SDValue addV = DAG.getNode(ISD::ADD, SDLoc(N),
25649 LHS.getValueType(), RHS, LHS.getOperand(1));
25650 return DAG.getSetCC(SDLoc(N), N->getValueType(0),
25651 addV, DAG.getConstant(0, addV.getValueType()), CC);
25653 if ((CC == ISD::SETNE || CC == ISD::SETEQ) && RHS.getOpcode() == ISD::SUB)
25654 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS.getOperand(0)))
25655 if (C->getAPIntValue() == 0 && RHS.hasOneUse()) {
25656 SDValue addV = DAG.getNode(ISD::ADD, SDLoc(N),
25657 RHS.getValueType(), LHS, RHS.getOperand(1));
25658 return DAG.getSetCC(SDLoc(N), N->getValueType(0),
25659 addV, DAG.getConstant(0, addV.getValueType()), CC);
25662 if (VT.getScalarType() == MVT::i1) {
25663 bool IsSEXT0 = (LHS.getOpcode() == ISD::SIGN_EXTEND) &&
25664 (LHS.getOperand(0).getValueType().getScalarType() == MVT::i1);
25665 bool IsVZero0 = ISD::isBuildVectorAllZeros(LHS.getNode());
25666 if (!IsSEXT0 && !IsVZero0)
25668 bool IsSEXT1 = (RHS.getOpcode() == ISD::SIGN_EXTEND) &&
25669 (RHS.getOperand(0).getValueType().getScalarType() == MVT::i1);
25670 bool IsVZero1 = ISD::isBuildVectorAllZeros(RHS.getNode());
25672 if (!IsSEXT1 && !IsVZero1)
25675 if (IsSEXT0 && IsVZero1) {
25676 assert(VT == LHS.getOperand(0).getValueType() && "Unexpected operand type");
25677 if (CC == ISD::SETEQ)
25678 return DAG.getNOT(DL, LHS.getOperand(0), VT);
25679 return LHS.getOperand(0);
25681 if (IsSEXT1 && IsVZero0) {
25682 assert(VT == RHS.getOperand(0).getValueType() && "Unexpected operand type");
25683 if (CC == ISD::SETEQ)
25684 return DAG.getNOT(DL, RHS.getOperand(0), VT);
25685 return RHS.getOperand(0);
25692 static SDValue PerformINSERTPSCombine(SDNode *N, SelectionDAG &DAG,
25693 const X86Subtarget *Subtarget) {
25695 MVT VT = N->getOperand(1)->getSimpleValueType(0);
25696 assert((VT == MVT::v4f32 || VT == MVT::v4i32) &&
25697 "X86insertps is only defined for v4x32");
25699 SDValue Ld = N->getOperand(1);
25700 if (MayFoldLoad(Ld)) {
25701 // Extract the countS bits from the immediate so we can get the proper
25702 // address when narrowing the vector load to a specific element.
25703 // When the second source op is a memory address, insertps doesn't use
25704 // countS and just gets an f32 from that address.
25705 unsigned DestIndex =
25706 cast<ConstantSDNode>(N->getOperand(2))->getZExtValue() >> 6;
25707 Ld = NarrowVectorLoadToElement(cast<LoadSDNode>(Ld), DestIndex, DAG);
25711 // Create this as a scalar to vector to match the instruction pattern.
25712 SDValue LoadScalarToVector = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Ld);
25713 // countS bits are ignored when loading from memory on insertps, which
25714 // means we don't need to explicitly set them to 0.
25715 return DAG.getNode(X86ISD::INSERTPS, dl, VT, N->getOperand(0),
25716 LoadScalarToVector, N->getOperand(2));
25719 // Helper function of PerformSETCCCombine. It is to materialize "setb reg"
25720 // as "sbb reg,reg", since it can be extended without zext and produces
25721 // an all-ones bit which is more useful than 0/1 in some cases.
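// e.g. "sbb %eax, %eax" after a compare that set CF yields 0 or 0xffffffff;
// masking with 1 recovers the plain 0/1 setb value, while the all-ones form
// can feed ANDs and selects directly without a separate zero extension.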
25722 static SDValue MaterializeSETB(SDLoc DL, SDValue EFLAGS, SelectionDAG &DAG,
MVT VT) {
if (VT == MVT::i8)
25725 return DAG.getNode(ISD::AND, DL, VT,
25726 DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8,
25727 DAG.getConstant(X86::COND_B, MVT::i8), EFLAGS),
25728 DAG.getConstant(1, VT));
25729 assert (VT == MVT::i1 && "Unexpected type for SETCC node");
25730 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1,
25731 DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8,
25732 DAG.getConstant(X86::COND_B, MVT::i8), EFLAGS));
25735 // Optimize RES = X86ISD::SETCC CONDCODE, EFLAG_INPUT
25736 static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG &DAG,
25737 TargetLowering::DAGCombinerInfo &DCI,
25738 const X86Subtarget *Subtarget) {
25740 X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(0));
25741 SDValue EFLAGS = N->getOperand(1);
25743 if (CC == X86::COND_A) {
25744 // Try to convert COND_A into COND_B in an attempt to facilitate
25745 // materializing "setb reg".
25747 // Do not flip "e > c", where "c" is a constant, because Cmp instruction
25748 // cannot take an immediate as its first operand.
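// e.g. "a > b" (COND_A on SUB a, b) is rewritten as "b < a" (COND_B on
// SUB b, a), so the setb/sbb materialization above becomes applicable.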
25750 if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.hasOneUse() &&
25751 EFLAGS.getValueType().isInteger() &&
25752 !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
25753 SDValue NewSub = DAG.getNode(X86ISD::SUB, SDLoc(EFLAGS),
25754 EFLAGS.getNode()->getVTList(),
25755 EFLAGS.getOperand(1), EFLAGS.getOperand(0));
25756 SDValue NewEFLAGS = SDValue(NewSub.getNode(), EFLAGS.getResNo());
25757 return MaterializeSETB(DL, NewEFLAGS, DAG, N->getSimpleValueType(0));
25761 // Materialize "setb reg" as "sbb reg,reg", since it can be extended without
25762 // a zext and produces an all-ones bit which is more useful than 0/1 in some cases.
25764 if (CC == X86::COND_B)
25765 return MaterializeSETB(DL, EFLAGS, DAG, N->getSimpleValueType(0));
25769 Flags = checkBoolTestSetCCCombine(EFLAGS, CC);
25770 if (Flags.getNode()) {
25771 SDValue Cond = DAG.getConstant(CC, MVT::i8);
25772 return DAG.getNode(X86ISD::SETCC, DL, N->getVTList(), Cond, Flags);
25778 // Optimize branch condition evaluation.
25780 static SDValue PerformBrCondCombine(SDNode *N, SelectionDAG &DAG,
25781 TargetLowering::DAGCombinerInfo &DCI,
25782 const X86Subtarget *Subtarget) {
25784 SDValue Chain = N->getOperand(0);
25785 SDValue Dest = N->getOperand(1);
25786 SDValue EFLAGS = N->getOperand(3);
25787 X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(2));
25791 Flags = checkBoolTestSetCCCombine(EFLAGS, CC);
25792 if (Flags.getNode()) {
25793 SDValue Cond = DAG.getConstant(CC, MVT::i8);
25794 return DAG.getNode(X86ISD::BRCOND, DL, N->getVTList(), Chain, Dest, Cond,
25801 static SDValue performVectorCompareAndMaskUnaryOpCombine(SDNode *N,
25802 SelectionDAG &DAG) {
25803 // Take advantage of vector comparisons producing 0 or -1 in each lane to
25804 // optimize away the operation when it is applied to a constant.
25806 // The general transformation is:
25807 // UNARYOP(AND(VECTOR_CMP(x,y), constant)) -->
25808 // AND(VECTOR_CMP(x,y), constant2)
25809 // constant2 = UNARYOP(constant)
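//
// e.g. (sint_to_fp (and (setcc ...), <1,1,1,1>)) can become
// (bitcast (and (setcc ...), (bitcast <1.0,1.0,1.0,1.0>))): each compare lane
// is all-zeros or all-ones, so ANDing with the bit pattern of the converted
// constant yields the converted result directly (an illustrative sketch; the
// code below only requires the AND mask to be a constant build_vector).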
25811 // Early exit if this isn't a vector operation, the operand of the
25812 // unary operation isn't a bitwise AND, or if the sizes of the operations
25813 // aren't the same.
25814 EVT VT = N->getValueType(0);
25815 if (!VT.isVector() || N->getOperand(0)->getOpcode() != ISD::AND ||
25816 N->getOperand(0)->getOperand(0)->getOpcode() != ISD::SETCC ||
25817 VT.getSizeInBits() != N->getOperand(0)->getValueType(0).getSizeInBits())
25820 // Now check that the other operand of the AND is a constant. We could
25821 // make the transformation for non-constant splats as well, but it's unclear
25822 // that would be a benefit as it would not eliminate any operations, just
25823 // perform one more step in scalar code before moving to the vector unit.
25824 if (BuildVectorSDNode *BV =
25825 dyn_cast<BuildVectorSDNode>(N->getOperand(0)->getOperand(1))) {
25826 // Bail out if the vector isn't a constant.
25827 if (!BV->isConstant())
25830 // Everything checks out. Build up the new and improved node.
25832 EVT IntVT = BV->getValueType(0);
25833 // Create a new constant of the appropriate type for the transformed node.
25835 SDValue SourceConst = DAG.getNode(N->getOpcode(), DL, VT, SDValue(BV, 0));
25836 // The AND node needs bitcasts to/from an integer vector type around it.
25837 SDValue MaskConst = DAG.getNode(ISD::BITCAST, DL, IntVT, SourceConst);
25838 SDValue NewAnd = DAG.getNode(ISD::AND, DL, IntVT,
25839 N->getOperand(0)->getOperand(0), MaskConst);
25840 SDValue Res = DAG.getNode(ISD::BITCAST, DL, VT, NewAnd);
25847 static SDValue PerformSINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG,
25848 const X86TargetLowering *XTLI) {
25849 // First try to optimize away the conversion entirely when it's
25850 // conditionally from a constant. Vectors only.
25851 SDValue Res = performVectorCompareAndMaskUnaryOpCombine(N, DAG);
25852 if (Res != SDValue())
25855 // Now move on to more general possibilities.
25856 SDValue Op0 = N->getOperand(0);
25857 EVT InVT = Op0->getValueType(0);
25859 // SINT_TO_FP(v4i8) -> SINT_TO_FP(SEXT(v4i8 to v4i32))
25860 if (InVT == MVT::v8i8 || InVT == MVT::v4i8) {
25862 MVT DstVT = InVT == MVT::v4i8 ? MVT::v4i32 : MVT::v8i32;
25863 SDValue P = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Op0);
25864 return DAG.getNode(ISD::SINT_TO_FP, dl, N->getValueType(0), P);
25867 // Transform (SINT_TO_FP (i64 ...)) into an x87 operation if we have
25868 // a 32-bit target where SSE doesn't support i64->FP operations.
25869 if (Op0.getOpcode() == ISD::LOAD) {
25870 LoadSDNode *Ld = cast<LoadSDNode>(Op0.getNode());
25871 EVT VT = Ld->getValueType(0);
25872 if (!Ld->isVolatile() && !N->getValueType(0).isVector() &&
25873 ISD::isNON_EXTLoad(Op0.getNode()) && Op0.hasOneUse() &&
25874 !XTLI->getSubtarget()->is64Bit() &&
25876 SDValue FILDChain = XTLI->BuildFILD(SDValue(N, 0), Ld->getValueType(0),
25877 Ld->getChain(), Op0, DAG);
25878 DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), FILDChain.getValue(1));
25885 // Optimize RES, EFLAGS = X86ISD::ADC LHS, RHS, EFLAGS
25886 static SDValue PerformADCCombine(SDNode *N, SelectionDAG &DAG,
25887 X86TargetLowering::DAGCombinerInfo &DCI) {
25888 // If the LHS and RHS of the ADC node are zero, then it can't overflow and
25889 // the result is either zero or one (depending on the input carry bit).
25890 // Strength reduce this down to a "set on carry" aka SETCC_CARRY&1.
25891 if (X86::isZeroNode(N->getOperand(0)) &&
25892 X86::isZeroNode(N->getOperand(1)) &&
25893 // We don't have a good way to replace an EFLAGS use, so only do this when
// the flag result is unused.
25895 SDValue(N, 1).use_empty()) {
25897 EVT VT = N->getValueType(0);
25898 SDValue CarryOut = DAG.getConstant(0, N->getValueType(1));
25899 SDValue Res1 = DAG.getNode(ISD::AND, DL, VT,
25900 DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
25901 DAG.getConstant(X86::COND_B,MVT::i8),
25903 DAG.getConstant(1, VT));
25904 return DCI.CombineTo(N, Res1, CarryOut);
25910 // fold (add Y, (sete X, 0)) -> adc 0, Y
25911 // (add Y, (setne X, 0)) -> sbb -1, Y
25912 // (sub (sete X, 0), Y) -> sbb 0, Y
25913 // (sub (setne X, 0), Y) -> adc -1, Y
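// The trick: comparing X against 1 sets the carry flag exactly when X == 0,
// so the sete/setne value can be folded into the carry input of an adc/sbb
// with a 0 or -1 immediate instead of being materialized as a register first.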
25914 static SDValue OptimizeConditionalInDecrement(SDNode *N, SelectionDAG &DAG) {
25917 // Look through ZExts.
25918 SDValue Ext = N->getOperand(N->getOpcode() == ISD::SUB ? 1 : 0);
25919 if (Ext.getOpcode() != ISD::ZERO_EXTEND || !Ext.hasOneUse())
25922 SDValue SetCC = Ext.getOperand(0);
25923 if (SetCC.getOpcode() != X86ISD::SETCC || !SetCC.hasOneUse())
25926 X86::CondCode CC = (X86::CondCode)SetCC.getConstantOperandVal(0);
25927 if (CC != X86::COND_E && CC != X86::COND_NE)
25930 SDValue Cmp = SetCC.getOperand(1);
25931 if (Cmp.getOpcode() != X86ISD::CMP || !Cmp.hasOneUse() ||
25932 !X86::isZeroNode(Cmp.getOperand(1)) ||
25933 !Cmp.getOperand(0).getValueType().isInteger())
25936 SDValue CmpOp0 = Cmp.getOperand(0);
25937 SDValue NewCmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32, CmpOp0,
25938 DAG.getConstant(1, CmpOp0.getValueType()));
25940 SDValue OtherVal = N->getOperand(N->getOpcode() == ISD::SUB ? 0 : 1);
25941 if (CC == X86::COND_NE)
25942 return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::ADC : X86ISD::SBB,
25943 DL, OtherVal.getValueType(), OtherVal,
25944 DAG.getConstant(-1ULL, OtherVal.getValueType()), NewCmp);
25945 return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::SBB : X86ISD::ADC,
25946 DL, OtherVal.getValueType(), OtherVal,
25947 DAG.getConstant(0, OtherVal.getValueType()), NewCmp);
25950 /// PerformADDCombine - Do target-specific dag combines on integer adds.
25951 static SDValue PerformAddCombine(SDNode *N, SelectionDAG &DAG,
25952 const X86Subtarget *Subtarget) {
25953 EVT VT = N->getValueType(0);
25954 SDValue Op0 = N->getOperand(0);
25955 SDValue Op1 = N->getOperand(1);
25957 // Try to synthesize horizontal adds from adds of shuffles.
25958 if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) ||
25959 (Subtarget->hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) &&
25960 isHorizontalBinOp(Op0, Op1, true))
25961 return DAG.getNode(X86ISD::HADD, SDLoc(N), VT, Op0, Op1);
25963 return OptimizeConditionalInDecrement(N, DAG);
25966 static SDValue PerformSubCombine(SDNode *N, SelectionDAG &DAG,
25967 const X86Subtarget *Subtarget) {
25968 SDValue Op0 = N->getOperand(0);
25969 SDValue Op1 = N->getOperand(1);
25971 // X86 can't encode an immediate LHS of a sub. See if we can push the
25972 // negation into a preceding instruction.
25973 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op0)) {
25974 // If the RHS of the sub is a XOR with one use and a constant, invert the
25975 // immediate. Then add one to the LHS of the sub so we can turn
25976 // X-Y -> X+~Y+1, saving one register.
25977 if (Op1->hasOneUse() && Op1.getOpcode() == ISD::XOR &&
25978 isa<ConstantSDNode>(Op1.getOperand(1))) {
25979 APInt XorC = cast<ConstantSDNode>(Op1.getOperand(1))->getAPIntValue();
25980 EVT VT = Op0.getValueType();
25981 SDValue NewXor = DAG.getNode(ISD::XOR, SDLoc(Op1), VT,
25983 DAG.getConstant(~XorC, VT));
25984 return DAG.getNode(ISD::ADD, SDLoc(N), VT, NewXor,
25985 DAG.getConstant(C->getAPIntValue()+1, VT));
25989 // Try to synthesize horizontal adds from adds of shuffles.
25990 EVT VT = N->getValueType(0);
25991 if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) ||
25992 (Subtarget->hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) &&
25993 isHorizontalBinOp(Op0, Op1, true))
25994 return DAG.getNode(X86ISD::HSUB, SDLoc(N), VT, Op0, Op1);
25996 return OptimizeConditionalInDecrement(N, DAG);
25999 /// performVZEXTCombine - Do target-specific dag combines on X86ISD::VZEXT nodes.
26000 static SDValue performVZEXTCombine(SDNode *N, SelectionDAG &DAG,
26001 TargetLowering::DAGCombinerInfo &DCI,
26002 const X86Subtarget *Subtarget) {
26004 MVT VT = N->getSimpleValueType(0);
26005 SDValue Op = N->getOperand(0);
26006 MVT OpVT = Op.getSimpleValueType();
26007 MVT OpEltVT = OpVT.getVectorElementType();
26008 unsigned InputBits = OpEltVT.getSizeInBits() * VT.getVectorNumElements();
26010 // (vzext (bitcast (vzext (x)) -> (vzext x)
26012 while (V.getOpcode() == ISD::BITCAST)
26013 V = V.getOperand(0);
26015 if (V != Op && V.getOpcode() == X86ISD::VZEXT) {
26016 MVT InnerVT = V.getSimpleValueType();
26017 MVT InnerEltVT = InnerVT.getVectorElementType();
26019 // If the element sizes match exactly, we can just do one larger vzext. This
26020 // is always an exact type match as vzext operates on integer types.
26021 if (OpEltVT == InnerEltVT) {
26022 assert(OpVT == InnerVT && "Types must match for vzext!");
26023 return DAG.getNode(X86ISD::VZEXT, DL, VT, V.getOperand(0));
26026 // The only other way we can combine them is if only a single element of the
26027 // inner vzext is used in the input to the outer vzext.
26028 if (InnerEltVT.getSizeInBits() < InputBits)
26031 // In this case, the inner vzext is completely dead because we're going to
26032 // only look at bits inside of the low element. Just do the outer vzext on
26033 // a bitcast of the input to the inner.
26034 return DAG.getNode(X86ISD::VZEXT, DL, VT,
26035 DAG.getNode(ISD::BITCAST, DL, OpVT, V));
26038 // Check if we can bypass extracting and re-inserting an element of an input
26039 // vector. Essentially:
26040 // (bitcast (sclr2vec (ext_vec_elt x))) -> (bitcast x)
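// e.g. (vzext (scalar_to_vector (extract_vector_elt x, 0))) can use x's low
// element directly: bitcast x to the operand type (extracting a subvector
// first if x is wider) and vzext that instead.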
26041 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR &&
26042 V.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
26043 V.getOperand(0).getSimpleValueType().getSizeInBits() == InputBits) {
26044 SDValue ExtractedV = V.getOperand(0);
26045 SDValue OrigV = ExtractedV.getOperand(0);
26046 if (auto *ExtractIdx = dyn_cast<ConstantSDNode>(ExtractedV.getOperand(1)))
26047 if (ExtractIdx->getZExtValue() == 0) {
26048 MVT OrigVT = OrigV.getSimpleValueType();
26049 // Extract a subvector if necessary...
26050 if (OrigVT.getSizeInBits() > OpVT.getSizeInBits()) {
26051 int Ratio = OrigVT.getSizeInBits() / OpVT.getSizeInBits();
26052 OrigVT = MVT::getVectorVT(OrigVT.getVectorElementType(),
26053 OrigVT.getVectorNumElements() / Ratio);
26054 OrigV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigVT, OrigV,
26055 DAG.getIntPtrConstant(0));
26057 Op = DAG.getNode(ISD::BITCAST, DL, OpVT, OrigV);
26058 return DAG.getNode(X86ISD::VZEXT, DL, VT, Op);
26065 SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
26066 DAGCombinerInfo &DCI) const {
26067 SelectionDAG &DAG = DCI.DAG;
26068 switch (N->getOpcode()) {
26070 case ISD::EXTRACT_VECTOR_ELT:
26071 return PerformEXTRACT_VECTOR_ELTCombine(N, DAG, DCI);
26074 case X86ISD::SHRUNKBLEND:
26075 return PerformSELECTCombine(N, DAG, DCI, Subtarget);
26076 case X86ISD::CMOV: return PerformCMOVCombine(N, DAG, DCI, Subtarget);
26077 case ISD::ADD: return PerformAddCombine(N, DAG, Subtarget);
26078 case ISD::SUB: return PerformSubCombine(N, DAG, Subtarget);
26079 case X86ISD::ADC: return PerformADCCombine(N, DAG, DCI);
26080 case ISD::MUL: return PerformMulCombine(N, DAG, DCI);
26083 case ISD::SRL: return PerformShiftCombine(N, DAG, DCI, Subtarget);
26084 case ISD::AND: return PerformAndCombine(N, DAG, DCI, Subtarget);
26085 case ISD::OR: return PerformOrCombine(N, DAG, DCI, Subtarget);
26086 case ISD::XOR: return PerformXorCombine(N, DAG, DCI, Subtarget);
26087 case ISD::LOAD: return PerformLOADCombine(N, DAG, DCI, Subtarget);
26088 case ISD::MLOAD: return PerformMLOADCombine(N, DAG, DCI, Subtarget);
26089 case ISD::STORE: return PerformSTORECombine(N, DAG, Subtarget);
26090 case ISD::MSTORE: return PerformMSTORECombine(N, DAG, Subtarget);
26091 case ISD::SINT_TO_FP: return PerformSINT_TO_FPCombine(N, DAG, this);
26092 case ISD::FADD: return PerformFADDCombine(N, DAG, Subtarget);
26093 case ISD::FSUB: return PerformFSUBCombine(N, DAG, Subtarget);
26095 case X86ISD::FOR: return PerformFORCombine(N, DAG);
26097 case X86ISD::FMAX: return PerformFMinFMaxCombine(N, DAG);
26098 case X86ISD::FAND: return PerformFANDCombine(N, DAG);
26099 case X86ISD::FANDN: return PerformFANDNCombine(N, DAG);
26100 case X86ISD::BT: return PerformBTCombine(N, DAG, DCI);
26101 case X86ISD::VZEXT_MOVL: return PerformVZEXT_MOVLCombine(N, DAG);
26102 case ISD::ANY_EXTEND:
26103 case ISD::ZERO_EXTEND: return PerformZExtCombine(N, DAG, DCI, Subtarget);
26104 case ISD::SIGN_EXTEND: return PerformSExtCombine(N, DAG, DCI, Subtarget);
26105 case ISD::SIGN_EXTEND_INREG:
26106 return PerformSIGN_EXTEND_INREGCombine(N, DAG, Subtarget);
26107 case ISD::TRUNCATE: return PerformTruncateCombine(N, DAG,DCI,Subtarget);
26108 case ISD::SETCC: return PerformISDSETCCCombine(N, DAG, Subtarget);
26109 case X86ISD::SETCC: return PerformSETCCCombine(N, DAG, DCI, Subtarget);
26110 case X86ISD::BRCOND: return PerformBrCondCombine(N, DAG, DCI, Subtarget);
26111 case X86ISD::VZEXT: return performVZEXTCombine(N, DAG, DCI, Subtarget);
26112 case X86ISD::SHUFP: // Handle all target specific shuffles
26113 case X86ISD::PALIGNR:
26114 case X86ISD::UNPCKH:
26115 case X86ISD::UNPCKL:
26116 case X86ISD::MOVHLPS:
26117 case X86ISD::MOVLHPS:
26118 case X86ISD::PSHUFB:
26119 case X86ISD::PSHUFD:
26120 case X86ISD::PSHUFHW:
26121 case X86ISD::PSHUFLW:
26122 case X86ISD::MOVSS:
26123 case X86ISD::MOVSD:
26124 case X86ISD::VPERMILPI:
26125 case X86ISD::VPERM2X128:
26126 case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, DCI,Subtarget);
26127 case ISD::FMA: return PerformFMACombine(N, DAG, Subtarget);
26128 case ISD::INTRINSIC_WO_CHAIN:
26129 return PerformINTRINSIC_WO_CHAINCombine(N, DAG, Subtarget);
26130 case X86ISD::INSERTPS: {
26131 if (getTargetMachine().getOptLevel() > CodeGenOpt::None)
26132 return PerformINSERTPSCombine(N, DAG, Subtarget);
26135 case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DAG, Subtarget);
26141 /// isTypeDesirableForOp - Return true if the target has native support for
26142 /// the specified value type and it is 'desirable' to use the type for the
26143 /// given node type. e.g. On x86 i16 is legal, but undesirable since i16
26144 /// instruction encodings are longer and some i16 instructions are slow.
26145 bool X86TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const {
26146 if (!isTypeLegal(VT))
26148 if (VT != MVT::i16)
26155 case ISD::SIGN_EXTEND:
26156 case ISD::ZERO_EXTEND:
26157 case ISD::ANY_EXTEND:
26170 /// IsDesirableToPromoteOp - This method queries the target whether it is
26171 /// beneficial for dag combiner to promote the specified node. If true, it
26172 /// should return the desired promotion type by reference.
26173 bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
26174 EVT VT = Op.getValueType();
26175 if (VT != MVT::i16)
26178 bool Promote = false;
26179 bool Commute = false;
26180 switch (Op.getOpcode()) {
26183 LoadSDNode *LD = cast<LoadSDNode>(Op);
26184 // If the non-extending load has a single use and it's not live out, then it
26185 // might be folded.
26186 if (LD->getExtensionType() == ISD::NON_EXTLOAD /*&&
26187 Op.hasOneUse()*/) {
26188 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
26189 UE = Op.getNode()->use_end(); UI != UE; ++UI) {
26190 // The only case where we'd want to promote LOAD (rather than it being
26191 // promoted as an operand) is when its only use is liveout.
26192 if (UI->getOpcode() != ISD::CopyToReg)
26199 case ISD::SIGN_EXTEND:
26200 case ISD::ZERO_EXTEND:
26201 case ISD::ANY_EXTEND:
26206 SDValue N0 = Op.getOperand(0);
26207 // Look out for (store (shl (load), x)).
26208 if (MayFoldLoad(N0) && MayFoldIntoStore(Op))
26221 SDValue N0 = Op.getOperand(0);
26222 SDValue N1 = Op.getOperand(1);
26223 if (!Commute && MayFoldLoad(N1))
26225 // Avoid disabling potential load folding opportunities.
26226 if (MayFoldLoad(N0) && (!isa<ConstantSDNode>(N1) || MayFoldIntoStore(Op)))
26228 if (MayFoldLoad(N1) && (!isa<ConstantSDNode>(N0) || MayFoldIntoStore(Op)))
26238 //===----------------------------------------------------------------------===//
26239 // X86 Inline Assembly Support
26240 //===----------------------------------------------------------------------===//
26243 // Helper to match a string separated by whitespace.
26244 bool matchAsmImpl(StringRef s, ArrayRef<const StringRef *> args) {
26245 s = s.substr(s.find_first_not_of(" \t")); // Skip leading whitespace.
26247 for (unsigned i = 0, e = args.size(); i != e; ++i) {
26248 StringRef piece(*args[i]);
26249 if (!s.startswith(piece)) // Check if the piece matches.
26252 s = s.substr(piece.size());
26253 StringRef::size_type pos = s.find_first_not_of(" \t");
26254 if (pos == 0) // We matched a prefix.
26262 const VariadicFunction1<bool, StringRef, StringRef, matchAsmImpl> matchAsm={};
26265 static bool clobbersFlagRegisters(const SmallVector<StringRef, 4> &AsmPieces) {
26267 if (AsmPieces.size() == 3 || AsmPieces.size() == 4) {
26268 if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{cc}") &&
26269 std::count(AsmPieces.begin(), AsmPieces.end(), "~{flags}") &&
26270 std::count(AsmPieces.begin(), AsmPieces.end(), "~{fpsr}")) {
26272 if (AsmPieces.size() == 3)
26274 else if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{dirflag}"))
26281 bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
26282 InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue());
26284 std::string AsmStr = IA->getAsmString();
26286 IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
26287 if (!Ty || Ty->getBitWidth() % 16 != 0)
26290 // TODO: should remove alternatives from the asmstring: "foo {a|b}" -> "foo a"
26291 SmallVector<StringRef, 4> AsmPieces;
26292 SplitString(AsmStr, AsmPieces, ";\n");
26294 switch (AsmPieces.size()) {
26295 default: return false;
26297 // FIXME: this should verify that we are targeting a 486 or better. If not,
26298 // we will turn this bswap into something that will be lowered to logical
26299 // ops instead of emitting the bswap asm. For now, we don't support 486 or
26300 // lower so don't worry about this.
26302 if (matchAsm(AsmPieces[0], "bswap", "$0") ||
26303 matchAsm(AsmPieces[0], "bswapl", "$0") ||
26304 matchAsm(AsmPieces[0], "bswapq", "$0") ||
26305 matchAsm(AsmPieces[0], "bswap", "${0:q}") ||
26306 matchAsm(AsmPieces[0], "bswapl", "${0:q}") ||
26307 matchAsm(AsmPieces[0], "bswapq", "${0:q}")) {
26308 // No need to check constraints, nothing other than the equivalent of
26309 // "=r,0" would be valid here.
26310 return IntrinsicLowering::LowerToByteSwap(CI);
26313 // rorw $$8, ${0:w} --> llvm.bswap.i16
26314 if (CI->getType()->isIntegerTy(16) &&
26315 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
26316 (matchAsm(AsmPieces[0], "rorw", "$$8,", "${0:w}") ||
26317 matchAsm(AsmPieces[0], "rolw", "$$8,", "${0:w}"))) {
26319 const std::string &ConstraintsStr = IA->getConstraintString();
26320 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
26321 array_pod_sort(AsmPieces.begin(), AsmPieces.end());
26322 if (clobbersFlagRegisters(AsmPieces))
26323 return IntrinsicLowering::LowerToByteSwap(CI);
26327 if (CI->getType()->isIntegerTy(32) &&
26328 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
26329 matchAsm(AsmPieces[0], "rorw", "$$8,", "${0:w}") &&
26330 matchAsm(AsmPieces[1], "rorl", "$$16,", "$0") &&
26331 matchAsm(AsmPieces[2], "rorw", "$$8,", "${0:w}")) {
26333 const std::string &ConstraintsStr = IA->getConstraintString();
26334 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
26335 array_pod_sort(AsmPieces.begin(), AsmPieces.end());
26336 if (clobbersFlagRegisters(AsmPieces))
26337 return IntrinsicLowering::LowerToByteSwap(CI);
26340 if (CI->getType()->isIntegerTy(64)) {
26341 InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints();
26342 if (Constraints.size() >= 2 &&
26343 Constraints[0].Codes.size() == 1 && Constraints[0].Codes[0] == "A" &&
26344 Constraints[1].Codes.size() == 1 && Constraints[1].Codes[0] == "0") {
26345 // bswap %eax / bswap %edx / xchgl %eax, %edx -> llvm.bswap.i64
26346 if (matchAsm(AsmPieces[0], "bswap", "%eax") &&
26347 matchAsm(AsmPieces[1], "bswap", "%edx") &&
26348 matchAsm(AsmPieces[2], "xchgl", "%eax,", "%edx"))
26349 return IntrinsicLowering::LowerToByteSwap(CI);
26357 /// getConstraintType - Given a constraint letter, return the type of
26358 /// constraint it is for this target.
26359 X86TargetLowering::ConstraintType
26360 X86TargetLowering::getConstraintType(const std::string &Constraint) const {
26361 if (Constraint.size() == 1) {
26362 switch (Constraint[0]) {
26373 return C_RegisterClass;
26397 return TargetLowering::getConstraintType(Constraint);
26400 /// Examine constraint type and operand type and determine a weight value.
26401 /// This object must already have been set up with the operand type
26402 /// and the current alternative constraint selected.
26403 TargetLowering::ConstraintWeight
26404 X86TargetLowering::getSingleConstraintMatchWeight(
26405 AsmOperandInfo &info, const char *constraint) const {
26406 ConstraintWeight weight = CW_Invalid;
26407 Value *CallOperandVal = info.CallOperandVal;
26408 // If we don't have a value, we can't do a match,
26409 // but allow it at the lowest weight.
26410 if (!CallOperandVal)
26412 Type *type = CallOperandVal->getType();
26413 // Look at the constraint type.
26414 switch (*constraint) {
26416 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
26427 if (CallOperandVal->getType()->isIntegerTy())
26428 weight = CW_SpecificReg;
26433 if (type->isFloatingPointTy())
26434 weight = CW_SpecificReg;
26437 if (type->isX86_MMXTy() && Subtarget->hasMMX())
26438 weight = CW_SpecificReg;
26442 if (((type->getPrimitiveSizeInBits() == 128) && Subtarget->hasSSE1()) ||
26443 ((type->getPrimitiveSizeInBits() == 256) && Subtarget->hasFp256()))
26444 weight = CW_Register;
26447 if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
26448 if (C->getZExtValue() <= 31)
26449 weight = CW_Constant;
26453 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26454 if (C->getZExtValue() <= 63)
26455 weight = CW_Constant;
26459 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26460 if ((C->getSExtValue() >= -0x80) && (C->getSExtValue() <= 0x7f))
26461 weight = CW_Constant;
26465 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26466 if ((C->getZExtValue() == 0xff) || (C->getZExtValue() == 0xffff))
26467 weight = CW_Constant;
26471 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26472 if (C->getZExtValue() <= 3)
26473 weight = CW_Constant;
26477 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26478 if (C->getZExtValue() <= 0xff)
26479 weight = CW_Constant;
26484 if (dyn_cast<ConstantFP>(CallOperandVal)) {
26485 weight = CW_Constant;
26489 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26490 if ((C->getSExtValue() >= -0x80000000LL) &&
26491 (C->getSExtValue() <= 0x7fffffffLL))
26492 weight = CW_Constant;
26496 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26497 if (C->getZExtValue() <= 0xffffffff)
26498 weight = CW_Constant;
26505 /// LowerXConstraint - try to replace an X constraint, which matches anything,
26506 /// with another that has more specific requirements based on the type of the
26507 /// corresponding operand.
26508 const char *X86TargetLowering::
26509 LowerXConstraint(EVT ConstraintVT) const {
26510 // FP X constraints get lowered to SSE1/2 registers if available, otherwise
26511 // 'f' like normal targets.
26512 if (ConstraintVT.isFloatingPoint()) {
26513 if (Subtarget->hasSSE2())
26515 if (Subtarget->hasSSE1())
26519 return TargetLowering::LowerXConstraint(ConstraintVT);
26522 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
26523 /// vector. If it is invalid, don't add anything to Ops.
26524 void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
26525 std::string &Constraint,
26526 std::vector<SDValue>&Ops,
26527 SelectionDAG &DAG) const {
26530 // Only support length 1 constraints for now.
26531 if (Constraint.length() > 1) return;
26533 char ConstraintLetter = Constraint[0];
26534 switch (ConstraintLetter) {
26537 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26538 if (C->getZExtValue() <= 31) {
26539 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
26545 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26546 if (C->getZExtValue() <= 63) {
26547 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
26553 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26554 if (isInt<8>(C->getSExtValue())) {
26555 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
26561 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26562 if (C->getZExtValue() == 0xff || C->getZExtValue() == 0xffff ||
26563 (Subtarget->is64Bit() && C->getZExtValue() == 0xffffffff)) {
26564 Result = DAG.getTargetConstant(C->getSExtValue(), Op.getValueType());
26570 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26571 if (C->getZExtValue() <= 3) {
26572 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
26578 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26579 if (C->getZExtValue() <= 255) {
26580 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
26586 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26587 if (C->getZExtValue() <= 127) {
26588 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
26594 // 32-bit signed value
26595 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26596 if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
26597 C->getSExtValue())) {
26598 // Widen to 64 bits here to get it sign extended.
26599 Result = DAG.getTargetConstant(C->getSExtValue(), MVT::i64);
26602 // FIXME gcc accepts some relocatable values here too, but only in certain
26603 // memory models; it's complicated.
26608 // 32-bit unsigned value
26609 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26610 if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
26611 C->getZExtValue())) {
26612 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
26616 // FIXME gcc accepts some relocatable values here too, but only in certain
26617 // memory models; it's complicated.
26621 // Literal immediates are always ok.
26622 if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) {
26623 // Widen to 64 bits here to get it sign extended.
26624 Result = DAG.getTargetConstant(CST->getSExtValue(), MVT::i64);
26628 // In any sort of PIC mode addresses need to be computed at runtime by
26629 // adding in a register or some sort of table lookup. These can't
26630 // be used as immediates.
26631 if (Subtarget->isPICStyleGOT() || Subtarget->isPICStyleStubPIC())
26634 // If we are in non-pic codegen mode, we allow the address of a global (with
26635 // an optional displacement) to be used with 'i'.
26636 GlobalAddressSDNode *GA = nullptr;
26637 int64_t Offset = 0;
26639 // Match either (GA), (GA+C), (GA+C1+C2), etc.
26641 if ((GA = dyn_cast<GlobalAddressSDNode>(Op))) {
26642 Offset += GA->getOffset();
26644 } else if (Op.getOpcode() == ISD::ADD) {
26645 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
26646 Offset += C->getZExtValue();
26647 Op = Op.getOperand(0);
26650 } else if (Op.getOpcode() == ISD::SUB) {
26651 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
26652 Offset += -C->getZExtValue();
26653 Op = Op.getOperand(0);
26658 // Otherwise, this isn't something we can handle, reject it.
26662 const GlobalValue *GV = GA->getGlobal();
26663 // If we require an extra load to get this address, as in PIC mode, we
26664 // can't accept it.
26665 if (isGlobalStubReference(
26666 Subtarget->ClassifyGlobalReference(GV, DAG.getTarget())))
26669 Result = DAG.getTargetGlobalAddress(GV, SDLoc(Op),
26670 GA->getValueType(0), Offset);
26675 if (Result.getNode()) {
26676 Ops.push_back(Result);
26679 return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
26682 std::pair<unsigned, const TargetRegisterClass*>
26683 X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
26685 // First, see if this is a constraint that directly corresponds to an LLVM
26687 if (Constraint.size() == 1) {
26688 // GCC Constraint Letters
26689 switch (Constraint[0]) {
26691 // TODO: Slight differences here in allocation order and leaving
26692 // RIP in the class. Do they matter any more here than they do
26693 // in the normal allocation?
26694 case 'q': // GENERAL_REGS in 64-bit mode, Q_REGS in 32-bit mode.
26695 if (Subtarget->is64Bit()) {
26696 if (VT == MVT::i32 || VT == MVT::f32)
26697 return std::make_pair(0U, &X86::GR32RegClass);
26698 if (VT == MVT::i16)
26699 return std::make_pair(0U, &X86::GR16RegClass);
26700 if (VT == MVT::i8 || VT == MVT::i1)
26701 return std::make_pair(0U, &X86::GR8RegClass);
26702 if (VT == MVT::i64 || VT == MVT::f64)
26703 return std::make_pair(0U, &X86::GR64RegClass);
26706 // 32-bit fallthrough
26707 case 'Q': // Q_REGS
26708 if (VT == MVT::i32 || VT == MVT::f32)
26709 return std::make_pair(0U, &X86::GR32_ABCDRegClass);
26710 if (VT == MVT::i16)
26711 return std::make_pair(0U, &X86::GR16_ABCDRegClass);
26712 if (VT == MVT::i8 || VT == MVT::i1)
26713 return std::make_pair(0U, &X86::GR8_ABCD_LRegClass);
26714 if (VT == MVT::i64)
26715 return std::make_pair(0U, &X86::GR64_ABCDRegClass);
26717 case 'r': // GENERAL_REGS
26718 case 'l': // INDEX_REGS
26719 if (VT == MVT::i8 || VT == MVT::i1)
26720 return std::make_pair(0U, &X86::GR8RegClass);
26721 if (VT == MVT::i16)
26722 return std::make_pair(0U, &X86::GR16RegClass);
26723 if (VT == MVT::i32 || VT == MVT::f32 || !Subtarget->is64Bit())
26724 return std::make_pair(0U, &X86::GR32RegClass);
26725 return std::make_pair(0U, &X86::GR64RegClass);
26726 case 'R': // LEGACY_REGS
26727 if (VT == MVT::i8 || VT == MVT::i1)
26728 return std::make_pair(0U, &X86::GR8_NOREXRegClass);
26729 if (VT == MVT::i16)
26730 return std::make_pair(0U, &X86::GR16_NOREXRegClass);
26731 if (VT == MVT::i32 || !Subtarget->is64Bit())
26732 return std::make_pair(0U, &X86::GR32_NOREXRegClass);
26733 return std::make_pair(0U, &X86::GR64_NOREXRegClass);
26734 case 'f': // FP Stack registers.
26735 // If SSE is enabled for this VT, use f80 to ensure the isel moves the
26736 // value to the correct fpstack register class.
26737 if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT))
26738 return std::make_pair(0U, &X86::RFP32RegClass);
26739 if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT))
26740 return std::make_pair(0U, &X86::RFP64RegClass);
26741 return std::make_pair(0U, &X86::RFP80RegClass);
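// Illustrative example (a sketch, not from the original sources): the "f"
// constraint keeps a value on the x87 stack, typically combined with "t"
// (top of stack) for the result, e.g.
//
//   double a, b, r;
//   asm ("fadd %2, %0" : "=t"(r) : "0"(a), "f"(b));   // r left in %st(0)
//
// The RFP32/RFP64/RFP80 classes returned here keep such values on the FP
// stack even when SSE is available for the type.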
26742 case 'y': // MMX_REGS if MMX allowed.
26743 if (!Subtarget->hasMMX()) break;
26744 return std::make_pair(0U, &X86::VR64RegClass);
26745 case 'Y': // SSE_REGS if SSE2 allowed
26746 if (!Subtarget->hasSSE2()) break;
26748 case 'x': // SSE_REGS if SSE1 allowed or AVX_REGS if AVX allowed
26749 if (!Subtarget->hasSSE1()) break;
26751 switch (VT.SimpleTy) {
26753 // Scalar SSE types.
26756 return std::make_pair(0U, &X86::FR32RegClass);
26759 return std::make_pair(0U, &X86::FR64RegClass);
26767 return std::make_pair(0U, &X86::VR128RegClass);
26775 return std::make_pair(0U, &X86::VR256RegClass);
26780 return std::make_pair(0U, &X86::VR512RegClass);
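// A short usage sketch (illustrative): the "x" constraint places scalar-SSE or
// vector values in XMM/YMM/ZMM registers, e.g. with <immintrin.h>:
//
//   __m128 a, b;
//   asm ("addps %1, %0" : "+x"(a) : "x"(b));
//
// The VT switch above picks FR32/FR64 for scalars and VR128/VR256/VR512 for
// the corresponding vector widths.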
26786 // Use the default implementation in TargetLowering to convert the register
26787 // constraint into a member of a register class.
26788 std::pair<unsigned, const TargetRegisterClass*> Res;
26789 Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
26791 // Not found as a standard register?
26793 // Map "st(0)" .. "st(7)" onto the FP0 .. FP7 stack registers.
26794 if (Constraint.size() == 7 && Constraint[0] == '{' &&
26795 tolower(Constraint[1]) == 's' &&
26796 tolower(Constraint[2]) == 't' &&
26797 Constraint[3] == '(' &&
26798 (Constraint[4] >= '0' && Constraint[4] <= '7') &&
26799 Constraint[5] == ')' &&
26800 Constraint[6] == '}') {
26802 Res.first = X86::FP0+Constraint[4]-'0';
26803 Res.second = &X86::RFP80RegClass;
26807 // GCC allows "st(0)" to be called just plain "st".
26808 if (StringRef("{st}").equals_lower(Constraint)) {
26809 Res.first = X86::FP0;
26810 Res.second = &X86::RFP80RegClass;
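// Illustrative sketch (the front-end mapping is our assumption): GCC's "t"/"u"
// constraints are typically lowered to the explicit "{st}"/"{st(1)}" strings
// parsed above, e.g.
//
//   double c, s, a = 0.5;
//   asm ("fsincos" : "=t"(c), "=u"(s) : "0"(a));   // c in %st(0), s in %st(1)
//
// The parsing here only depends on the "{st}" / "{st(N)}" spelling itself.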
26815 if (StringRef("{flags}").equals_lower(Constraint)) {
26816 Res.first = X86::EFLAGS;
26817 Res.second = &X86::CCRRegClass;
26821 // 'A' means EAX + EDX.
26822 if (Constraint == "A") {
26823 Res.first = X86::EAX;
26824 Res.second = &X86::GR32_ADRegClass;
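// Usage sketch (illustrative): in 32-bit code the "A" constraint binds a
// 64-bit value to the EDX:EAX pair, e.g.
//
//   unsigned long long tsc;
//   asm volatile ("rdtsc" : "=A"(tsc));   // low half in %eax, high half in %edx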
26830 // Otherwise, check to see if this is a register class of the wrong value
26831 // type. For example, we want to map "{ax},i32" -> {eax}; we don't want it to
26832 // turn into {ax},{dx}.
26833 if (Res.second->hasType(VT))
26834 return Res; // Correct type already, nothing to do.
26836 // All of the single-register GCC register classes map their values onto
26837 // 16-bit register pieces "ax","dx","cx","bx","si","di","bp","sp". If we
26838 // really want an 8-bit or 32-bit register, map to the appropriate register
26839 // class and return the appropriate register.
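// For illustration (assumed front-end behavior, not guaranteed by this file):
// a GCC constraint such as "=a" on an 'int' output usually reaches the backend
// as "{ax}" with an i32 value type, e.g.
//
//   unsigned leaf = 0, max;
//   asm ("cpuid" : "=a"(max) : "0"(leaf) : "ebx", "ecx", "edx");
//
// The generic lookup resolves "{ax}" to AX in GR16; the code below then widens
// the assignment to EAX/GR32 (or RAX/GR64 for 64-bit values).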
26840 if (Res.second == &X86::GR16RegClass) {
26841 if (VT == MVT::i8 || VT == MVT::i1) {
26842 unsigned DestReg = 0;
26843 switch (Res.first) {
26845 case X86::AX: DestReg = X86::AL; break;
26846 case X86::DX: DestReg = X86::DL; break;
26847 case X86::CX: DestReg = X86::CL; break;
26848 case X86::BX: DestReg = X86::BL; break;
26851 Res.first = DestReg;
26852 Res.second = &X86::GR8RegClass;
26854 } else if (VT == MVT::i32 || VT == MVT::f32) {
26855 unsigned DestReg = 0;
26856 switch (Res.first) {
26858 case X86::AX: DestReg = X86::EAX; break;
26859 case X86::DX: DestReg = X86::EDX; break;
26860 case X86::CX: DestReg = X86::ECX; break;
26861 case X86::BX: DestReg = X86::EBX; break;
26862 case X86::SI: DestReg = X86::ESI; break;
26863 case X86::DI: DestReg = X86::EDI; break;
26864 case X86::BP: DestReg = X86::EBP; break;
26865 case X86::SP: DestReg = X86::ESP; break;
26868 Res.first = DestReg;
26869 Res.second = &X86::GR32RegClass;
26871 } else if (VT == MVT::i64 || VT == MVT::f64) {
26872 unsigned DestReg = 0;
26873 switch (Res.first) {
26875 case X86::AX: DestReg = X86::RAX; break;
26876 case X86::DX: DestReg = X86::RDX; break;
26877 case X86::CX: DestReg = X86::RCX; break;
26878 case X86::BX: DestReg = X86::RBX; break;
26879 case X86::SI: DestReg = X86::RSI; break;
26880 case X86::DI: DestReg = X86::RDI; break;
26881 case X86::BP: DestReg = X86::RBP; break;
26882 case X86::SP: DestReg = X86::RSP; break;
26885 Res.first = DestReg;
26886 Res.second = &X86::GR64RegClass;
26889 } else if (Res.second == &X86::FR32RegClass ||
26890 Res.second == &X86::FR64RegClass ||
26891 Res.second == &X86::VR128RegClass ||
26892 Res.second == &X86::VR256RegClass ||
26893 Res.second == &X86::FR32XRegClass ||
26894 Res.second == &X86::FR64XRegClass ||
26895 Res.second == &X86::VR128XRegClass ||
26896 Res.second == &X86::VR256XRegClass ||
26897 Res.second == &X86::VR512RegClass) {
26898 // Handle references to XMM physical registers that got mapped into the
26899 // wrong class. This can happen with constraints like {xmm0} where the
26900 // target-independent register mapper will just pick the first match it can
26901 // find, ignoring the required type.
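// For example (our reading of the generic mapper, not a guarantee): a
// constraint like "{xmm0}" used with a v4f32 value may initially resolve to a
// scalar class such as FR32, because the name lookup only matches the
// register; the reassignment below moves it to VR128 so the full vector type
// is legal for the class.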
26903 if (VT == MVT::f32 || VT == MVT::i32)
26904 Res.second = &X86::FR32RegClass;
26905 else if (VT == MVT::f64 || VT == MVT::i64)
26906 Res.second = &X86::FR64RegClass;
26907 else if (X86::VR128RegClass.hasType(VT))
26908 Res.second = &X86::VR128RegClass;
26909 else if (X86::VR256RegClass.hasType(VT))
26910 Res.second = &X86::VR256RegClass;
26911 else if (X86::VR512RegClass.hasType(VT))
26912 Res.second = &X86::VR512RegClass;
26918 int X86TargetLowering::getScalingFactorCost(const AddrMode &AM,
26920 // Scaling factors are not free at all.
26921 // An indexed folded instruction, i.e., inst (reg1, reg2, scale),
26922 // will take 2 allocations in the out-of-order engine instead of 1
26923 // for a plain addressing mode, i.e., inst (reg1).
26925 // vaddps (%rsi,%rdx), %ymm0, %ymm1
26926 // Requires two allocations (one for the load, one for the computation)
26928 // vaddps (%rsi), %ymm0, %ymm1
26929 // Requires just 1 allocation, i.e., it frees an allocation for other operations
26930 // and has fewer micro-operations to execute.
26932 // For some X86 architectures, this is even worse because, for stores for
26933 // instance, the complex addressing mode forces the instruction to use the
26934 // "load" ports instead of the dedicated "store" port.
26935 // E.g., on Haswell:
26936 // vmovaps %ymm1, (%r8, %rdi) can use port 2 or 3.
26937 // vmovaps %ymm1, (%r8) can use port 2, 3, or 7.
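// Worked example (illustrative): for an access like p[i] with a 4-byte element
// the matched mode is (base=p, index=i, scale=4), so AM.Scale != 0 and the
// cost returned below is 1; a plain dereference *p gives (base=p), Scale == 0,
// and a cost of 0.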
26938 if (isLegalAddressingMode(AM, Ty))
26939 // Scale represents reg2 * scale, so charge a cost of 1
26940 // as soon as we use a second register.
26941 return AM.Scale != 0;
26945 bool X86TargetLowering::isTargetFTOL() const {
26946 return Subtarget->isTargetKnownWindowsMSVC() && !Subtarget->is64Bit();