//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//
#include "X86ISelLowering.h"
#include "Utils/X86ShuffleDecode.h"
#include "X86CallingConv.h"
#include "X86FrameLowering.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86TargetMachine.h"
#include "X86TargetObjectFile.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/VariadicFunction.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
#include "X86IntrinsicsInfo.h"

using namespace llvm;
#define DEBUG_TYPE "x86-isel"

STATISTIC(NumTailCalls, "Number of tail calls");
static cl::opt<bool> ExperimentalVectorWideningLegalization(
    "x86-experimental-vector-widening-legalization", cl::init(false),
    cl::desc("Enable an experimental vector type legalization through widening "
             "rather than promotion."),
    cl::Hidden);
static cl::opt<bool> ExperimentalVectorShuffleLowering(
    "x86-experimental-vector-shuffle-lowering", cl::init(true),
    cl::desc("Enable an experimental vector shuffle lowering code path."),
    cl::Hidden);
static cl::opt<int> ReciprocalEstimateRefinementSteps(
    "x86-recip-refinement-steps", cl::init(1),
    cl::desc("Specify the number of Newton-Raphson iterations applied to the "
             "result of the hardware reciprocal estimate instruction."),
    cl::NotHidden);
// Forward declarations.
static SDValue getMOVL(SelectionDAG &DAG, SDLoc dl, EVT VT, SDValue V1,
                       SDValue V2);
static SDValue ExtractSubVector(SDValue Vec, unsigned IdxVal,
                                SelectionDAG &DAG, SDLoc dl,
                                unsigned vectorWidth) {
  assert((vectorWidth == 128 || vectorWidth == 256) &&
         "Unsupported vector width");
  EVT VT = Vec.getValueType();
  EVT ElVT = VT.getVectorElementType();
  unsigned Factor = VT.getSizeInBits()/vectorWidth;
  EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT,
                                  VT.getVectorNumElements()/Factor);

  // Extract from UNDEF is UNDEF.
  if (Vec.getOpcode() == ISD::UNDEF)
    return DAG.getUNDEF(ResultVT);

  // Extract the relevant vectorWidth bits. Generate an EXTRACT_SUBVECTOR.
  unsigned ElemsPerChunk = vectorWidth / ElVT.getSizeInBits();

  // This is the index of the first element of the vectorWidth-bit chunk
  // we want.
  unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits()) / vectorWidth)
                               * ElemsPerChunk);

  // If the input is a buildvector just emit a smaller one.
  if (Vec.getOpcode() == ISD::BUILD_VECTOR)
    return DAG.getNode(ISD::BUILD_VECTOR, dl, ResultVT,
                       makeArrayRef(Vec->op_begin() + NormalizedIdxVal,
                                    ElemsPerChunk));

  SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal);
  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec, VecIdx);
}
/// Generate a DAG to grab 128-bits from a vector > 128 bits. This
/// sets things up to match to an AVX VEXTRACTF128 / VEXTRACTI128
/// or AVX-512 VEXTRACTF32x4 / VEXTRACTI32x4
/// instructions or a simple subregister reference. Idx is an index in the
/// 128 bits we want. It need not be aligned to a 128-bit boundary. That makes
/// lowering EXTRACT_VECTOR_ELT operations easier.
static SDValue Extract128BitVector(SDValue Vec, unsigned IdxVal,
                                   SelectionDAG &DAG, SDLoc dl) {
  assert((Vec.getValueType().is256BitVector() ||
          Vec.getValueType().is512BitVector()) && "Unexpected vector size!");
  return ExtractSubVector(Vec, IdxVal, DAG, dl, 128);
}
/// Generate a DAG to grab 256-bits from a 512-bit vector.
static SDValue Extract256BitVector(SDValue Vec, unsigned IdxVal,
                                   SelectionDAG &DAG, SDLoc dl) {
  assert(Vec.getValueType().is512BitVector() && "Unexpected vector size!");
  return ExtractSubVector(Vec, IdxVal, DAG, dl, 256);
}
static SDValue InsertSubVector(SDValue Result, SDValue Vec,
                               unsigned IdxVal, SelectionDAG &DAG,
                               SDLoc dl, unsigned vectorWidth) {
  assert((vectorWidth == 128 || vectorWidth == 256) &&
         "Unsupported vector width");
  // Inserting UNDEF is Result
  if (Vec.getOpcode() == ISD::UNDEF)
    return Result;
  EVT VT = Vec.getValueType();
  EVT ElVT = VT.getVectorElementType();
  EVT ResultVT = Result.getValueType();

  // Insert the relevant vectorWidth bits.
  unsigned ElemsPerChunk = vectorWidth/ElVT.getSizeInBits();

  // This is the index of the first element of the vectorWidth-bit chunk
  // we want.
  unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits())/vectorWidth)
                               * ElemsPerChunk);

  SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal);
  return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec, VecIdx);
}
/// Generate a DAG to put 128-bits into a vector > 128 bits. This
/// sets things up to match to an AVX VINSERTF128/VINSERTI128 or
/// AVX-512 VINSERTF32x4/VINSERTI32x4 instructions or a
/// simple superregister reference. Idx is an index in the 128 bits
/// we want. It need not be aligned to a 128-bit boundary. That makes
/// lowering INSERT_VECTOR_ELT operations easier.
static SDValue Insert128BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
                                  SelectionDAG &DAG, SDLoc dl) {
  assert(Vec.getValueType().is128BitVector() && "Unexpected vector size!");
  return InsertSubVector(Result, Vec, IdxVal, DAG, dl, 128);
}
static SDValue Insert256BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
                                  SelectionDAG &DAG, SDLoc dl) {
  assert(Vec.getValueType().is256BitVector() && "Unexpected vector size!");
  return InsertSubVector(Result, Vec, IdxVal, DAG, dl, 256);
}
/// Concat two 128-bit vectors into a 256 bit vector using VINSERTF128
/// instructions. This is used because creating CONCAT_VECTOR nodes of
/// BUILD_VECTORS returns a larger BUILD_VECTOR while we're trying to lower
/// large BUILD_VECTORS.
static SDValue Concat128BitVectors(SDValue V1, SDValue V2, EVT VT,
                                   unsigned NumElems, SelectionDAG &DAG,
                                   SDLoc dl) {
  SDValue V = Insert128BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl);
  return Insert128BitVector(V, V2, NumElems/2, DAG, dl);
}
static SDValue Concat256BitVectors(SDValue V1, SDValue V2, EVT VT,
                                   unsigned NumElems, SelectionDAG &DAG,
                                   SDLoc dl) {
  SDValue V = Insert256BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl);
  return Insert256BitVector(V, V2, NumElems/2, DAG, dl);
}
198 X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
199 const X86Subtarget &STI)
200 : TargetLowering(TM), Subtarget(&STI) {
201 X86ScalarSSEf64 = Subtarget->hasSSE2();
202 X86ScalarSSEf32 = Subtarget->hasSSE1();
203 TD = getDataLayout();
205 // Set up the TargetLowering object.
206 static const MVT IntVTs[] = { MVT::i8, MVT::i16, MVT::i32, MVT::i64 };
208 // X86 is weird. It always uses i8 for shift amounts and setcc results.
209 setBooleanContents(ZeroOrOneBooleanContent);
210 // X86-SSE is even stranger. It uses -1 or 0 for vector masks.
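  // For example, a v4i32 SETCC yields an all-ones or all-zeros mask in each
  // lane, matching what PCMPEQ/PCMPGT produce.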
211 setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
213 // For 64-bit, since we have so many registers, use the ILP scheduler.
214 // For 32-bit, use the register pressure specific scheduling.
215 // For Atom, always use ILP scheduling.
216 if (Subtarget->isAtom())
217 setSchedulingPreference(Sched::ILP);
  else if (Subtarget->is64Bit())
    setSchedulingPreference(Sched::ILP);
  else
    setSchedulingPreference(Sched::RegPressure);
222 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
223 setStackPointerRegisterToSaveRestore(RegInfo->getStackRegister());
225 // Bypass expensive divides on Atom when compiling with O2.
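  // addBypassSlowDiv(32, 8) lets the slow-division bypass transform emit a
  // run-time check and use an 8-bit divide when both operands fit in 8 bits;
  // likewise below, a 64-bit divide can be bypassed with a 16-bit one.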
226 if (TM.getOptLevel() >= CodeGenOpt::Default) {
227 if (Subtarget->hasSlowDivide32())
228 addBypassSlowDiv(32, 8);
229 if (Subtarget->hasSlowDivide64() && Subtarget->is64Bit())
      addBypassSlowDiv(64, 16);
  }
233 if (Subtarget->isTargetKnownWindowsMSVC()) {
234 // Setup Windows compiler runtime calls.
235 setLibcallName(RTLIB::SDIV_I64, "_alldiv");
236 setLibcallName(RTLIB::UDIV_I64, "_aulldiv");
237 setLibcallName(RTLIB::SREM_I64, "_allrem");
238 setLibcallName(RTLIB::UREM_I64, "_aullrem");
239 setLibcallName(RTLIB::MUL_I64, "_allmul");
240 setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::X86_StdCall);
241 setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::X86_StdCall);
242 setLibcallCallingConv(RTLIB::SREM_I64, CallingConv::X86_StdCall);
243 setLibcallCallingConv(RTLIB::UREM_I64, CallingConv::X86_StdCall);
244 setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::X86_StdCall);
246 // The _ftol2 runtime function has an unusual calling conv, which
247 // is modeled by a special pseudo-instruction.
248 setLibcallName(RTLIB::FPTOUINT_F64_I64, nullptr);
249 setLibcallName(RTLIB::FPTOUINT_F32_I64, nullptr);
250 setLibcallName(RTLIB::FPTOUINT_F64_I32, nullptr);
    setLibcallName(RTLIB::FPTOUINT_F32_I32, nullptr);
  }
254 if (Subtarget->isTargetDarwin()) {
255 // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
256 setUseUnderscoreSetJmp(false);
257 setUseUnderscoreLongJmp(false);
258 } else if (Subtarget->isTargetWindowsGNU()) {
259 // MS runtime is weird: it exports _setjmp, but longjmp!
260 setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(false);
  } else {
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(true);
  }
267 // Set up the register classes.
268 addRegisterClass(MVT::i8, &X86::GR8RegClass);
269 addRegisterClass(MVT::i16, &X86::GR16RegClass);
270 addRegisterClass(MVT::i32, &X86::GR32RegClass);
271 if (Subtarget->is64Bit())
272 addRegisterClass(MVT::i64, &X86::GR64RegClass);
274 for (MVT VT : MVT::integer_valuetypes())
275 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
277 // We don't accept any truncstore of integer registers.
278 setTruncStoreAction(MVT::i64, MVT::i32, Expand);
279 setTruncStoreAction(MVT::i64, MVT::i16, Expand);
280 setTruncStoreAction(MVT::i64, MVT::i8 , Expand);
281 setTruncStoreAction(MVT::i32, MVT::i16, Expand);
282 setTruncStoreAction(MVT::i32, MVT::i8 , Expand);
283 setTruncStoreAction(MVT::i16, MVT::i8, Expand);
285 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
287 // SETOEQ and SETUNE require checking two conditions.
288 setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand);
289 setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand);
290 setCondCodeAction(ISD::SETOEQ, MVT::f80, Expand);
291 setCondCodeAction(ISD::SETUNE, MVT::f32, Expand);
292 setCondCodeAction(ISD::SETUNE, MVT::f64, Expand);
293 setCondCodeAction(ISD::SETUNE, MVT::f80, Expand);
  // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
  // operation.
297 setOperationAction(ISD::UINT_TO_FP , MVT::i1 , Promote);
298 setOperationAction(ISD::UINT_TO_FP , MVT::i8 , Promote);
299 setOperationAction(ISD::UINT_TO_FP , MVT::i16 , Promote);
301 if (Subtarget->is64Bit()) {
302 setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Promote);
303 setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Custom);
304 } else if (!TM.Options.UseSoftFloat) {
305 // We have an algorithm for SSE2->double, and we turn this into a
306 // 64-bit FILD followed by conditional FADD for other targets.
307 setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Custom);
308 // We have an algorithm for SSE2, and we turn this into a 64-bit
309 // FILD for other targets.
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  }
  // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
  // this operation.
315 setOperationAction(ISD::SINT_TO_FP , MVT::i1 , Promote);
316 setOperationAction(ISD::SINT_TO_FP , MVT::i8 , Promote);
318 if (!TM.Options.UseSoftFloat) {
319 // SSE has no i16 to fp conversion, only i32
320 if (X86ScalarSSEf32) {
321 setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Promote);
322 // f32 and f64 cases are Legal, f80 case is not
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    } else {
      setOperationAction(ISD::SINT_TO_FP, MVT::i16, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    }
  } else {
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Promote);
  }
333 // In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64
334 // are Legal, f80 is custom lowered.
335 setOperationAction(ISD::FP_TO_SINT , MVT::i64 , Custom);
336 setOperationAction(ISD::SINT_TO_FP , MVT::i64 , Custom);
  // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINTs, as X86 doesn't have
  // this operation.
340 setOperationAction(ISD::FP_TO_SINT , MVT::i1 , Promote);
341 setOperationAction(ISD::FP_TO_SINT , MVT::i8 , Promote);
343 if (X86ScalarSSEf32) {
344 setOperationAction(ISD::FP_TO_SINT , MVT::i16 , Promote);
345 // f32 and f64 cases are Legal, f80 case is not
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  } else {
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  }
  // Handle FP_TO_UINT by promoting the destination to a larger signed
  // conversion.
354 setOperationAction(ISD::FP_TO_UINT , MVT::i1 , Promote);
355 setOperationAction(ISD::FP_TO_UINT , MVT::i8 , Promote);
356 setOperationAction(ISD::FP_TO_UINT , MVT::i16 , Promote);
358 if (Subtarget->is64Bit()) {
359 setOperationAction(ISD::FP_TO_UINT , MVT::i64 , Expand);
360 setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Promote);
361 } else if (!TM.Options.UseSoftFloat) {
362 // Since AVX is a superset of SSE3, only check for SSE here.
363 if (Subtarget->hasSSE1() && !Subtarget->hasSSE3())
364 // Expand FP_TO_UINT into a select.
365 // FIXME: We would like to use a Custom expander here eventually to do
366 // the optimal thing for SSE vs. the default expansion in the legalizer.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
    else
      // With SSE3 we can use fisttpll to convert to a signed i64; without
      // SSE, we're stuck with a fistpll.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
  }
374 if (isTargetFTOL()) {
375 // Use the _ftol2 runtime function, which has a pseudo-instruction
376 // to handle its weird calling convention.
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
  }
380 // TODO: when we have SSE, these could be more efficient, by using movd/movq.
381 if (!X86ScalarSSEf64) {
382 setOperationAction(ISD::BITCAST , MVT::f32 , Expand);
383 setOperationAction(ISD::BITCAST , MVT::i32 , Expand);
384 if (Subtarget->is64Bit()) {
385 setOperationAction(ISD::BITCAST , MVT::f64 , Expand);
386 // Without SSE, i64->f64 goes through memory.
      setOperationAction(ISD::BITCAST, MVT::i64, Expand);
    }
  }
391 // Scalar integer divide and remainder are lowered to use operations that
392 // produce two results, to match the available instructions. This exposes
393 // the two-result form to trivial CSE, which is able to combine x/y and x%y
394 // into a single instruction.
396 // Scalar integer multiply-high is also lowered to use two-result
397 // operations, to match the available instructions. However, plain multiply
398 // (low) operations are left as Legal, as there are single-result
399 // instructions for this in x86. Using the two-result multiply instructions
400 // when both high and low results are needed must be arranged by dagcombine.
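  // For example, "x / y" and "x % y" over the same operands CSE into one
  // ISD::SDIVREM node, which selects to a single IDIV producing the quotient
  // in EAX/RAX and the remainder in EDX/RDX.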
  for (unsigned i = 0; i != array_lengthof(IntVTs); ++i) {
    MVT VT = IntVTs[i];
403 setOperationAction(ISD::MULHS, VT, Expand);
404 setOperationAction(ISD::MULHU, VT, Expand);
405 setOperationAction(ISD::SDIV, VT, Expand);
406 setOperationAction(ISD::UDIV, VT, Expand);
407 setOperationAction(ISD::SREM, VT, Expand);
408 setOperationAction(ISD::UREM, VT, Expand);
410 // Add/Sub overflow ops with MVT::Glues are lowered to EFLAGS dependences.
411 setOperationAction(ISD::ADDC, VT, Custom);
412 setOperationAction(ISD::ADDE, VT, Custom);
413 setOperationAction(ISD::SUBC, VT, Custom);
    setOperationAction(ISD::SUBE, VT, Custom);
  }
417 setOperationAction(ISD::BR_JT , MVT::Other, Expand);
418 setOperationAction(ISD::BRCOND , MVT::Other, Custom);
419 setOperationAction(ISD::BR_CC , MVT::f32, Expand);
420 setOperationAction(ISD::BR_CC , MVT::f64, Expand);
421 setOperationAction(ISD::BR_CC , MVT::f80, Expand);
422 setOperationAction(ISD::BR_CC , MVT::i8, Expand);
423 setOperationAction(ISD::BR_CC , MVT::i16, Expand);
424 setOperationAction(ISD::BR_CC , MVT::i32, Expand);
425 setOperationAction(ISD::BR_CC , MVT::i64, Expand);
426 setOperationAction(ISD::SELECT_CC , MVT::f32, Expand);
427 setOperationAction(ISD::SELECT_CC , MVT::f64, Expand);
428 setOperationAction(ISD::SELECT_CC , MVT::f80, Expand);
429 setOperationAction(ISD::SELECT_CC , MVT::i8, Expand);
430 setOperationAction(ISD::SELECT_CC , MVT::i16, Expand);
431 setOperationAction(ISD::SELECT_CC , MVT::i32, Expand);
432 setOperationAction(ISD::SELECT_CC , MVT::i64, Expand);
433 if (Subtarget->is64Bit())
434 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
435 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16 , Legal);
436 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Legal);
437 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand);
438 setOperationAction(ISD::FP_ROUND_INREG , MVT::f32 , Expand);
439 setOperationAction(ISD::FREM , MVT::f32 , Expand);
440 setOperationAction(ISD::FREM , MVT::f64 , Expand);
441 setOperationAction(ISD::FREM , MVT::f80 , Expand);
442 setOperationAction(ISD::FLT_ROUNDS_ , MVT::i32 , Custom);
  // Promote the i8 variants and force them on up to i32 which has a shorter
  // encoding.
446 setOperationAction(ISD::CTTZ , MVT::i8 , Promote);
447 AddPromotedToType (ISD::CTTZ , MVT::i8 , MVT::i32);
448 setOperationAction(ISD::CTTZ_ZERO_UNDEF , MVT::i8 , Promote);
449 AddPromotedToType (ISD::CTTZ_ZERO_UNDEF , MVT::i8 , MVT::i32);
450 if (Subtarget->hasBMI()) {
451 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16 , Expand);
452 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32 , Expand);
453 if (Subtarget->is64Bit())
      setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
  } else {
456 setOperationAction(ISD::CTTZ , MVT::i16 , Custom);
457 setOperationAction(ISD::CTTZ , MVT::i32 , Custom);
458 if (Subtarget->is64Bit())
      setOperationAction(ISD::CTTZ, MVT::i64, Custom);
  }
462 if (Subtarget->hasLZCNT()) {
    // When promoting the i8 variants, force them to i32 for a shorter
    // encoding.
465 setOperationAction(ISD::CTLZ , MVT::i8 , Promote);
466 AddPromotedToType (ISD::CTLZ , MVT::i8 , MVT::i32);
467 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8 , Promote);
468 AddPromotedToType (ISD::CTLZ_ZERO_UNDEF, MVT::i8 , MVT::i32);
469 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16 , Expand);
470 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32 , Expand);
471 if (Subtarget->is64Bit())
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);
  } else {
474 setOperationAction(ISD::CTLZ , MVT::i8 , Custom);
475 setOperationAction(ISD::CTLZ , MVT::i16 , Custom);
476 setOperationAction(ISD::CTLZ , MVT::i32 , Custom);
477 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8 , Custom);
478 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16 , Custom);
479 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32 , Custom);
480 if (Subtarget->is64Bit()) {
481 setOperationAction(ISD::CTLZ , MVT::i64 , Custom);
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom);
    }
  }
486 // Special handling for half-precision floating point conversions.
487 // If we don't have F16C support, then lower half float conversions
488 // into library calls.
489 if (TM.Options.UseSoftFloat || !Subtarget->hasF16C()) {
490 setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
    setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
  }
494 // There's never any support for operations beyond MVT::f32.
495 setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
496 setOperationAction(ISD::FP16_TO_FP, MVT::f80, Expand);
497 setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
498 setOperationAction(ISD::FP_TO_FP16, MVT::f80, Expand);
500 setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
501 setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
502 setLoadExtAction(ISD::EXTLOAD, MVT::f80, MVT::f16, Expand);
503 setTruncStoreAction(MVT::f32, MVT::f16, Expand);
504 setTruncStoreAction(MVT::f64, MVT::f16, Expand);
505 setTruncStoreAction(MVT::f80, MVT::f16, Expand);
507 if (Subtarget->hasPOPCNT()) {
    setOperationAction(ISD::CTPOP, MVT::i8, Promote);
  } else {
510 setOperationAction(ISD::CTPOP , MVT::i8 , Expand);
511 setOperationAction(ISD::CTPOP , MVT::i16 , Expand);
512 setOperationAction(ISD::CTPOP , MVT::i32 , Expand);
513 if (Subtarget->is64Bit())
      setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  }
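  // READCYCLECOUNTER is custom lowered so the 64-bit RDTSC result can be
  // reassembled from the EDX:EAX register pair on 32-bit targets.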
517 setOperationAction(ISD::READCYCLECOUNTER , MVT::i64 , Custom);
519 if (!Subtarget->hasMOVBE())
520 setOperationAction(ISD::BSWAP , MVT::i16 , Expand);
522 // These should be promoted to a larger select which is supported.
523 setOperationAction(ISD::SELECT , MVT::i1 , Promote);
524 // X86 wants to expand cmov itself.
525 setOperationAction(ISD::SELECT , MVT::i8 , Custom);
526 setOperationAction(ISD::SELECT , MVT::i16 , Custom);
527 setOperationAction(ISD::SELECT , MVT::i32 , Custom);
528 setOperationAction(ISD::SELECT , MVT::f32 , Custom);
529 setOperationAction(ISD::SELECT , MVT::f64 , Custom);
530 setOperationAction(ISD::SELECT , MVT::f80 , Custom);
531 setOperationAction(ISD::SETCC , MVT::i8 , Custom);
532 setOperationAction(ISD::SETCC , MVT::i16 , Custom);
533 setOperationAction(ISD::SETCC , MVT::i32 , Custom);
534 setOperationAction(ISD::SETCC , MVT::f32 , Custom);
535 setOperationAction(ISD::SETCC , MVT::f64 , Custom);
536 setOperationAction(ISD::SETCC , MVT::f80 , Custom);
537 if (Subtarget->is64Bit()) {
538 setOperationAction(ISD::SELECT , MVT::i64 , Custom);
    setOperationAction(ISD::SETCC, MVT::i64, Custom);
  }
541 setOperationAction(ISD::EH_RETURN , MVT::Other, Custom);
542 // NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intended to support
543 // SjLj exception handling but a light-weight setjmp/longjmp replacement to
544 // support continuation, user-level threading, and etc.. As a result, no
545 // other SjLj exception interfaces are implemented and please don't build
546 // your own exception handling based on them.
547 // LLVM/Clang supports zero-cost DWARF exception handling.
548 setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
549 setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
552 setOperationAction(ISD::ConstantPool , MVT::i32 , Custom);
553 setOperationAction(ISD::JumpTable , MVT::i32 , Custom);
554 setOperationAction(ISD::GlobalAddress , MVT::i32 , Custom);
555 setOperationAction(ISD::GlobalTLSAddress, MVT::i32 , Custom);
556 if (Subtarget->is64Bit())
557 setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
558 setOperationAction(ISD::ExternalSymbol , MVT::i32 , Custom);
559 setOperationAction(ISD::BlockAddress , MVT::i32 , Custom);
560 if (Subtarget->is64Bit()) {
561 setOperationAction(ISD::ConstantPool , MVT::i64 , Custom);
562 setOperationAction(ISD::JumpTable , MVT::i64 , Custom);
563 setOperationAction(ISD::GlobalAddress , MVT::i64 , Custom);
564 setOperationAction(ISD::ExternalSymbol, MVT::i64 , Custom);
    setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
  }
  // 64-bit add, sub, shl, sra, srl (iff 32-bit x86)
568 setOperationAction(ISD::SHL_PARTS , MVT::i32 , Custom);
569 setOperationAction(ISD::SRA_PARTS , MVT::i32 , Custom);
570 setOperationAction(ISD::SRL_PARTS , MVT::i32 , Custom);
571 if (Subtarget->is64Bit()) {
572 setOperationAction(ISD::SHL_PARTS , MVT::i64 , Custom);
573 setOperationAction(ISD::SRA_PARTS , MVT::i64 , Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  }
577 if (Subtarget->hasSSE1())
578 setOperationAction(ISD::PREFETCH , MVT::Other, Legal);
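  // A seq_cst fence is typically lowered to MFENCE here; when that is not
  // available, a locked no-op RMW on the stack provides the same ordering.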
580 setOperationAction(ISD::ATOMIC_FENCE , MVT::Other, Custom);
582 // Expand certain atomics
  for (unsigned i = 0; i != array_lengthof(IntVTs); ++i) {
    MVT VT = IntVTs[i];
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);
    setOperationAction(ISD::ATOMIC_STORE, VT, Custom);
  }
590 if (Subtarget->hasCmpxchg16b()) {
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i128, Custom);
  }
594 // FIXME - use subtarget debug flags
595 if (!Subtarget->isTargetDarwin() && !Subtarget->isTargetELF() &&
596 !Subtarget->isTargetCygMing() && !Subtarget->isTargetWin64()) {
    setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
  }
600 if (Subtarget->is64Bit()) {
601 setExceptionPointerRegister(X86::RAX);
    setExceptionSelectorRegister(X86::RDX);
  } else {
    setExceptionPointerRegister(X86::EAX);
    setExceptionSelectorRegister(X86::EDX);
  }
607 setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);
608 setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i64, Custom);
610 setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
611 setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);
613 setOperationAction(ISD::TRAP, MVT::Other, Legal);
614 setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
616 // VASTART needs to be custom lowered to use the VarArgsFrameIndex
617 setOperationAction(ISD::VASTART , MVT::Other, Custom);
618 setOperationAction(ISD::VAEND , MVT::Other, Expand);
619 if (Subtarget->is64Bit() && !Subtarget->isTargetWin64()) {
620 // TargetInfo::X86_64ABIBuiltinVaList
621 setOperationAction(ISD::VAARG , MVT::Other, Custom);
    setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  } else {
    // TargetInfo::CharPtrBuiltinVaList
    setOperationAction(ISD::VAARG, MVT::Other, Expand);
    setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  }
629 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
630 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
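  // Dynamic stack allocation is custom lowered so targets that must probe the
  // stack (e.g. Windows) can call their probe routine, and so segmented
  // stacks are supported.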
632 setOperationAction(ISD::DYNAMIC_STACKALLOC, getPointerTy(), Custom);
634 if (!TM.Options.UseSoftFloat && X86ScalarSSEf64) {
635 // f32 and f64 use SSE.
636 // Set up the FP register classes.
637 addRegisterClass(MVT::f32, &X86::FR32RegClass);
638 addRegisterClass(MVT::f64, &X86::FR64RegClass);
640 // Use ANDPD to simulate FABS.
641 setOperationAction(ISD::FABS , MVT::f64, Custom);
642 setOperationAction(ISD::FABS , MVT::f32, Custom);
644 // Use XORP to simulate FNEG.
645 setOperationAction(ISD::FNEG , MVT::f64, Custom);
646 setOperationAction(ISD::FNEG , MVT::f32, Custom);
648 // Use ANDPD and ORPD to simulate FCOPYSIGN.
649 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
650 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
652 // Lower this to FGETSIGNx86 plus an AND.
653 setOperationAction(ISD::FGETSIGN, MVT::i64, Custom);
654 setOperationAction(ISD::FGETSIGN, MVT::i32, Custom);
656 // We don't support sin/cos/fmod
657 setOperationAction(ISD::FSIN , MVT::f64, Expand);
658 setOperationAction(ISD::FCOS , MVT::f64, Expand);
659 setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
660 setOperationAction(ISD::FSIN , MVT::f32, Expand);
661 setOperationAction(ISD::FCOS , MVT::f32, Expand);
662 setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
    // Expand FP immediates into loads from the stack, except for the special
    // cases we handle.
666 addLegalFPImmediate(APFloat(+0.0)); // xorpd
667 addLegalFPImmediate(APFloat(+0.0f)); // xorps
668 } else if (!TM.Options.UseSoftFloat && X86ScalarSSEf32) {
669 // Use SSE for f32, x87 for f64.
670 // Set up the FP register classes.
671 addRegisterClass(MVT::f32, &X86::FR32RegClass);
672 addRegisterClass(MVT::f64, &X86::RFP64RegClass);
674 // Use ANDPS to simulate FABS.
675 setOperationAction(ISD::FABS , MVT::f32, Custom);
677 // Use XORP to simulate FNEG.
678 setOperationAction(ISD::FNEG , MVT::f32, Custom);
680 setOperationAction(ISD::UNDEF, MVT::f64, Expand);
682 // Use ANDPS and ORPS to simulate FCOPYSIGN.
683 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
684 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
686 // We don't support sin/cos/fmod
687 setOperationAction(ISD::FSIN , MVT::f32, Expand);
688 setOperationAction(ISD::FCOS , MVT::f32, Expand);
689 setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
691 // Special cases we handle for FP constants.
692 addLegalFPImmediate(APFloat(+0.0f)); // xorps
693 addLegalFPImmediate(APFloat(+0.0)); // FLD0
694 addLegalFPImmediate(APFloat(+1.0)); // FLD1
695 addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
696 addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS
698 if (!TM.Options.UnsafeFPMath) {
699 setOperationAction(ISD::FSIN , MVT::f64, Expand);
700 setOperationAction(ISD::FCOS , MVT::f64, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
    }
703 } else if (!TM.Options.UseSoftFloat) {
704 // f32 and f64 in x87.
705 // Set up the FP register classes.
706 addRegisterClass(MVT::f64, &X86::RFP64RegClass);
707 addRegisterClass(MVT::f32, &X86::RFP32RegClass);
709 setOperationAction(ISD::UNDEF, MVT::f64, Expand);
710 setOperationAction(ISD::UNDEF, MVT::f32, Expand);
711 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
712 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
714 if (!TM.Options.UnsafeFPMath) {
715 setOperationAction(ISD::FSIN , MVT::f64, Expand);
716 setOperationAction(ISD::FSIN , MVT::f32, Expand);
717 setOperationAction(ISD::FCOS , MVT::f64, Expand);
718 setOperationAction(ISD::FCOS , MVT::f32, Expand);
719 setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
    }
722 addLegalFPImmediate(APFloat(+0.0)); // FLD0
723 addLegalFPImmediate(APFloat(+1.0)); // FLD1
724 addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
725 addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS
726 addLegalFPImmediate(APFloat(+0.0f)); // FLD0
727 addLegalFPImmediate(APFloat(+1.0f)); // FLD1
728 addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
  }
732 // We don't support FMA.
733 setOperationAction(ISD::FMA, MVT::f64, Expand);
734 setOperationAction(ISD::FMA, MVT::f32, Expand);
736 // Long double always uses X87.
737 if (!TM.Options.UseSoftFloat) {
738 addRegisterClass(MVT::f80, &X86::RFP80RegClass);
739 setOperationAction(ISD::UNDEF, MVT::f80, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
    {
      APFloat TmpFlt = APFloat::getZero(APFloat::x87DoubleExtended);
      addLegalFPImmediate(TmpFlt);  // FLD0
      TmpFlt.changeSign();
      addLegalFPImmediate(TmpFlt);  // FLD0/FCHS

      bool ignored;
      APFloat TmpFlt2(+1.0);
      TmpFlt2.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven,
                      &ignored);
      addLegalFPImmediate(TmpFlt2);  // FLD1
      TmpFlt2.changeSign();
      addLegalFPImmediate(TmpFlt2);  // FLD1/FCHS
    }
756 if (!TM.Options.UnsafeFPMath) {
757 setOperationAction(ISD::FSIN , MVT::f80, Expand);
758 setOperationAction(ISD::FCOS , MVT::f80, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f80, Expand);
    }
762 setOperationAction(ISD::FFLOOR, MVT::f80, Expand);
763 setOperationAction(ISD::FCEIL, MVT::f80, Expand);
764 setOperationAction(ISD::FTRUNC, MVT::f80, Expand);
765 setOperationAction(ISD::FRINT, MVT::f80, Expand);
766 setOperationAction(ISD::FNEARBYINT, MVT::f80, Expand);
    setOperationAction(ISD::FMA, MVT::f80, Expand);
  }
770 // Always use a library call for pow.
771 setOperationAction(ISD::FPOW , MVT::f32 , Expand);
772 setOperationAction(ISD::FPOW , MVT::f64 , Expand);
773 setOperationAction(ISD::FPOW , MVT::f80 , Expand);
775 setOperationAction(ISD::FLOG, MVT::f80, Expand);
776 setOperationAction(ISD::FLOG2, MVT::f80, Expand);
777 setOperationAction(ISD::FLOG10, MVT::f80, Expand);
778 setOperationAction(ISD::FEXP, MVT::f80, Expand);
779 setOperationAction(ISD::FEXP2, MVT::f80, Expand);
780 setOperationAction(ISD::FMINNUM, MVT::f80, Expand);
781 setOperationAction(ISD::FMAXNUM, MVT::f80, Expand);
783 // First set operation action for all vector types to either promote
784 // (for widening) or expand (for scalarization). Then we will selectively
785 // turn on ones that can be effectively codegen'd.
786 for (MVT VT : MVT::vector_valuetypes()) {
787 setOperationAction(ISD::ADD , VT, Expand);
788 setOperationAction(ISD::SUB , VT, Expand);
789 setOperationAction(ISD::FADD, VT, Expand);
790 setOperationAction(ISD::FNEG, VT, Expand);
791 setOperationAction(ISD::FSUB, VT, Expand);
792 setOperationAction(ISD::MUL , VT, Expand);
793 setOperationAction(ISD::FMUL, VT, Expand);
794 setOperationAction(ISD::SDIV, VT, Expand);
795 setOperationAction(ISD::UDIV, VT, Expand);
796 setOperationAction(ISD::FDIV, VT, Expand);
797 setOperationAction(ISD::SREM, VT, Expand);
798 setOperationAction(ISD::UREM, VT, Expand);
799 setOperationAction(ISD::LOAD, VT, Expand);
800 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
801 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT,Expand);
802 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
803 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT,Expand);
804 setOperationAction(ISD::INSERT_SUBVECTOR, VT,Expand);
805 setOperationAction(ISD::FABS, VT, Expand);
806 setOperationAction(ISD::FSIN, VT, Expand);
807 setOperationAction(ISD::FSINCOS, VT, Expand);
808 setOperationAction(ISD::FCOS, VT, Expand);
809 setOperationAction(ISD::FSINCOS, VT, Expand);
810 setOperationAction(ISD::FREM, VT, Expand);
811 setOperationAction(ISD::FMA, VT, Expand);
812 setOperationAction(ISD::FPOWI, VT, Expand);
813 setOperationAction(ISD::FSQRT, VT, Expand);
814 setOperationAction(ISD::FCOPYSIGN, VT, Expand);
815 setOperationAction(ISD::FFLOOR, VT, Expand);
816 setOperationAction(ISD::FCEIL, VT, Expand);
817 setOperationAction(ISD::FTRUNC, VT, Expand);
818 setOperationAction(ISD::FRINT, VT, Expand);
819 setOperationAction(ISD::FNEARBYINT, VT, Expand);
820 setOperationAction(ISD::SMUL_LOHI, VT, Expand);
821 setOperationAction(ISD::MULHS, VT, Expand);
822 setOperationAction(ISD::UMUL_LOHI, VT, Expand);
823 setOperationAction(ISD::MULHU, VT, Expand);
824 setOperationAction(ISD::SDIVREM, VT, Expand);
825 setOperationAction(ISD::UDIVREM, VT, Expand);
826 setOperationAction(ISD::FPOW, VT, Expand);
827 setOperationAction(ISD::CTPOP, VT, Expand);
828 setOperationAction(ISD::CTTZ, VT, Expand);
829 setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
830 setOperationAction(ISD::CTLZ, VT, Expand);
831 setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
832 setOperationAction(ISD::SHL, VT, Expand);
833 setOperationAction(ISD::SRA, VT, Expand);
834 setOperationAction(ISD::SRL, VT, Expand);
835 setOperationAction(ISD::ROTL, VT, Expand);
836 setOperationAction(ISD::ROTR, VT, Expand);
837 setOperationAction(ISD::BSWAP, VT, Expand);
838 setOperationAction(ISD::SETCC, VT, Expand);
839 setOperationAction(ISD::FLOG, VT, Expand);
840 setOperationAction(ISD::FLOG2, VT, Expand);
841 setOperationAction(ISD::FLOG10, VT, Expand);
842 setOperationAction(ISD::FEXP, VT, Expand);
843 setOperationAction(ISD::FEXP2, VT, Expand);
844 setOperationAction(ISD::FP_TO_UINT, VT, Expand);
845 setOperationAction(ISD::FP_TO_SINT, VT, Expand);
846 setOperationAction(ISD::UINT_TO_FP, VT, Expand);
847 setOperationAction(ISD::SINT_TO_FP, VT, Expand);
848 setOperationAction(ISD::SIGN_EXTEND_INREG, VT,Expand);
849 setOperationAction(ISD::TRUNCATE, VT, Expand);
850 setOperationAction(ISD::SIGN_EXTEND, VT, Expand);
851 setOperationAction(ISD::ZERO_EXTEND, VT, Expand);
852 setOperationAction(ISD::ANY_EXTEND, VT, Expand);
853 setOperationAction(ISD::VSELECT, VT, Expand);
854 setOperationAction(ISD::SELECT_CC, VT, Expand);
855 for (MVT InnerVT : MVT::vector_valuetypes()) {
856 setTruncStoreAction(InnerVT, VT, Expand);
858 setLoadExtAction(ISD::SEXTLOAD, InnerVT, VT, Expand);
859 setLoadExtAction(ISD::ZEXTLOAD, InnerVT, VT, Expand);
861 // N.b. ISD::EXTLOAD legality is basically ignored except for i1-like
862 // types, we have to deal with them whether we ask for Expansion or not.
      // Setting Expand causes its own optimisation problems though, so leave
      // them legal.
      if (VT.getVectorElementType() == MVT::i1)
        setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);
    }
  }
870 // FIXME: In order to prevent SSE instructions being expanded to MMX ones
871 // with -msoft-float, disable use of MMX as well.
872 if (!TM.Options.UseSoftFloat && Subtarget->hasMMX()) {
873 addRegisterClass(MVT::x86mmx, &X86::VR64RegClass);
    // No operations on x86mmx supported, everything uses intrinsics.
  }
877 // MMX-sized vectors (other than x86mmx) are expected to be expanded
878 // into smaller operations.
879 setOperationAction(ISD::MULHS, MVT::v8i8, Expand);
880 setOperationAction(ISD::MULHS, MVT::v4i16, Expand);
881 setOperationAction(ISD::MULHS, MVT::v2i32, Expand);
882 setOperationAction(ISD::MULHS, MVT::v1i64, Expand);
883 setOperationAction(ISD::AND, MVT::v8i8, Expand);
884 setOperationAction(ISD::AND, MVT::v4i16, Expand);
885 setOperationAction(ISD::AND, MVT::v2i32, Expand);
886 setOperationAction(ISD::AND, MVT::v1i64, Expand);
887 setOperationAction(ISD::OR, MVT::v8i8, Expand);
888 setOperationAction(ISD::OR, MVT::v4i16, Expand);
889 setOperationAction(ISD::OR, MVT::v2i32, Expand);
890 setOperationAction(ISD::OR, MVT::v1i64, Expand);
891 setOperationAction(ISD::XOR, MVT::v8i8, Expand);
892 setOperationAction(ISD::XOR, MVT::v4i16, Expand);
893 setOperationAction(ISD::XOR, MVT::v2i32, Expand);
894 setOperationAction(ISD::XOR, MVT::v1i64, Expand);
895 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i8, Expand);
896 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i16, Expand);
897 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i32, Expand);
898 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v1i64, Expand);
899 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v1i64, Expand);
900 setOperationAction(ISD::SELECT, MVT::v8i8, Expand);
901 setOperationAction(ISD::SELECT, MVT::v4i16, Expand);
902 setOperationAction(ISD::SELECT, MVT::v2i32, Expand);
903 setOperationAction(ISD::SELECT, MVT::v1i64, Expand);
904 setOperationAction(ISD::BITCAST, MVT::v8i8, Expand);
905 setOperationAction(ISD::BITCAST, MVT::v4i16, Expand);
906 setOperationAction(ISD::BITCAST, MVT::v2i32, Expand);
907 setOperationAction(ISD::BITCAST, MVT::v1i64, Expand);
909 if (!TM.Options.UseSoftFloat && Subtarget->hasSSE1()) {
910 addRegisterClass(MVT::v4f32, &X86::VR128RegClass);
912 setOperationAction(ISD::FADD, MVT::v4f32, Legal);
913 setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
914 setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
915 setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
916 setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
917 setOperationAction(ISD::FNEG, MVT::v4f32, Custom);
918 setOperationAction(ISD::FABS, MVT::v4f32, Custom);
919 setOperationAction(ISD::LOAD, MVT::v4f32, Legal);
920 setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
921 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
922 setOperationAction(ISD::VSELECT, MVT::v4f32, Custom);
923 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
924 setOperationAction(ISD::SELECT, MVT::v4f32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Custom);
  }
928 if (!TM.Options.UseSoftFloat && Subtarget->hasSSE2()) {
929 addRegisterClass(MVT::v2f64, &X86::VR128RegClass);
931 // FIXME: Unfortunately, -soft-float and -no-implicit-float mean XMM
932 // registers cannot be used even for integer operations.
933 addRegisterClass(MVT::v16i8, &X86::VR128RegClass);
934 addRegisterClass(MVT::v8i16, &X86::VR128RegClass);
935 addRegisterClass(MVT::v4i32, &X86::VR128RegClass);
936 addRegisterClass(MVT::v2i64, &X86::VR128RegClass);
938 setOperationAction(ISD::ADD, MVT::v16i8, Legal);
939 setOperationAction(ISD::ADD, MVT::v8i16, Legal);
940 setOperationAction(ISD::ADD, MVT::v4i32, Legal);
941 setOperationAction(ISD::ADD, MVT::v2i64, Legal);
942 setOperationAction(ISD::MUL, MVT::v4i32, Custom);
943 setOperationAction(ISD::MUL, MVT::v2i64, Custom);
944 setOperationAction(ISD::UMUL_LOHI, MVT::v4i32, Custom);
945 setOperationAction(ISD::SMUL_LOHI, MVT::v4i32, Custom);
946 setOperationAction(ISD::MULHU, MVT::v8i16, Legal);
947 setOperationAction(ISD::MULHS, MVT::v8i16, Legal);
948 setOperationAction(ISD::SUB, MVT::v16i8, Legal);
949 setOperationAction(ISD::SUB, MVT::v8i16, Legal);
950 setOperationAction(ISD::SUB, MVT::v4i32, Legal);
951 setOperationAction(ISD::SUB, MVT::v2i64, Legal);
952 setOperationAction(ISD::MUL, MVT::v8i16, Legal);
953 setOperationAction(ISD::FADD, MVT::v2f64, Legal);
954 setOperationAction(ISD::FSUB, MVT::v2f64, Legal);
955 setOperationAction(ISD::FMUL, MVT::v2f64, Legal);
956 setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
957 setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
958 setOperationAction(ISD::FNEG, MVT::v2f64, Custom);
959 setOperationAction(ISD::FABS, MVT::v2f64, Custom);
961 setOperationAction(ISD::SETCC, MVT::v2i64, Custom);
962 setOperationAction(ISD::SETCC, MVT::v16i8, Custom);
963 setOperationAction(ISD::SETCC, MVT::v8i16, Custom);
964 setOperationAction(ISD::SETCC, MVT::v4i32, Custom);
966 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Custom);
967 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Custom);
968 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
969 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
970 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
972 // Only provide customized ctpop vector bit twiddling for vector types we
973 // know to perform better than using the popcnt instructions on each vector
974 // element. If popcnt isn't supported, always provide the custom version.
975 if (!Subtarget->hasPOPCNT()) {
976 setOperationAction(ISD::CTPOP, MVT::v4i32, Custom);
      setOperationAction(ISD::CTPOP, MVT::v2i64, Custom);
    }
980 // Custom lower build_vector, vector_shuffle, and extract_vector_elt.
981 for (int i = MVT::v16i8; i != MVT::v2i64; ++i) {
982 MVT VT = (MVT::SimpleValueType)i;
983 // Do not attempt to custom lower non-power-of-2 vectors
      if (!isPowerOf2_32(VT.getVectorNumElements()))
        continue;
      // Do not attempt to custom lower non-128-bit vectors
      if (!VT.is128BitVector())
        continue;
      setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
      setOperationAction(ISD::VSELECT, VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    }
995 // We support custom legalizing of sext and anyext loads for specific
996 // memory vector types which we can load as a scalar (or sequence of
997 // scalars) and extend in-register to a legal 128-bit vector type. For sext
998 // loads these must work with a single scalar load.
999 for (MVT VT : MVT::integer_vector_valuetypes()) {
1000 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i8, Custom);
1001 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i16, Custom);
1002 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v8i8, Custom);
1003 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i8, Custom);
1004 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i16, Custom);
1005 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i32, Custom);
1006 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i8, Custom);
1007 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i16, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v8i8, Custom);
    }
1011 setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);
1012 setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
1013 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom);
1014 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom);
1015 setOperationAction(ISD::VSELECT, MVT::v2f64, Custom);
1016 setOperationAction(ISD::VSELECT, MVT::v2i64, Custom);
1017 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom);
1018 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);
1020 if (Subtarget->is64Bit()) {
1021 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
    }
1025 // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64.
1026 for (int i = MVT::v16i8; i != MVT::v2i64; ++i) {
1027 MVT VT = (MVT::SimpleValueType)i;
1029 // Do not attempt to promote non-128-bit vectors
      if (!VT.is128BitVector())
        continue;
1033 setOperationAction(ISD::AND, VT, Promote);
1034 AddPromotedToType (ISD::AND, VT, MVT::v2i64);
1035 setOperationAction(ISD::OR, VT, Promote);
1036 AddPromotedToType (ISD::OR, VT, MVT::v2i64);
1037 setOperationAction(ISD::XOR, VT, Promote);
1038 AddPromotedToType (ISD::XOR, VT, MVT::v2i64);
1039 setOperationAction(ISD::LOAD, VT, Promote);
1040 AddPromotedToType (ISD::LOAD, VT, MVT::v2i64);
1041 setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType (ISD::SELECT, VT, MVT::v2i64);
    }
1045 // Custom lower v2i64 and v2f64 selects.
1046 setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
1047 setOperationAction(ISD::LOAD, MVT::v2i64, Legal);
1048 setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
1049 setOperationAction(ISD::SELECT, MVT::v2i64, Custom);
1051 setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
1052 setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
1054 setOperationAction(ISD::UINT_TO_FP, MVT::v4i8, Custom);
1055 setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
1056 // As there is no 64-bit GPR available, we need build a special custom
1057 // sequence to convert from v2i32 to v2f32.
1058 if (!Subtarget->is64Bit())
1059 setOperationAction(ISD::UINT_TO_FP, MVT::v2f32, Custom);
1061 setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
1062 setOperationAction(ISD::FP_ROUND, MVT::v2f32, Custom);
1064 for (MVT VT : MVT::fp_vector_valuetypes())
1065 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2f32, Legal);
1067 setOperationAction(ISD::BITCAST, MVT::v2i32, Custom);
1068 setOperationAction(ISD::BITCAST, MVT::v4i16, Custom);
    setOperationAction(ISD::BITCAST, MVT::v8i8, Custom);
  }
1072 if (!TM.Options.UseSoftFloat && Subtarget->hasSSE41()) {
1073 setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
1074 setOperationAction(ISD::FCEIL, MVT::f32, Legal);
1075 setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
1076 setOperationAction(ISD::FRINT, MVT::f32, Legal);
1077 setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
1078 setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
1079 setOperationAction(ISD::FCEIL, MVT::f64, Legal);
1080 setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
1081 setOperationAction(ISD::FRINT, MVT::f64, Legal);
1082 setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);
1084 setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
1085 setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
1086 setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
1087 setOperationAction(ISD::FRINT, MVT::v4f32, Legal);
1088 setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
1089 setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
1090 setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
1091 setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
1092 setOperationAction(ISD::FRINT, MVT::v2f64, Legal);
1093 setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
1095 // FIXME: Do we need to handle scalar-to-vector here?
1096 setOperationAction(ISD::MUL, MVT::v4i32, Legal);
    // We directly match byte blends in the backend as they match the VSELECT
    // condition form.
1100 setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);
1102 // SSE41 brings specific instructions for doing vector sign extend even in
1103 // cases where we don't have SRA.
1104 for (MVT VT : MVT::integer_vector_valuetypes()) {
1105 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i8, Custom);
1106 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i16, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i32, Custom);
    }
1110 // SSE41 also has vector sign/zero extending loads, PMOV[SZ]X
1111 setLoadExtAction(ISD::SEXTLOAD, MVT::v8i16, MVT::v8i8, Legal);
1112 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i32, MVT::v4i8, Legal);
1113 setLoadExtAction(ISD::SEXTLOAD, MVT::v2i64, MVT::v2i8, Legal);
1114 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i32, MVT::v4i16, Legal);
1115 setLoadExtAction(ISD::SEXTLOAD, MVT::v2i64, MVT::v2i16, Legal);
1116 setLoadExtAction(ISD::SEXTLOAD, MVT::v2i64, MVT::v2i32, Legal);
1118 setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i16, MVT::v8i8, Legal);
1119 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i32, MVT::v4i8, Legal);
1120 setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i8, Legal);
1121 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i32, MVT::v4i16, Legal);
1122 setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i16, Legal);
1123 setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i32, Legal);
    // i8 and i16 vectors are custom because the source register and source
    // memory operand types are not the same width. f32 vectors are
    // custom since the immediate controlling the insert encodes additional
    // information.
1129 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
1130 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
1131 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
1132 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
1134 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Custom);
1135 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Custom);
1136 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Custom);
1137 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
1139 // FIXME: these should be Legal, but that's only for the case where
1140 // the index is constant. For now custom expand to deal with that.
1141 if (Subtarget->is64Bit()) {
1142 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
    }
  }
1147 if (Subtarget->hasSSE2()) {
1148 setOperationAction(ISD::SRL, MVT::v8i16, Custom);
1149 setOperationAction(ISD::SRL, MVT::v16i8, Custom);
1151 setOperationAction(ISD::SHL, MVT::v8i16, Custom);
1152 setOperationAction(ISD::SHL, MVT::v16i8, Custom);
1154 setOperationAction(ISD::SRA, MVT::v8i16, Custom);
1155 setOperationAction(ISD::SRA, MVT::v16i8, Custom);
    // In the customized shift lowering, the legal cases in AVX2 will be
    // recognized.
1159 setOperationAction(ISD::SRL, MVT::v2i64, Custom);
1160 setOperationAction(ISD::SRL, MVT::v4i32, Custom);
1162 setOperationAction(ISD::SHL, MVT::v2i64, Custom);
1163 setOperationAction(ISD::SHL, MVT::v4i32, Custom);
    setOperationAction(ISD::SRA, MVT::v4i32, Custom);
  }
1168 if (!TM.Options.UseSoftFloat && Subtarget->hasFp256()) {
1169 addRegisterClass(MVT::v32i8, &X86::VR256RegClass);
1170 addRegisterClass(MVT::v16i16, &X86::VR256RegClass);
1171 addRegisterClass(MVT::v8i32, &X86::VR256RegClass);
1172 addRegisterClass(MVT::v8f32, &X86::VR256RegClass);
1173 addRegisterClass(MVT::v4i64, &X86::VR256RegClass);
1174 addRegisterClass(MVT::v4f64, &X86::VR256RegClass);
1176 setOperationAction(ISD::LOAD, MVT::v8f32, Legal);
1177 setOperationAction(ISD::LOAD, MVT::v4f64, Legal);
1178 setOperationAction(ISD::LOAD, MVT::v4i64, Legal);
1180 setOperationAction(ISD::FADD, MVT::v8f32, Legal);
1181 setOperationAction(ISD::FSUB, MVT::v8f32, Legal);
1182 setOperationAction(ISD::FMUL, MVT::v8f32, Legal);
1183 setOperationAction(ISD::FDIV, MVT::v8f32, Legal);
1184 setOperationAction(ISD::FSQRT, MVT::v8f32, Legal);
1185 setOperationAction(ISD::FFLOOR, MVT::v8f32, Legal);
1186 setOperationAction(ISD::FCEIL, MVT::v8f32, Legal);
1187 setOperationAction(ISD::FTRUNC, MVT::v8f32, Legal);
1188 setOperationAction(ISD::FRINT, MVT::v8f32, Legal);
1189 setOperationAction(ISD::FNEARBYINT, MVT::v8f32, Legal);
1190 setOperationAction(ISD::FNEG, MVT::v8f32, Custom);
1191 setOperationAction(ISD::FABS, MVT::v8f32, Custom);
1193 setOperationAction(ISD::FADD, MVT::v4f64, Legal);
1194 setOperationAction(ISD::FSUB, MVT::v4f64, Legal);
1195 setOperationAction(ISD::FMUL, MVT::v4f64, Legal);
1196 setOperationAction(ISD::FDIV, MVT::v4f64, Legal);
1197 setOperationAction(ISD::FSQRT, MVT::v4f64, Legal);
1198 setOperationAction(ISD::FFLOOR, MVT::v4f64, Legal);
1199 setOperationAction(ISD::FCEIL, MVT::v4f64, Legal);
1200 setOperationAction(ISD::FTRUNC, MVT::v4f64, Legal);
1201 setOperationAction(ISD::FRINT, MVT::v4f64, Legal);
1202 setOperationAction(ISD::FNEARBYINT, MVT::v4f64, Legal);
1203 setOperationAction(ISD::FNEG, MVT::v4f64, Custom);
1204 setOperationAction(ISD::FABS, MVT::v4f64, Custom);
1206 // (fp_to_int:v8i16 (v8f32 ..)) requires the result type to be promoted
1207 // even though v8i16 is a legal type.
1208 setOperationAction(ISD::FP_TO_SINT, MVT::v8i16, Promote);
1209 setOperationAction(ISD::FP_TO_UINT, MVT::v8i16, Promote);
1210 setOperationAction(ISD::FP_TO_SINT, MVT::v8i32, Legal);
1212 setOperationAction(ISD::SINT_TO_FP, MVT::v8i16, Promote);
1213 setOperationAction(ISD::SINT_TO_FP, MVT::v8i32, Legal);
1214 setOperationAction(ISD::FP_ROUND, MVT::v4f32, Legal);
1216 setOperationAction(ISD::UINT_TO_FP, MVT::v8i8, Custom);
1217 setOperationAction(ISD::UINT_TO_FP, MVT::v8i16, Custom);
1219 for (MVT VT : MVT::fp_vector_valuetypes())
1220 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4f32, Legal);
1222 setOperationAction(ISD::SRL, MVT::v16i16, Custom);
1223 setOperationAction(ISD::SRL, MVT::v32i8, Custom);
1225 setOperationAction(ISD::SHL, MVT::v16i16, Custom);
1226 setOperationAction(ISD::SHL, MVT::v32i8, Custom);
1228 setOperationAction(ISD::SRA, MVT::v16i16, Custom);
1229 setOperationAction(ISD::SRA, MVT::v32i8, Custom);
1231 setOperationAction(ISD::SETCC, MVT::v32i8, Custom);
1232 setOperationAction(ISD::SETCC, MVT::v16i16, Custom);
1233 setOperationAction(ISD::SETCC, MVT::v8i32, Custom);
1234 setOperationAction(ISD::SETCC, MVT::v4i64, Custom);
1236 setOperationAction(ISD::SELECT, MVT::v4f64, Custom);
1237 setOperationAction(ISD::SELECT, MVT::v4i64, Custom);
1238 setOperationAction(ISD::SELECT, MVT::v8f32, Custom);
1240 setOperationAction(ISD::SIGN_EXTEND, MVT::v4i64, Custom);
1241 setOperationAction(ISD::SIGN_EXTEND, MVT::v8i32, Custom);
1242 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i16, Custom);
1243 setOperationAction(ISD::ZERO_EXTEND, MVT::v4i64, Custom);
1244 setOperationAction(ISD::ZERO_EXTEND, MVT::v8i32, Custom);
1245 setOperationAction(ISD::ZERO_EXTEND, MVT::v16i16, Custom);
1246 setOperationAction(ISD::ANY_EXTEND, MVT::v4i64, Custom);
1247 setOperationAction(ISD::ANY_EXTEND, MVT::v8i32, Custom);
1248 setOperationAction(ISD::ANY_EXTEND, MVT::v16i16, Custom);
1249 setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom);
1250 setOperationAction(ISD::TRUNCATE, MVT::v8i16, Custom);
1251 setOperationAction(ISD::TRUNCATE, MVT::v4i32, Custom);
1253 if (Subtarget->hasFMA() || Subtarget->hasFMA4()) {
1254 setOperationAction(ISD::FMA, MVT::v8f32, Legal);
1255 setOperationAction(ISD::FMA, MVT::v4f64, Legal);
1256 setOperationAction(ISD::FMA, MVT::v4f32, Legal);
1257 setOperationAction(ISD::FMA, MVT::v2f64, Legal);
1258 setOperationAction(ISD::FMA, MVT::f32, Legal);
      setOperationAction(ISD::FMA, MVT::f64, Legal);
    }
1262 if (Subtarget->hasInt256()) {
1263 setOperationAction(ISD::ADD, MVT::v4i64, Legal);
1264 setOperationAction(ISD::ADD, MVT::v8i32, Legal);
1265 setOperationAction(ISD::ADD, MVT::v16i16, Legal);
1266 setOperationAction(ISD::ADD, MVT::v32i8, Legal);
1268 setOperationAction(ISD::SUB, MVT::v4i64, Legal);
1269 setOperationAction(ISD::SUB, MVT::v8i32, Legal);
1270 setOperationAction(ISD::SUB, MVT::v16i16, Legal);
1271 setOperationAction(ISD::SUB, MVT::v32i8, Legal);
1273 setOperationAction(ISD::MUL, MVT::v4i64, Custom);
1274 setOperationAction(ISD::MUL, MVT::v8i32, Legal);
1275 setOperationAction(ISD::MUL, MVT::v16i16, Legal);
1276 // Don't lower v32i8 because there is no 128-bit byte mul
1278 setOperationAction(ISD::UMUL_LOHI, MVT::v8i32, Custom);
1279 setOperationAction(ISD::SMUL_LOHI, MVT::v8i32, Custom);
1280 setOperationAction(ISD::MULHU, MVT::v16i16, Legal);
1281 setOperationAction(ISD::MULHS, MVT::v16i16, Legal);
1283 // The custom lowering for UINT_TO_FP for v8i32 becomes interesting
1284 // when we have a 256bit-wide blend with immediate.
1285 setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Custom);
1287 // Only provide customized ctpop vector bit twiddling for vector types we
1288 // know to perform better than using the popcnt instructions on each
      // vector element. If popcnt isn't supported, always provide the custom
      // version.
1291 if (!Subtarget->hasPOPCNT())
1292 setOperationAction(ISD::CTPOP, MVT::v4i64, Custom);
1294 // Custom CTPOP always performs better on natively supported v8i32
1295 setOperationAction(ISD::CTPOP, MVT::v8i32, Custom);
1297 // AVX2 also has wider vector sign/zero extending loads, VPMOV[SZ]X
1298 setLoadExtAction(ISD::SEXTLOAD, MVT::v16i16, MVT::v16i8, Legal);
1299 setLoadExtAction(ISD::SEXTLOAD, MVT::v8i32, MVT::v8i8, Legal);
1300 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64, MVT::v4i8, Legal);
1301 setLoadExtAction(ISD::SEXTLOAD, MVT::v8i32, MVT::v8i16, Legal);
1302 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64, MVT::v4i16, Legal);
1303 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64, MVT::v4i32, Legal);
1305 setLoadExtAction(ISD::ZEXTLOAD, MVT::v16i16, MVT::v16i8, Legal);
1306 setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i32, MVT::v8i8, Legal);
1307 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64, MVT::v4i8, Legal);
1308 setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i32, MVT::v8i16, Legal);
1309 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64, MVT::v4i16, Legal);
1310 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64, MVT::v4i32, Legal);
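// For example, with these marked legal a zero-extending load of eight i8
// elements into v8i32 can be selected directly as VPMOVZXBD with a memory
// operand, instead of a plain 64-bit load followed by a separate extend.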
1312 setOperationAction(ISD::ADD, MVT::v4i64, Custom);
1313 setOperationAction(ISD::ADD, MVT::v8i32, Custom);
1314 setOperationAction(ISD::ADD, MVT::v16i16, Custom);
1315 setOperationAction(ISD::ADD, MVT::v32i8, Custom);
1317 setOperationAction(ISD::SUB, MVT::v4i64, Custom);
1318 setOperationAction(ISD::SUB, MVT::v8i32, Custom);
1319 setOperationAction(ISD::SUB, MVT::v16i16, Custom);
1320 setOperationAction(ISD::SUB, MVT::v32i8, Custom);
1322 setOperationAction(ISD::MUL, MVT::v4i64, Custom);
1323 setOperationAction(ISD::MUL, MVT::v8i32, Custom);
1324 setOperationAction(ISD::MUL, MVT::v16i16, Custom);
1325 // Don't lower v32i8 because there is no 128-bit byte mul
1328 // In the customized shift lowering, the legal cases in AVX2 will be
1329 // recognized.
1330 setOperationAction(ISD::SRL, MVT::v4i64, Custom);
1331 setOperationAction(ISD::SRL, MVT::v8i32, Custom);
1333 setOperationAction(ISD::SHL, MVT::v4i64, Custom);
1334 setOperationAction(ISD::SHL, MVT::v8i32, Custom);
1336 setOperationAction(ISD::SRA, MVT::v8i32, Custom);
1338 // Custom lower several nodes for 256-bit types.
1339 for (MVT VT : MVT::vector_valuetypes()) {
1340 if (VT.getScalarSizeInBits() >= 32) {
1341 setOperationAction(ISD::MLOAD, VT, Legal);
1342 setOperationAction(ISD::MSTORE, VT, Legal);
1344 // Extract subvector is special because the value type
1345 // (result) is 128-bit but the source is 256-bit wide.
1346 if (VT.is128BitVector()) {
1347 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1349 // Do not attempt to custom lower other non-256-bit vectors
1350 if (!VT.is256BitVector())
1351 continue;
1353 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1354 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1355 setOperationAction(ISD::VSELECT, VT, Custom);
1356 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1357 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1358 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
1359 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1360 setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
1363 if (Subtarget->hasInt256())
1364 setOperationAction(ISD::VSELECT, MVT::v32i8, Legal);
1367 // Promote v32i8, v16i16, v8i32 select, and, or, xor to v4i64.
1368 for (int i = MVT::v32i8; i != MVT::v4i64; ++i) {
1369 MVT VT = (MVT::SimpleValueType)i;
1371 // Do not attempt to promote non-256-bit vectors
1372 if (!VT.is256BitVector())
1373 continue;
1375 setOperationAction(ISD::AND, VT, Promote);
1376 AddPromotedToType (ISD::AND, VT, MVT::v4i64);
1377 setOperationAction(ISD::OR, VT, Promote);
1378 AddPromotedToType (ISD::OR, VT, MVT::v4i64);
1379 setOperationAction(ISD::XOR, VT, Promote);
1380 AddPromotedToType (ISD::XOR, VT, MVT::v4i64);
1381 setOperationAction(ISD::LOAD, VT, Promote);
1382 AddPromotedToType (ISD::LOAD, VT, MVT::v4i64);
1383 setOperationAction(ISD::SELECT, VT, Promote);
1384 AddPromotedToType (ISD::SELECT, VT, MVT::v4i64);
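// Promotion here means the operation is carried out in the v4i64 domain with
// bitcasts around it, roughly:
//   (v8i32 (and X, Y)) -> bitcast (and (bitcast X to v4i64),
//                                      (bitcast Y to v4i64)) to v8i32
// which is free since YMM bitwise operations are type-agnostic.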
1388 if (!TM.Options.UseSoftFloat && Subtarget->hasAVX512()) {
1389 addRegisterClass(MVT::v16i32, &X86::VR512RegClass);
1390 addRegisterClass(MVT::v16f32, &X86::VR512RegClass);
1391 addRegisterClass(MVT::v8i64, &X86::VR512RegClass);
1392 addRegisterClass(MVT::v8f64, &X86::VR512RegClass);
1394 addRegisterClass(MVT::i1, &X86::VK1RegClass);
1395 addRegisterClass(MVT::v8i1, &X86::VK8RegClass);
1396 addRegisterClass(MVT::v16i1, &X86::VK16RegClass);
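// v8i1 and v16i1 (and scalar i1) live in the AVX-512 mask registers k0-k7.
// For example, a VPCMPGTD on two zmm registers produces a v16i1 directly in a
// k register, which can then be used as a write-mask or moved to a GPR with
// KMOV.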
1398 for (MVT VT : MVT::fp_vector_valuetypes())
1399 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v8f32, Legal);
1401 setOperationAction(ISD::BR_CC, MVT::i1, Expand);
1402 setOperationAction(ISD::SETCC, MVT::i1, Custom);
1403 setOperationAction(ISD::XOR, MVT::i1, Legal);
1404 setOperationAction(ISD::OR, MVT::i1, Legal);
1405 setOperationAction(ISD::AND, MVT::i1, Legal);
1406 setOperationAction(ISD::LOAD, MVT::v16f32, Legal);
1407 setOperationAction(ISD::LOAD, MVT::v8f64, Legal);
1408 setOperationAction(ISD::LOAD, MVT::v8i64, Legal);
1409 setOperationAction(ISD::LOAD, MVT::v16i32, Legal);
1410 setOperationAction(ISD::LOAD, MVT::v16i1, Legal);
1412 setOperationAction(ISD::FADD, MVT::v16f32, Legal);
1413 setOperationAction(ISD::FSUB, MVT::v16f32, Legal);
1414 setOperationAction(ISD::FMUL, MVT::v16f32, Legal);
1415 setOperationAction(ISD::FDIV, MVT::v16f32, Legal);
1416 setOperationAction(ISD::FSQRT, MVT::v16f32, Legal);
1417 setOperationAction(ISD::FNEG, MVT::v16f32, Custom);
1419 setOperationAction(ISD::FADD, MVT::v8f64, Legal);
1420 setOperationAction(ISD::FSUB, MVT::v8f64, Legal);
1421 setOperationAction(ISD::FMUL, MVT::v8f64, Legal);
1422 setOperationAction(ISD::FDIV, MVT::v8f64, Legal);
1423 setOperationAction(ISD::FSQRT, MVT::v8f64, Legal);
1424 setOperationAction(ISD::FNEG, MVT::v8f64, Custom);
1425 setOperationAction(ISD::FMA, MVT::v8f64, Legal);
1426 setOperationAction(ISD::FMA, MVT::v16f32, Legal);
1428 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal);
1429 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
1430 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal);
1431 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal);
1432 if (Subtarget->is64Bit()) {
1433 setOperationAction(ISD::FP_TO_UINT, MVT::i64, Legal);
1434 setOperationAction(ISD::FP_TO_SINT, MVT::i64, Legal);
1435 setOperationAction(ISD::SINT_TO_FP, MVT::i64, Legal);
1436 setOperationAction(ISD::UINT_TO_FP, MVT::i64, Legal);
1438 setOperationAction(ISD::FP_TO_SINT, MVT::v16i32, Legal);
1439 setOperationAction(ISD::FP_TO_UINT, MVT::v16i32, Legal);
1440 setOperationAction(ISD::FP_TO_UINT, MVT::v8i32, Legal);
1441 setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
1442 setOperationAction(ISD::SINT_TO_FP, MVT::v16i32, Legal);
1443 setOperationAction(ISD::SINT_TO_FP, MVT::v8i1, Custom);
1444 setOperationAction(ISD::SINT_TO_FP, MVT::v16i1, Custom);
1445 setOperationAction(ISD::SINT_TO_FP, MVT::v16i8, Promote);
1446 setOperationAction(ISD::SINT_TO_FP, MVT::v16i16, Promote);
1447 setOperationAction(ISD::UINT_TO_FP, MVT::v16i32, Legal);
1448 setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Legal);
1449 setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
1450 setOperationAction(ISD::FP_ROUND, MVT::v8f32, Legal);
1451 setOperationAction(ISD::FP_EXTEND, MVT::v8f32, Legal);
1453 setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);
1454 setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom);
1455 setOperationAction(ISD::TRUNCATE, MVT::v8i32, Custom);
1456 setOperationAction(ISD::TRUNCATE, MVT::v8i1, Custom);
1457 setOperationAction(ISD::TRUNCATE, MVT::v16i1, Custom);
1458 setOperationAction(ISD::TRUNCATE, MVT::v16i16, Custom);
1459 setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom);
1460 setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64, Custom);
1461 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom);
1462 setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64, Custom);
1463 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i8, Custom);
1464 setOperationAction(ISD::SIGN_EXTEND, MVT::v8i16, Custom);
1465 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i16, Custom);
1467 setOperationAction(ISD::FFLOOR, MVT::v16f32, Legal);
1468 setOperationAction(ISD::FFLOOR, MVT::v8f64, Legal);
1469 setOperationAction(ISD::FCEIL, MVT::v16f32, Legal);
1470 setOperationAction(ISD::FCEIL, MVT::v8f64, Legal);
1471 setOperationAction(ISD::FTRUNC, MVT::v16f32, Legal);
1472 setOperationAction(ISD::FTRUNC, MVT::v8f64, Legal);
1473 setOperationAction(ISD::FRINT, MVT::v16f32, Legal);
1474 setOperationAction(ISD::FRINT, MVT::v8f64, Legal);
1475 setOperationAction(ISD::FNEARBYINT, MVT::v16f32, Legal);
1476 setOperationAction(ISD::FNEARBYINT, MVT::v8f64, Legal);
1478 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f64, Custom);
1479 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i64, Custom);
1480 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16f32, Custom);
1481 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i32, Custom);
1482 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i1, Custom);
1483 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i1, Legal);
1485 setOperationAction(ISD::SETCC, MVT::v16i1, Custom);
1486 setOperationAction(ISD::SETCC, MVT::v8i1, Custom);
1488 setOperationAction(ISD::MUL, MVT::v8i64, Custom);
1490 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i1, Custom);
1491 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i1, Custom);
1492 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i1, Custom);
1493 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i1, Custom);
1494 setOperationAction(ISD::BUILD_VECTOR, MVT::v8i1, Custom);
1495 setOperationAction(ISD::BUILD_VECTOR, MVT::v16i1, Custom);
1496 setOperationAction(ISD::SELECT, MVT::v8f64, Custom);
1497 setOperationAction(ISD::SELECT, MVT::v8i64, Custom);
1498 setOperationAction(ISD::SELECT, MVT::v16f32, Custom);
1500 setOperationAction(ISD::ADD, MVT::v8i64, Legal);
1501 setOperationAction(ISD::ADD, MVT::v16i32, Legal);
1503 setOperationAction(ISD::SUB, MVT::v8i64, Legal);
1504 setOperationAction(ISD::SUB, MVT::v16i32, Legal);
1506 setOperationAction(ISD::MUL, MVT::v16i32, Legal);
1508 setOperationAction(ISD::SRL, MVT::v8i64, Custom);
1509 setOperationAction(ISD::SRL, MVT::v16i32, Custom);
1511 setOperationAction(ISD::SHL, MVT::v8i64, Custom);
1512 setOperationAction(ISD::SHL, MVT::v16i32, Custom);
1514 setOperationAction(ISD::SRA, MVT::v8i64, Custom);
1515 setOperationAction(ISD::SRA, MVT::v16i32, Custom);
1517 setOperationAction(ISD::AND, MVT::v8i64, Legal);
1518 setOperationAction(ISD::OR, MVT::v8i64, Legal);
1519 setOperationAction(ISD::XOR, MVT::v8i64, Legal);
1520 setOperationAction(ISD::AND, MVT::v16i32, Legal);
1521 setOperationAction(ISD::OR, MVT::v16i32, Legal);
1522 setOperationAction(ISD::XOR, MVT::v16i32, Legal);
1524 if (Subtarget->hasCDI()) {
1525 setOperationAction(ISD::CTLZ, MVT::v8i64, Legal);
1526 setOperationAction(ISD::CTLZ, MVT::v16i32, Legal);
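// AVX-512 CD provides VPLZCNTD/VPLZCNTQ, a per-element leading-zero count, so
// CTLZ on v16i32 and v8i64 maps to a single instruction here.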
1529 // Custom lower several nodes.
1530 for (MVT VT : MVT::vector_valuetypes()) {
1531 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
1532 // Extract subvector is special because the value type
1533 // (result) is 256/128-bit but the source is 512-bit wide.
1534 if (VT.is128BitVector() || VT.is256BitVector()) {
1535 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1537 if (VT.getVectorElementType() == MVT::i1)
1538 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
1540 // Do not attempt to custom lower other non-512-bit vectors
1541 if (!VT.is512BitVector())
1542 continue;
1544 if (EltSize >= 32) {
1545 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1546 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1547 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1548 setOperationAction(ISD::VSELECT, VT, Legal);
1549 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1550 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
1551 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1552 setOperationAction(ISD::MLOAD, VT, Legal);
1553 setOperationAction(ISD::MSTORE, VT, Legal);
1556 for (int i = MVT::v32i8; i != MVT::v8i64; ++i) {
1557 MVT VT = (MVT::SimpleValueType)i;
1559 // Do not attempt to promote non-512-bit vectors.
1560 if (!VT.is512BitVector())
1561 continue;
1563 setOperationAction(ISD::SELECT, VT, Promote);
1564 AddPromotedToType (ISD::SELECT, VT, MVT::v8i64);
1568 if (!TM.Options.UseSoftFloat && Subtarget->hasBWI()) {
1569 addRegisterClass(MVT::v32i16, &X86::VR512RegClass);
1570 addRegisterClass(MVT::v64i8, &X86::VR512RegClass);
1572 addRegisterClass(MVT::v32i1, &X86::VK32RegClass);
1573 addRegisterClass(MVT::v64i1, &X86::VK64RegClass);
1575 setOperationAction(ISD::LOAD, MVT::v32i16, Legal);
1576 setOperationAction(ISD::LOAD, MVT::v64i8, Legal);
1577 setOperationAction(ISD::SETCC, MVT::v32i1, Custom);
1578 setOperationAction(ISD::SETCC, MVT::v64i1, Custom);
1579 setOperationAction(ISD::ADD, MVT::v32i16, Legal);
1580 setOperationAction(ISD::ADD, MVT::v64i8, Legal);
1581 setOperationAction(ISD::SUB, MVT::v32i16, Legal);
1582 setOperationAction(ISD::SUB, MVT::v64i8, Legal);
1583 setOperationAction(ISD::MUL, MVT::v32i16, Legal);
1585 for (int i = MVT::v32i8; i != MVT::v8i64; ++i) {
1586 const MVT VT = (MVT::SimpleValueType)i;
1588 const unsigned EltSize = VT.getVectorElementType().getSizeInBits();
1590 // Do not attempt to promote non-512-bit vectors.
1591 if (!VT.is512BitVector())
1592 continue;
1594 if (EltSize < 32) {
1595 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1596 setOperationAction(ISD::VSELECT, VT, Legal);
1601 if (!TM.Options.UseSoftFloat && Subtarget->hasVLX()) {
1602 addRegisterClass(MVT::v4i1, &X86::VK4RegClass);
1603 addRegisterClass(MVT::v2i1, &X86::VK2RegClass);
1605 setOperationAction(ISD::SETCC, MVT::v4i1, Custom);
1606 setOperationAction(ISD::SETCC, MVT::v2i1, Custom);
1607 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8i1, Legal);
1609 setOperationAction(ISD::AND, MVT::v8i32, Legal);
1610 setOperationAction(ISD::OR, MVT::v8i32, Legal);
1611 setOperationAction(ISD::XOR, MVT::v8i32, Legal);
1612 setOperationAction(ISD::AND, MVT::v4i32, Legal);
1613 setOperationAction(ISD::OR, MVT::v4i32, Legal);
1614 setOperationAction(ISD::XOR, MVT::v4i32, Legal);
1617 // SIGN_EXTEND_INREG is legalized based on the type it extends from, so handle
1618 // the expansion of this operation with custom code.
1619 for (MVT VT : MVT::vector_valuetypes())
1620 setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Custom);
1622 // We want to custom lower some of our intrinsics.
1623 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
1624 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
1625 setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
1626 if (!Subtarget->is64Bit())
1627 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
1629 // Only custom-lower 64-bit SADDO and friends on 64-bit because we don't
1630 // handle type legalization for these operations here.
1632 // FIXME: We really should do custom legalization for addition and
1633 // subtraction on x86-32 once PR3203 is fixed. We really can't do much better
1634 // than generic legalization for 64-bit multiplication-with-overflow, though.
1635 for (unsigned i = 0, e = 3+Subtarget->is64Bit(); i != e; ++i) {
1636 // Add/Sub/Mul with overflow operations are custom lowered.
1637 MVT VT = IntVTs[i];
1638 setOperationAction(ISD::SADDO, VT, Custom);
1639 setOperationAction(ISD::UADDO, VT, Custom);
1640 setOperationAction(ISD::SSUBO, VT, Custom);
1641 setOperationAction(ISD::USUBO, VT, Custom);
1642 setOperationAction(ISD::SMULO, VT, Custom);
1643 setOperationAction(ISD::UMULO, VT, Custom);
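// These custom lowerings select X86 arithmetic nodes that produce both the
// result and an EFLAGS value. For example, a UADDO becomes a single ADD whose
// carry flag feeds the overflow result (read back via SETCC or a branch)
// rather than a separate wide comparison.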
1647 if (!Subtarget->is64Bit()) {
1648 // These libcalls are not available in 32-bit.
1649 setLibcallName(RTLIB::SHL_I128, nullptr);
1650 setLibcallName(RTLIB::SRL_I128, nullptr);
1651 setLibcallName(RTLIB::SRA_I128, nullptr);
1654 // Combine sin / cos into one node or libcall if possible.
1655 if (Subtarget->hasSinCos()) {
1656 setLibcallName(RTLIB::SINCOS_F32, "sincosf");
1657 setLibcallName(RTLIB::SINCOS_F64, "sincos");
1658 if (Subtarget->isTargetDarwin()) {
1659 // For MacOSX, we don't want the normal expansion of a libcall to sincos.
1660 // We want to issue a libcall to __sincos_stret to avoid memory traffic.
1661 setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
1662 setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
1666 if (Subtarget->isTargetWin64()) {
1667 setOperationAction(ISD::SDIV, MVT::i128, Custom);
1668 setOperationAction(ISD::UDIV, MVT::i128, Custom);
1669 setOperationAction(ISD::SREM, MVT::i128, Custom);
1670 setOperationAction(ISD::UREM, MVT::i128, Custom);
1671 setOperationAction(ISD::SDIVREM, MVT::i128, Custom);
1672 setOperationAction(ISD::UDIVREM, MVT::i128, Custom);
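// Rough rationale: x86 has no 128-bit hardware divide, and on Win64 an i128
// cannot be passed in a register pair the way the generic legalizer expects,
// so these operations are custom lowered to calls to the usual 128-bit
// divide/remainder runtime helpers.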
1675 // We have target-specific dag combine patterns for the following nodes:
1676 setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
1677 setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
1678 setTargetDAGCombine(ISD::BITCAST);
1679 setTargetDAGCombine(ISD::VSELECT);
1680 setTargetDAGCombine(ISD::SELECT);
1681 setTargetDAGCombine(ISD::SHL);
1682 setTargetDAGCombine(ISD::SRA);
1683 setTargetDAGCombine(ISD::SRL);
1684 setTargetDAGCombine(ISD::OR);
1685 setTargetDAGCombine(ISD::AND);
1686 setTargetDAGCombine(ISD::ADD);
1687 setTargetDAGCombine(ISD::FADD);
1688 setTargetDAGCombine(ISD::FSUB);
1689 setTargetDAGCombine(ISD::FMA);
1690 setTargetDAGCombine(ISD::SUB);
1691 setTargetDAGCombine(ISD::LOAD);
1692 setTargetDAGCombine(ISD::MLOAD);
1693 setTargetDAGCombine(ISD::STORE);
1694 setTargetDAGCombine(ISD::MSTORE);
1695 setTargetDAGCombine(ISD::ZERO_EXTEND);
1696 setTargetDAGCombine(ISD::ANY_EXTEND);
1697 setTargetDAGCombine(ISD::SIGN_EXTEND);
1698 setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
1699 setTargetDAGCombine(ISD::TRUNCATE);
1700 setTargetDAGCombine(ISD::SINT_TO_FP);
1701 setTargetDAGCombine(ISD::SETCC);
1702 setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
1703 setTargetDAGCombine(ISD::BUILD_VECTOR);
1704 setTargetDAGCombine(ISD::MUL);
1705 setTargetDAGCombine(ISD::XOR);
1707 computeRegisterProperties();
1709 // On Darwin, -Os means optimize for size without hurting performance,
1710 // so do not reduce the limit.
1711 MaxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores
1712 MaxStoresPerMemsetOptSize = Subtarget->isTargetDarwin() ? 16 : 8;
1713 MaxStoresPerMemcpy = 8; // For @llvm.memcpy -> sequence of stores
1714 MaxStoresPerMemcpyOptSize = Subtarget->isTargetDarwin() ? 8 : 4;
1715 MaxStoresPerMemmove = 8; // For @llvm.memmove -> sequence of stores
1716 MaxStoresPerMemmoveOptSize = Subtarget->isTargetDarwin() ? 8 : 4;
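// These limits bound how large a memset/memcpy/memmove may get before inline
// expansion into individual stores gives up and a library call is emitted.
// For example, with MaxStoresPerMemset = 16 and SSE available, a 64-byte
// @llvm.memset can be expanded into four 16-byte vector stores instead of a
// call to memset.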
1717 setPrefLoopAlignment(4); // 2^4 bytes.
1719 // A predictable cmov doesn't hurt on Atom because it's in-order.
1720 PredictableSelectIsExpensive = !Subtarget->isAtom();
1721 EnableExtLdPromotion = true;
1722 setPrefFunctionAlignment(4); // 2^4 bytes.
1724 verifyIntrinsicTables();
1727 // This has so far only been implemented for 64-bit MachO.
1728 bool X86TargetLowering::useLoadStackGuardNode() const {
1729 return Subtarget->isTargetMachO() && Subtarget->is64Bit();
1732 TargetLoweringBase::LegalizeTypeAction
1733 X86TargetLowering::getPreferredVectorAction(EVT VT) const {
1734 if (ExperimentalVectorWideningLegalization &&
1735 VT.getVectorNumElements() != 1 &&
1736 VT.getVectorElementType().getSimpleVT() != MVT::i1)
1737 return TypeWidenVector;
1739 return TargetLoweringBase::getPreferredVectorAction(VT);
1742 EVT X86TargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
1743 if (!VT.isVector())
1744 return Subtarget->hasAVX512() ? MVT::i1 : MVT::i8;
1746 const unsigned NumElts = VT.getVectorNumElements();
1747 const EVT EltVT = VT.getVectorElementType();
1748 if (VT.is512BitVector()) {
1749 if (Subtarget->hasAVX512())
1750 if (EltVT == MVT::i32 || EltVT == MVT::i64 ||
1751 EltVT == MVT::f32 || EltVT == MVT::f64)
1752 switch(NumElts) {
1753 case 8: return MVT::v8i1;
1754 case 16: return MVT::v16i1;
1755 }
1756 if (Subtarget->hasBWI())
1757 if (EltVT == MVT::i8 || EltVT == MVT::i16)
1758 switch(NumElts) {
1759 case 32: return MVT::v32i1;
1760 case 64: return MVT::v64i1;
1761 }
1762 }
1764 if (VT.is256BitVector() || VT.is128BitVector()) {
1765 if (Subtarget->hasVLX())
1766 if (EltVT == MVT::i32 || EltVT == MVT::i64 ||
1767 EltVT == MVT::f32 || EltVT == MVT::f64)
1768 switch(NumElts) {
1769 case 2: return MVT::v2i1;
1770 case 4: return MVT::v4i1;
1771 case 8: return MVT::v8i1;
1772 }
1773 if (Subtarget->hasBWI() && Subtarget->hasVLX())
1774 if (EltVT == MVT::i8 || EltVT == MVT::i16)
1775 switch(NumElts) {
1776 case 8: return MVT::v8i1;
1777 case 16: return MVT::v16i1;
1778 case 32: return MVT::v32i1;
1779 }
1780 }
1782 return VT.changeVectorElementTypeToInteger();
1785 /// Helper for getByValTypeAlignment to determine
1786 /// the desired ByVal argument alignment.
1787 static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign) {
1790 if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
1791 if (VTy->getBitWidth() == 128)
1792 MaxAlign = 16;
1793 } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
1794 unsigned EltAlign = 0;
1795 getMaxByValAlign(ATy->getElementType(), EltAlign);
1796 if (EltAlign > MaxAlign)
1797 MaxAlign = EltAlign;
1798 } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
1799 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1800 unsigned EltAlign = 0;
1801 getMaxByValAlign(STy->getElementType(i), EltAlign);
1802 if (EltAlign > MaxAlign)
1803 MaxAlign = EltAlign;
1810 /// Return the desired alignment for ByVal aggregate
1811 /// function arguments in the caller parameter area. For X86, aggregates
1812 /// that contain SSE vectors are placed at 16-byte boundaries while the rest
1813 /// are at 4-byte boundaries.
1814 unsigned X86TargetLowering::getByValTypeAlignment(Type *Ty) const {
1815 if (Subtarget->is64Bit()) {
1816 // Max of 8 and alignment of type.
1817 unsigned TyAlign = TD->getABITypeAlignment(Ty);
1824 if (Subtarget->hasSSE1())
1825 getMaxByValAlign(Ty, Align);
1829 /// Returns the target-specific optimal type for load
1830 /// and store operations as a result of memset, memcpy, and memmove
1831 /// lowering. If DstAlign is zero, any destination alignment can satisfy the
1832 /// constraint. Similarly, if SrcAlign is zero there is no need to check it
1833 /// against the alignment requirement,
1834 /// probably because the source does not need to be loaded. If 'IsMemset' is
1835 /// true, that means it's expanding a memset. If 'ZeroMemset' is true, that
1836 /// means it's a memset of zero. 'MemcpyStrSrc' indicates whether the memcpy
1837 /// source is constant so it does not need to be loaded.
1838 /// It returns EVT::Other if the type should be determined using generic
1839 /// target-independent logic.
1841 X86TargetLowering::getOptimalMemOpType(uint64_t Size,
1842 unsigned DstAlign, unsigned SrcAlign,
1843 bool IsMemset, bool ZeroMemset,
1845 MachineFunction &MF) const {
1846 const Function *F = MF.getFunction();
1847 if ((!IsMemset || ZeroMemset) &&
1848 !F->hasFnAttribute(Attribute::NoImplicitFloat)) {
1850 (Subtarget->isUnalignedMemAccessFast() ||
1851 ((DstAlign == 0 || DstAlign >= 16) &&
1852 (SrcAlign == 0 || SrcAlign >= 16)))) {
1853 if (Size >= 32) {
1854 if (Subtarget->hasInt256())
1855 return MVT::v8i32;
1856 if (Subtarget->hasFp256())
1857 return MVT::v8f32;
1858 }
1859 if (Subtarget->hasSSE2())
1860 return MVT::v4i32;
1861 if (Subtarget->hasSSE1())
1862 return MVT::v4f32;
1863 } else if (!MemcpyStrSrc && Size >= 8 &&
1864 !Subtarget->is64Bit() &&
1865 Subtarget->hasSSE2()) {
1866 // Do not use f64 to lower memcpy if the source is a string constant. It's
1867 // better to use i32 to avoid the loads.
1868 return MVT::f64;
1871 if (Subtarget->is64Bit() && Size >= 8)
1872 return MVT::i64;
1873 return MVT::i32;
1874 }
1876 bool X86TargetLowering::isSafeMemOpType(MVT VT) const {
1877 if (VT == MVT::f32)
1878 return X86ScalarSSEf32;
1879 else if (VT == MVT::f64)
1880 return X86ScalarSSEf64;
1885 X86TargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
1890 *Fast = Subtarget->isUnalignedMemAccessFast();
1894 /// Return the entry encoding for a jump table in the
1895 /// current function. The returned value is a member of the
1896 /// MachineJumpTableInfo::JTEntryKind enum.
1897 unsigned X86TargetLowering::getJumpTableEncoding() const {
1898 // In GOT pic mode, each entry in the jump table is emitted as a @GOTOFF
1899 // symbol.
1900 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
1901 Subtarget->isPICStyleGOT())
1902 return MachineJumpTableInfo::EK_Custom32;
1904 // Otherwise, use the normal jump table encoding heuristics.
1905 return TargetLowering::getJumpTableEncoding();
1909 X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
1910 const MachineBasicBlock *MBB,
1911 unsigned uid,MCContext &Ctx) const{
1912 assert(MBB->getParent()->getTarget().getRelocationModel() == Reloc::PIC_ &&
1913 Subtarget->isPICStyleGOT());
1914 // In 32-bit ELF systems, our jump table entries are formed with @GOTOFF
1915 // entries.
1916 return MCSymbolRefExpr::Create(MBB->getSymbol(),
1917 MCSymbolRefExpr::VK_GOTOFF, Ctx);
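// The resulting jump-table entry is position independent, e.g. an assembly
// directive like ".long .LBB0_7@GOTOFF" (the label here is just illustrative):
// it stores an offset from the GOT base, and the lowered indirect branch adds
// the PIC base register back in at run time.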
1920 /// Returns relocation base for the given PIC jumptable.
1921 SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table,
1922 SelectionDAG &DAG) const {
1923 if (!Subtarget->is64Bit())
1924 // This doesn't have SDLoc associated with it, but is not really the
1925 // same as a Register.
1926 return DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), getPointerTy());
1930 /// This returns the relocation base for the given PIC jumptable,
1931 /// the same as getPICJumpTableRelocBase, but as an MCExpr.
1932 const MCExpr *X86TargetLowering::
1933 getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI,
1934 MCContext &Ctx) const {
1935 // X86-64 uses RIP relative addressing based on the jump table label.
1936 if (Subtarget->isPICStyleRIPRel())
1937 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
1939 // Otherwise, the reference is relative to the PIC base.
1940 return MCSymbolRefExpr::Create(MF->getPICBaseSymbol(), Ctx);
1943 // FIXME: Why is this routine here? Move to RegInfo!
1944 std::pair<const TargetRegisterClass*, uint8_t>
1945 X86TargetLowering::findRepresentativeClass(MVT VT) const{
1946 const TargetRegisterClass *RRC = nullptr;
1947 uint8_t Cost = 1;
1948 switch (VT.SimpleTy) {
1949 default:
1950 return TargetLowering::findRepresentativeClass(VT);
1951 case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64:
1952 RRC = Subtarget->is64Bit() ? &X86::GR64RegClass : &X86::GR32RegClass;
1953 break;
1954 case MVT::x86mmx:
1955 RRC = &X86::VR64RegClass;
1956 break;
1957 case MVT::f32: case MVT::f64:
1958 case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
1959 case MVT::v4f32: case MVT::v2f64:
1960 case MVT::v32i8: case MVT::v8i32: case MVT::v4i64: case MVT::v8f32:
1962 RRC = &X86::VR128RegClass;
1965 return std::make_pair(RRC, Cost);
1968 bool X86TargetLowering::getStackCookieLocation(unsigned &AddressSpace,
1969 unsigned &Offset) const {
1970 if (!Subtarget->isTargetLinux())
1973 if (Subtarget->is64Bit()) {
1974 // %fs:0x28, unless we're using a Kernel code model, in which case it's %gs:
1976 if (getTargetMachine().getCodeModel() == CodeModel::Kernel)
1988 bool X86TargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
1989 unsigned DestAS) const {
1990 assert(SrcAS != DestAS && "Expected different address spaces!");
1992 return SrcAS < 256 && DestAS < 256;
1995 //===----------------------------------------------------------------------===//
1996 // Return Value Calling Convention Implementation
1997 //===----------------------------------------------------------------------===//
1999 #include "X86GenCallingConv.inc"
2002 X86TargetLowering::CanLowerReturn(CallingConv::ID CallConv,
2003 MachineFunction &MF, bool isVarArg,
2004 const SmallVectorImpl<ISD::OutputArg> &Outs,
2005 LLVMContext &Context) const {
2006 SmallVector<CCValAssign, 16> RVLocs;
2007 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
2008 return CCInfo.CheckReturn(Outs, RetCC_X86);
2011 const MCPhysReg *X86TargetLowering::getScratchRegisters(CallingConv::ID) const {
2012 static const MCPhysReg ScratchRegs[] = { X86::R11, 0 };
2013 return ScratchRegs;
2014 }
2017 X86TargetLowering::LowerReturn(SDValue Chain,
2018 CallingConv::ID CallConv, bool isVarArg,
2019 const SmallVectorImpl<ISD::OutputArg> &Outs,
2020 const SmallVectorImpl<SDValue> &OutVals,
2021 SDLoc dl, SelectionDAG &DAG) const {
2022 MachineFunction &MF = DAG.getMachineFunction();
2023 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
2025 SmallVector<CCValAssign, 16> RVLocs;
2026 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, *DAG.getContext());
2027 CCInfo.AnalyzeReturn(Outs, RetCC_X86);
2029 SDValue Flag;
2030 SmallVector<SDValue, 6> RetOps;
2031 RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
2032 // Operand #1 = Bytes To Pop
2033 RetOps.push_back(DAG.getTargetConstant(FuncInfo->getBytesToPopOnReturn(),
2036 // Copy the result values into the output registers.
2037 for (unsigned i = 0; i != RVLocs.size(); ++i) {
2038 CCValAssign &VA = RVLocs[i];
2039 assert(VA.isRegLoc() && "Can only return in registers!");
2040 SDValue ValToCopy = OutVals[i];
2041 EVT ValVT = ValToCopy.getValueType();
2043 // Promote values to the appropriate types.
2044 if (VA.getLocInfo() == CCValAssign::SExt)
2045 ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), ValToCopy);
2046 else if (VA.getLocInfo() == CCValAssign::ZExt)
2047 ValToCopy = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), ValToCopy);
2048 else if (VA.getLocInfo() == CCValAssign::AExt)
2049 ValToCopy = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), ValToCopy);
2050 else if (VA.getLocInfo() == CCValAssign::BCvt)
2051 ValToCopy = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), ValToCopy);
2053 assert(VA.getLocInfo() != CCValAssign::FPExt &&
2054 "Unexpected FP-extend for return value.");
2056 // If this is x86-64, and we disabled SSE, we can't return FP values,
2057 // or SSE or MMX vectors.
2058 if ((ValVT == MVT::f32 || ValVT == MVT::f64 ||
2059 VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) &&
2060 (Subtarget->is64Bit() && !Subtarget->hasSSE1())) {
2061 report_fatal_error("SSE register return with SSE disabled");
2063 // Likewise we can't return F64 values with SSE1 only. gcc does so, but
2064 // llvm-gcc has never done it right and no one has noticed, so this
2065 // should be OK for now.
2066 if (ValVT == MVT::f64 &&
2067 (Subtarget->is64Bit() && !Subtarget->hasSSE2()))
2068 report_fatal_error("SSE2 register return with SSE2 disabled");
2070 // Returns in ST0/ST1 are handled specially: these are pushed as operands to
2071 // the RET instruction and handled by the FP Stackifier.
2072 if (VA.getLocReg() == X86::FP0 ||
2073 VA.getLocReg() == X86::FP1) {
2074 // If this is a copy from an xmm register to ST(0), use an FPExtend to
2075 // change the value to the FP stack register class.
2076 if (isScalarFPTypeInSSEReg(VA.getValVT()))
2077 ValToCopy = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f80, ValToCopy);
2078 RetOps.push_back(ValToCopy);
2079 // Don't emit a copytoreg.
2083 // 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for v1i64
2084 // which is returned in RAX / RDX.
2085 if (Subtarget->is64Bit()) {
2086 if (ValVT == MVT::x86mmx) {
2087 if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) {
2088 ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ValToCopy);
2089 ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
2091 // If we don't have SSE2 available, convert to v4f32 so the generated
2092 // register is legal.
2093 if (!Subtarget->hasSSE2())
2094 ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32,ValToCopy);
2099 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), ValToCopy, Flag);
2100 Flag = Chain.getValue(1);
2101 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
2104 // The x86-64 ABIs require that for returning structs by value we copy
2105 // the sret argument into %rax/%eax (depending on ABI) for the return.
2106 // Win32 requires us to put the sret argument to %eax as well.
2107 // We saved the argument into a virtual register in the entry block,
2108 // so now we copy the value out and into %rax/%eax.
2110 // Checking Function.hasStructRetAttr() here is insufficient because the IR
2111 // may not have an explicit sret argument. If FuncInfo.CanLowerReturn is
2112 // false, then an sret argument may be implicitly inserted in the SelDAG. In
2113 // either case FuncInfo->setSRetReturnReg() will have been called.
2114 if (unsigned SRetReg = FuncInfo->getSRetReturnReg()) {
2115 assert((Subtarget->is64Bit() || Subtarget->isTargetKnownWindowsMSVC()) &&
2116 "No need for an sret register");
2117 SDValue Val = DAG.getCopyFromReg(Chain, dl, SRetReg, getPointerTy());
2119 unsigned RetValReg
2120 = (Subtarget->is64Bit() && !Subtarget->isTarget64BitILP32()) ?
2121 X86::RAX : X86::EAX;
2122 Chain = DAG.getCopyToReg(Chain, dl, RetValReg, Val, Flag);
2123 Flag = Chain.getValue(1);
2125 // RAX/EAX now acts like a return value.
2126 RetOps.push_back(DAG.getRegister(RetValReg, getPointerTy()));
2129 RetOps[0] = Chain; // Update chain.
2131 // Add the flag if we have it.
2132 if (Flag.getNode())
2133 RetOps.push_back(Flag);
2135 return DAG.getNode(X86ISD::RET_FLAG, dl, MVT::Other, RetOps);
2138 bool X86TargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
2139 if (N->getNumValues() != 1)
2140 return false;
2141 if (!N->hasNUsesOfValue(1, 0))
2142 return false;
2144 SDValue TCChain = Chain;
2145 SDNode *Copy = *N->use_begin();
2146 if (Copy->getOpcode() == ISD::CopyToReg) {
2147 // If the copy has a glue operand, we conservatively assume it isn't safe to
2148 // perform a tail call.
2149 if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
2151 TCChain = Copy->getOperand(0);
2152 } else if (Copy->getOpcode() != ISD::FP_EXTEND)
2155 bool HasRet = false;
2156 for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end();
2158 if (UI->getOpcode() != X86ISD::RET_FLAG)
2160 // If we are returning more than one value, we can definitely
2161 // not make a tail call; see PR19530.
2162 if (UI->getNumOperands() > 4)
2164 if (UI->getNumOperands() == 4 &&
2165 UI->getOperand(UI->getNumOperands()-1).getValueType() != MVT::Glue)
2178 X86TargetLowering::getTypeForExtArgOrReturn(LLVMContext &Context, EVT VT,
2179 ISD::NodeType ExtendKind) const {
2181 // TODO: Is this also valid on 32-bit?
2182 if (Subtarget->is64Bit() && VT == MVT::i1 && ExtendKind == ISD::ZERO_EXTEND)
2183 ReturnMVT = MVT::i8;
2185 ReturnMVT = MVT::i32;
2187 EVT MinVT = getRegisterType(Context, ReturnMVT);
2188 return VT.bitsLT(MinVT) ? MinVT : VT;
2191 /// Lower the result values of a call into the
2192 /// appropriate copies out of appropriate physical registers.
2195 X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
2196 CallingConv::ID CallConv, bool isVarArg,
2197 const SmallVectorImpl<ISD::InputArg> &Ins,
2198 SDLoc dl, SelectionDAG &DAG,
2199 SmallVectorImpl<SDValue> &InVals) const {
2201 // Assign locations to each value returned by this call.
2202 SmallVector<CCValAssign, 16> RVLocs;
2203 bool Is64Bit = Subtarget->is64Bit();
2204 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
2206 CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
2208 // Copy all of the result registers out of their specified physreg.
2209 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
2210 CCValAssign &VA = RVLocs[i];
2211 EVT CopyVT = VA.getValVT();
2213 // If this is x86-64, and we disabled SSE, we can't return FP values
2214 if ((CopyVT == MVT::f32 || CopyVT == MVT::f64) &&
2215 ((Is64Bit || Ins[i].Flags.isInReg()) && !Subtarget->hasSSE1())) {
2216 report_fatal_error("SSE register return with SSE disabled");
2219 // If we prefer to use the value in xmm registers, copy it out as f80 and
2220 // use a truncate to move it from fp stack reg to xmm reg.
2221 if ((VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1) &&
2222 isScalarFPTypeInSSEReg(VA.getValVT()))
2223 CopyVT = MVT::f80;
2225 Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(),
2226 CopyVT, InFlag).getValue(1);
2227 SDValue Val = Chain.getValue(0);
2229 if (CopyVT != VA.getValVT())
2230 Val = DAG.getNode(ISD::FP_ROUND, dl, VA.getValVT(), Val,
2231 // This truncation won't change the value.
2232 DAG.getIntPtrConstant(1));
2234 InFlag = Chain.getValue(2);
2235 InVals.push_back(Val);
2241 //===----------------------------------------------------------------------===//
2242 // C & StdCall & Fast Calling Convention implementation
2243 //===----------------------------------------------------------------------===//
2244 // The StdCall calling convention is the standard for many Windows API
2245 // routines. It differs from the C calling convention only slightly: the
2246 // callee, not the caller, cleans up the stack, and symbols are decorated
2247 // differently. It doesn't support any vector arguments.
2248 // For info on fast calling convention see Fast Calling Convention (tail call)
2249 // implementation LowerX86_32FastCCCallTo.
2251 /// CallIsStructReturn - Determines whether a call uses struct return
2252 /// semantics.
2253 enum StructReturnType {
2254 NotStructReturn,
2255 RegStructReturn,
2256 StackStructReturn
2257 };
2258 static StructReturnType
2259 callIsStructReturn(const SmallVectorImpl<ISD::OutputArg> &Outs) {
2260 if (Outs.empty())
2261 return NotStructReturn;
2263 const ISD::ArgFlagsTy &Flags = Outs[0].Flags;
2264 if (!Flags.isSRet())
2265 return NotStructReturn;
2266 if (Flags.isInReg())
2267 return RegStructReturn;
2268 return StackStructReturn;
2271 /// Determines whether a function uses struct return semantics.
2272 static StructReturnType
2273 argsAreStructReturn(const SmallVectorImpl<ISD::InputArg> &Ins) {
2274 if (Ins.empty())
2275 return NotStructReturn;
2277 const ISD::ArgFlagsTy &Flags = Ins[0].Flags;
2278 if (!Flags.isSRet())
2279 return NotStructReturn;
2280 if (Flags.isInReg())
2281 return RegStructReturn;
2282 return StackStructReturn;
2285 /// Make a copy of an aggregate at address specified by "Src" to address
2286 /// "Dst" with size and alignment information specified by the specific
2287 /// parameter attribute. The copy will be passed as a byval function parameter.
2289 CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
2290 ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
2292 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
2294 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
2295 /*isVolatile*/false, /*AlwaysInline=*/true,
2296 MachinePointerInfo(), MachinePointerInfo());
2299 /// Return true if the calling convention is one that
2300 /// supports tail call optimization.
2301 static bool IsTailCallConvention(CallingConv::ID CC) {
2302 return (CC == CallingConv::Fast || CC == CallingConv::GHC ||
2303 CC == CallingConv::HiPE);
2306 /// \brief Return true if the calling convention is a C calling convention.
2307 static bool IsCCallConvention(CallingConv::ID CC) {
2308 return (CC == CallingConv::C || CC == CallingConv::X86_64_Win64 ||
2309 CC == CallingConv::X86_64_SysV);
2312 bool X86TargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
2313 if (!CI->isTailCall() || getTargetMachine().Options.DisableTailCalls)
2314 return false;
2316 CallSite CS(CI);
2317 CallingConv::ID CalleeCC = CS.getCallingConv();
2318 if (!IsTailCallConvention(CalleeCC) && !IsCCallConvention(CalleeCC))
2319 return false;
2321 return true;
2324 /// Return true if the function is being made into
2325 /// a tailcall target by changing its ABI.
2326 static bool FuncIsMadeTailCallSafe(CallingConv::ID CC,
2327 bool GuaranteedTailCallOpt) {
2328 return GuaranteedTailCallOpt && IsTailCallConvention(CC);
2332 X86TargetLowering::LowerMemArgument(SDValue Chain,
2333 CallingConv::ID CallConv,
2334 const SmallVectorImpl<ISD::InputArg> &Ins,
2335 SDLoc dl, SelectionDAG &DAG,
2336 const CCValAssign &VA,
2337 MachineFrameInfo *MFI,
2339 // Create the nodes corresponding to a load from this parameter slot.
2340 ISD::ArgFlagsTy Flags = Ins[i].Flags;
2341 bool AlwaysUseMutable = FuncIsMadeTailCallSafe(
2342 CallConv, DAG.getTarget().Options.GuaranteedTailCallOpt);
2343 bool isImmutable = !AlwaysUseMutable && !Flags.isByVal();
2346 // If the value is passed by pointer, we have the address passed instead of
2347 // the value itself.
2348 if (VA.getLocInfo() == CCValAssign::Indirect)
2349 ValVT = VA.getLocVT();
2351 ValVT = VA.getValVT();
2353 // FIXME: For now, all byval parameter objects are marked mutable. This can be
2354 // changed with more analysis.
2355 // In case of tail call optimization, mark all arguments mutable, since they
2356 // could be overwritten by the lowering of arguments in case of a tail call.
2357 if (Flags.isByVal()) {
2358 unsigned Bytes = Flags.getByValSize();
2359 if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects.
2360 int FI = MFI->CreateFixedObject(Bytes, VA.getLocMemOffset(), isImmutable);
2361 return DAG.getFrameIndex(FI, getPointerTy());
2363 int FI = MFI->CreateFixedObject(ValVT.getSizeInBits()/8,
2364 VA.getLocMemOffset(), isImmutable);
2365 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
2366 return DAG.getLoad(ValVT, dl, Chain, FIN,
2367 MachinePointerInfo::getFixedStack(FI),
2368 false, false, false, 0);
2372 // FIXME: Get this from tablegen.
2373 static ArrayRef<MCPhysReg> get64BitArgumentGPRs(CallingConv::ID CallConv,
2374 const X86Subtarget *Subtarget) {
2375 assert(Subtarget->is64Bit());
2377 if (Subtarget->isCallingConvWin64(CallConv)) {
2378 static const MCPhysReg GPR64ArgRegsWin64[] = {
2379 X86::RCX, X86::RDX, X86::R8, X86::R9
2381 return makeArrayRef(std::begin(GPR64ArgRegsWin64), std::end(GPR64ArgRegsWin64));
2384 static const MCPhysReg GPR64ArgRegs64Bit[] = {
2385 X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
2387 return makeArrayRef(std::begin(GPR64ArgRegs64Bit), std::end(GPR64ArgRegs64Bit));
2390 // FIXME: Get this from tablegen.
2391 static ArrayRef<MCPhysReg> get64BitArgumentXMMs(MachineFunction &MF,
2392 CallingConv::ID CallConv,
2393 const X86Subtarget *Subtarget) {
2394 assert(Subtarget->is64Bit());
2395 if (Subtarget->isCallingConvWin64(CallConv)) {
2396 // The XMM registers which might contain var arg parameters are shadowed
2397 // in their paired GPR. So we only need to save the GPRs to their home
2398 // slots.
2399 // TODO: __vectorcall will change this.
2400 return None;
2401 }
2403 const Function *Fn = MF.getFunction();
2404 bool NoImplicitFloatOps = Fn->hasFnAttribute(Attribute::NoImplicitFloat);
2405 assert(!(MF.getTarget().Options.UseSoftFloat && NoImplicitFloatOps) &&
2406 "SSE register cannot be used when SSE is disabled!");
2407 if (MF.getTarget().Options.UseSoftFloat || NoImplicitFloatOps ||
2408 !Subtarget->hasSSE1())
2409 // Kernel mode asks for SSE to be disabled, so there are no XMM argument
2410 // registers.
2411 return None;
2413 static const MCPhysReg XMMArgRegs64Bit[] = {
2414 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
2415 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
2417 return makeArrayRef(std::begin(XMMArgRegs64Bit), std::end(XMMArgRegs64Bit));
2421 X86TargetLowering::LowerFormalArguments(SDValue Chain,
2422 CallingConv::ID CallConv,
2424 const SmallVectorImpl<ISD::InputArg> &Ins,
2427 SmallVectorImpl<SDValue> &InVals)
2429 MachineFunction &MF = DAG.getMachineFunction();
2430 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
2432 const Function* Fn = MF.getFunction();
2433 if (Fn->hasExternalLinkage() &&
2434 Subtarget->isTargetCygMing() &&
2435 Fn->getName() == "main")
2436 FuncInfo->setForceFramePointer(true);
2438 MachineFrameInfo *MFI = MF.getFrameInfo();
2439 bool Is64Bit = Subtarget->is64Bit();
2440 bool IsWin64 = Subtarget->isCallingConvWin64(CallConv);
2442 assert(!(isVarArg && IsTailCallConvention(CallConv)) &&
2443 "Var args not supported with calling convention fastcc, ghc or hipe");
2445 // Assign locations to all of the incoming arguments.
2446 SmallVector<CCValAssign, 16> ArgLocs;
2447 CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
2449 // Allocate shadow area for Win64
2450 if (IsWin64)
2451 CCInfo.AllocateStack(32, 8);
2453 CCInfo.AnalyzeFormalArguments(Ins, CC_X86);
2455 unsigned LastVal = ~0U;
2456 SDValue ArgValue;
2457 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2458 CCValAssign &VA = ArgLocs[i];
2459 // TODO: If an arg is passed in two places (e.g. reg and stack), skip later
2460 // places.
2461 assert(VA.getValNo() != LastVal &&
2462 "Don't support value assigned to multiple locs yet");
2464 LastVal = VA.getValNo();
2466 if (VA.isRegLoc()) {
2467 EVT RegVT = VA.getLocVT();
2468 const TargetRegisterClass *RC;
2469 if (RegVT == MVT::i32)
2470 RC = &X86::GR32RegClass;
2471 else if (Is64Bit && RegVT == MVT::i64)
2472 RC = &X86::GR64RegClass;
2473 else if (RegVT == MVT::f32)
2474 RC = &X86::FR32RegClass;
2475 else if (RegVT == MVT::f64)
2476 RC = &X86::FR64RegClass;
2477 else if (RegVT.is512BitVector())
2478 RC = &X86::VR512RegClass;
2479 else if (RegVT.is256BitVector())
2480 RC = &X86::VR256RegClass;
2481 else if (RegVT.is128BitVector())
2482 RC = &X86::VR128RegClass;
2483 else if (RegVT == MVT::x86mmx)
2484 RC = &X86::VR64RegClass;
2485 else if (RegVT == MVT::i1)
2486 RC = &X86::VK1RegClass;
2487 else if (RegVT == MVT::v8i1)
2488 RC = &X86::VK8RegClass;
2489 else if (RegVT == MVT::v16i1)
2490 RC = &X86::VK16RegClass;
2491 else if (RegVT == MVT::v32i1)
2492 RC = &X86::VK32RegClass;
2493 else if (RegVT == MVT::v64i1)
2494 RC = &X86::VK64RegClass;
2496 llvm_unreachable("Unknown argument type!");
2498 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
2499 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
2501 // If this is an 8 or 16-bit value, it is really passed promoted to 32
2502 // bits. Insert an assert[sz]ext to capture this, then truncate to the
2503 // right size.
2504 if (VA.getLocInfo() == CCValAssign::SExt)
2505 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
2506 DAG.getValueType(VA.getValVT()));
2507 else if (VA.getLocInfo() == CCValAssign::ZExt)
2508 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
2509 DAG.getValueType(VA.getValVT()));
2510 else if (VA.getLocInfo() == CCValAssign::BCvt)
2511 ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
2513 if (VA.isExtInLoc()) {
2514 // Handle MMX values passed in XMM regs.
2515 if (RegVT.isVector())
2516 ArgValue = DAG.getNode(X86ISD::MOVDQ2Q, dl, VA.getValVT(), ArgValue);
2518 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
2521 assert(VA.isMemLoc());
2522 ArgValue = LowerMemArgument(Chain, CallConv, Ins, dl, DAG, VA, MFI, i);
2525 // If value is passed via pointer - do a load.
2526 if (VA.getLocInfo() == CCValAssign::Indirect)
2527 ArgValue = DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue,
2528 MachinePointerInfo(), false, false, false, 0);
2530 InVals.push_back(ArgValue);
2533 if (Subtarget->is64Bit() || Subtarget->isTargetKnownWindowsMSVC()) {
2534 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2535 // The x86-64 ABIs require that for returning structs by value we copy
2536 // the sret argument into %rax/%eax (depending on ABI) for the return.
2537 // Win32 requires us to put the sret argument to %eax as well.
2538 // Save the argument into a virtual register so that we can access it
2539 // from the return points.
2540 if (Ins[i].Flags.isSRet()) {
2541 unsigned Reg = FuncInfo->getSRetReturnReg();
2542 if (Reg == 0) {
2543 MVT PtrTy = getPointerTy();
2544 Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrTy));
2545 FuncInfo->setSRetReturnReg(Reg);
2547 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[i]);
2548 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
2554 unsigned StackSize = CCInfo.getNextStackOffset();
2555 // Align stack specially for tail calls.
2556 if (FuncIsMadeTailCallSafe(CallConv,
2557 MF.getTarget().Options.GuaranteedTailCallOpt))
2558 StackSize = GetAlignedArgumentStackSize(StackSize, DAG);
2560 // If the function takes a variable number of arguments, make a frame index
2561 // for the start of the first vararg value... for expansion of llvm.va_start.
2561 // the start of the first vararg value... for expansion of llvm.va_start. We
2562 // can skip this if there are no va_start calls.
2563 if (MFI->hasVAStart() &&
2564 (Is64Bit || (CallConv != CallingConv::X86_FastCall &&
2565 CallConv != CallingConv::X86_ThisCall))) {
2566 FuncInfo->setVarArgsFrameIndex(
2567 MFI->CreateFixedObject(1, StackSize, true));
2570 // Figure out if XMM registers are in use.
2571 assert(!(MF.getTarget().Options.UseSoftFloat &&
2572 Fn->hasFnAttribute(Attribute::NoImplicitFloat)) &&
2573 "SSE register cannot be used when SSE is disabled!");
2575 // 64-bit calling conventions support varargs and register parameters, so we
2576 // have to do extra work to spill them in the prologue.
2577 if (Is64Bit && isVarArg && MFI->hasVAStart()) {
2578 // Find the first unallocated argument registers.
2579 ArrayRef<MCPhysReg> ArgGPRs = get64BitArgumentGPRs(CallConv, Subtarget);
2580 ArrayRef<MCPhysReg> ArgXMMs = get64BitArgumentXMMs(MF, CallConv, Subtarget);
2581 unsigned NumIntRegs =
2582 CCInfo.getFirstUnallocated(ArgGPRs.data(), ArgGPRs.size());
2583 unsigned NumXMMRegs =
2584 CCInfo.getFirstUnallocated(ArgXMMs.data(), ArgXMMs.size());
2585 assert(!(NumXMMRegs && !Subtarget->hasSSE1()) &&
2586 "SSE register cannot be used when SSE is disabled!");
2588 // Gather all the live in physical registers.
2589 SmallVector<SDValue, 6> LiveGPRs;
2590 SmallVector<SDValue, 8> LiveXMMRegs;
2591 SDValue ALVal;
2592 for (MCPhysReg Reg : ArgGPRs.slice(NumIntRegs)) {
2593 unsigned GPR = MF.addLiveIn(Reg, &X86::GR64RegClass);
2595 DAG.getCopyFromReg(Chain, dl, GPR, MVT::i64));
2597 if (!ArgXMMs.empty()) {
2598 unsigned AL = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
2599 ALVal = DAG.getCopyFromReg(Chain, dl, AL, MVT::i8);
2600 for (MCPhysReg Reg : ArgXMMs.slice(NumXMMRegs)) {
2601 unsigned XMMReg = MF.addLiveIn(Reg, &X86::VR128RegClass);
2602 LiveXMMRegs.push_back(
2603 DAG.getCopyFromReg(Chain, dl, XMMReg, MVT::v4f32));
2608 const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
2609 // Get to the caller-allocated home save location. Add 8 to account
2610 // for the return address.
2611 int HomeOffset = TFI.getOffsetOfLocalArea() + 8;
2612 FuncInfo->setRegSaveFrameIndex(
2613 MFI->CreateFixedObject(1, NumIntRegs * 8 + HomeOffset, false));
2614 // Fixup to set vararg frame on shadow area (4 x i64).
2615 if (NumIntRegs < 4)
2616 FuncInfo->setVarArgsFrameIndex(FuncInfo->getRegSaveFrameIndex());
2618 // For X86-64, if there are vararg parameters that are passed via
2619 // registers, then we must store them to their spots on the stack so
2620 // they may be loaded by dereferencing the result of va_next.
2621 FuncInfo->setVarArgsGPOffset(NumIntRegs * 8);
2622 FuncInfo->setVarArgsFPOffset(ArgGPRs.size() * 8 + NumXMMRegs * 16);
2623 FuncInfo->setRegSaveFrameIndex(MFI->CreateStackObject(
2624 ArgGPRs.size() * 8 + ArgXMMs.size() * 16, 16, false));
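// This register save area mirrors the SysV x86-64 va_list layout: 6 GPRs * 8
// bytes followed by 8 XMM registers * 16 bytes, i.e. 176 bytes when SSE is
// enabled. va_arg then indexes into it using gp_offset/fp_offset, which is
// what the GP/FP offsets recorded just above correspond to.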
2627 // Store the integer parameter registers.
2628 SmallVector<SDValue, 8> MemOps;
2629 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
2631 unsigned Offset = FuncInfo->getVarArgsGPOffset();
2632 for (SDValue Val : LiveGPRs) {
2633 SDValue FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), RSFIN,
2634 DAG.getIntPtrConstant(Offset));
2635 SDValue Store =
2636 DAG.getStore(Val.getValue(1), dl, Val, FIN,
2637 MachinePointerInfo::getFixedStack(
2638 FuncInfo->getRegSaveFrameIndex(), Offset),
2640 MemOps.push_back(Store);
2644 if (!ArgXMMs.empty() && NumXMMRegs != ArgXMMs.size()) {
2645 // Now store the XMM (fp + vector) parameter registers.
2646 SmallVector<SDValue, 12> SaveXMMOps;
2647 SaveXMMOps.push_back(Chain);
2648 SaveXMMOps.push_back(ALVal);
2649 SaveXMMOps.push_back(DAG.getIntPtrConstant(
2650 FuncInfo->getRegSaveFrameIndex()));
2651 SaveXMMOps.push_back(DAG.getIntPtrConstant(
2652 FuncInfo->getVarArgsFPOffset()));
2653 SaveXMMOps.insert(SaveXMMOps.end(), LiveXMMRegs.begin(),
2655 MemOps.push_back(DAG.getNode(X86ISD::VASTART_SAVE_XMM_REGS, dl,
2656 MVT::Other, SaveXMMOps));
2659 if (!MemOps.empty())
2660 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
2663 if (isVarArg && MFI->hasMustTailInVarArgFunc()) {
2664 // Find the largest legal vector type.
2665 MVT VecVT = MVT::Other;
2666 // FIXME: Only some x86_32 calling conventions support AVX512.
2667 if (Subtarget->hasAVX512() &&
2668 (Is64Bit || (CallConv == CallingConv::X86_VectorCall ||
2669 CallConv == CallingConv::Intel_OCL_BI)))
2670 VecVT = MVT::v16f32;
2671 else if (Subtarget->hasAVX())
2672 VecVT = MVT::v8f32;
2673 else if (Subtarget->hasSSE2())
2674 VecVT = MVT::v4f32;
2676 // We forward some GPRs and some vector types.
2677 SmallVector<MVT, 2> RegParmTypes;
2678 MVT IntVT = Is64Bit ? MVT::i64 : MVT::i32;
2679 RegParmTypes.push_back(IntVT);
2680 if (VecVT != MVT::Other)
2681 RegParmTypes.push_back(VecVT);
2683 // Compute the set of forwarded registers. The rest are scratch.
2684 SmallVectorImpl<ForwardedRegister> &Forwards =
2685 FuncInfo->getForwardedMustTailRegParms();
2686 CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, CC_X86);
2688 // Conservatively forward AL on x86_64, since it might be used for varargs.
2689 if (Is64Bit && !CCInfo.isAllocated(X86::AL)) {
2690 unsigned ALVReg = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
2691 Forwards.push_back(ForwardedRegister(ALVReg, X86::AL, MVT::i8));
2694 // Copy all forwards from physical to virtual registers.
2695 for (ForwardedRegister &F : Forwards) {
2696 // FIXME: Can we use a less constrained schedule?
2697 SDValue RegVal = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT);
2698 F.VReg = MF.getRegInfo().createVirtualRegister(getRegClassFor(F.VT));
2699 Chain = DAG.getCopyToReg(Chain, dl, F.VReg, RegVal);
2703 // Some CCs need callee pop.
2704 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
2705 MF.getTarget().Options.GuaranteedTailCallOpt)) {
2706 FuncInfo->setBytesToPopOnReturn(StackSize); // Callee pops everything.
2708 FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing.
2709 // If this is an sret function, the return should pop the hidden pointer.
2710 if (!Is64Bit && !IsTailCallConvention(CallConv) &&
2711 !Subtarget->getTargetTriple().isOSMSVCRT() &&
2712 argsAreStructReturn(Ins) == StackStructReturn)
2713 FuncInfo->setBytesToPopOnReturn(4);
2717 // RegSaveFrameIndex is X86-64 only.
2718 FuncInfo->setRegSaveFrameIndex(0xAAAAAAA);
2719 if (CallConv == CallingConv::X86_FastCall ||
2720 CallConv == CallingConv::X86_ThisCall)
2721 // fastcc functions can't have varargs.
2722 FuncInfo->setVarArgsFrameIndex(0xAAAAAAA);
2725 FuncInfo->setArgumentStackSize(StackSize);
2731 X86TargetLowering::LowerMemOpCallTo(SDValue Chain,
2732 SDValue StackPtr, SDValue Arg,
2733 SDLoc dl, SelectionDAG &DAG,
2734 const CCValAssign &VA,
2735 ISD::ArgFlagsTy Flags) const {
2736 unsigned LocMemOffset = VA.getLocMemOffset();
2737 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
2738 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
2739 if (Flags.isByVal())
2740 return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl);
2742 return DAG.getStore(Chain, dl, Arg, PtrOff,
2743 MachinePointerInfo::getStack(LocMemOffset),
2747 /// Emit a load of the return address if tail call
2748 /// optimization is performed and it is required.
2750 X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG,
2751 SDValue &OutRetAddr, SDValue Chain,
2752 bool IsTailCall, bool Is64Bit,
2753 int FPDiff, SDLoc dl) const {
2754 // Adjust the Return address stack slot.
2755 EVT VT = getPointerTy();
2756 OutRetAddr = getReturnAddressFrameIndex(DAG);
2758 // Load the "old" Return address.
2759 OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, MachinePointerInfo(),
2760 false, false, false, 0);
2761 return SDValue(OutRetAddr.getNode(), 1);
2764 /// Emit a store of the return address if tail call
2765 /// optimization is performed and it is required (FPDiff!=0).
2766 static SDValue EmitTailCallStoreRetAddr(SelectionDAG &DAG, MachineFunction &MF,
2767 SDValue Chain, SDValue RetAddrFrIdx,
2768 EVT PtrVT, unsigned SlotSize,
2769 int FPDiff, SDLoc dl) {
2770 // Store the return address to the appropriate stack slot.
2771 if (!FPDiff) return Chain;
2772 // Calculate the new stack slot for the return address.
2773 int NewReturnAddrFI =
2774 MF.getFrameInfo()->CreateFixedObject(SlotSize, (int64_t)FPDiff - SlotSize,
2776 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, PtrVT);
2777 Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx,
2778 MachinePointerInfo::getFixedStack(NewReturnAddrFI),
2784 X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
2785 SmallVectorImpl<SDValue> &InVals) const {
2786 SelectionDAG &DAG = CLI.DAG;
2787 SDLoc &dl = CLI.DL;
2788 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
2789 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
2790 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
2791 SDValue Chain = CLI.Chain;
2792 SDValue Callee = CLI.Callee;
2793 CallingConv::ID CallConv = CLI.CallConv;
2794 bool &isTailCall = CLI.IsTailCall;
2795 bool isVarArg = CLI.IsVarArg;
2797 MachineFunction &MF = DAG.getMachineFunction();
2798 bool Is64Bit = Subtarget->is64Bit();
2799 bool IsWin64 = Subtarget->isCallingConvWin64(CallConv);
2800 StructReturnType SR = callIsStructReturn(Outs);
2801 bool IsSibcall = false;
2802 X86MachineFunctionInfo *X86Info = MF.getInfo<X86MachineFunctionInfo>();
2804 if (MF.getTarget().Options.DisableTailCalls)
2805 isTailCall = false;
2807 bool IsMustTail = CLI.CS && CLI.CS->isMustTailCall();
2808 if (IsMustTail) {
2809 // Force this to be a tail call. The verifier rules are enough to ensure
2810 // that we can lower this successfully without moving the return address
2811 // around.
2812 isTailCall = true;
2813 } else if (isTailCall) {
2814 // Check if it's really possible to do a tail call.
2815 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
2816 isVarArg, SR != NotStructReturn,
2817 MF.getFunction()->hasStructRetAttr(), CLI.RetTy,
2818 Outs, OutVals, Ins, DAG);
2820 // Sibcalls are automatically detected tailcalls which do not require
2821 // ABI changes.
2822 if (!MF.getTarget().Options.GuaranteedTailCallOpt && isTailCall)
2829 assert(!(isVarArg && IsTailCallConvention(CallConv)) &&
2830 "Var args not supported with calling convention fastcc, ghc or hipe");
2832 // Analyze operands of the call, assigning locations to each operand.
2833 SmallVector<CCValAssign, 16> ArgLocs;
2834 CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
2836 // Allocate shadow area for Win64
2838 CCInfo.AllocateStack(32, 8);
2840 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
2842 // Get a count of how many bytes are to be pushed on the stack.
2843 unsigned NumBytes = CCInfo.getNextStackOffset();
2845 // This is a sibcall. The memory operands are available in caller's
2846 // own caller's stack.
2848 else if (MF.getTarget().Options.GuaranteedTailCallOpt &&
2849 IsTailCallConvention(CallConv))
2850 NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
2853 if (isTailCall && !IsSibcall && !IsMustTail) {
2854 // Lower arguments at fp - stackoffset + fpdiff.
2855 unsigned NumBytesCallerPushed = X86Info->getBytesToPopOnReturn();
2857 FPDiff = NumBytesCallerPushed - NumBytes;
2859 // Set the delta of movement of the returnaddr stackslot.
2860 // But only set if delta is greater than previous delta.
2861 if (FPDiff < X86Info->getTCReturnAddrDelta())
2862 X86Info->setTCReturnAddrDelta(FPDiff);
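// Illustrative example (not from the original source): if the caller's own
// incoming argument area that it pops on return is 8 bytes and this call
// needs 24 bytes of outgoing arguments, FPDiff is 8 - 24 = -16, i.e. the
// return address slot has to move 16 bytes further down the stack.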
2865 unsigned NumBytesToPush = NumBytes;
2866 unsigned NumBytesToPop = NumBytes;
2868 // If we have an inalloca argument, all stack space has already been allocated
2869 // for us and will be right at the top of the stack. We don't support multiple
2870 // arguments passed in memory when using inalloca.
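// Illustrative IR sketch (assumed syntax, not from the original source):
//   %argmem = alloca inalloca <{ i32 }>
//   call void @f(<{ i32 }>* inalloca %argmem)
// Here the frontend has already carved out the argument memory at the top of
// the stack, so no further outgoing-argument space is allocated below.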
2871 if (!Outs.empty() && Outs.back().Flags.isInAlloca()) {
2873 if (!ArgLocs.back().isMemLoc())
2874 report_fatal_error("cannot use inalloca attribute on a register "
2876 if (ArgLocs.back().getLocMemOffset() != 0)
2877 report_fatal_error("any parameter with the inalloca attribute must be "
2878 "the only memory argument");
2882 Chain = DAG.getCALLSEQ_START(
2883 Chain, DAG.getIntPtrConstant(NumBytesToPush, true), dl);
2885 SDValue RetAddrFrIdx;
2886 // Load return address for tail calls.
2887 if (isTailCall && FPDiff)
2888 Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, isTailCall,
2889 Is64Bit, FPDiff, dl);
2891 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
2892 SmallVector<SDValue, 8> MemOpChains;
2895 // Walk the register/memloc assignments, inserting copies/loads. In the case
2896 // of tail call optimization, arguments are handled later.
2897 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
2898 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2899 // Skip inalloca arguments, they have already been written.
2900 ISD::ArgFlagsTy Flags = Outs[i].Flags;
2901 if (Flags.isInAlloca())
2904 CCValAssign &VA = ArgLocs[i];
2905 EVT RegVT = VA.getLocVT();
2906 SDValue Arg = OutVals[i];
2907 bool isByVal = Flags.isByVal();
2909 // Promote the value if needed.
2910 switch (VA.getLocInfo()) {
2911 default: llvm_unreachable("Unknown loc info!");
2912 case CCValAssign::Full: break;
2913 case CCValAssign::SExt:
2914 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, RegVT, Arg);
2916 case CCValAssign::ZExt:
2917 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, RegVT, Arg);
2919 case CCValAssign::AExt:
2920 if (RegVT.is128BitVector()) {
2921 // Special case: passing MMX values in XMM registers.
2922 Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);
2923 Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg);
2924 Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg);
2926 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, RegVT, Arg);
2928 case CCValAssign::BCvt:
2929 Arg = DAG.getNode(ISD::BITCAST, dl, RegVT, Arg);
2931 case CCValAssign::Indirect: {
2932 // Store the argument.
2933 SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
2934 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
2935 Chain = DAG.getStore(Chain, dl, Arg, SpillSlot,
2936 MachinePointerInfo::getFixedStack(FI),
2943 if (VA.isRegLoc()) {
2944 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
2945 if (isVarArg && IsWin64) {
2946 // The Win64 ABI requires arguments passed in XMM registers to also be copied
2947 // to the corresponding shadow integer register if the callee is varargs.
2948 unsigned ShadowReg = 0;
2949 switch (VA.getLocReg()) {
2950 case X86::XMM0: ShadowReg = X86::RCX; break;
2951 case X86::XMM1: ShadowReg = X86::RDX; break;
2952 case X86::XMM2: ShadowReg = X86::R8; break;
2953 case X86::XMM3: ShadowReg = X86::R9; break;
2956 RegsToPass.push_back(std::make_pair(ShadowReg, Arg));
2958 } else if (!IsSibcall && (!isTailCall || isByVal)) {
2959 assert(VA.isMemLoc());
2960 if (!StackPtr.getNode())
2961 StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
2963 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
2964 dl, DAG, VA, Flags));
2968 if (!MemOpChains.empty())
2969 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
2971 if (Subtarget->isPICStyleGOT()) {
2972 // ELF / PIC requires GOT in the EBX register before function calls via PLT
2975 RegsToPass.push_back(std::make_pair(unsigned(X86::EBX),
2976 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), getPointerTy())));
2978 // If we are tail calling and generating PIC/GOT style code load the
2979 // address of the callee into ECX. The value in ecx is used as target of
2980 // the tail jump. This is done to circumvent the ebx/callee-saved problem
2981 // for tail calls on PIC/GOT architectures. Normally we would just put the
2982 // address of GOT into ebx and then call target@PLT. But for tail calls
2983 // ebx would be restored (since ebx is callee saved) before jumping to the target.
2986 // Note: The actual moving to ECX is done further down.
2987 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
2988 if (G && !G->getGlobal()->hasHiddenVisibility() &&
2989 !G->getGlobal()->hasProtectedVisibility())
2990 Callee = LowerGlobalAddress(Callee, DAG);
2991 else if (isa<ExternalSymbolSDNode>(Callee))
2992 Callee = LowerExternalSymbol(Callee, DAG);
2996 if (Is64Bit && isVarArg && !IsWin64 && !IsMustTail) {
2997 // From AMD64 ABI document:
2998 // For calls that may call functions that use varargs or stdargs
2999 // (prototype-less calls or calls to functions containing ellipsis (...) in
3000 // the declaration) %al is used as hidden argument to specify the number
3001 // of SSE registers used. The contents of %al do not need to match exactly
3002 // the number of registers, but must be an upper bound on the number of SSE
3003 // registers used and must be in the range 0 - 8 inclusive.
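// Illustrative example (not from the original source): for a call such as
// printf("%f\n", x), where one argument is passed in an XMM register, the
// lowering effectively emits "movb $1, %al" before the call instruction.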
3005 // Count the number of XMM registers allocated.
3006 static const MCPhysReg XMMArgRegs[] = {
3007 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
3008 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
3010 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);
3011 assert((Subtarget->hasSSE1() || !NumXMMRegs)
3012 && "SSE registers cannot be used when SSE is disabled");
3014 RegsToPass.push_back(std::make_pair(unsigned(X86::AL),
3015 DAG.getConstant(NumXMMRegs, MVT::i8)));
3018 if (isVarArg && IsMustTail) {
3019 const auto &Forwards = X86Info->getForwardedMustTailRegParms();
3020 for (const auto &F : Forwards) {
3021 SDValue Val = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT);
3022 RegsToPass.push_back(std::make_pair(unsigned(F.PReg), Val));
3026 // For tail calls lower the arguments to the 'real' stack slots. Sibcalls
3027 // don't need this because the eligibility check rejects calls that require
3028 // shuffling arguments passed in memory.
3029 if (!IsSibcall && isTailCall) {
3030 // Force all the incoming stack arguments to be loaded from the stack
3031 // before any new outgoing arguments are stored to the stack, because the
3032 // outgoing stack slots may alias the incoming argument stack slots, and
3033 // the alias isn't otherwise explicit. This is slightly more conservative
3034 // than necessary, because it means that each store effectively depends
3035 // on every argument instead of just those arguments it would clobber.
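// Illustrative hazard (not from the original source): a tail call that
// forwards one of its own incoming stack arguments to a different outgoing
// slot could otherwise have the store scheduled before the load of the
// original value, clobbering it.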
3036 SDValue ArgChain = DAG.getStackArgumentTokenFactor(Chain);
3038 SmallVector<SDValue, 8> MemOpChains2;
3041 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3042 CCValAssign &VA = ArgLocs[i];
3045 assert(VA.isMemLoc());
3046 SDValue Arg = OutVals[i];
3047 ISD::ArgFlagsTy Flags = Outs[i].Flags;
3048 // Skip inalloca arguments. They don't require any work.
3049 if (Flags.isInAlloca())
3051 // Create frame index.
3052 int32_t Offset = VA.getLocMemOffset()+FPDiff;
3053 uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8;
3054 FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true);
3055 FIN = DAG.getFrameIndex(FI, getPointerTy());
3057 if (Flags.isByVal()) {
3058 // Copy relative to framepointer.
3059 SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset());
3060 if (!StackPtr.getNode())
3061 StackPtr = DAG.getCopyFromReg(Chain, dl,
3062 RegInfo->getStackRegister(),
3064 Source = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, Source);
3066 MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN,
3070 // Store relative to framepointer.
3071 MemOpChains2.push_back(
3072 DAG.getStore(ArgChain, dl, Arg, FIN,
3073 MachinePointerInfo::getFixedStack(FI),
3078 if (!MemOpChains2.empty())
3079 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);
3081 // Store the return address to the appropriate stack slot.
3082 Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx,
3083 getPointerTy(), RegInfo->getSlotSize(),
3087 // Build a sequence of copy-to-reg nodes chained together with token chain
3088 // and flag operands which copy the outgoing args into registers.
3090 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
3091 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
3092 RegsToPass[i].second, InFlag);
3093 InFlag = Chain.getValue(1);
3096 if (DAG.getTarget().getCodeModel() == CodeModel::Large) {
3097 assert(Is64Bit && "Large code model is only legal in 64-bit mode.");
3098 // In the 64-bit large code model, we have to make all calls
3099 // through a register, since the call instruction's 32-bit
3100 // pc-relative offset may not be large enough to hold the whole address.
3102 } else if (Callee->getOpcode() == ISD::GlobalAddress) {
3103 // If the callee is a GlobalAddress node (quite common, every direct call
3104 // is) turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
3106 GlobalAddressSDNode* G = cast<GlobalAddressSDNode>(Callee);
3108 // We should use extra load for direct calls to dllimported functions in
3110 const GlobalValue *GV = G->getGlobal();
3111 if (!GV->hasDLLImportStorageClass()) {
3112 unsigned char OpFlags = 0;
3113 bool ExtraLoad = false;
3114 unsigned WrapperKind = ISD::DELETED_NODE;
3116 // On ELF targets, in both X86-64 and X86-32 mode, direct calls to
3117 // external symbols must go through the PLT in PIC mode. If the symbol
3118 // has hidden or protected visibility, or if it is static or local, then
3119 // we don't need to use the PLT - we can directly call it.
3120 if (Subtarget->isTargetELF() &&
3121 DAG.getTarget().getRelocationModel() == Reloc::PIC_ &&
3122 GV->hasDefaultVisibility() && !GV->hasLocalLinkage()) {
3123 OpFlags = X86II::MO_PLT;
3124 } else if (Subtarget->isPICStyleStubAny() &&
3125 (GV->isDeclaration() || GV->isWeakForLinker()) &&
3126 (!Subtarget->getTargetTriple().isMacOSX() ||
3127 Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) {
3128 // PC-relative references to external symbols should go through $stub,
3129 // unless we're building with the leopard linker or later, which
3130 // automatically synthesizes these stubs.
3131 OpFlags = X86II::MO_DARWIN_STUB;
3132 } else if (Subtarget->isPICStyleRIPRel() && isa<Function>(GV) &&
3133 cast<Function>(GV)->hasFnAttribute(Attribute::NonLazyBind)) {
3134 // If the function is marked as non-lazy, generate an indirect call
3135 // which loads from the GOT directly. This avoids runtime overhead
3136 // at the cost of eager binding (and one extra byte of encoding).
3137 OpFlags = X86II::MO_GOTPCREL;
3138 WrapperKind = X86ISD::WrapperRIP;
3142 Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(),
3143 G->getOffset(), OpFlags);
3145 // Add a wrapper if needed.
3146 if (WrapperKind != ISD::DELETED_NODE)
3147 Callee = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Callee);
3148 // Add extra indirection if needed.
3150 Callee = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Callee,
3151 MachinePointerInfo::getGOT(),
3152 false, false, false, 0);
3154 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
3155 unsigned char OpFlags = 0;
3157 // On ELF targets, in either X86-64 or X86-32 mode, direct calls to
3158 // external symbols should go through the PLT.
3159 if (Subtarget->isTargetELF() &&
3160 DAG.getTarget().getRelocationModel() == Reloc::PIC_) {
3161 OpFlags = X86II::MO_PLT;
3162 } else if (Subtarget->isPICStyleStubAny() &&
3163 (!Subtarget->getTargetTriple().isMacOSX() ||
3164 Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) {
3165 // PC-relative references to external symbols should go through $stub,
3166 // unless we're building with the leopard linker or later, which
3167 // automatically synthesizes these stubs.
3168 OpFlags = X86II::MO_DARWIN_STUB;
3171 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy(),
3173 } else if (Subtarget->isTarget64BitILP32() &&
3174 Callee->getValueType(0) == MVT::i32) {
3175 // Zero-extend the 32-bit Callee address to 64 bits, as required by the x32 ABI.
3176 Callee = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Callee);
3179 // Returns a chain & a flag for retval copy to use.
3180 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
3181 SmallVector<SDValue, 8> Ops;
3183 if (!IsSibcall && isTailCall) {
3184 Chain = DAG.getCALLSEQ_END(Chain,
3185 DAG.getIntPtrConstant(NumBytesToPop, true),
3186 DAG.getIntPtrConstant(0, true), InFlag, dl);
3187 InFlag = Chain.getValue(1);
3190 Ops.push_back(Chain);
3191 Ops.push_back(Callee);
3194 Ops.push_back(DAG.getConstant(FPDiff, MVT::i32));
3196 // Add argument registers to the end of the list so that they are known live into the call.
3198 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
3199 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
3200 RegsToPass[i].second.getValueType()));
3202 // Add a register mask operand representing the call-preserved registers.
3203 const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
3204 const uint32_t *Mask = TRI->getCallPreservedMask(CallConv);
3205 assert(Mask && "Missing call preserved mask for calling convention");
3206 Ops.push_back(DAG.getRegisterMask(Mask));
3208 if (InFlag.getNode())
3209 Ops.push_back(InFlag);
3212 // We used to do:
3213 //// If this is the first return lowered for this function, add the regs
3214 //// to the liveout set for the function.
3215 // This isn't right, although it's probably harmless on x86; liveouts
3216 // should be computed from returns not tail calls. Consider a void
3217 // function making a tail call to a function returning int.
3218 return DAG.getNode(X86ISD::TC_RETURN, dl, NodeTys, Ops);
3221 Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, Ops);
3222 InFlag = Chain.getValue(1);
3224 // Create the CALLSEQ_END node.
3225 unsigned NumBytesForCalleeToPop;
3226 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
3227 DAG.getTarget().Options.GuaranteedTailCallOpt))
3228 NumBytesForCalleeToPop = NumBytes; // Callee pops everything
3229 else if (!Is64Bit && !IsTailCallConvention(CallConv) &&
3230 !Subtarget->getTargetTriple().isOSMSVCRT() &&
3231 SR == StackStructReturn)
3232 // If this is a call to a struct-return function, the callee
3233 // pops the hidden struct pointer, so we have to push it back.
3234 // This is common for Darwin/X86, Linux & Mingw32 targets.
3235 // For MSVC Win32 targets, the caller pops the hidden struct pointer.
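// Illustrative example (not from the original source): such a callee
// typically returns with "retl $4", popping the 4-byte sret pointer itself.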
3236 NumBytesForCalleeToPop = 4;
3238 NumBytesForCalleeToPop = 0; // Callee pops nothing.
3240 // Returns a flag for retval copy to use.
3242 Chain = DAG.getCALLSEQ_END(Chain,
3243 DAG.getIntPtrConstant(NumBytesToPop, true),
3244 DAG.getIntPtrConstant(NumBytesForCalleeToPop,
3247 InFlag = Chain.getValue(1);
3250 // Handle result values, copying them out of physregs into vregs that we
3252 return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
3253 Ins, dl, DAG, InVals);
3256 //===----------------------------------------------------------------------===//
3257 // Fast Calling Convention (tail call) implementation
3258 //===----------------------------------------------------------------------===//
3260 // Like std call, the callee cleans up the arguments, except that ECX is
3261 // reserved for storing the address of the tail-called function. Only 2
3262 // registers are free for argument passing (inreg). Tail call optimization is
3263 // performed provided that:
3264 // * tailcallopt is enabled
3265 // * caller/callee are fastcc
3266 // On the X86_64 architecture with GOT-style position independent code, only
3267 // local (within module) calls are supported at the moment.
3268 // To keep the stack aligned according to the platform ABI, the function
3269 // GetAlignedArgumentStackSize ensures that the argument delta is always a
3270 // multiple of the stack alignment. (Dynamic linkers need this - darwin's dyld for example.)
3271 // If a tail-called callee has more arguments than the caller, the
3272 // caller needs to make sure that there is room to move the RETADDR to. This is
3273 // achieved by reserving an area the size of the argument delta right after the
3274 // original RETADDR, but before the saved framepointer or the spilled registers
3275 // e.g. caller(arg1, arg2) calls callee(arg1, arg2,arg3,arg4)
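// Illustrative stack layout for the example above (sketch, not from the
// original source):
//   arg1
//   arg2
//   RETADDR
//   [ move area for the new RETADDR, sized by the argument delta ]
//   (possibly saved EBP)
//   callee-saved registers / spilled registers / locals ...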
3287 /// GetAlignedArgumentStackSize - Round up the stack size to keep it aligned,
3288 /// e.g. to 16n + 12 for a 16 byte alignment requirement with a 4 byte slot.
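/// For instance (illustrative, assuming StackAlignment = 16 and SlotSize = 4):
/// an incoming StackSize of 20 is rounded up to 28 = 16 + 12, so that once the
/// 4-byte return address is pushed the stack is 16-byte aligned again.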
3290 X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
3291 SelectionDAG& DAG) const {
3292 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
3293 const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
3294 unsigned StackAlignment = TFI.getStackAlignment();
3295 uint64_t AlignMask = StackAlignment - 1;
3296 int64_t Offset = StackSize;
3297 unsigned SlotSize = RegInfo->getSlotSize();
3298 if ( (Offset & AlignMask) <= (StackAlignment - SlotSize) ) {
3299 // The misaligned part is at most (StackAlignment - SlotSize), e.g. 12, so just add the difference.
3300 Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask));
3302 // Mask out the lower bits, then add the stack alignment once plus the remaining (StackAlignment - SlotSize) bytes.
3303 Offset = ((~AlignMask) & Offset) + StackAlignment +
3304 (StackAlignment-SlotSize);
3309 /// MatchingStackOffset - Return true if the given stack call argument is
3310 /// already available in the same position (relatively) of the caller's
3311 /// incoming argument stack.
3313 bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
3314 MachineFrameInfo *MFI, const MachineRegisterInfo *MRI,
3315 const X86InstrInfo *TII) {
3316 unsigned Bytes = Arg.getValueType().getSizeInBits() / 8;
3318 if (Arg.getOpcode() == ISD::CopyFromReg) {
3319 unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
3320 if (!TargetRegisterInfo::isVirtualRegister(VR))
3322 MachineInstr *Def = MRI->getVRegDef(VR);
3325 if (!Flags.isByVal()) {
3326 if (!TII->isLoadFromStackSlot(Def, FI))
3329 unsigned Opcode = Def->getOpcode();
3330 if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r ||
3331 Opcode == X86::LEA64_32r) &&
3332 Def->getOperand(1).isFI()) {
3333 FI = Def->getOperand(1).getIndex();
3334 Bytes = Flags.getByValSize();
3338 } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
3339 if (Flags.isByVal())
3340 // ByVal argument is passed in as a pointer but it's now being
3341 // dereferenced. e.g.
3342 // define @foo(%struct.X* %A) {
3343 // tail call @bar(%struct.X* byval %A)
3346 SDValue Ptr = Ld->getBasePtr();
3347 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
3350 FI = FINode->getIndex();
3351 } else if (Arg.getOpcode() == ISD::FrameIndex && Flags.isByVal()) {
3352 FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Arg);
3353 FI = FINode->getIndex();
3354 Bytes = Flags.getByValSize();
3358 assert(FI != INT_MAX);
3359 if (!MFI->isFixedObjectIndex(FI))
3361 return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI);
3364 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
3365 /// for tail call optimization. Targets which want to do tail call
3366 /// optimization should implement this function.
3368 X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
3369 CallingConv::ID CalleeCC,
3371 bool isCalleeStructRet,
3372 bool isCallerStructRet,
3374 const SmallVectorImpl<ISD::OutputArg> &Outs,
3375 const SmallVectorImpl<SDValue> &OutVals,
3376 const SmallVectorImpl<ISD::InputArg> &Ins,
3377 SelectionDAG &DAG) const {
3378 if (!IsTailCallConvention(CalleeCC) && !IsCCallConvention(CalleeCC))
3381 // If -tailcallopt is specified, make fastcc functions tail-callable.
3382 const MachineFunction &MF = DAG.getMachineFunction();
3383 const Function *CallerF = MF.getFunction();
3385 // If the function return type is x86_fp80 and the callee return type is not,
3386 // then the FP_EXTEND of the call result is not a nop. It's not safe to
3387 // perform a tailcall optimization here.
3388 if (CallerF->getReturnType()->isX86_FP80Ty() && !RetTy->isX86_FP80Ty())
3391 CallingConv::ID CallerCC = CallerF->getCallingConv();
3392 bool CCMatch = CallerCC == CalleeCC;
3393 bool IsCalleeWin64 = Subtarget->isCallingConvWin64(CalleeCC);
3394 bool IsCallerWin64 = Subtarget->isCallingConvWin64(CallerCC);
3396 if (DAG.getTarget().Options.GuaranteedTailCallOpt) {
3397 if (IsTailCallConvention(CalleeCC) && CCMatch)
3402 // Look for obvious safe cases to perform tail call optimization that do not
3403 // require ABI changes. This is what gcc calls sibcall.
3405 // Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to
3406 // emit a special epilogue.
3407 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
3408 if (RegInfo->needsStackRealignment(MF))
3411 // Also avoid sibcall optimization if either caller or callee uses struct
3412 // return semantics.
3413 if (isCalleeStructRet || isCallerStructRet)
3416 // An stdcall/thiscall caller is expected to clean up its arguments; the
3417 // callee isn't going to do that.
3418 // FIXME: this is more restrictive than needed. We could produce a tailcall
3419 // when the stack adjustment matches. For example, with a thiscall that takes
3420 // only one argument.
3421 if (!CCMatch && (CallerCC == CallingConv::X86_StdCall ||
3422 CallerCC == CallingConv::X86_ThisCall))
3425 // Do not sibcall optimize vararg calls unless all arguments are passed via registers.
3427 if (isVarArg && !Outs.empty()) {
3429 // Optimizing for varargs on Win64 is unlikely to be safe without
3430 // additional testing.
3431 if (IsCalleeWin64 || IsCallerWin64)
3434 SmallVector<CCValAssign, 16> ArgLocs;
3435 CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), ArgLocs,
3438 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
3439 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
3440 if (!ArgLocs[i].isRegLoc())
3444 // If the call result is in ST0 / ST1, it needs to be popped off the x87
3445 // stack. Therefore, if the result is not actually used, it is not safe to optimize
3446 // this into a sibcall.
3447 bool Unused = false;
3448 for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
3455 SmallVector<CCValAssign, 16> RVLocs;
3456 CCState CCInfo(CalleeCC, false, DAG.getMachineFunction(), RVLocs,
3458 CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
3459 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
3460 CCValAssign &VA = RVLocs[i];
3461 if (VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1)
3466 // If the calling conventions do not match, then we'd better make sure the
3467 // results are returned in the same way as what the caller expects.
3469 SmallVector<CCValAssign, 16> RVLocs1;
3470 CCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(), RVLocs1,
3472 CCInfo1.AnalyzeCallResult(Ins, RetCC_X86);
3474 SmallVector<CCValAssign, 16> RVLocs2;
3475 CCState CCInfo2(CallerCC, false, DAG.getMachineFunction(), RVLocs2,
3477 CCInfo2.AnalyzeCallResult(Ins, RetCC_X86);
3479 if (RVLocs1.size() != RVLocs2.size())
3481 for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) {
3482 if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc())
3484 if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo())
3486 if (RVLocs1[i].isRegLoc()) {
3487 if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg())
3490 if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset())
3496 // If the callee takes no arguments then go on to check the results of the call.
3498 if (!Outs.empty()) {
3499 // Check if stack adjustment is needed. For now, do not do this if any
3500 // argument is passed on the stack.
3501 SmallVector<CCValAssign, 16> ArgLocs;
3502 CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), ArgLocs,
3505 // Allocate shadow area for Win64
3507 CCInfo.AllocateStack(32, 8);
3509 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
3510 if (CCInfo.getNextStackOffset()) {
3511 MachineFunction &MF = DAG.getMachineFunction();
3512 if (MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn())
3515 // Check if the arguments are already laid out in the right way as
3516 // the caller's fixed stack objects.
3517 MachineFrameInfo *MFI = MF.getFrameInfo();
3518 const MachineRegisterInfo *MRI = &MF.getRegInfo();
3519 const X86InstrInfo *TII = Subtarget->getInstrInfo();
3520 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3521 CCValAssign &VA = ArgLocs[i];
3522 SDValue Arg = OutVals[i];
3523 ISD::ArgFlagsTy Flags = Outs[i].Flags;
3524 if (VA.getLocInfo() == CCValAssign::Indirect)
3526 if (!VA.isRegLoc()) {
3527 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
3534 // If the tailcall address may be in a register, then make sure it's
3535 // possible to register allocate for it. In 32-bit, the call address can
3536 // only target EAX, EDX, or ECX since the tail call must be scheduled after
3537 // callee-saved registers are restored. These happen to be the same
3538 // registers used to pass 'inreg' arguments so watch out for those.
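// Illustrative example (not from the original source): in 32-bit non-PIC
// mode at most two 'inreg' arguments may occupy EAX/ECX/EDX, leaving one
// register for the call target; in PIC mode only one may, since another
// register is consumed by the address computation.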
3539 if (!Subtarget->is64Bit() &&
3540 ((!isa<GlobalAddressSDNode>(Callee) &&
3541 !isa<ExternalSymbolSDNode>(Callee)) ||
3542 DAG.getTarget().getRelocationModel() == Reloc::PIC_)) {
3543 unsigned NumInRegs = 0;
3544 // In PIC we need an extra register to formulate the address computation for the callee.
3546 unsigned MaxInRegs =
3547 (DAG.getTarget().getRelocationModel() == Reloc::PIC_) ? 2 : 3;
3549 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3550 CCValAssign &VA = ArgLocs[i];
3553 unsigned Reg = VA.getLocReg();
3556 case X86::EAX: case X86::EDX: case X86::ECX:
3557 if (++NumInRegs == MaxInRegs)
3569 X86TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
3570 const TargetLibraryInfo *libInfo) const {
3571 return X86::createFastISel(funcInfo, libInfo);
3574 //===----------------------------------------------------------------------===//
3575 // Other Lowering Hooks
3576 //===----------------------------------------------------------------------===//
3578 static bool MayFoldLoad(SDValue Op) {
3579 return Op.hasOneUse() && ISD::isNormalLoad(Op.getNode());
3582 static bool MayFoldIntoStore(SDValue Op) {
3583 return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->use_begin());
3586 static bool isTargetShuffle(unsigned Opcode) {
3588 default: return false;
3589 case X86ISD::BLENDI:
3590 case X86ISD::PSHUFB:
3591 case X86ISD::PSHUFD:
3592 case X86ISD::PSHUFHW:
3593 case X86ISD::PSHUFLW:
3595 case X86ISD::PALIGNR:
3596 case X86ISD::MOVLHPS:
3597 case X86ISD::MOVLHPD:
3598 case X86ISD::MOVHLPS:
3599 case X86ISD::MOVLPS:
3600 case X86ISD::MOVLPD:
3601 case X86ISD::MOVSHDUP:
3602 case X86ISD::MOVSLDUP:
3603 case X86ISD::MOVDDUP:
3606 case X86ISD::UNPCKL:
3607 case X86ISD::UNPCKH:
3608 case X86ISD::VPERMILPI:
3609 case X86ISD::VPERM2X128:
3610 case X86ISD::VPERMI:
3615 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3616 SDValue V1, SelectionDAG &DAG) {
3618 default: llvm_unreachable("Unknown x86 shuffle node");
3619 case X86ISD::MOVSHDUP:
3620 case X86ISD::MOVSLDUP:
3621 case X86ISD::MOVDDUP:
3622 return DAG.getNode(Opc, dl, VT, V1);
3626 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3627 SDValue V1, unsigned TargetMask,
3628 SelectionDAG &DAG) {
3630 default: llvm_unreachable("Unknown x86 shuffle node");
3631 case X86ISD::PSHUFD:
3632 case X86ISD::PSHUFHW:
3633 case X86ISD::PSHUFLW:
3634 case X86ISD::VPERMILPI:
3635 case X86ISD::VPERMI:
3636 return DAG.getNode(Opc, dl, VT, V1, DAG.getConstant(TargetMask, MVT::i8));
3640 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3641 SDValue V1, SDValue V2, unsigned TargetMask,
3642 SelectionDAG &DAG) {
3644 default: llvm_unreachable("Unknown x86 shuffle node");
3645 case X86ISD::PALIGNR:
3646 case X86ISD::VALIGN:
3648 case X86ISD::VPERM2X128:
3649 return DAG.getNode(Opc, dl, VT, V1, V2,
3650 DAG.getConstant(TargetMask, MVT::i8));
3654 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3655 SDValue V1, SDValue V2, SelectionDAG &DAG) {
3657 default: llvm_unreachable("Unknown x86 shuffle node");
3658 case X86ISD::MOVLHPS:
3659 case X86ISD::MOVLHPD:
3660 case X86ISD::MOVHLPS:
3661 case X86ISD::MOVLPS:
3662 case X86ISD::MOVLPD:
3665 case X86ISD::UNPCKL:
3666 case X86ISD::UNPCKH:
3667 return DAG.getNode(Opc, dl, VT, V1, V2);
3671 SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
3672 MachineFunction &MF = DAG.getMachineFunction();
3673 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
3674 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
3675 int ReturnAddrIndex = FuncInfo->getRAIndex();
3677 if (ReturnAddrIndex == 0) {
3678 // Set up a frame object for the return address.
3679 unsigned SlotSize = RegInfo->getSlotSize();
3680 ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(SlotSize,
3683 FuncInfo->setRAIndex(ReturnAddrIndex);
3686 return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy());
3689 bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
3690 bool hasSymbolicDisplacement) {
3691 // The offset should fit into a 32-bit immediate field.
3692 if (!isInt<32>(Offset))
3695 // If we don't have a symbolic displacement - we don't have any extra restrictions.
3697 if (!hasSymbolicDisplacement)
3700 // FIXME: Some tweaks might be needed for medium code model.
3701 if (M != CodeModel::Small && M != CodeModel::Kernel)
3704 // For the small code model we assume that the last object is 16MB before the
3705 // end of the 31-bit boundary. We may also accept pretty large negative
3706 // constants knowing that all objects are in the positive half of the address space.
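// Illustrative example (not from the original source): an offset of
// 15*1024*1024 is accepted here, while 16*1024*1024 is rejected, since the
// referenced address could then cross the 2GB boundary.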
3707 if (M == CodeModel::Small && Offset < 16*1024*1024)
3710 // For the kernel code model we know that all objects reside in the negative
3711 // half of the 32-bit address space. We may not accept negative offsets, since
3712 // they may be just out of range, but we may accept pretty large positive ones.
3713 if (M == CodeModel::Kernel && Offset >= 0)
3719 /// isCalleePop - Determines whether the callee is required to pop its
3720 /// own arguments. Callee pop is necessary to support tail calls.
3721 bool X86::isCalleePop(CallingConv::ID CallingConv,
3722 bool is64Bit, bool IsVarArg, bool TailCallOpt) {
3723 switch (CallingConv) {
3726 case CallingConv::X86_StdCall:
3727 case CallingConv::X86_FastCall:
3728 case CallingConv::X86_ThisCall:
3730 case CallingConv::Fast:
3731 case CallingConv::GHC:
3732 case CallingConv::HiPE:
3739 /// \brief Return true if the condition is an unsigned comparison operation.
3740 static bool isX86CCUnsigned(unsigned X86CC) {
3742 default: llvm_unreachable("Invalid integer condition!");
3743 case X86::COND_E: return true;
3744 case X86::COND_G: return false;
3745 case X86::COND_GE: return false;
3746 case X86::COND_L: return false;
3747 case X86::COND_LE: return false;
3748 case X86::COND_NE: return true;
3749 case X86::COND_B: return true;
3750 case X86::COND_A: return true;
3751 case X86::COND_BE: return true;
3752 case X86::COND_AE: return true;
3754 llvm_unreachable("covered switch fell through?!");
3757 /// TranslateX86CC - Do a one-to-one translation of an ISD::CondCode to the
3758 /// X86-specific condition code, returning the condition code and updating the
3759 /// LHS/RHS of the comparison to make.
3760 static unsigned TranslateX86CC(ISD::CondCode SetCCOpcode, bool isFP,
3761 SDValue &LHS, SDValue &RHS, SelectionDAG &DAG) {
3763 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
3764 if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
3765 // X > -1 -> X == 0, jump !sign.
3766 RHS = DAG.getConstant(0, RHS.getValueType());
3767 return X86::COND_NS;
3769 if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
3770 // X < 0 -> X == 0, jump on sign.
3773 if (SetCCOpcode == ISD::SETLT && RHSC->getZExtValue() == 1) {
3775 RHS = DAG.getConstant(0, RHS.getValueType());
3776 return X86::COND_LE;
3780 switch (SetCCOpcode) {
3781 default: llvm_unreachable("Invalid integer condition!");
3782 case ISD::SETEQ: return X86::COND_E;
3783 case ISD::SETGT: return X86::COND_G;
3784 case ISD::SETGE: return X86::COND_GE;
3785 case ISD::SETLT: return X86::COND_L;
3786 case ISD::SETLE: return X86::COND_LE;
3787 case ISD::SETNE: return X86::COND_NE;
3788 case ISD::SETULT: return X86::COND_B;
3789 case ISD::SETUGT: return X86::COND_A;
3790 case ISD::SETULE: return X86::COND_BE;
3791 case ISD::SETUGE: return X86::COND_AE;
3795 // First determine whether it is required or profitable to flip the operands.
3797 // If LHS is a foldable load, but RHS is not, flip the condition.
3798 if (ISD::isNON_EXTLoad(LHS.getNode()) &&
3799 !ISD::isNON_EXTLoad(RHS.getNode())) {
3800 SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode);
3801 std::swap(LHS, RHS);
3804 switch (SetCCOpcode) {
3810 std::swap(LHS, RHS);
3814 // On a floating point condition, the flags are set as follows:
3815 // ZF  PF  CF   op
3816 //  0 | 0 | 0 | X > Y
3817 //  0 | 0 | 1 | X < Y
3818 //  1 | 0 | 0 | X == Y
3819 //  1 | 1 | 1 | unordered
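// Illustrative example (not from the original source): for "x <olt y" the
// operands are swapped above and COND_A (CF == 0 && ZF == 0) is used; an
// unordered compare sets ZF, PF and CF, so COND_A is correctly false then.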
3820 switch (SetCCOpcode) {
3821 default: llvm_unreachable("Condcode should be pre-legalized away");
3823 case ISD::SETEQ: return X86::COND_E;
3824 case ISD::SETOLT: // flipped
3826 case ISD::SETGT: return X86::COND_A;
3827 case ISD::SETOLE: // flipped
3829 case ISD::SETGE: return X86::COND_AE;
3830 case ISD::SETUGT: // flipped
3832 case ISD::SETLT: return X86::COND_B;
3833 case ISD::SETUGE: // flipped
3835 case ISD::SETLE: return X86::COND_BE;
3837 case ISD::SETNE: return X86::COND_NE;
3838 case ISD::SETUO: return X86::COND_P;
3839 case ISD::SETO: return X86::COND_NP;
3841 case ISD::SETUNE: return X86::COND_INVALID;
3845 /// hasFPCMov - Is there a floating point cmov for the specific X86 condition
3846 /// code? The current x86 ISA includes the following FP cmov instructions:
3847 /// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
3848 static bool hasFPCMov(unsigned X86CC) {
3864 /// isFPImmLegal - Returns true if the target can instruction select the
3865 /// specified FP immediate natively. If false, the legalizer will
3866 /// materialize the FP immediate as a load from a constant pool.
3867 bool X86TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
3868 for (unsigned i = 0, e = LegalFPImmediates.size(); i != e; ++i) {
3869 if (Imm.bitwiseIsEqual(LegalFPImmediates[i]))
3875 bool X86TargetLowering::shouldReduceLoadWidth(SDNode *Load,
3876 ISD::LoadExtType ExtTy,
3878 // "ELF Handling for Thread-Local Storage" specifies that R_X86_64_GOTTPOFF
3879 // relocations must target a movq or addq instruction: don't let the load shrink.
3880 SDValue BasePtr = cast<LoadSDNode>(Load)->getBasePtr();
3881 if (BasePtr.getOpcode() == X86ISD::WrapperRIP)
3882 if (const auto *GA = dyn_cast<GlobalAddressSDNode>(BasePtr.getOperand(0)))
3883 return GA->getTargetFlags() != X86II::MO_GOTTPOFF;
3887 /// \brief Returns true if it is beneficial to convert a load of a constant
3888 /// to just the constant itself.
3889 bool X86TargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
3891 assert(Ty->isIntegerTy());
3893 unsigned BitSize = Ty->getPrimitiveSizeInBits();
3894 if (BitSize == 0 || BitSize > 64)
3899 bool X86TargetLowering::isExtractSubvectorCheap(EVT ResVT,
3900 unsigned Index) const {
3901 if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
3904 return (Index == 0 || Index == ResVT.getVectorNumElements());
3907 bool X86TargetLowering::isCheapToSpeculateCttz() const {
3908 // Speculate cttz only if we can directly use TZCNT.
3909 return Subtarget->hasBMI();
3912 bool X86TargetLowering::isCheapToSpeculateCtlz() const {
3913 // Speculate ctlz only if we can directly use LZCNT.
3914 return Subtarget->hasLZCNT();
3917 /// isUndefOrInRange - Return true if Val is undef or if its value falls within
3918 /// the specified half-open range [Low, Hi).
3919 static bool isUndefOrInRange(int Val, int Low, int Hi) {
3920 return (Val < 0) || (Val >= Low && Val < Hi);
3923 /// isUndefOrEqual - Val is either less than zero (undef) or equal to the
3924 /// specified value.
3925 static bool isUndefOrEqual(int Val, int CmpVal) {
3926 return (Val < 0 || Val == CmpVal);
3929 /// isSequentialOrUndefInRange - Return true if every element in Mask, beginning
3930 /// at position Pos and ending at Pos+Size, either falls within the specified
3931 /// sequential range [Low, Low+Size) or is undef.
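/// For example (illustrative), Mask = <4, -1, 6, 7> with Pos = 0, Size = 4 and
/// Low = 4 returns true: every defined element equals Low plus its distance
/// from Pos.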
3932 static bool isSequentialOrUndefInRange(ArrayRef<int> Mask,
3933 unsigned Pos, unsigned Size, int Low) {
3934 for (unsigned i = Pos, e = Pos+Size; i != e; ++i, ++Low)
3935 if (!isUndefOrEqual(Mask[i], Low))
3940 /// isPSHUFDMask - Return true if the node specifies a shuffle of elements that
3941 /// is suitable for input to PSHUFD. That is, it doesn't reference the other
3942 /// operand - by default it matches against the first operand.
3943 static bool isPSHUFDMask(ArrayRef<int> Mask, MVT VT,
3944 bool TestSecondOperand = false) {
3945 if (VT != MVT::v4f32 && VT != MVT::v4i32 &&
3946 VT != MVT::v2f64 && VT != MVT::v2i64)
3949 unsigned NumElems = VT.getVectorNumElements();
3950 unsigned Lo = TestSecondOperand ? NumElems : 0;
3951 unsigned Hi = Lo + NumElems;
3953 for (unsigned i = 0; i < NumElems; ++i)
3954 if (!isUndefOrInRange(Mask[i], (int)Lo, (int)Hi))
3960 /// isPSHUFHWMask - Return true if the node specifies a shuffle of elements that
3961 /// is suitable for input to PSHUFHW.
3962 static bool isPSHUFHWMask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
3963 if (VT != MVT::v8i16 && (!HasInt256 || VT != MVT::v16i16))
3966 // Lower quadword copied in order or undef.
3967 if (!isSequentialOrUndefInRange(Mask, 0, 4, 0))
3970 // Upper quadword shuffled.
3971 for (unsigned i = 4; i != 8; ++i)
3972 if (!isUndefOrInRange(Mask[i], 4, 8))
3975 if (VT == MVT::v16i16) {
3976 // Lower quadword copied in order or undef.
3977 if (!isSequentialOrUndefInRange(Mask, 8, 4, 8))
3980 // Upper quadword shuffled.
3981 for (unsigned i = 12; i != 16; ++i)
3982 if (!isUndefOrInRange(Mask[i], 12, 16))
3989 /// isPSHUFLWMask - Return true if the node specifies a shuffle of elements that
3990 /// is suitable for input to PSHUFLW.
3991 static bool isPSHUFLWMask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
3992 if (VT != MVT::v8i16 && (!HasInt256 || VT != MVT::v16i16))
3995 // Upper quadword copied in order.
3996 if (!isSequentialOrUndefInRange(Mask, 4, 4, 4))
3999 // Lower quadword shuffled.
4000 for (unsigned i = 0; i != 4; ++i)
4001 if (!isUndefOrInRange(Mask[i], 0, 4))
4004 if (VT == MVT::v16i16) {
4005 // Upper quadword copied in order.
4006 if (!isSequentialOrUndefInRange(Mask, 12, 4, 12))
4009 // Lower quadword shuffled.
4010 for (unsigned i = 8; i != 12; ++i)
4011 if (!isUndefOrInRange(Mask[i], 8, 12))
4018 /// \brief Return true if the mask specifies a shuffle of elements that is
4019 /// suitable for input to an intralane (palignr) or interlane (valign) vector shift.
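/// For example (illustrative), for v8i16 the mask <1, 2, 3, 4, 5, 6, 7, 8> is a
/// valid PALIGNR mask: it takes elements 1..7 of the first source followed by
/// element 0 of the second source, i.e. a right shift by one element across the
/// concatenated sources.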
4021 static bool isAlignrMask(ArrayRef<int> Mask, MVT VT, bool InterLane) {
4022 unsigned NumElts = VT.getVectorNumElements();
4023 unsigned NumLanes = InterLane ? 1: VT.getSizeInBits()/128;
4024 unsigned NumLaneElts = NumElts/NumLanes;
4026 // Do not handle 64-bit element shuffles with palignr.
4027 if (NumLaneElts == 2)
4030 for (unsigned l = 0; l != NumElts; l+=NumLaneElts) {
4032 for (i = 0; i != NumLaneElts; ++i) {
4037 // Lane is all undef, go to next lane
4038 if (i == NumLaneElts)
4041 int Start = Mask[i+l];
4043 // Make sure it's in this lane in one of the sources
4044 if (!isUndefOrInRange(Start, l, l+NumLaneElts) &&
4045 !isUndefOrInRange(Start, l+NumElts, l+NumElts+NumLaneElts))
4048 // If not lane 0, then we must match lane 0
4049 if (l != 0 && Mask[i] >= 0 && !isUndefOrEqual(Start, Mask[i]+l))
4052 // Correct second source to be contiguous with first source
4053 if (Start >= (int)NumElts)
4054 Start -= NumElts - NumLaneElts;
4056 // Make sure we're shifting in the right direction.
4057 if (Start <= (int)(i+l))
4062 // Check the rest of the elements to see if they are consecutive.
4063 for (++i; i != NumLaneElts; ++i) {
4064 int Idx = Mask[i+l];
4066 // Make sure it's in this lane
4067 if (!isUndefOrInRange(Idx, l, l+NumLaneElts) &&
4068 !isUndefOrInRange(Idx, l+NumElts, l+NumElts+NumLaneElts))
4071 // If not lane 0, then we must match lane 0
4072 if (l != 0 && Mask[i] >= 0 && !isUndefOrEqual(Idx, Mask[i]+l))
4075 if (Idx >= (int)NumElts)
4076 Idx -= NumElts - NumLaneElts;
4078 if (!isUndefOrEqual(Idx, Start+i))
4087 /// \brief Return true if the node specifies a shuffle of elements that is
4088 /// suitable for input to PALIGNR.
4089 static bool isPALIGNRMask(ArrayRef<int> Mask, MVT VT,
4090 const X86Subtarget *Subtarget) {
4091 if ((VT.is128BitVector() && !Subtarget->hasSSSE3()) ||
4092 (VT.is256BitVector() && !Subtarget->hasInt256()) ||
4093 VT.is512BitVector())
4094 // FIXME: Add AVX512BW.
4097 return isAlignrMask(Mask, VT, false);
4100 /// \brief Return true if the node specifies a shuffle of elements that is
4101 /// suitable for input to VALIGN.
4102 static bool isVALIGNMask(ArrayRef<int> Mask, MVT VT,
4103 const X86Subtarget *Subtarget) {
4104 // FIXME: Add AVX512VL.
4105 if (!VT.is512BitVector() || !Subtarget->hasAVX512())
4107 return isAlignrMask(Mask, VT, true);
4110 /// CommuteVectorShuffleMask - Change values in a shuffle permute mask assuming
4111 /// the two vector operands have swapped position.
4112 static void CommuteVectorShuffleMask(SmallVectorImpl<int> &Mask,
4113 unsigned NumElems) {
4114 for (unsigned i = 0; i != NumElems; ++i) {
4118 else if (idx < (int)NumElems)
4119 Mask[i] = idx + NumElems;
4121 Mask[i] = idx - NumElems;
4125 /// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand
4126 /// specifies a shuffle of elements that is suitable for input to 128/256-bit
4127 /// SHUFPS and SHUFPD. If Commuted is true, then it checks for sources to be
4128 /// in the reverse order of what x86 shuffles want.
4129 static bool isSHUFPMask(ArrayRef<int> Mask, MVT VT, bool Commuted = false) {
4131 unsigned NumElems = VT.getVectorNumElements();
4132 unsigned NumLanes = VT.getSizeInBits()/128;
4133 unsigned NumLaneElems = NumElems/NumLanes;
4135 if (NumLaneElems != 2 && NumLaneElems != 4)
4138 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4139 bool symmetricMaskRequired =
4140 (VT.getSizeInBits() >= 256) && (EltSize == 32);
4142 // VSHUFPSY divides the resulting vector into 4 chunks.
4143 // The sources are also split into 4 chunks, and each destination
4144 // chunk must come from a different source chunk.
4146 // SRC1 => X7 X6 X5 X4 X3 X2 X1 X0
4147 // SRC2 => Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0
4149 // DST => Y7..Y4, Y7..Y4, X7..X4, X7..X4,
4150 // Y3..Y0, Y3..Y0, X3..X0, X3..X0
4152 // VSHUFPDY divides the resulting vector into 4 chunks.
4153 // The sources are also split into 4 chunks, and each destination
4154 // chunk must come from a different source chunk.
4156 // SRC1 => X3 X2 X1 X0
4157 // SRC2 => Y3 Y2 Y1 Y0
4159 // DST => Y3..Y2, X3..X2, Y1..Y0, X1..X0
4161 SmallVector<int, 4> MaskVal(NumLaneElems, -1);
4162 unsigned HalfLaneElems = NumLaneElems/2;
4163 for (unsigned l = 0; l != NumElems; l += NumLaneElems) {
4164 for (unsigned i = 0; i != NumLaneElems; ++i) {
4165 int Idx = Mask[i+l];
4166 unsigned RngStart = l + ((Commuted == (i<HalfLaneElems)) ? NumElems : 0);
4167 if (!isUndefOrInRange(Idx, RngStart, RngStart+NumLaneElems))
4169 // For VSHUFPSY, the mask of the second half must be the same as the
4170 // first but with the appropriate offsets. This works in the same way as
4171 // VPERMILPS works with masks.
4172 if (!symmetricMaskRequired || Idx < 0)
4174 if (MaskVal[i] < 0) {
4175 MaskVal[i] = Idx - l;
4178 if ((signed)(Idx - l) != MaskVal[i])
4186 /// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand
4187 /// specifies a shuffle of elements that is suitable for input to MOVHLPS.
4188 static bool isMOVHLPSMask(ArrayRef<int> Mask, MVT VT) {
4189 if (!VT.is128BitVector())
4192 unsigned NumElems = VT.getVectorNumElements();
4197 // Expect bit0 == 6, bit1 == 7, bit2 == 2, bit3 == 3
4198 return isUndefOrEqual(Mask[0], 6) &&
4199 isUndefOrEqual(Mask[1], 7) &&
4200 isUndefOrEqual(Mask[2], 2) &&
4201 isUndefOrEqual(Mask[3], 3);
4204 /// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form
4205 /// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef, <2, 3, 2, 3>.
4207 static bool isMOVHLPS_v_undef_Mask(ArrayRef<int> Mask, MVT VT) {
4208 if (!VT.is128BitVector())
4211 unsigned NumElems = VT.getVectorNumElements();
4216 return isUndefOrEqual(Mask[0], 2) &&
4217 isUndefOrEqual(Mask[1], 3) &&
4218 isUndefOrEqual(Mask[2], 2) &&
4219 isUndefOrEqual(Mask[3], 3);
4222 /// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand
4223 /// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}.
4224 static bool isMOVLPMask(ArrayRef<int> Mask, MVT VT) {
4225 if (!VT.is128BitVector())
4228 unsigned NumElems = VT.getVectorNumElements();
4230 if (NumElems != 2 && NumElems != 4)
4233 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
4234 if (!isUndefOrEqual(Mask[i], i + NumElems))
4237 for (unsigned i = NumElems/2, e = NumElems; i != e; ++i)
4238 if (!isUndefOrEqual(Mask[i], i))
4244 /// isMOVLHPSMask - Return true if the specified VECTOR_SHUFFLE operand
4245 /// specifies a shuffle of elements that is suitable for input to MOVLHPS.
4246 static bool isMOVLHPSMask(ArrayRef<int> Mask, MVT VT) {
4247 if (!VT.is128BitVector())
4250 unsigned NumElems = VT.getVectorNumElements();
4252 if (NumElems != 2 && NumElems != 4)
4255 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
4256 if (!isUndefOrEqual(Mask[i], i))
4259 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
4260 if (!isUndefOrEqual(Mask[i + e], i + NumElems))
4266 /// isINSERTPSMask - Return true if the specified VECTOR_SHUFFLE operand
4267 /// specifies a shuffle of elements that is suitable for input to INSERTPS.
4268 /// i. e: If all but one element come from the same vector.
4269 static bool isINSERTPSMask(ArrayRef<int> Mask, MVT VT) {
4270 // TODO: Deal with AVX's VINSERTPS
4271 if (!VT.is128BitVector() || (VT != MVT::v4f32 && VT != MVT::v4i32))
4274 unsigned CorrectPosV1 = 0;
4275 unsigned CorrectPosV2 = 0;
4276 for (int i = 0, e = (int)VT.getVectorNumElements(); i != e; ++i) {
4277 if (Mask[i] == -1) {
4285 else if (Mask[i] == i + 4)
4289 if (CorrectPosV1 == 3 || CorrectPosV2 == 3)
4290 // We have 3 elements (undefs count as elements from any vector) from one
4291 // vector, and one from another.
4298 // Some special combinations that can be optimized.
4301 SDValue Compact8x32ShuffleNode(ShuffleVectorSDNode *SVOp,
4302 SelectionDAG &DAG) {
4303 MVT VT = SVOp->getSimpleValueType(0);
4306 if (VT != MVT::v8i32 && VT != MVT::v8f32)
4309 ArrayRef<int> Mask = SVOp->getMask();
4311 // These are the special masks that may be optimized.
4312 static const int MaskToOptimizeEven[] = {0, 8, 2, 10, 4, 12, 6, 14};
4313 static const int MaskToOptimizeOdd[] = {1, 9, 3, 11, 5, 13, 7, 15};
4314 bool MatchEvenMask = true;
4315 bool MatchOddMask = true;
4316 for (int i=0; i<8; ++i) {
4317 if (!isUndefOrEqual(Mask[i], MaskToOptimizeEven[i]))
4318 MatchEvenMask = false;
4319 if (!isUndefOrEqual(Mask[i], MaskToOptimizeOdd[i]))
4320 MatchOddMask = false;
4323 if (!MatchEvenMask && !MatchOddMask)
4326 SDValue UndefNode = DAG.getNode(ISD::UNDEF, dl, VT);
4328 SDValue Op0 = SVOp->getOperand(0);
4329 SDValue Op1 = SVOp->getOperand(1);
4331 if (MatchEvenMask) {
4332 // Shift the second operand right to 32 bits.
4333 static const int ShiftRightMask[] = {-1, 0, -1, 2, -1, 4, -1, 6 };
4334 Op1 = DAG.getVectorShuffle(VT, dl, Op1, UndefNode, ShiftRightMask);
4336 // Shift the first operand left to 32 bits.
4337 static const int ShiftLeftMask[] = {1, -1, 3, -1, 5, -1, 7, -1 };
4338 Op0 = DAG.getVectorShuffle(VT, dl, Op0, UndefNode, ShiftLeftMask);
4340 static const int BlendMask[] = {0, 9, 2, 11, 4, 13, 6, 15};
4341 return DAG.getVectorShuffle(VT, dl, Op0, Op1, BlendMask);
4344 /// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand
4345 /// specifies a shuffle of elements that is suitable for input to UNPCKL.
4346 static bool isUNPCKLMask(ArrayRef<int> Mask, MVT VT,
4347 bool HasInt256, bool V2IsSplat = false) {
4349 assert(VT.getSizeInBits() >= 128 &&
4350 "Unsupported vector type for unpckl");
4352 unsigned NumElts = VT.getVectorNumElements();
4353 if (VT.is256BitVector() && NumElts != 4 && NumElts != 8 &&
4354 (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4357 assert((!VT.is512BitVector() || VT.getScalarType().getSizeInBits() >= 32) &&
4358 "Unsupported vector type for unpckh");
4360 // AVX defines UNPCK* to operate independently on 128-bit lanes.
4361 unsigned NumLanes = VT.getSizeInBits()/128;
4362 unsigned NumLaneElts = NumElts/NumLanes;
4364 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4365 for (unsigned i = 0, j = l; i != NumLaneElts; i += 2, ++j) {
4366 int BitI = Mask[l+i];
4367 int BitI1 = Mask[l+i+1];
4368 if (!isUndefOrEqual(BitI, j))
4371 if (!isUndefOrEqual(BitI1, NumElts))
4374 if (!isUndefOrEqual(BitI1, j + NumElts))
4383 /// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand
4384 /// specifies a shuffle of elements that is suitable for input to UNPCKH.
4385 static bool isUNPCKHMask(ArrayRef<int> Mask, MVT VT,
4386 bool HasInt256, bool V2IsSplat = false) {
4387 assert(VT.getSizeInBits() >= 128 &&
4388 "Unsupported vector type for unpckh");
4390 unsigned NumElts = VT.getVectorNumElements();
4391 if (VT.is256BitVector() && NumElts != 4 && NumElts != 8 &&
4392 (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4395 assert((!VT.is512BitVector() || VT.getScalarType().getSizeInBits() >= 32) &&
4396 "Unsupported vector type for unpckh");
4398 // AVX defines UNPCK* to operate independently on 128-bit lanes.
4399 unsigned NumLanes = VT.getSizeInBits()/128;
4400 unsigned NumLaneElts = NumElts/NumLanes;
4402 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4403 for (unsigned i = 0, j = l+NumLaneElts/2; i != NumLaneElts; i += 2, ++j) {
4404 int BitI = Mask[l+i];
4405 int BitI1 = Mask[l+i+1];
4406 if (!isUndefOrEqual(BitI, j))
4409 if (isUndefOrEqual(BitI1, NumElts))
4412 if (!isUndefOrEqual(BitI1, j+NumElts))
4420 /// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form
4421 /// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef, <0, 4, 1, 5>.
4423 static bool isUNPCKL_v_undef_Mask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
4424 unsigned NumElts = VT.getVectorNumElements();
4425 bool Is256BitVec = VT.is256BitVector();
4427 if (VT.is512BitVector())
4429 assert((VT.is128BitVector() || VT.is256BitVector()) &&
4430 "Unsupported vector type for unpckh");
4432 if (Is256BitVec && NumElts != 4 && NumElts != 8 &&
4433 (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4436 // For 256-bit i64/f64, use MOVDDUPY instead, so reject the matching pattern
4437 // FIXME: Need a better way to get rid of this, there's no latency difference
4438 // between UNPCKLPD and MOVDDUP; the latter should always be checked first and
4439 // the former later. We should also remove the "_undef" special mask.
4440 if (NumElts == 4 && Is256BitVec)
4443 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate
4444 // independently on 128-bit lanes.
4445 unsigned NumLanes = VT.getSizeInBits()/128;
4446 unsigned NumLaneElts = NumElts/NumLanes;
4448 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4449 for (unsigned i = 0, j = l; i != NumLaneElts; i += 2, ++j) {
4450 int BitI = Mask[l+i];
4451 int BitI1 = Mask[l+i+1];
4453 if (!isUndefOrEqual(BitI, j))
4455 if (!isUndefOrEqual(BitI1, j))
4463 /// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form
4464 /// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef, <2, 6, 3, 7>.
4466 static bool isUNPCKH_v_undef_Mask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
4467 unsigned NumElts = VT.getVectorNumElements();
4469 if (VT.is512BitVector())
4472 assert((VT.is128BitVector() || VT.is256BitVector()) &&
4473 "Unsupported vector type for unpckh");
4475 if (VT.is256BitVector() && NumElts != 4 && NumElts != 8 &&
4476 (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4479 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate
4480 // independently on 128-bit lanes.
4481 unsigned NumLanes = VT.getSizeInBits()/128;
4482 unsigned NumLaneElts = NumElts/NumLanes;
4484 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4485 for (unsigned i = 0, j = l+NumLaneElts/2; i != NumLaneElts; i += 2, ++j) {
4486 int BitI = Mask[l+i];
4487 int BitI1 = Mask[l+i+1];
4488 if (!isUndefOrEqual(BitI, j))
4490 if (!isUndefOrEqual(BitI1, j))
4497 // Match for INSERTI64x4/INSERTF64x4 instructions: (src0[0], src1[0]) or
4498 // (src1[0], src0[1]), i.e. manipulation of 256-bit sub-vectors.
4499 static bool isINSERT64x4Mask(ArrayRef<int> Mask, MVT VT, unsigned int *Imm) {
4500 if (!VT.is512BitVector())
4503 unsigned NumElts = VT.getVectorNumElements();
4504 unsigned HalfSize = NumElts/2;
4505 if (isSequentialOrUndefInRange(Mask, 0, HalfSize, 0)) {
4506 if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, NumElts)) {
4511 if (isSequentialOrUndefInRange(Mask, 0, HalfSize, NumElts)) {
4512 if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, HalfSize)) {
4520 /// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand
4521 /// specifies a shuffle of elements that is suitable for input to MOVSS,
4522 /// MOVSD, and MOVD, i.e. setting the lowest element.
4523 static bool isMOVLMask(ArrayRef<int> Mask, EVT VT) {
4524 if (VT.getVectorElementType().getSizeInBits() < 32)
4526 if (!VT.is128BitVector())
4529 unsigned NumElts = VT.getVectorNumElements();
4531 if (!isUndefOrEqual(Mask[0], NumElts))
4534 for (unsigned i = 1; i != NumElts; ++i)
4535 if (!isUndefOrEqual(Mask[i], i))
4541 /// isVPERM2X128Mask - Match 256-bit shuffles where the elements are considered
4542 /// as permutations between 128-bit chunks or halves. As an example, in the shuffle
4544 ///   vector_shuffle <4, 5, 6, 7, 12, 13, 14, 15>
4545 /// the first half comes from the second half of V1 and the second half from
4546 /// the second half of V2.
4547 static bool isVPERM2X128Mask(ArrayRef<int> Mask, MVT VT, bool HasFp256) {
4548 if (!HasFp256 || !VT.is256BitVector())
4551 // The shuffle result is divided into half A and half B. In total the two
4552 // sources have 4 halves, namely: C, D, E, F. The final values of A and
4553 // B must come from C, D, E or F.
4554 unsigned HalfSize = VT.getVectorNumElements()/2;
4555 bool MatchA = false, MatchB = false;
4557 // Check if A comes from one of C, D, E, F.
4558 for (unsigned Half = 0; Half != 4; ++Half) {
4559 if (isSequentialOrUndefInRange(Mask, 0, HalfSize, Half*HalfSize)) {
4565 // Check if B comes from one of C, D, E, F.
4566 for (unsigned Half = 0; Half != 4; ++Half) {
4567 if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, Half*HalfSize)) {
4573 return MatchA && MatchB;
4576 /// getShuffleVPERM2X128Immediate - Return the appropriate immediate to shuffle
4577 /// the specified VECTOR_MASK mask with VPERM2F128/VPERM2I128 instructions.
4578 static unsigned getShuffleVPERM2X128Immediate(ShuffleVectorSDNode *SVOp) {
4579 MVT VT = SVOp->getSimpleValueType(0);
4581 unsigned HalfSize = VT.getVectorNumElements()/2;
4583 unsigned FstHalf = 0, SndHalf = 0;
4584 for (unsigned i = 0; i < HalfSize; ++i) {
4585 if (SVOp->getMaskElt(i) > 0) {
4586 FstHalf = SVOp->getMaskElt(i)/HalfSize;
4590 for (unsigned i = HalfSize; i < HalfSize*2; ++i) {
4591 if (SVOp->getMaskElt(i) > 0) {
4592 SndHalf = SVOp->getMaskElt(i)/HalfSize;
4597 return (FstHalf | (SndHalf << 4));
4600 // Symmetric in-lane mask. Each lane has 4 elements (for imm8)
4601 static bool isPermImmMask(ArrayRef<int> Mask, MVT VT, unsigned& Imm8) {
4602 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4606 unsigned NumElts = VT.getVectorNumElements();
4608 if (VT.is128BitVector() || (VT.is256BitVector() && EltSize == 64)) {
4609 for (unsigned i = 0; i != NumElts; ++i) {
4612 Imm8 |= Mask[i] << (i*2);
4617 unsigned LaneSize = 4;
4618 SmallVector<int, 4> MaskVal(LaneSize, -1);
4620 for (unsigned l = 0; l != NumElts; l += LaneSize) {
4621 for (unsigned i = 0; i != LaneSize; ++i) {
4622 if (!isUndefOrInRange(Mask[i+l], l, l+LaneSize))
4626 if (MaskVal[i] < 0) {
4627 MaskVal[i] = Mask[i+l] - l;
4628 Imm8 |= MaskVal[i] << (i*2);
4631 if (Mask[i+l] != (signed)(MaskVal[i]+l))
4638 /// isVPERMILPMask - Return true if the specified VECTOR_SHUFFLE operand
4639 /// specifies a shuffle of elements that is suitable for input to VPERMILPD*.
4640 /// Note that VPERMIL mask matching is different depending on whether the
4641 /// underlying type is 32 or 64 bits. In VPERMILPS the high half of the mask should
4642 /// point to the same elements as the low half, but within the high half of the source.
4643 /// In VPERMILPD the two lanes could be shuffled independently of each other
4644 /// with the same restriction that lanes can't be crossed. Also handles PSHUFDY.
4645 static bool isVPERMILPMask(ArrayRef<int> Mask, MVT VT) {
4646 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4647 if (VT.getSizeInBits() < 256 || EltSize < 32)
4649 bool symmetricMaskRequired = (EltSize == 32);
4650 unsigned NumElts = VT.getVectorNumElements();
4652 unsigned NumLanes = VT.getSizeInBits()/128;
4653 unsigned LaneSize = NumElts/NumLanes;
4654 // 2 or 4 elements in one lane
4656 SmallVector<int, 4> ExpectedMaskVal(LaneSize, -1);
4657 for (unsigned l = 0; l != NumElts; l += LaneSize) {
4658 for (unsigned i = 0; i != LaneSize; ++i) {
4659 if (!isUndefOrInRange(Mask[i+l], l, l+LaneSize))
4661 if (symmetricMaskRequired) {
4662 if (ExpectedMaskVal[i] < 0 && Mask[i+l] >= 0) {
4663 ExpectedMaskVal[i] = Mask[i+l] - l;
4666 if (!isUndefOrEqual(Mask[i+l], ExpectedMaskVal[i]+l))
4674 /// isCommutedMOVLMask - Returns true if the shuffle mask is the reverse of what
4675 /// x86 movss wants: the lowest element must be the lowest element of vector 2,
4676 /// and the other elements must come from vector 1 in order.
4677 static bool isCommutedMOVLMask(ArrayRef<int> Mask, MVT VT,
4678 bool V2IsSplat = false, bool V2IsUndef = false) {
4679 if (!VT.is128BitVector())
4682 unsigned NumOps = VT.getVectorNumElements();
4683 if (NumOps != 2 && NumOps != 4 && NumOps != 8 && NumOps != 16)
4686 if (!isUndefOrEqual(Mask[0], 0))
4689 for (unsigned i = 1; i != NumOps; ++i)
4690 if (!(isUndefOrEqual(Mask[i], i+NumOps) ||
4691 (V2IsUndef && isUndefOrInRange(Mask[i], NumOps, NumOps*2)) ||
4692 (V2IsSplat && isUndefOrEqual(Mask[i], NumOps))))
4698 /// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand
4699 /// specifies a shuffle of elements that is suitable for input to MOVSHDUP.
4700 /// Masks to match: <1, 1, 3, 3> or <1, 1, 3, 3, 5, 5, 7, 7>
4701 static bool isMOVSHDUPMask(ArrayRef<int> Mask, MVT VT,
4702 const X86Subtarget *Subtarget) {
4703 if (!Subtarget->hasSSE3())
4706 unsigned NumElems = VT.getVectorNumElements();
4708 if ((VT.is128BitVector() && NumElems != 4) ||
4709 (VT.is256BitVector() && NumElems != 8) ||
4710 (VT.is512BitVector() && NumElems != 16))
4713 // "i+1" is the value the indexed mask element must have
4714 for (unsigned i = 0; i != NumElems; i += 2)
4715 if (!isUndefOrEqual(Mask[i], i+1) ||
4716 !isUndefOrEqual(Mask[i+1], i+1))
4722 /// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand
4723 /// specifies a shuffle of elements that is suitable for input to MOVSLDUP.
4724 /// Masks to match: <0, 0, 2, 2> or <0, 0, 2, 2, 4, 4, 6, 6>
4725 static bool isMOVSLDUPMask(ArrayRef<int> Mask, MVT VT,
4726 const X86Subtarget *Subtarget) {
4727 if (!Subtarget->hasSSE3())
4730 unsigned NumElems = VT.getVectorNumElements();
4732 if ((VT.is128BitVector() && NumElems != 4) ||
4733 (VT.is256BitVector() && NumElems != 8) ||
4734 (VT.is512BitVector() && NumElems != 16))
4737 // "i" is the value the indexed mask element must have
4738 for (unsigned i = 0; i != NumElems; i += 2)
4739 if (!isUndefOrEqual(Mask[i], i) ||
4740 !isUndefOrEqual(Mask[i+1], i))
4746 /// isMOVDDUPYMask - Return true if the specified VECTOR_SHUFFLE operand
4747 /// specifies a shuffle of elements that is suitable for input to 256-bit
4748 /// version of MOVDDUP.
4749 static bool isMOVDDUPYMask(ArrayRef<int> Mask, MVT VT, bool HasFp256) {
4750 if (!HasFp256 || !VT.is256BitVector())
4753 unsigned NumElts = VT.getVectorNumElements();
4757 for (unsigned i = 0; i != NumElts/2; ++i)
4758 if (!isUndefOrEqual(Mask[i], 0))
4760 for (unsigned i = NumElts/2; i != NumElts; ++i)
4761 if (!isUndefOrEqual(Mask[i], NumElts/2))
4766 /// isMOVDDUPMask - Return true if the specified VECTOR_SHUFFLE operand
4767 /// specifies a shuffle of elements that is suitable for input to 128-bit
4768 /// version of MOVDDUP.
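/// For example (illustrative), for v2f64 the mask <0, 0> matches: both halves
/// duplicate element 0 of the source.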
4769 static bool isMOVDDUPMask(ArrayRef<int> Mask, MVT VT) {
4770 if (!VT.is128BitVector())
4773 unsigned e = VT.getVectorNumElements() / 2;
4774 for (unsigned i = 0; i != e; ++i)
4775 if (!isUndefOrEqual(Mask[i], i))
4777 for (unsigned i = 0; i != e; ++i)
4778 if (!isUndefOrEqual(Mask[e+i], i))
4783 /// isVEXTRACTIndex - Return true if the specified
4784 /// EXTRACT_SUBVECTOR operand specifies a vector extract that is
4785 /// suitable for instruction that extract 128 or 256 bit vectors
4786 static bool isVEXTRACTIndex(SDNode *N, unsigned vecWidth) {
4787 assert((vecWidth == 128 || vecWidth == 256) && "Unexpected vector width");
4788 if (!isa<ConstantSDNode>(N->getOperand(1).getNode()))
4791 // The index should be aligned on a vecWidth-bit boundary.
4793 cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
4795 MVT VT = N->getSimpleValueType(0);
4796 unsigned ElSize = VT.getVectorElementType().getSizeInBits();
4797 bool Result = (Index * ElSize) % vecWidth == 0;
4802 /// isVINSERTIndex - Return true if the specified INSERT_SUBVECTOR
4803 /// operand specifies a subvector insert that is suitable for input to
4804 /// insertion of 128 or 256-bit subvectors
4805 static bool isVINSERTIndex(SDNode *N, unsigned vecWidth) {
4806 assert((vecWidth == 128 || vecWidth == 256) && "Unexpected vector width");
4807 if (!isa<ConstantSDNode>(N->getOperand(2).getNode()))
4809 // The index should be aligned on a vecWidth-bit boundary.
4811 cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
4813 MVT VT = N->getSimpleValueType(0);
4814 unsigned ElSize = VT.getVectorElementType().getSizeInBits();
4815 bool Result = (Index * ElSize) % vecWidth == 0;
4820 bool X86::isVINSERT128Index(SDNode *N) {
4821 return isVINSERTIndex(N, 128);
4824 bool X86::isVINSERT256Index(SDNode *N) {
4825 return isVINSERTIndex(N, 256);
4828 bool X86::isVEXTRACT128Index(SDNode *N) {
4829 return isVEXTRACTIndex(N, 128);
4832 bool X86::isVEXTRACT256Index(SDNode *N) {
4833 return isVEXTRACTIndex(N, 256);
4836 /// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle
4837 /// the specified VECTOR_SHUFFLE mask with PSHUF* and SHUFP* instructions.
4838 /// Handles 128-bit and 256-bit.
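/// For example (illustrative), a v4i32 reversal mask <3, 2, 1, 0> encodes as
/// 0b00011011 == 0x1B, two bits per element starting from the low bits.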
4839 static unsigned getShuffleSHUFImmediate(ShuffleVectorSDNode *N) {
4840 MVT VT = N->getSimpleValueType(0);
4842 assert((VT.getSizeInBits() >= 128) &&
4843 "Unsupported vector type for PSHUF/SHUFP");
4845 // Handle 128 and 256-bit vector lengths. AVX defines PSHUF/SHUFP to operate
4846 // independently on 128-bit lanes.
4847 unsigned NumElts = VT.getVectorNumElements();
4848 unsigned NumLanes = VT.getSizeInBits()/128;
4849 unsigned NumLaneElts = NumElts/NumLanes;
4851 assert((NumLaneElts == 2 || NumLaneElts == 4 || NumLaneElts == 8) &&
4852 "Only supports 2, 4 or 8 elements per lane");
4854 unsigned Shift = (NumLaneElts >= 4) ? 1 : 0;
4856 for (unsigned i = 0; i != NumElts; ++i) {
4857 int Elt = N->getMaskElt(i);
4858 if (Elt < 0) continue;
4859 Elt &= NumLaneElts - 1;
4860 unsigned ShAmt = (i << Shift) % 8;
4861 Mask |= Elt << ShAmt;
4867 /// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle
4868 /// the specified VECTOR_SHUFFLE mask with the PSHUFHW instruction.
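/// For example (illustrative), a v8i16 mask of <0, 1, 2, 3, 7, 6, 5, 4>, which
/// reverses the high four halfwords, yields the immediate 0x1B.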
4869 static unsigned getShufflePSHUFHWImmediate(ShuffleVectorSDNode *N) {
4870 MVT VT = N->getSimpleValueType(0);
4872 assert((VT == MVT::v8i16 || VT == MVT::v16i16) &&
4873 "Unsupported vector type for PSHUFHW");
4875 unsigned NumElts = VT.getVectorNumElements();
4878 for (unsigned l = 0; l != NumElts; l += 8) {
4879 // 8 nodes per lane, but we only care about the last 4.
4880 for (unsigned i = 0; i < 4; ++i) {
4881 int Elt = N->getMaskElt(l+i+4);
4882 if (Elt < 0) continue;
4883 Elt &= 0x3; // only 2-bits.
4884 Mask |= Elt << (i * 2);
4891 /// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle
4892 /// the specified VECTOR_SHUFFLE mask with the PSHUFLW instruction.
4893 static unsigned getShufflePSHUFLWImmediate(ShuffleVectorSDNode *N) {
4894 MVT VT = N->getSimpleValueType(0);
4896 assert((VT == MVT::v8i16 || VT == MVT::v16i16) &&
4897 "Unsupported vector type for PSHUFHW");
4899 unsigned NumElts = VT.getVectorNumElements();
4902 for (unsigned l = 0; l != NumElts; l += 8) {
4903 // 8 nodes per lane, but we only care about the first 4.
4904 for (unsigned i = 0; i < 4; ++i) {
4905 int Elt = N->getMaskElt(l+i);
4906 if (Elt < 0) continue;
4907 Elt &= 0x3; // only 2-bits
4908 Mask |= Elt << (i * 2);
4915 /// \brief Return the appropriate immediate to shuffle the specified
4916 /// VECTOR_SHUFFLE mask with the PALIGNR (if InterLane is false) or with
4917 /// VALIGN (if InterLane is true) instructions.
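/// For example (illustrative), a v8i16 rotate mask <3, 4, 5, 6, 7, 8, 9, 10>
/// starts at element 3, so the PALIGNR byte immediate is 3 * 2 == 6.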
4918 static unsigned getShuffleAlignrImmediate(ShuffleVectorSDNode *SVOp,
4920 MVT VT = SVOp->getSimpleValueType(0);
4921 unsigned EltSize = InterLane ? 1 :
4922 VT.getVectorElementType().getSizeInBits() >> 3;
4924 unsigned NumElts = VT.getVectorNumElements();
4925 unsigned NumLanes = VT.is512BitVector() ? 1 : VT.getSizeInBits()/128;
4926 unsigned NumLaneElts = NumElts/NumLanes;
4930 for (i = 0; i != NumElts; ++i) {
4931 Val = SVOp->getMaskElt(i);
4935 if (Val >= (int)NumElts)
4936 Val -= NumElts - NumLaneElts;
4938 assert(Val - i > 0 && "PALIGNR imm should be positive");
4939 return (Val - i) * EltSize;
4942 /// \brief Return the appropriate immediate to shuffle the specified
4943 /// VECTOR_SHUFFLE mask with the PALIGNR instruction.
4944 static unsigned getShufflePALIGNRImmediate(ShuffleVectorSDNode *SVOp) {
4945 return getShuffleAlignrImmediate(SVOp, false);
4948 /// \brief Return the appropriate immediate to shuffle the specified
4949 /// VECTOR_SHUFFLE mask with the VALIGN instruction.
4950 static unsigned getShuffleVALIGNImmediate(ShuffleVectorSDNode *SVOp) {
4951 return getShuffleAlignrImmediate(SVOp, true);
4955 static unsigned getExtractVEXTRACTImmediate(SDNode *N, unsigned vecWidth) {
4956 assert((vecWidth == 128 || vecWidth == 256) && "Unsupported vector width");
4957 if (!isa<ConstantSDNode>(N->getOperand(1).getNode()))
4958 llvm_unreachable("Illegal extract subvector for VEXTRACT");
4961 cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
4963 MVT VecVT = N->getOperand(0).getSimpleValueType();
4964 MVT ElVT = VecVT.getVectorElementType();
4966 unsigned NumElemsPerChunk = vecWidth / ElVT.getSizeInBits();
4967 return Index / NumElemsPerChunk;
4970 static unsigned getInsertVINSERTImmediate(SDNode *N, unsigned vecWidth) {
4971 assert((vecWidth == 128 || vecWidth == 256) && "Unsupported vector width");
4972 if (!isa<ConstantSDNode>(N->getOperand(2).getNode()))
4973 llvm_unreachable("Illegal insert subvector for VINSERT");
4976 cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
4978 MVT VecVT = N->getSimpleValueType(0);
4979 MVT ElVT = VecVT.getVectorElementType();
4981 unsigned NumElemsPerChunk = vecWidth / ElVT.getSizeInBits();
4982 return Index / NumElemsPerChunk;
4985 /// getExtractVEXTRACT128Immediate - Return the appropriate immediate
4986 /// to extract the specified EXTRACT_SUBVECTOR index with VEXTRACTF128
4987 /// and VEXTRACTI128 instructions.
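/// For example (illustrative), extracting the subvector at element index 4 from
/// a v8f32 as a 128-bit chunk gives 4 / (128 / 32) == 1.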
4988 unsigned X86::getExtractVEXTRACT128Immediate(SDNode *N) {
4989 return getExtractVEXTRACTImmediate(N, 128);
4992 /// getExtractVEXTRACT256Immediate - Return the appropriate immediate
4993 /// to extract the specified EXTRACT_SUBVECTOR index with VEXTRACTF64x4
4994 /// and VEXTRACTI64x4 instructions.
4995 unsigned X86::getExtractVEXTRACT256Immediate(SDNode *N) {
4996 return getExtractVEXTRACTImmediate(N, 256);
4999 /// getInsertVINSERT128Immediate - Return the appropriate immediate
5000 /// to insert at the specified INSERT_SUBVECTOR index with VINSERTF128
5001 /// and VINSERTI128 instructions.
5002 unsigned X86::getInsertVINSERT128Immediate(SDNode *N) {
5003 return getInsertVINSERTImmediate(N, 128);
5006 /// getInsertVINSERT256Immediate - Return the appropriate immediate
5007 /// to insert at the specified INSERT_SUBVECTOR index with VINSERTF64x4
5008 /// and VINSERTI64x4 instructions.
5009 unsigned X86::getInsertVINSERT256Immediate(SDNode *N) {
5010 return getInsertVINSERTImmediate(N, 256);
5013 /// isZero - Returns true if Elt is a constant integer zero
5014 static bool isZero(SDValue V) {
5015 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V);
5016 return C && C->isNullValue();
5019 /// isZeroNode - Returns true if Elt is a constant zero or a floating point constant +0.0.
5021 bool X86::isZeroNode(SDValue Elt) {
5024 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Elt))
5025 return CFP->getValueAPF().isPosZero();
5029 /// ShouldXformToMOVHLPS - Return true if the node should be transformed to
5030 /// match movhlps. The lower half elements should come from the upper half of
5031 /// V1 (and in order), and the upper half elements should come from the upper
5032 /// half of V2 (and in order).
5033 static bool ShouldXformToMOVHLPS(ArrayRef<int> Mask, MVT VT) {
5034 if (!VT.is128BitVector())
5036 if (VT.getVectorNumElements() != 4)
5038 for (unsigned i = 0, e = 2; i != e; ++i)
5039 if (!isUndefOrEqual(Mask[i], i+2))
5041 for (unsigned i = 2; i != 4; ++i)
5042 if (!isUndefOrEqual(Mask[i], i+4))
5047 /// isScalarLoadToVector - Returns true if the node is a scalar load that
5048 /// is promoted to a vector. It also returns the LoadSDNode by reference if requested.
5050 static bool isScalarLoadToVector(SDNode *N, LoadSDNode **LD = nullptr) {
5051 if (N->getOpcode() != ISD::SCALAR_TO_VECTOR)
5053 N = N->getOperand(0).getNode();
5054 if (!ISD::isNON_EXTLoad(N))
5057 *LD = cast<LoadSDNode>(N);
5061 // Test whether the given value is a vector value which will be legalized
5063 static bool WillBeConstantPoolLoad(SDNode *N) {
5064 if (N->getOpcode() != ISD::BUILD_VECTOR)
5067 // Check for any non-constant elements.
5068 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
5069 switch (N->getOperand(i).getNode()->getOpcode()) {
5071 case ISD::ConstantFP:
5078 // Vectors of all-zeros and all-ones are materialized with special
5079 // instructions rather than being loaded.
5080 return !ISD::isBuildVectorAllZeros(N) &&
5081 !ISD::isBuildVectorAllOnes(N);
5084 /// ShouldXformToMOVLP{S|D} - Return true if the node should be transformed to
5085 /// match movlp{s|d}. The lower half elements should come from the lower half of
5086 /// V1 (and in order), and the upper half elements should come from the upper
5087 /// half of V2 (and in order). And since V1 will become the source of the
5088 /// MOVLP, it must be either a vector load or a scalar load to vector.
5089 static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2,
5090 ArrayRef<int> Mask, MVT VT) {
5091 if (!VT.is128BitVector())
5094 if (!ISD::isNON_EXTLoad(V1) && !isScalarLoadToVector(V1))
5096 // If V2 is a vector load, don't do this transformation. We will try to use
5097 // a load-folding shufps op instead.
5098 if (ISD::isNON_EXTLoad(V2) || WillBeConstantPoolLoad(V2))
5101 unsigned NumElems = VT.getVectorNumElements();
5103 if (NumElems != 2 && NumElems != 4)
5105 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
5106 if (!isUndefOrEqual(Mask[i], i))
5108 for (unsigned i = NumElems/2, e = NumElems; i != e; ++i)
5109 if (!isUndefOrEqual(Mask[i], i+NumElems))
5114 /// isZeroShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved
5115 /// to a zero vector.
5116 /// FIXME: move to dag combiner / method on ShuffleVectorSDNode
5117 static bool isZeroShuffle(ShuffleVectorSDNode *N) {
5118 SDValue V1 = N->getOperand(0);
5119 SDValue V2 = N->getOperand(1);
5120 unsigned NumElems = N->getValueType(0).getVectorNumElements();
5121 for (unsigned i = 0; i != NumElems; ++i) {
5122 int Idx = N->getMaskElt(i);
5123 if (Idx >= (int)NumElems) {
5124 unsigned Opc = V2.getOpcode();
5125 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V2.getNode()))
5127 if (Opc != ISD::BUILD_VECTOR ||
5128 !X86::isZeroNode(V2.getOperand(Idx-NumElems)))
5130 } else if (Idx >= 0) {
5131 unsigned Opc = V1.getOpcode();
5132 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V1.getNode()))
5134 if (Opc != ISD::BUILD_VECTOR ||
5135 !X86::isZeroNode(V1.getOperand(Idx)))
5142 /// getZeroVector - Returns a vector of specified type with all zero elements.
5144 static SDValue getZeroVector(EVT VT, const X86Subtarget *Subtarget,
5145 SelectionDAG &DAG, SDLoc dl) {
5146 assert(VT.isVector() && "Expected a vector type");
5148 // Always build SSE zero vectors as <4 x i32> bitcasted
5149 // to their dest type. This ensures they get CSE'd.
5151 if (VT.is128BitVector()) { // SSE
5152 if (Subtarget->hasSSE2()) { // SSE2
5153 SDValue Cst = DAG.getConstant(0, MVT::i32);
5154 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
5156 SDValue Cst = DAG.getConstantFP(+0.0, MVT::f32);
5157 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f32, Cst, Cst, Cst, Cst);
5159 } else if (VT.is256BitVector()) { // AVX
5160 if (Subtarget->hasInt256()) { // AVX2
5161 SDValue Cst = DAG.getConstant(0, MVT::i32);
5162 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5163 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops);
5165 // 256-bit logic and arithmetic instructions in AVX are all
5166 // floating-point; there is no support for integer ops. Emit fp zeroed vectors.
5167 SDValue Cst = DAG.getConstantFP(+0.0, MVT::f32);
5168 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5169 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8f32, Ops);
5171 } else if (VT.is512BitVector()) { // AVX-512
5172 SDValue Cst = DAG.getConstant(0, MVT::i32);
5173 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst,
5174 Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5175 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i32, Ops);
5176 } else if (VT.getScalarType() == MVT::i1) {
5177 assert(VT.getVectorNumElements() <= 16 && "Unexpected vector type");
5178 SDValue Cst = DAG.getConstant(0, MVT::i1);
5179 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
5180 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
5182 llvm_unreachable("Unexpected vector type");
5184 return DAG.getNode(ISD::BITCAST, dl, VT, Vec);
5187 /// getOnesVector - Returns a vector of specified type with all bits set.
5188 /// Always build ones vectors as <4 x i32> or <8 x i32>. For 256-bit types with
5189 /// no AVX2 support, use two <4 x i32> inserted in an <8 x i32> appropriately.
5190 /// Then bitcast to their original type, ensuring they get CSE'd.
5191 static SDValue getOnesVector(MVT VT, bool HasInt256, SelectionDAG &DAG,
5193 assert(VT.isVector() && "Expected a vector type");
5195 SDValue Cst = DAG.getConstant(~0U, MVT::i32);
5197 if (VT.is256BitVector()) {
5198 if (HasInt256) { // AVX2
5199 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5200 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops);
5202 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
5203 Vec = Concat128BitVectors(Vec, Vec, MVT::v8i32, 8, DAG, dl);
5205 } else if (VT.is128BitVector()) {
5206 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
5208 llvm_unreachable("Unexpected vector type");
5210 return DAG.getNode(ISD::BITCAST, dl, VT, Vec);
5213 /// NormalizeMask - V2 is a splat; modify the mask (if needed) so all elements
5214 /// that point to V2 point to its first element.
5215 static void NormalizeMask(SmallVectorImpl<int> &Mask, unsigned NumElems) {
5216 for (unsigned i = 0; i != NumElems; ++i) {
5217 if (Mask[i] > (int)NumElems) {
5223 /// getMOVLMask - Returns a vector_shuffle mask for an movs{s|d}, movd
5224 /// operation of specified width.
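/// For example (illustrative), with NumElems == 2 this builds the mask <2, 1>,
/// i.e. the movsd pattern.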
5225 static SDValue getMOVL(SelectionDAG &DAG, SDLoc dl, EVT VT, SDValue V1,
5227 unsigned NumElems = VT.getVectorNumElements();
5228 SmallVector<int, 8> Mask;
5229 Mask.push_back(NumElems);
5230 for (unsigned i = 1; i != NumElems; ++i)
5232 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
5235 /// getUnpackl - Returns a vector_shuffle node for an unpackl operation.
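/// For example (illustrative), for v4i32 this builds the interleaving mask
/// <0, 4, 1, 5>.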
5236 static SDValue getUnpackl(SelectionDAG &DAG, SDLoc dl, MVT VT, SDValue V1,
5238 unsigned NumElems = VT.getVectorNumElements();
5239 SmallVector<int, 8> Mask;
5240 for (unsigned i = 0, e = NumElems/2; i != e; ++i) {
5242 Mask.push_back(i + NumElems);
5244 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
5247 /// getUnpackh - Returns a vector_shuffle node for an unpackh operation.
5248 static SDValue getUnpackh(SelectionDAG &DAG, SDLoc dl, MVT VT, SDValue V1,
5250 unsigned NumElems = VT.getVectorNumElements();
5251 SmallVector<int, 8> Mask;
5252 for (unsigned i = 0, Half = NumElems/2; i != Half; ++i) {
5253 Mask.push_back(i + Half);
5254 Mask.push_back(i + NumElems + Half);
5256 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
5259 // PromoteSplati8i16 - i16 and i8 vector types can't be used directly by
5260 // a generic shuffle instruction because the target has no such instructions.
5261 // Generate shuffles which repeat i16 and i8 several times until they can be
5262 // represented by v4f32 and then be manipulated by target supported shuffles.
5263 static SDValue PromoteSplati8i16(SDValue V, SelectionDAG &DAG, int &EltNo) {
5264 MVT VT = V.getSimpleValueType();
5265 int NumElems = VT.getVectorNumElements();
5268 while (NumElems > 4) {
5269 if (EltNo < NumElems/2) {
5270 V = getUnpackl(DAG, dl, VT, V, V);
5272 V = getUnpackh(DAG, dl, VT, V, V);
5273 EltNo -= NumElems/2;
5280 /// getLegalSplat - Generate a legal splat with supported x86 shuffles
5281 static SDValue getLegalSplat(SelectionDAG &DAG, SDValue V, int EltNo) {
5282 MVT VT = V.getSimpleValueType();
5285 if (VT.is128BitVector()) {
5286 V = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V);
5287 int SplatMask[4] = { EltNo, EltNo, EltNo, EltNo };
5288 V = DAG.getVectorShuffle(MVT::v4f32, dl, V, DAG.getUNDEF(MVT::v4f32),
5290 } else if (VT.is256BitVector()) {
5291 // To use VPERMILPS to splat scalars, the second half of indices must
5292 // refer to the higher part, which is a duplication of the lower one,
5293 // because VPERMILPS can only handle in-lane permutations.
5294 int SplatMask[8] = { EltNo, EltNo, EltNo, EltNo,
5295 EltNo+4, EltNo+4, EltNo+4, EltNo+4 };
5297 V = DAG.getNode(ISD::BITCAST, dl, MVT::v8f32, V);
5298 V = DAG.getVectorShuffle(MVT::v8f32, dl, V, DAG.getUNDEF(MVT::v8f32),
5301 llvm_unreachable("Vector size not supported");
5303 return DAG.getNode(ISD::BITCAST, dl, VT, V);
5306 /// PromoteSplat - Splat is promoted to target supported vector shuffles.
5307 static SDValue PromoteSplat(ShuffleVectorSDNode *SV, SelectionDAG &DAG) {
5308 MVT SrcVT = SV->getSimpleValueType(0);
5309 SDValue V1 = SV->getOperand(0);
5312 int EltNo = SV->getSplatIndex();
5313 int NumElems = SrcVT.getVectorNumElements();
5314 bool Is256BitVec = SrcVT.is256BitVector();
5316 assert(((SrcVT.is128BitVector() && NumElems > 4) || Is256BitVec) &&
5317 "Unknown how to promote splat for type");
5319 // Extract the 128-bit part containing the splat element and update
5320 // the splat element index when it refers to the higher register.
5322 V1 = Extract128BitVector(V1, EltNo, DAG, dl);
5323 if (EltNo >= NumElems/2)
5324 EltNo -= NumElems/2;
5327 // i16 and i8 vector types can't be used directly by a generic shuffle
5328 // instruction because the target has no such instruction. Generate shuffles
5329 // which repeat i16 and i8 several times until they fit in i32, and then can
5330 // be manipulated by target supported shuffles.
5331 MVT EltVT = SrcVT.getVectorElementType();
5332 if (EltVT == MVT::i8 || EltVT == MVT::i16)
5333 V1 = PromoteSplati8i16(V1, DAG, EltNo);
5335 // Recreate the 256-bit vector and place the same 128-bit vector
5336 // into the low and high part. This is necessary because we want
5337 // to use VPERM* to shuffle the vectors
5339 V1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, SrcVT, V1, V1);
5342 return getLegalSplat(DAG, V1, EltNo);
5345 /// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified
5346 /// vector and a zero or undef vector. This produces a shuffle where the low
5347 /// element of V2 is swizzled into the zero/undef vector, landing at element
5348 /// Idx. This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3).
5349 static SDValue getShuffleVectorZeroOrUndef(SDValue V2, unsigned Idx,
5351 const X86Subtarget *Subtarget,
5352 SelectionDAG &DAG) {
5353 MVT VT = V2.getSimpleValueType();
5355 ? getZeroVector(VT, Subtarget, DAG, SDLoc(V2)) : DAG.getUNDEF(VT);
5356 unsigned NumElems = VT.getVectorNumElements();
5357 SmallVector<int, 16> MaskVec;
5358 for (unsigned i = 0; i != NumElems; ++i)
5359 // If this is the insertion idx, put the low elt of V2 here.
5360 MaskVec.push_back(i == Idx ? NumElems : i);
5361 return DAG.getVectorShuffle(VT, SDLoc(V2), V1, V2, &MaskVec[0]);
5364 /// getTargetShuffleMask - Calculates the shuffle mask corresponding to the
5365 /// target specific opcode. Returns true if the Mask could be calculated. Sets
5366 /// IsUnary to true if it only uses one source. Note that this will set IsUnary for
5367 /// shuffles which use a single input multiple times, and in those cases it will
5368 /// adjust the mask to only have indices within that single input.
5369 static bool getTargetShuffleMask(SDNode *N, MVT VT,
5370 SmallVectorImpl<int> &Mask, bool &IsUnary) {
5371 unsigned NumElems = VT.getVectorNumElements();
5375 bool IsFakeUnary = false;
5376 switch(N->getOpcode()) {
5377 case X86ISD::BLENDI:
5378 ImmN = N->getOperand(N->getNumOperands()-1);
5379 DecodeBLENDMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5382 ImmN = N->getOperand(N->getNumOperands()-1);
5383 DecodeSHUFPMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5384 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5386 case X86ISD::UNPCKH:
5387 DecodeUNPCKHMask(VT, Mask);
5388 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5390 case X86ISD::UNPCKL:
5391 DecodeUNPCKLMask(VT, Mask);
5392 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5394 case X86ISD::MOVHLPS:
5395 DecodeMOVHLPSMask(NumElems, Mask);
5396 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5398 case X86ISD::MOVLHPS:
5399 DecodeMOVLHPSMask(NumElems, Mask);
5400 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5402 case X86ISD::PALIGNR:
5403 ImmN = N->getOperand(N->getNumOperands()-1);
5404 DecodePALIGNRMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5406 case X86ISD::PSHUFD:
5407 case X86ISD::VPERMILPI:
5408 ImmN = N->getOperand(N->getNumOperands()-1);
5409 DecodePSHUFMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5412 case X86ISD::PSHUFHW:
5413 ImmN = N->getOperand(N->getNumOperands()-1);
5414 DecodePSHUFHWMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5417 case X86ISD::PSHUFLW:
5418 ImmN = N->getOperand(N->getNumOperands()-1);
5419 DecodePSHUFLWMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5422 case X86ISD::PSHUFB: {
5424 SDValue MaskNode = N->getOperand(1);
5425 while (MaskNode->getOpcode() == ISD::BITCAST)
5426 MaskNode = MaskNode->getOperand(0);
5428 if (MaskNode->getOpcode() == ISD::BUILD_VECTOR) {
5429 // If we have a build-vector, then things are easy.
5430 EVT VT = MaskNode.getValueType();
5431 assert(VT.isVector() &&
5432 "Can't produce a non-vector with a build_vector!");
5433 if (!VT.isInteger())
5436 int NumBytesPerElement = VT.getVectorElementType().getSizeInBits() / 8;
5438 SmallVector<uint64_t, 32> RawMask;
5439 for (int i = 0, e = MaskNode->getNumOperands(); i < e; ++i) {
5440 SDValue Op = MaskNode->getOperand(i);
5441 if (Op->getOpcode() == ISD::UNDEF) {
5442 RawMask.push_back((uint64_t)SM_SentinelUndef);
5445 auto *CN = dyn_cast<ConstantSDNode>(Op.getNode());
5448 APInt MaskElement = CN->getAPIntValue();
5450 // We now have to decode the element which could be any integer size and
5451 // extract each byte of it.
5452 for (int j = 0; j < NumBytesPerElement; ++j) {
5453 // Note that this is x86 and so always little endian: the low byte is
5454 // the first byte of the mask.
5455 RawMask.push_back(MaskElement.getLoBits(8).getZExtValue());
5456 MaskElement = MaskElement.lshr(8);
5459 DecodePSHUFBMask(RawMask, Mask);
5463 auto *MaskLoad = dyn_cast<LoadSDNode>(MaskNode);
5467 SDValue Ptr = MaskLoad->getBasePtr();
5468 if (Ptr->getOpcode() == X86ISD::Wrapper)
5469 Ptr = Ptr->getOperand(0);
5471 auto *MaskCP = dyn_cast<ConstantPoolSDNode>(Ptr);
5472 if (!MaskCP || MaskCP->isMachineConstantPoolEntry())
5475 if (auto *C = dyn_cast<Constant>(MaskCP->getConstVal())) {
5476 DecodePSHUFBMask(C, Mask);
5484 case X86ISD::VPERMI:
5485 ImmN = N->getOperand(N->getNumOperands()-1);
5486 DecodeVPERMMask(cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5491 DecodeScalarMoveMask(VT, /* IsLoad */ false, Mask);
5493 case X86ISD::VPERM2X128:
5494 ImmN = N->getOperand(N->getNumOperands()-1);
5495 DecodeVPERM2X128Mask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5496 if (Mask.empty()) return false;
5498 case X86ISD::MOVSLDUP:
5499 DecodeMOVSLDUPMask(VT, Mask);
5502 case X86ISD::MOVSHDUP:
5503 DecodeMOVSHDUPMask(VT, Mask);
5506 case X86ISD::MOVDDUP:
5507 DecodeMOVDDUPMask(VT, Mask);
5510 case X86ISD::MOVLHPD:
5511 case X86ISD::MOVLPD:
5512 case X86ISD::MOVLPS:
5513 // Not yet implemented
5515 default: llvm_unreachable("unknown target shuffle node");
5518 // If we have a fake unary shuffle, the shuffle mask is spread across two
5519 // inputs that are actually the same node. Re-map the mask to always point
5520 // into the first input.
5523 if (M >= (int)Mask.size())
5529 /// getShuffleScalarElt - Returns the scalar element that will make up the ith
5530 /// element of the result of the vector shuffle.
5531 static SDValue getShuffleScalarElt(SDNode *N, unsigned Index, SelectionDAG &DAG,
5534 return SDValue(); // Limit search depth.
5536 SDValue V = SDValue(N, 0);
5537 EVT VT = V.getValueType();
5538 unsigned Opcode = V.getOpcode();
5540 // Recurse into ISD::VECTOR_SHUFFLE node to find scalars.
5541 if (const ShuffleVectorSDNode *SV = dyn_cast<ShuffleVectorSDNode>(N)) {
5542 int Elt = SV->getMaskElt(Index);
5545 return DAG.getUNDEF(VT.getVectorElementType());
5547 unsigned NumElems = VT.getVectorNumElements();
5548 SDValue NewV = (Elt < (int)NumElems) ? SV->getOperand(0)
5549 : SV->getOperand(1);
5550 return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG, Depth+1);
5553 // Recurse into target specific vector shuffles to find scalars.
5554 if (isTargetShuffle(Opcode)) {
5555 MVT ShufVT = V.getSimpleValueType();
5556 unsigned NumElems = ShufVT.getVectorNumElements();
5557 SmallVector<int, 16> ShuffleMask;
5560 if (!getTargetShuffleMask(N, ShufVT, ShuffleMask, IsUnary))
5563 int Elt = ShuffleMask[Index];
5565 return DAG.getUNDEF(ShufVT.getVectorElementType());
5567 SDValue NewV = (Elt < (int)NumElems) ? N->getOperand(0)
5569 return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG,
5573 // Actual nodes that may contain scalar elements
5574 if (Opcode == ISD::BITCAST) {
5575 V = V.getOperand(0);
5576 EVT SrcVT = V.getValueType();
5577 unsigned NumElems = VT.getVectorNumElements();
5579 if (!SrcVT.isVector() || SrcVT.getVectorNumElements() != NumElems)
5583 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR)
5584 return (Index == 0) ? V.getOperand(0)
5585 : DAG.getUNDEF(VT.getVectorElementType());
5587 if (V.getOpcode() == ISD::BUILD_VECTOR)
5588 return V.getOperand(Index);
5593 /// getNumOfConsecutiveZeros - Return the number of elements of a vector
5594 /// shuffle operation which are consecutively zero. The
5595 /// search can start in two different directions, from left or right.
5596 /// We count undefs as zeros until PreferredNum is reached.
5597 static unsigned getNumOfConsecutiveZeros(ShuffleVectorSDNode *SVOp,
5598 unsigned NumElems, bool ZerosFromLeft,
5600 unsigned PreferredNum = -1U) {
5601 unsigned NumZeros = 0;
5602 for (unsigned i = 0; i != NumElems; ++i) {
5603 unsigned Index = ZerosFromLeft ? i : NumElems - i - 1;
5604 SDValue Elt = getShuffleScalarElt(SVOp, Index, DAG, 0);
5608 if (X86::isZeroNode(Elt))
5610 else if (Elt.getOpcode() == ISD::UNDEF) // Undef as zero up to PreferredNum.
5611 NumZeros = std::min(NumZeros + 1, PreferredNum);
5619 /// isShuffleMaskConsecutive - Check if the shuffle mask indices [MaskI, MaskE)
5620 /// correspond consecutively to elements from one of the vector operands,
5621 /// starting from its index OpIdx. Also tell OpNum which source vector operand.
5623 bool isShuffleMaskConsecutive(ShuffleVectorSDNode *SVOp,
5624 unsigned MaskI, unsigned MaskE, unsigned OpIdx,
5625 unsigned NumElems, unsigned &OpNum) {
5626 bool SeenV1 = false;
5627 bool SeenV2 = false;
5629 for (unsigned i = MaskI; i != MaskE; ++i, ++OpIdx) {
5630 int Idx = SVOp->getMaskElt(i);
5631 // Ignore undef indices
5635 if (Idx < (int)NumElems)
5640 // Only accept consecutive elements from the same vector
5641 if ((Idx % NumElems != OpIdx) || (SeenV1 && SeenV2))
5645 OpNum = SeenV1 ? 0 : 1;
5649 /// isVectorShiftRight - Returns true if the shuffle can be implemented as a
5650 /// logical right shift of a vector.
5651 static bool isVectorShiftRight(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
5652 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
5654 SVOp->getSimpleValueType(0).getVectorNumElements();
5655 unsigned NumZeros = getNumOfConsecutiveZeros(
5656 SVOp, NumElems, false /* check zeros from right */, DAG,
5657 SVOp->getMaskElt(0));
5663 // Considering the elements in the mask that are not consecutive zeros,
5664 // check if they consecutively come from only one of the source vectors.
5666 // V1 = {X, A, B, C} 0
5668 // vector_shuffle V1, V2 <1, 2, 3, X>
5670 if (!isShuffleMaskConsecutive(SVOp,
5671 0, // Mask Start Index
5672 NumElems-NumZeros, // Mask End Index(exclusive)
5673 NumZeros, // Where to start looking in the src vector
5674 NumElems, // Number of elements in vector
5675 OpSrc)) // Which source operand ?
5680 ShVal = SVOp->getOperand(OpSrc);
5684 /// isVectorShiftLeft - Returns true if the shuffle can be implemented as a
5685 /// logical left shift of a vector.
5686 static bool isVectorShiftLeft(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
5687 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
5689 SVOp->getSimpleValueType(0).getVectorNumElements();
5690 unsigned NumZeros = getNumOfConsecutiveZeros(
5691 SVOp, NumElems, true /* check zeros from left */, DAG,
5692 NumElems - SVOp->getMaskElt(NumElems - 1) - 1);
5698 // Considering the elements in the mask that are not consecutive zeros,
5699 // check if they consecutively come from only one of the source vectors.
5701 // 0 { A, B, X, X } = V2
5703 // vector_shuffle V1, V2 <X, X, 4, 5>
5705 if (!isShuffleMaskConsecutive(SVOp,
5706 NumZeros, // Mask Start Index
5707 NumElems, // Mask End Index(exclusive)
5708 0, // Where to start looking in the src vector
5709 NumElems, // Number of elements in vector
5710 OpSrc)) // Which source operand ?
5715 ShVal = SVOp->getOperand(OpSrc);
5719 /// isVectorShift - Returns true if the shuffle can be implemented as a
5720 /// logical left or right shift of a vector.
5721 static bool isVectorShift(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
5722 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
5723 // Although the logic below supports any bitwidth size, there are no
5724 // shift instructions which handle more than 128-bit vectors.
5725 if (!SVOp->getSimpleValueType(0).is128BitVector())
5728 if (isVectorShiftLeft(SVOp, DAG, isLeft, ShVal, ShAmt) ||
5729 isVectorShiftRight(SVOp, DAG, isLeft, ShVal, ShAmt))
5735 /// LowerBuildVectorv16i8 - Custom lower build_vector of v16i8.
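/// Adjacent byte pairs are zero-extended to i16, combined as (odd << 8) | even,
/// and inserted into a v8i16 that is finally bitcast back to v16i8 (a sketch of
/// the approach implemented below).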
5737 static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
5738 unsigned NumNonZero, unsigned NumZero,
5740 const X86Subtarget* Subtarget,
5741 const TargetLowering &TLI) {
5748 for (unsigned i = 0; i < 16; ++i) {
5749 bool ThisIsNonZero = (NonZeros & (1 << i)) != 0;
5750 if (ThisIsNonZero && First) {
5752 V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
5754 V = DAG.getUNDEF(MVT::v8i16);
5759 SDValue ThisElt, LastElt;
5760 bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0;
5761 if (LastIsNonZero) {
5762 LastElt = DAG.getNode(ISD::ZERO_EXTEND, dl,
5763 MVT::i16, Op.getOperand(i-1));
5765 if (ThisIsNonZero) {
5766 ThisElt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Op.getOperand(i));
5767 ThisElt = DAG.getNode(ISD::SHL, dl, MVT::i16,
5768 ThisElt, DAG.getConstant(8, MVT::i8));
5770 ThisElt = DAG.getNode(ISD::OR, dl, MVT::i16, ThisElt, LastElt);
5774 if (ThisElt.getNode())
5775 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, V, ThisElt,
5776 DAG.getIntPtrConstant(i/2));
5780 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V);
5783 /// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16.
5785 static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros,
5786 unsigned NumNonZero, unsigned NumZero,
5788 const X86Subtarget* Subtarget,
5789 const TargetLowering &TLI) {
5796 for (unsigned i = 0; i < 8; ++i) {
5797 bool isNonZero = (NonZeros & (1 << i)) != 0;
5801 V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
5803 V = DAG.getUNDEF(MVT::v8i16);
5806 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl,
5807 MVT::v8i16, V, Op.getOperand(i),
5808 DAG.getIntPtrConstant(i));
5815 /// LowerBuildVectorv4x32 - Custom lower build_vector of v4i32 or v4f32.
5816 static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG,
5817 const X86Subtarget *Subtarget,
5818 const TargetLowering &TLI) {
5819 // Find all zeroable elements.
5820 std::bitset<4> Zeroable;
5821 for (int i=0; i < 4; ++i) {
5822 SDValue Elt = Op->getOperand(i);
5823 Zeroable[i] = (Elt.getOpcode() == ISD::UNDEF || X86::isZeroNode(Elt));
5825 assert(Zeroable.size() - Zeroable.count() > 1 &&
5826 "We expect at least two non-zero elements!");
5828 // We only know how to deal with build_vector nodes where elements are either
5829 // zeroable or extract_vector_elt with constant index.
5830 SDValue FirstNonZero;
5831 unsigned FirstNonZeroIdx;
5832 for (unsigned i=0; i < 4; ++i) {
5835 SDValue Elt = Op->getOperand(i);
5836 if (Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
5837 !isa<ConstantSDNode>(Elt.getOperand(1)))
5839 // Make sure that this node is extracting from a 128-bit vector.
5840 MVT VT = Elt.getOperand(0).getSimpleValueType();
5841 if (!VT.is128BitVector())
5843 if (!FirstNonZero.getNode()) {
5845 FirstNonZeroIdx = i;
5849 assert(FirstNonZero.getNode() && "Unexpected build vector of all zeros!");
5850 SDValue V1 = FirstNonZero.getOperand(0);
5851 MVT VT = V1.getSimpleValueType();
5853 // See if this build_vector can be lowered as a blend with zero.
5855 unsigned EltMaskIdx, EltIdx;
5857 for (EltIdx = 0; EltIdx < 4; ++EltIdx) {
5858 if (Zeroable[EltIdx]) {
5859 // The zero vector will be on the right hand side.
5860 Mask[EltIdx] = EltIdx+4;
5864 Elt = Op->getOperand(EltIdx);
5865 // By construction, Elt is an EXTRACT_VECTOR_ELT with constant index.
5866 EltMaskIdx = cast<ConstantSDNode>(Elt.getOperand(1))->getZExtValue();
5867 if (Elt.getOperand(0) != V1 || EltMaskIdx != EltIdx)
5869 Mask[EltIdx] = EltIdx;
5873 // Let the shuffle legalizer deal with blend operations.
5874 SDValue VZero = getZeroVector(VT, Subtarget, DAG, SDLoc(Op));
5875 if (V1.getSimpleValueType() != VT)
5876 V1 = DAG.getNode(ISD::BITCAST, SDLoc(V1), VT, V1);
5877 return DAG.getVectorShuffle(VT, SDLoc(V1), V1, VZero, &Mask[0]);
5880 // See if we can lower this build_vector to an INSERTPS.
5881 if (!Subtarget->hasSSE41())
5884 SDValue V2 = Elt.getOperand(0);
5885 if (Elt == FirstNonZero && EltIdx == FirstNonZeroIdx)
5888 bool CanFold = true;
5889 for (unsigned i = EltIdx + 1; i < 4 && CanFold; ++i) {
5893 SDValue Current = Op->getOperand(i);
5894 SDValue SrcVector = Current->getOperand(0);
5897 CanFold = SrcVector == V1 &&
5898 cast<ConstantSDNode>(Current.getOperand(1))->getZExtValue() == i;
5904 assert(V1.getNode() && "Expected at least two non-zero elements!");
5905 if (V1.getSimpleValueType() != MVT::v4f32)
5906 V1 = DAG.getNode(ISD::BITCAST, SDLoc(V1), MVT::v4f32, V1);
5907 if (V2.getSimpleValueType() != MVT::v4f32)
5908 V2 = DAG.getNode(ISD::BITCAST, SDLoc(V2), MVT::v4f32, V2);
5910 // Ok, we can emit an INSERTPS instruction.
5911 unsigned ZMask = Zeroable.to_ulong();
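// The INSERTPS immediate is laid out as: bits [7:6] = source element index,
// bits [5:4] = destination element index, bits [3:0] = zero mask. For example
// (illustrative), inserting source element 2 into destination element 1 with
// nothing zeroed encodes as (2 << 6) | (1 << 4) == 0x90.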
5913 unsigned InsertPSMask = EltMaskIdx << 6 | EltIdx << 4 | ZMask;
5914 assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
5915 SDValue Result = DAG.getNode(X86ISD::INSERTPS, SDLoc(Op), MVT::v4f32, V1, V2,
5916 DAG.getIntPtrConstant(InsertPSMask));
5917 return DAG.getNode(ISD::BITCAST, SDLoc(Op), VT, Result);
5920 /// Return a vector logical shift node.
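/// For example (illustrative), shifting a 128-bit vector left by 32 bits is
/// emitted as X86ISD::VSHLDQ on v2i64 with a byte shift amount of 32 / 8 == 4.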
5921 static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp,
5922 unsigned NumBits, SelectionDAG &DAG,
5923 const TargetLowering &TLI, SDLoc dl) {
5924 assert(VT.is128BitVector() && "Unknown type for VShift");
5925 MVT ShVT = MVT::v2i64;
5926 unsigned Opc = isLeft ? X86ISD::VSHLDQ : X86ISD::VSRLDQ;
5927 SrcOp = DAG.getNode(ISD::BITCAST, dl, ShVT, SrcOp);
5928 MVT ScalarShiftTy = TLI.getScalarShiftAmountTy(SrcOp.getValueType());
5929 assert(NumBits % 8 == 0 && "Only support byte sized shifts");
5930 SDValue ShiftVal = DAG.getConstant(NumBits/8, ScalarShiftTy);
5931 return DAG.getNode(ISD::BITCAST, dl, VT,
5932 DAG.getNode(Opc, dl, ShVT, SrcOp, ShiftVal));
5936 LowerAsSplatVectorLoad(SDValue SrcOp, MVT VT, SDLoc dl, SelectionDAG &DAG) {
5938 // Check if the scalar load can be widened into a vector load. And if
5939 // the address is "base + cst" see if the cst can be "absorbed" into
5940 // the shuffle mask.
5941 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(SrcOp)) {
5942 SDValue Ptr = LD->getBasePtr();
5943 if (!ISD::isNormalLoad(LD) || LD->isVolatile())
5945 EVT PVT = LD->getValueType(0);
5946 if (PVT != MVT::i32 && PVT != MVT::f32)
5951 if (FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr)) {
5952 FI = FINode->getIndex();
5954 } else if (DAG.isBaseWithConstantOffset(Ptr) &&
5955 isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
5956 FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
5957 Offset = Ptr.getConstantOperandVal(1);
5958 Ptr = Ptr.getOperand(0);
5963 // FIXME: 256-bit vector instructions don't require a strict alignment,
5964 // improve this code to support it better.
5965 unsigned RequiredAlign = VT.getSizeInBits()/8;
5966 SDValue Chain = LD->getChain();
5967 // Make sure the stack object alignment is at least 16 or 32.
5968 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
5969 if (DAG.InferPtrAlignment(Ptr) < RequiredAlign) {
5970 if (MFI->isFixedObjectIndex(FI)) {
5971 // Can't change the alignment. FIXME: It's possible to compute
5972 // the exact stack offset and reference FI + adjust offset instead.
5973 // If someone *really* cares about this, that's the way to implement it.
5976 MFI->setObjectAlignment(FI, RequiredAlign);
5980 // (Offset % 16 or 32) must be a multiple of 4. The address is then
5981 // Ptr + (Offset & ~15).
5984 if ((Offset % RequiredAlign) & 3)
5986 int64_t StartOffset = Offset & ~(RequiredAlign-1);
5988 Ptr = DAG.getNode(ISD::ADD, SDLoc(Ptr), Ptr.getValueType(),
5989 Ptr,DAG.getConstant(StartOffset, Ptr.getValueType()));
5991 int EltNo = (Offset - StartOffset) >> 2;
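// For example (illustrative), Offset == 20 with RequiredAlign == 16 gives
// StartOffset == 16 and EltNo == 1: the load starts 4 bytes before the scalar
// and the splat shuffle selects element 1 of the widened vector.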
5992 unsigned NumElems = VT.getVectorNumElements();
5994 EVT NVT = EVT::getVectorVT(*DAG.getContext(), PVT, NumElems);
5995 SDValue V1 = DAG.getLoad(NVT, dl, Chain, Ptr,
5996 LD->getPointerInfo().getWithOffset(StartOffset),
5997 false, false, false, 0);
5999 SmallVector<int, 8> Mask(NumElems, EltNo);
6001 return DAG.getVectorShuffle(NVT, dl, V1, DAG.getUNDEF(NVT), &Mask[0]);
6007 /// Given the initializing elements 'Elts' of a vector of type 'VT', see if the
6008 /// elements can be replaced by a single large load which has the same value as
6009 /// a build_vector or insert_subvector whose loaded operands are 'Elts'.
6011 /// Example: <load i32 *a, load i32 *a+4, undef, undef> -> zextload a
6013 /// FIXME: we'd also like to handle the case where the last elements are zero
6014 /// rather than undef via VZEXT_LOAD, but we do not detect that case today.
6015 /// There's even a handy isZeroNode for that purpose.
6016 static SDValue EltsFromConsecutiveLoads(EVT VT, ArrayRef<SDValue> Elts,
6017 SDLoc &DL, SelectionDAG &DAG,
6018 bool isAfterLegalize) {
6019 unsigned NumElems = Elts.size();
6021 LoadSDNode *LDBase = nullptr;
6022 unsigned LastLoadedElt = -1U;
6024 // For each element in the initializer, see if we've found a load or an undef.
6025 // If we don't find an initial load element, or later load elements are
6026 // non-consecutive, bail out.
6027 for (unsigned i = 0; i < NumElems; ++i) {
6028 SDValue Elt = Elts[i];
6029 // Look through a bitcast.
6030 if (Elt.getNode() && Elt.getOpcode() == ISD::BITCAST)
6031 Elt = Elt.getOperand(0);
6032 if (!Elt.getNode() ||
6033 (Elt.getOpcode() != ISD::UNDEF && !ISD::isNON_EXTLoad(Elt.getNode())))
6036 if (Elt.getNode()->getOpcode() == ISD::UNDEF)
6038 LDBase = cast<LoadSDNode>(Elt.getNode());
6042 if (Elt.getOpcode() == ISD::UNDEF)
6045 LoadSDNode *LD = cast<LoadSDNode>(Elt);
6046 EVT LdVT = Elt.getValueType();
6047 // Each loaded element must be the correct fractional portion of the
6048 // requested vector load.
6049 if (LdVT.getSizeInBits() != VT.getSizeInBits() / NumElems)
6051 if (!DAG.isConsecutiveLoad(LD, LDBase, LdVT.getSizeInBits() / 8, i))
6056 // If we have found an entire vector of loads and undefs, then return a large
6057 // load of the entire vector width starting at the base pointer. If we found
6058 // consecutive loads for the low half, generate a vzext_load node.
6059 if (LastLoadedElt == NumElems - 1) {
6060 assert(LDBase && "Did not find base load for merging consecutive loads");
6061 EVT EltVT = LDBase->getValueType(0);
6062 // Ensure that the input vector size for the merged loads matches the
6063 // cumulative size of the input elements.
6064 if (VT.getSizeInBits() != EltVT.getSizeInBits() * NumElems)
6067 if (isAfterLegalize &&
6068 !DAG.getTargetLoweringInfo().isOperationLegal(ISD::LOAD, VT))
6071 SDValue NewLd = SDValue();
6073 NewLd = DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(),
6074 LDBase->getPointerInfo(), LDBase->isVolatile(),
6075 LDBase->isNonTemporal(), LDBase->isInvariant(),
6076 LDBase->getAlignment());
6078 if (LDBase->hasAnyUseOfValue(1)) {
6079 SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
6081 SDValue(NewLd.getNode(), 1));
6082 DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewChain);
6083 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(LDBase, 1),
6084 SDValue(NewLd.getNode(), 1));
6090 // TODO: The code below fires only for loading the low v2i32 / v2f32
6091 // of a v4i32 / v4f32. It's probably worth generalizing.
6092 EVT EltVT = VT.getVectorElementType();
6093 if (NumElems == 4 && LastLoadedElt == 1 && (EltVT.getSizeInBits() == 32) &&
6094 DAG.getTargetLoweringInfo().isTypeLegal(MVT::v2i64)) {
6095 SDVTList Tys = DAG.getVTList(MVT::v2i64, MVT::Other);
6096 SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() };
6098 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, Ops, MVT::i64,
6099 LDBase->getPointerInfo(),
6100 LDBase->getAlignment(),
6101 false/*isVolatile*/, true/*ReadMem*/,
6104 // Make sure the newly-created LOAD is in the same position as LDBase in
6105 // terms of dependency. We create a TokenFactor for LDBase and ResNode, and
6106 // update uses of LDBase's output chain to use the TokenFactor.
6107 if (LDBase->hasAnyUseOfValue(1)) {
6108 SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
6109 SDValue(LDBase, 1), SDValue(ResNode.getNode(), 1));
6110 DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewChain);
6111 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(LDBase, 1),
6112 SDValue(ResNode.getNode(), 1));
6115 return DAG.getNode(ISD::BITCAST, DL, VT, ResNode);
6120 /// LowerVectorBroadcast - Attempt to use the vbroadcast instruction
6121 /// to generate a splat value for the following cases:
6122 /// 1. A splat BUILD_VECTOR which uses a single scalar load, or a constant.
6123 /// 2. A splat shuffle which uses a scalar_to_vector node which comes from
6124 /// a scalar load, or a constant.
6125 /// The VBROADCAST node is returned when a pattern is found,
6126 /// or SDValue() otherwise.
6127 static SDValue LowerVectorBroadcast(SDValue Op, const X86Subtarget* Subtarget,
6128 SelectionDAG &DAG) {
6129 // VBROADCAST requires AVX.
6130 // TODO: Splats could be generated for non-AVX CPUs using SSE
6131 // instructions, but there's less potential gain for only 128-bit vectors.
6132 if (!Subtarget->hasAVX())
6135 MVT VT = Op.getSimpleValueType();
6138 assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
6139 "Unsupported vector type for broadcast.");
6144 switch (Op.getOpcode()) {
6146 // Unknown pattern found.
6149 case ISD::BUILD_VECTOR: {
6150 auto *BVOp = cast<BuildVectorSDNode>(Op.getNode());
6151 BitVector UndefElements;
6152 SDValue Splat = BVOp->getSplatValue(&UndefElements);
6154 // We need a splat of a single value to use broadcast, and it doesn't
6155 // make any sense if the value is only in one element of the vector.
6156 if (!Splat || (VT.getVectorNumElements() - UndefElements.count()) <= 1)
6160 ConstSplatVal = (Ld.getOpcode() == ISD::Constant ||
6161 Ld.getOpcode() == ISD::ConstantFP);
6163 // Make sure that all of the users of a non-constant load are from the
6164 // BUILD_VECTOR node.
6165 if (!ConstSplatVal && !BVOp->isOnlyUserOf(Ld.getNode()))
6170 case ISD::VECTOR_SHUFFLE: {
6171 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
6173 // Shuffles must have a splat mask where the splatted element is element 0.
6175 if ((!SVOp->isSplat()) || SVOp->getMaskElt(0) != 0)
6178 SDValue Sc = Op.getOperand(0);
6179 if (Sc.getOpcode() != ISD::SCALAR_TO_VECTOR &&
6180 Sc.getOpcode() != ISD::BUILD_VECTOR) {
6182 if (!Subtarget->hasInt256())
6185 // Use the register form of the broadcast instruction available on AVX2.
6186 if (VT.getSizeInBits() >= 256)
6187 Sc = Extract128BitVector(Sc, 0, DAG, dl);
6188 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Sc);
6191 Ld = Sc.getOperand(0);
6192 ConstSplatVal = (Ld.getOpcode() == ISD::Constant ||
6193 Ld.getOpcode() == ISD::ConstantFP);
6195 // The scalar_to_vector node and the suspected
6196 // load node must have exactly one user.
6197 // Constants may have multiple users.
6199 // AVX-512 has register version of the broadcast
6200 bool hasRegVer = Subtarget->hasAVX512() && VT.is512BitVector() &&
6201 Ld.getValueType().getSizeInBits() >= 32;
6202 if (!ConstSplatVal && ((!Sc.hasOneUse() || !Ld.hasOneUse()) &&
6209 unsigned ScalarSize = Ld.getValueType().getSizeInBits();
6210 bool IsGE256 = (VT.getSizeInBits() >= 256);
6212 // When optimizing for size, generate up to 5 extra bytes for a broadcast
6213 // instruction to save 8 or more bytes of constant pool data.
6214 // TODO: If multiple splats are generated to load the same constant,
6215 // it may be detrimental to overall size. There needs to be a way to detect
6216 // that condition to know if this is truly a size win.
6217 const Function *F = DAG.getMachineFunction().getFunction();
6218 bool OptForSize = F->hasFnAttribute(Attribute::OptimizeForSize);
6220 // Handle broadcasting a single constant scalar from the constant pool
6222 // On Sandybridge (no AVX2), it is still better to load a constant vector
6223 // from the constant pool and not to broadcast it from a scalar.
6224 // But override that restriction when optimizing for size.
6225 // TODO: Check if splatting is recommended for other AVX-capable CPUs.
6226 if (ConstSplatVal && (Subtarget->hasAVX2() || OptForSize)) {
6227 EVT CVT = Ld.getValueType();
6228 assert(!CVT.isVector() && "Must not broadcast a vector type");
6230 // Splat f32, i32, v4f64, v4i64 in all cases with AVX2.
6231 // For size optimization, also splat v2f64 and v2i64, and for size opt
6232 // with AVX2, also splat i8 and i16.
6233 // With pattern matching, the VBROADCAST node may become a VMOVDDUP.
6234 if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
6235 (OptForSize && (ScalarSize == 64 || Subtarget->hasAVX2()))) {
6236 const Constant *C = nullptr;
6237 if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Ld))
6238 C = CI->getConstantIntValue();
6239 else if (ConstantFPSDNode *CF = dyn_cast<ConstantFPSDNode>(Ld))
6240 C = CF->getConstantFPValue();
6242 assert(C && "Invalid constant type");
6244 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6245 SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy());
6246 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
6247 Ld = DAG.getLoad(CVT, dl, DAG.getEntryNode(), CP,
6248 MachinePointerInfo::getConstantPool(),
6249 false, false, false, Alignment);
6251 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6255 bool IsLoad = ISD::isNormalLoad(Ld.getNode());
6257 // Handle AVX2 in-register broadcasts.
6258 if (!IsLoad && Subtarget->hasInt256() &&
6259 (ScalarSize == 32 || (IsGE256 && ScalarSize == 64)))
6260 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6262 // The scalar source must be a normal load.
6266 if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
6267 (Subtarget->hasVLX() && ScalarSize == 64))
6268 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6270 // The integer check is needed for the 64-bit into 128-bit case, so it doesn't
6271 // match double, since there is no vbroadcastsd xmm.
6272 if (Subtarget->hasInt256() && Ld.getValueType().isInteger()) {
6273 if (ScalarSize == 8 || ScalarSize == 16 || ScalarSize == 64)
6274 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6277 // Unsupported broadcast.
6281 /// \brief For an EXTRACT_VECTOR_ELT with a constant index return the real
6282 /// underlying vector and index.
6284 /// Modifies \p ExtractedFromVec to the real vector and returns the real
6286 static int getUnderlyingExtractedFromVec(SDValue &ExtractedFromVec,
6288 int Idx = cast<ConstantSDNode>(ExtIdx)->getZExtValue();
6289 if (!isa<ShuffleVectorSDNode>(ExtractedFromVec))
6292 // For 256-bit vectors, LowerEXTRACT_VECTOR_ELT_SSE4 may have already
6294 // (extract_vector_elt (v8f32 %vreg1), Constant<6>)
6296 // (extract_vector_elt (vector_shuffle<2,u,u,u>
6297 // (extract_subvector (v8f32 %vreg0), Constant<4>),
6300 // In this case the vector is the extract_subvector expression and the index
6301 // is 2, as specified by the shuffle.
6302 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(ExtractedFromVec);
6303 SDValue ShuffleVec = SVOp->getOperand(0);
6304 MVT ShuffleVecVT = ShuffleVec.getSimpleValueType();
6305 assert(ShuffleVecVT.getVectorElementType() ==
6306 ExtractedFromVec.getSimpleValueType().getVectorElementType());
6308 int ShuffleIdx = SVOp->getMaskElt(Idx);
6309 if (isUndefOrInRange(ShuffleIdx, 0, ShuffleVecVT.getVectorNumElements())) {
6310 ExtractedFromVec = ShuffleVec;
6316 static SDValue buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) {
6317 MVT VT = Op.getSimpleValueType();
6319 // Skip if insert_vec_elt is not supported.
6320 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6321 if (!TLI.isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT))
6325 unsigned NumElems = Op.getNumOperands();
6329 SmallVector<unsigned, 4> InsertIndices;
6330 SmallVector<int, 8> Mask(NumElems, -1);
6332 for (unsigned i = 0; i != NumElems; ++i) {
6333 unsigned Opc = Op.getOperand(i).getOpcode();
6335 if (Opc == ISD::UNDEF)
6338 if (Opc != ISD::EXTRACT_VECTOR_ELT) {
6339 // Quit if more than 1 element needs inserting.
6340 if (InsertIndices.size() > 1)
6343 InsertIndices.push_back(i);
6347 SDValue ExtractedFromVec = Op.getOperand(i).getOperand(0);
6348 SDValue ExtIdx = Op.getOperand(i).getOperand(1);
6349 // Quit if non-constant index.
6350 if (!isa<ConstantSDNode>(ExtIdx))
6352 int Idx = getUnderlyingExtractedFromVec(ExtractedFromVec, ExtIdx);
6354 // Quit if extracted from vector of different type.
6355 if (ExtractedFromVec.getValueType() != VT)
6358 if (!VecIn1.getNode())
6359 VecIn1 = ExtractedFromVec;
6360 else if (VecIn1 != ExtractedFromVec) {
6361 if (!VecIn2.getNode())
6362 VecIn2 = ExtractedFromVec;
6363 else if (VecIn2 != ExtractedFromVec)
6364 // Quit if more than 2 vectors to shuffle
6368 if (ExtractedFromVec == VecIn1)
6370 else if (ExtractedFromVec == VecIn2)
6371 Mask[i] = Idx + NumElems;
6374 if (!VecIn1.getNode())
6377 VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(VT);
6378 SDValue NV = DAG.getVectorShuffle(VT, DL, VecIn1, VecIn2, &Mask[0]);
6379 for (unsigned i = 0, e = InsertIndices.size(); i != e; ++i) {
6380 unsigned Idx = InsertIndices[i];
6381 NV = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, NV, Op.getOperand(Idx),
6382 DAG.getIntPtrConstant(Idx));
6388 // Lower BUILD_VECTOR operation for v8i1 and v16i1 types.
6390 X86TargetLowering::LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG) const {
6392 MVT VT = Op.getSimpleValueType();
6393 assert((VT.getVectorElementType() == MVT::i1) && (VT.getSizeInBits() <= 16) &&
6394 "Unexpected type in LowerBUILD_VECTORvXi1!");
6397 if (ISD::isBuildVectorAllZeros(Op.getNode())) {
6398 SDValue Cst = DAG.getTargetConstant(0, MVT::i1);
6399 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
6400 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
6403 if (ISD::isBuildVectorAllOnes(Op.getNode())) {
6404 SDValue Cst = DAG.getTargetConstant(1, MVT::i1);
6405 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
6406 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
6409 bool AllContants = true;
6410 uint64_t Immediate = 0;
6411 int NonConstIdx = -1;
6412 bool IsSplat = true;
6413 unsigned NumNonConsts = 0;
6414 unsigned NumConsts = 0;
6415 for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) {
6416 SDValue In = Op.getOperand(idx);
6417 if (In.getOpcode() == ISD::UNDEF)
6419 if (!isa<ConstantSDNode>(In)) {
6420 AllContants = false;
6425 if (cast<ConstantSDNode>(In)->getZExtValue())
6426 Immediate |= (1ULL << idx);
6428 if (In != Op.getOperand(0))
6433 SDValue FullMask = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1,
6434 DAG.getConstant(Immediate, MVT::i16));
6435 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, FullMask,
6436 DAG.getIntPtrConstant(0));
6439 if (NumNonConsts == 1 && NonConstIdx != 0) {
6442 SDValue VecAsImm = DAG.getConstant(Immediate,
6443 MVT::getIntegerVT(VT.getSizeInBits()));
6444 DstVec = DAG.getNode(ISD::BITCAST, dl, VT, VecAsImm);
6447 DstVec = DAG.getUNDEF(VT);
6448 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DstVec,
6449 Op.getOperand(NonConstIdx),
6450 DAG.getIntPtrConstant(NonConstIdx));
6452 if (!IsSplat && (NonConstIdx != 0))
6453 llvm_unreachable("Unsupported BUILD_VECTOR operation");
6454 MVT SelectVT = (VT == MVT::v16i1)? MVT::i16 : MVT::i8;
6457 Select = DAG.getNode(ISD::SELECT, dl, SelectVT, Op.getOperand(0),
6458 DAG.getConstant(-1, SelectVT),
6459 DAG.getConstant(0, SelectVT));
6461 Select = DAG.getNode(ISD::SELECT, dl, SelectVT, Op.getOperand(0),
6462 DAG.getConstant((Immediate | 1), SelectVT),
6463 DAG.getConstant(Immediate, SelectVT));
6464 return DAG.getNode(ISD::BITCAST, dl, VT, Select);
6467 /// \brief Return true if \p N implements a horizontal binop and return the
6468 /// operands for the horizontal binop into V0 and V1.
6470 /// This is a helper function of PerformBUILD_VECTORCombine.
6471 /// This function checks whether the given build_vector \p N implements a
6472 /// horizontal operation. Parameter \p Opcode defines the kind of horizontal
6473 /// operation to match.
6474 /// For example, if \p Opcode is equal to ISD::ADD, then this function
6475 /// checks if \p N implements a horizontal arithmetic add; if instead \p Opcode
6476 /// is equal to ISD::SUB, then this function checks if this is a horizontal
6479 /// This function only analyzes elements of \p N whose indices are
6480 /// in range [BaseIdx, LastIdx).
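///
/// For example (illustrative only, not taken from the source): with \p Opcode
/// set to ISD::FADD, BaseIdx = 0 and LastIdx = 4, a v4f32 build_vector whose
/// operands are
///   (fadd (extract_vector_elt A, 0), (extract_vector_elt A, 1)),
///   (fadd (extract_vector_elt A, 2), (extract_vector_elt A, 3)),
///   (fadd (extract_vector_elt B, 0), (extract_vector_elt B, 1)),
///   (fadd (extract_vector_elt B, 2), (extract_vector_elt B, 3))
/// matches a horizontal add with V0 = A and V1 = B.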
6481 static bool isHorizontalBinOp(const BuildVectorSDNode *N, unsigned Opcode,
6483 unsigned BaseIdx, unsigned LastIdx,
6484 SDValue &V0, SDValue &V1) {
6485 EVT VT = N->getValueType(0);
6487 assert(BaseIdx * 2 <= LastIdx && "Invalid Indices in input!");
6488 assert(VT.isVector() && VT.getVectorNumElements() >= LastIdx &&
6489 "Invalid Vector in input!");
6491 bool IsCommutable = (Opcode == ISD::ADD || Opcode == ISD::FADD);
6492 bool CanFold = true;
6493 unsigned ExpectedVExtractIdx = BaseIdx;
6494 unsigned NumElts = LastIdx - BaseIdx;
6495 V0 = DAG.getUNDEF(VT);
6496 V1 = DAG.getUNDEF(VT);
6498 // Check if N implements a horizontal binop.
6499 for (unsigned i = 0, e = NumElts; i != e && CanFold; ++i) {
6500 SDValue Op = N->getOperand(i + BaseIdx);
6503 if (Op->getOpcode() == ISD::UNDEF) {
6504 // Update the expected vector extract index.
6505 if (i * 2 == NumElts)
6506 ExpectedVExtractIdx = BaseIdx;
6507 ExpectedVExtractIdx += 2;
6511 CanFold = Op->getOpcode() == Opcode && Op->hasOneUse();
6516 SDValue Op0 = Op.getOperand(0);
6517 SDValue Op1 = Op.getOperand(1);
6519 // Try to match the following pattern:
6520 // (BINOP (extract_vector_elt A, I), (extract_vector_elt A, I+1))
6521 CanFold = (Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
6522 Op1.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
6523 Op0.getOperand(0) == Op1.getOperand(0) &&
6524 isa<ConstantSDNode>(Op0.getOperand(1)) &&
6525 isa<ConstantSDNode>(Op1.getOperand(1)));
6529 unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
6530 unsigned I1 = cast<ConstantSDNode>(Op1.getOperand(1))->getZExtValue();
6532 if (i * 2 < NumElts) {
6533 if (V0.getOpcode() == ISD::UNDEF)
6534 V0 = Op0.getOperand(0);
6536 if (V1.getOpcode() == ISD::UNDEF)
6537 V1 = Op0.getOperand(0);
6538 if (i * 2 == NumElts)
6539 ExpectedVExtractIdx = BaseIdx;
6542 SDValue Expected = (i * 2 < NumElts) ? V0 : V1;
6543 if (I0 == ExpectedVExtractIdx)
6544 CanFold = I1 == I0 + 1 && Op0.getOperand(0) == Expected;
6545 else if (IsCommutable && I1 == ExpectedVExtractIdx) {
6546 // Try to match the following dag sequence:
6547 // (BINOP (extract_vector_elt A, I+1), (extract_vector_elt A, I))
6548 CanFold = I0 == I1 + 1 && Op1.getOperand(0) == Expected;
6552 ExpectedVExtractIdx += 2;
6558 /// \brief Emit a sequence of two 128-bit horizontal add/sub followed by
6559 /// a concat_vector.
6561 /// This is a helper function of PerformBUILD_VECTORCombine.
6562 /// This function expects two 256-bit vectors called V0 and V1.
6563 /// At first, each vector is split into two separate 128-bit vectors.
6564 /// Then, the resulting 128-bit vectors are used to implement two
6565 /// horizontal binary operations.
6567 /// The kind of horizontal binary operation is defined by \p X86Opcode.
6569 /// \p Mode specifies how the 128-bit parts of V0 and V1 are passed as inputs to
6570 /// the two new horizontal binops.
6571 /// When Mode is set, the first horizontal binop dag node takes as input
6572 /// the lower 128-bit of V0 and the upper 128-bit of V0. The second
6573 /// horizontal binop dag node takes as input the lower 128-bit of V1
6574 /// and the upper 128-bit of V1.
6576 /// HADD V0_LO, V0_HI
6577 /// HADD V1_LO, V1_HI
6579 /// Otherwise, the first horizontal binop dag node takes as input the lower
6580 /// 128-bit of V0 and the lower 128-bit of V1, and the second horizontal binop
6581 /// dag node takes the upper 128-bit of V0 and the upper 128-bit of V1.
6583 /// HADD V0_LO, V1_LO
6584 /// HADD V0_HI, V1_HI
6586 /// If \p isUndefLO is set, then the algorithm propagates UNDEF to the lower
6587 /// 128-bits of the result. If \p isUndefHI is set, then UNDEF is propagated to
6588 /// the upper 128-bits of the result.
6589 static SDValue ExpandHorizontalBinOp(const SDValue &V0, const SDValue &V1,
6590 SDLoc DL, SelectionDAG &DAG,
6591 unsigned X86Opcode, bool Mode,
6592 bool isUndefLO, bool isUndefHI) {
6593 EVT VT = V0.getValueType();
6594 assert(VT.is256BitVector() && VT == V1.getValueType() &&
6595 "Invalid nodes in input!");
6597 unsigned NumElts = VT.getVectorNumElements();
6598 SDValue V0_LO = Extract128BitVector(V0, 0, DAG, DL);
6599 SDValue V0_HI = Extract128BitVector(V0, NumElts/2, DAG, DL);
6600 SDValue V1_LO = Extract128BitVector(V1, 0, DAG, DL);
6601 SDValue V1_HI = Extract128BitVector(V1, NumElts/2, DAG, DL);
6602 EVT NewVT = V0_LO.getValueType();
6604 SDValue LO = DAG.getUNDEF(NewVT);
6605 SDValue HI = DAG.getUNDEF(NewVT);
6608 // Don't emit a horizontal binop if the result is expected to be UNDEF.
6609 if (!isUndefLO && V0->getOpcode() != ISD::UNDEF)
6610 LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V0_HI);
6611 if (!isUndefHI && V1->getOpcode() != ISD::UNDEF)
6612 HI = DAG.getNode(X86Opcode, DL, NewVT, V1_LO, V1_HI);
6614 // Don't emit a horizontal binop if the result is expected to be UNDEF.
6615 if (!isUndefLO && (V0_LO->getOpcode() != ISD::UNDEF ||
6616 V1_LO->getOpcode() != ISD::UNDEF))
6617 LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V1_LO);
6619 if (!isUndefHI && (V0_HI->getOpcode() != ISD::UNDEF ||
6620 V1_HI->getOpcode() != ISD::UNDEF))
6621 HI = DAG.getNode(X86Opcode, DL, NewVT, V0_HI, V1_HI);
6624 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LO, HI);
6627 /// \brief Try to fold a build_vector that performs an 'addsub' into the
6628 /// sequence of 'vadd + vsub + blendi'.
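///
/// For example (an illustrative sketch, where ai/bi denote lane i of vectors
/// A and B), a v4f32 build_vector of the form
///   <(fsub a0, b0), (fadd a1, b1), (fsub a2, b2), (fadd a3, b3)>
/// is folded into (X86ISD::ADDSUB A, B).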
6629 static SDValue matchAddSub(const BuildVectorSDNode *BV, SelectionDAG &DAG,
6630 const X86Subtarget *Subtarget) {
6632 EVT VT = BV->getValueType(0);
6633 unsigned NumElts = VT.getVectorNumElements();
6634 SDValue InVec0 = DAG.getUNDEF(VT);
6635 SDValue InVec1 = DAG.getUNDEF(VT);
6637 assert((VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v4f32 ||
6638 VT == MVT::v2f64) && "build_vector with an invalid type found!");
6640 // Odd-numbered elements in the input build vector are obtained from
6641 // adding two integer/float elements.
6642 // Even-numbered elements in the input build vector are obtained from
6643 // subtracting two integer/float elements.
6644 unsigned ExpectedOpcode = ISD::FSUB;
6645 unsigned NextExpectedOpcode = ISD::FADD;
6646 bool AddFound = false;
6647 bool SubFound = false;
6649 for (unsigned i = 0, e = NumElts; i != e; ++i) {
6650 SDValue Op = BV->getOperand(i);
6652 // Skip 'undef' values.
6653 unsigned Opcode = Op.getOpcode();
6654 if (Opcode == ISD::UNDEF) {
6655 std::swap(ExpectedOpcode, NextExpectedOpcode);
6659 // Early exit if we found an unexpected opcode.
6660 if (Opcode != ExpectedOpcode)
6663 SDValue Op0 = Op.getOperand(0);
6664 SDValue Op1 = Op.getOperand(1);
6666 // Try to match the following pattern:
6667 // (BINOP (extract_vector_elt A, i), (extract_vector_elt B, i))
6668 // Early exit if we cannot match that sequence.
6669 if (Op0.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
6670 Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
6671 !isa<ConstantSDNode>(Op0.getOperand(1)) ||
6672 !isa<ConstantSDNode>(Op1.getOperand(1)) ||
6673 Op0.getOperand(1) != Op1.getOperand(1))
6676 unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
6680 // We found a valid add/sub node. Update the information accordingly.
6686 // Update InVec0 and InVec1.
6687 if (InVec0.getOpcode() == ISD::UNDEF)
6688 InVec0 = Op0.getOperand(0);
6689 if (InVec1.getOpcode() == ISD::UNDEF)
6690 InVec1 = Op1.getOperand(0);
6692 // Make sure that operands in input to each add/sub node always
6693 // come from the same pair of vectors.
6694 if (InVec0 != Op0.getOperand(0)) {
6695 if (ExpectedOpcode == ISD::FSUB)
6698 // FADD is commutable. Try to commute the operands
6699 // and then test again.
6700 std::swap(Op0, Op1);
6701 if (InVec0 != Op0.getOperand(0))
6705 if (InVec1 != Op1.getOperand(0))
6708 // Update the pair of expected opcodes.
6709 std::swap(ExpectedOpcode, NextExpectedOpcode);
6712 // Don't try to fold this build_vector into an ADDSUB if the inputs are undef.
6713 if (AddFound && SubFound && InVec0.getOpcode() != ISD::UNDEF &&
6714 InVec1.getOpcode() != ISD::UNDEF)
6715 return DAG.getNode(X86ISD::ADDSUB, DL, VT, InVec0, InVec1);
6720 static SDValue PerformBUILD_VECTORCombine(SDNode *N, SelectionDAG &DAG,
6721 const X86Subtarget *Subtarget) {
6723 EVT VT = N->getValueType(0);
6724 unsigned NumElts = VT.getVectorNumElements();
6725 BuildVectorSDNode *BV = cast<BuildVectorSDNode>(N);
6726 SDValue InVec0, InVec1;
6728 // Try to match an ADDSUB.
6729 if ((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
6730 (Subtarget->hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))) {
6731 SDValue Value = matchAddSub(BV, DAG, Subtarget);
6732 if (Value.getNode())
6736 // Try to match horizontal ADD/SUB.
6737 unsigned NumUndefsLO = 0;
6738 unsigned NumUndefsHI = 0;
6739 unsigned Half = NumElts/2;
6741 // Count the number of UNDEF operands in the input build_vector.
6742 for (unsigned i = 0, e = Half; i != e; ++i)
6743 if (BV->getOperand(i)->getOpcode() == ISD::UNDEF)
6746 for (unsigned i = Half, e = NumElts; i != e; ++i)
6747 if (BV->getOperand(i)->getOpcode() == ISD::UNDEF)
6750 // Early exit if this is either a build_vector of all UNDEFs or if all the
6751 // operands but one are UNDEF.
6752 if (NumUndefsLO + NumUndefsHI + 1 >= NumElts)
6755 if ((VT == MVT::v4f32 || VT == MVT::v2f64) && Subtarget->hasSSE3()) {
6756 // Try to match an SSE3 float HADD/HSUB.
6757 if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, NumElts, InVec0, InVec1))
6758 return DAG.getNode(X86ISD::FHADD, DL, VT, InVec0, InVec1);
6760 if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, NumElts, InVec0, InVec1))
6761 return DAG.getNode(X86ISD::FHSUB, DL, VT, InVec0, InVec1);
6762 } else if ((VT == MVT::v4i32 || VT == MVT::v8i16) && Subtarget->hasSSSE3()) {
6763 // Try to match an SSSE3 integer HADD/HSUB.
6764 if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
6765 return DAG.getNode(X86ISD::HADD, DL, VT, InVec0, InVec1);
6767 if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, NumElts, InVec0, InVec1))
6768 return DAG.getNode(X86ISD::HSUB, DL, VT, InVec0, InVec1);
6771 if (!Subtarget->hasAVX())
6774 if ((VT == MVT::v8f32 || VT == MVT::v4f64)) {
6775 // Try to match an AVX horizontal add/sub of packed single/double
6776 // precision floating point values from 256-bit vectors.
6777 SDValue InVec2, InVec3;
6778 if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, Half, InVec0, InVec1) &&
6779 isHorizontalBinOp(BV, ISD::FADD, DAG, Half, NumElts, InVec2, InVec3) &&
6780 ((InVec0.getOpcode() == ISD::UNDEF ||
6781 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
6782 ((InVec1.getOpcode() == ISD::UNDEF ||
6783 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
6784 return DAG.getNode(X86ISD::FHADD, DL, VT, InVec0, InVec1);
6786 if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, Half, InVec0, InVec1) &&
6787 isHorizontalBinOp(BV, ISD::FSUB, DAG, Half, NumElts, InVec2, InVec3) &&
6788 ((InVec0.getOpcode() == ISD::UNDEF ||
6789 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
6790 ((InVec1.getOpcode() == ISD::UNDEF ||
6791 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
6792 return DAG.getNode(X86ISD::FHSUB, DL, VT, InVec0, InVec1);
6793 } else if (VT == MVT::v8i32 || VT == MVT::v16i16) {
6794 // Try to match an AVX2 horizontal add/sub of signed integers.
6795 SDValue InVec2, InVec3;
6797 bool CanFold = true;
6799 if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, Half, InVec0, InVec1) &&
6800 isHorizontalBinOp(BV, ISD::ADD, DAG, Half, NumElts, InVec2, InVec3) &&
6801 ((InVec0.getOpcode() == ISD::UNDEF ||
6802 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
6803 ((InVec1.getOpcode() == ISD::UNDEF ||
6804 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
6805 X86Opcode = X86ISD::HADD;
6806 else if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, Half, InVec0, InVec1) &&
6807 isHorizontalBinOp(BV, ISD::SUB, DAG, Half, NumElts, InVec2, InVec3) &&
6808 ((InVec0.getOpcode() == ISD::UNDEF ||
6809 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
6810 ((InVec1.getOpcode() == ISD::UNDEF ||
6811 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
6812 X86Opcode = X86ISD::HSUB;
6817 // Fold this build_vector into a single horizontal add/sub.
6818 // Do this only if the target has AVX2.
6819 if (Subtarget->hasAVX2())
6820 return DAG.getNode(X86Opcode, DL, VT, InVec0, InVec1);
6822 // Do not try to expand this build_vector into a pair of horizontal
6823 // add/sub if we can emit a pair of scalar add/sub.
6824 if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
6827 // Convert this build_vector into a pair of horizontal binops followed by a concat vector.
6829 bool isUndefLO = NumUndefsLO == Half;
6830 bool isUndefHI = NumUndefsHI == Half;
6831 return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, false,
6832 isUndefLO, isUndefHI);
6836 if ((VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v8i32 ||
6837 VT == MVT::v16i16) && Subtarget->hasAVX()) {
6839 if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
6840 X86Opcode = X86ISD::HADD;
6841 else if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, NumElts, InVec0, InVec1))
6842 X86Opcode = X86ISD::HSUB;
6843 else if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, NumElts, InVec0, InVec1))
6844 X86Opcode = X86ISD::FHADD;
6845 else if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, NumElts, InVec0, InVec1))
6846 X86Opcode = X86ISD::FHSUB;
6850 // Don't try to expand this build_vector into a pair of horizontal add/sub
6851 // if we can simply emit a pair of scalar add/sub.
6852 if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
6855 // Convert this build_vector into two horizontal add/sub followed by a concat vector.
6857 bool isUndefLO = NumUndefsLO == Half;
6858 bool isUndefHI = NumUndefsHI == Half;
6859 return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, true,
6860 isUndefLO, isUndefHI);
6867 X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
6870 MVT VT = Op.getSimpleValueType();
6871 MVT ExtVT = VT.getVectorElementType();
6872 unsigned NumElems = Op.getNumOperands();
6874 // Generate vectors for predicate vectors.
6875 if (VT.getScalarType() == MVT::i1 && Subtarget->hasAVX512())
6876 return LowerBUILD_VECTORvXi1(Op, DAG);
6878 // Vectors containing all zeros can be matched by pxor and xorps later
6879 if (ISD::isBuildVectorAllZeros(Op.getNode())) {
6880 // Canonicalize this to <4 x i32> to 1) ensure the zero vectors are CSE'd
6881 // and 2) ensure that i64 scalars are eliminated on x86-32 hosts.
6882 if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32)
6885 return getZeroVector(VT, Subtarget, DAG, dl);
6888 // Vectors containing all ones can be matched by pcmpeqd on 128-bit width
6889 // vectors or broken into v4i32 operations on 256-bit vectors. AVX2 can use
6890 // vpcmpeqd on 256-bit vectors.
6891 if (Subtarget->hasSSE2() && ISD::isBuildVectorAllOnes(Op.getNode())) {
6892 if (VT == MVT::v4i32 || (VT == MVT::v8i32 && Subtarget->hasInt256()))
6895 if (!VT.is512BitVector())
6896 return getOnesVector(VT, Subtarget->hasInt256(), DAG, dl);
6899 SDValue Broadcast = LowerVectorBroadcast(Op, Subtarget, DAG);
6900 if (Broadcast.getNode())
6903 unsigned EVTBits = ExtVT.getSizeInBits();
6905 unsigned NumZero = 0;
6906 unsigned NumNonZero = 0;
6907 unsigned NonZeros = 0;
6908 bool IsAllConstants = true;
6909 SmallSet<SDValue, 8> Values;
6910 for (unsigned i = 0; i < NumElems; ++i) {
6911 SDValue Elt = Op.getOperand(i);
6912 if (Elt.getOpcode() == ISD::UNDEF)
6915 if (Elt.getOpcode() != ISD::Constant &&
6916 Elt.getOpcode() != ISD::ConstantFP)
6917 IsAllConstants = false;
6918 if (X86::isZeroNode(Elt))
6921 NonZeros |= (1 << i);
6926 // All undef vector. Return an UNDEF. All zero vectors were handled above.
6927 if (NumNonZero == 0)
6928 return DAG.getUNDEF(VT);
6930 // Special case for single non-zero, non-undef, element.
6931 if (NumNonZero == 1) {
6932 unsigned Idx = countTrailingZeros(NonZeros);
6933 SDValue Item = Op.getOperand(Idx);
6935 // If this is an insertion of an i64 value on x86-32, and if the top bits of
6936 // the value are obviously zero, truncate the value to i32 and do the
6937 // insertion that way. Only do this if the value is non-constant or if the
6938 // value is a constant being inserted into element 0. It is cheaper to do
6939 // a constant pool load than it is to do a movd + shuffle.
6940 if (ExtVT == MVT::i64 && !Subtarget->is64Bit() &&
6941 (!IsAllConstants || Idx == 0)) {
6942 if (DAG.MaskedValueIsZero(Item, APInt::getBitsSet(64, 32, 64))) {
6944 assert(VT == MVT::v2i64 && "Expected an SSE value type!");
6945 EVT VecVT = MVT::v4i32;
6946 unsigned VecElts = 4;
6948 // Truncate the value (which may itself be a constant) to i32, and
6949 // convert it to a vector with movd (S2V+shuffle to zero extend).
6950 Item = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Item);
6951 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Item);
6953 // If using the new shuffle lowering, just directly insert this.
6954 if (ExperimentalVectorShuffleLowering)
6956 ISD::BITCAST, dl, VT,
6957 getShuffleVectorZeroOrUndef(Item, Idx * 2, true, Subtarget, DAG));
6959 Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
6961 // Now we have our 32-bit value zero extended in the low element of
6962 // a vector. If Idx != 0, swizzle it into place.
6964 SmallVector<int, 4> Mask;
6965 Mask.push_back(Idx);
6966 for (unsigned i = 1; i != VecElts; ++i)
6968 Item = DAG.getVectorShuffle(VecVT, dl, Item, DAG.getUNDEF(VecVT),
6971 return DAG.getNode(ISD::BITCAST, dl, VT, Item);
6975 // If we have a constant or non-constant insertion into the low element of
6976 // a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into
6977 // the rest of the elements. This will be matched as movd/movq/movss/movsd
6978 // depending on what the source datatype is.
6981 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
6983 if (ExtVT == MVT::i32 || ExtVT == MVT::f32 || ExtVT == MVT::f64 ||
6984 (ExtVT == MVT::i64 && Subtarget->is64Bit())) {
6985 if (VT.is256BitVector() || VT.is512BitVector()) {
6986 SDValue ZeroVec = getZeroVector(VT, Subtarget, DAG, dl);
6987 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, ZeroVec,
6988 Item, DAG.getIntPtrConstant(0));
6990 assert(VT.is128BitVector() && "Expected an SSE value type!");
6991 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
6992 // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector.
6993 return getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
6996 if (ExtVT == MVT::i16 || ExtVT == MVT::i8) {
6997 Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item);
6998 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Item);
6999 if (VT.is256BitVector()) {
7000 SDValue ZeroVec = getZeroVector(MVT::v8i32, Subtarget, DAG, dl);
7001 Item = Insert128BitVector(ZeroVec, Item, 0, DAG, dl);
7003 assert(VT.is128BitVector() && "Expected an SSE value type!");
7004 Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
7006 return DAG.getNode(ISD::BITCAST, dl, VT, Item);
7010 // Is it a vector logical left shift?
7011 if (NumElems == 2 && Idx == 1 &&
7012 X86::isZeroNode(Op.getOperand(0)) &&
7013 !X86::isZeroNode(Op.getOperand(1))) {
7014 unsigned NumBits = VT.getSizeInBits();
7015 return getVShift(true, VT,
7016 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
7017 VT, Op.getOperand(1)),
7018 NumBits/2, DAG, *this, dl);
7021 if (IsAllConstants) // Otherwise, it's better to do a constpool load.
7024 // Otherwise, if this is a vector with i32 or f32 elements, and the element
7025 // is a non-constant being inserted into an element other than the low one,
7026 // we can't use a constant pool load. Instead, use SCALAR_TO_VECTOR (aka
7027 // movd/movss) to move this into the low element, then shuffle it into place.
7029 if (EVTBits == 32) {
7030 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
7032 // If using the new shuffle lowering, just directly insert this.
7033 if (ExperimentalVectorShuffleLowering)
7034 return getShuffleVectorZeroOrUndef(Item, Idx, NumZero > 0, Subtarget, DAG);
7036 // Turn it into a shuffle of zero and zero-extended scalar to vector.
7037 Item = getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0, Subtarget, DAG);
7038 SmallVector<int, 8> MaskVec;
7039 for (unsigned i = 0; i != NumElems; ++i)
7040 MaskVec.push_back(i == Idx ? 0 : 1);
7041 return DAG.getVectorShuffle(VT, dl, Item, DAG.getUNDEF(VT), &MaskVec[0]);
7045 // Splat is obviously ok. Let legalizer expand it to a shuffle.
7046 if (Values.size() == 1) {
7047 if (EVTBits == 32) {
7048 // Instead of a shuffle like this:
7049 // shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0>
7050 // Check if it's possible to issue this instead.
7051 // shuffle (vload ptr), undef, <1, 1, 1, 1>
7052 unsigned Idx = countTrailingZeros(NonZeros);
7053 SDValue Item = Op.getOperand(Idx);
7054 if (Op.getNode()->isOnlyUserOf(Item.getNode()))
7055 return LowerAsSplatVectorLoad(Item, VT, dl, DAG);
7060 // A vector full of immediates; various special cases are already
7061 // handled, so this is best done with a single constant-pool load.
7065 // For AVX-length vectors, see if we can use a vector load to get all of the
7066 // elements, otherwise build the individual 128-bit pieces and use
7067 // shuffles to put them in place.
7068 if (VT.is256BitVector() || VT.is512BitVector()) {
7069 SmallVector<SDValue, 64> V(Op->op_begin(), Op->op_begin() + NumElems);
7071 // Check for a build vector of consecutive loads.
7072 if (SDValue LD = EltsFromConsecutiveLoads(VT, V, dl, DAG, false))
7075 EVT HVT = EVT::getVectorVT(*DAG.getContext(), ExtVT, NumElems/2);
7077 // Build both the lower and upper subvector.
7078 SDValue Lower = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT,
7079 makeArrayRef(&V[0], NumElems/2));
7080 SDValue Upper = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT,
7081 makeArrayRef(&V[NumElems / 2], NumElems/2));
7083 // Recreate the wider vector with the lower and upper part.
7084 if (VT.is256BitVector())
7085 return Concat128BitVectors(Lower, Upper, VT, NumElems, DAG, dl);
7086 return Concat256BitVectors(Lower, Upper, VT, NumElems, DAG, dl);
7089 // Let legalizer expand 2-wide build_vectors.
7090 if (EVTBits == 64) {
7091 if (NumNonZero == 1) {
7092 // One half is zero or undef.
7093 unsigned Idx = countTrailingZeros(NonZeros);
7094 SDValue V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT,
7095 Op.getOperand(Idx));
7096 return getShuffleVectorZeroOrUndef(V2, Idx, true, Subtarget, DAG);
7101 // If element VT is < 32 bits, convert it to inserts into a zero vector.
7102 if (EVTBits == 8 && NumElems == 16) {
7103 SDValue V = LowerBuildVectorv16i8(Op, NonZeros,NumNonZero,NumZero, DAG,
7105 if (V.getNode()) return V;
7108 if (EVTBits == 16 && NumElems == 8) {
7109 SDValue V = LowerBuildVectorv8i16(Op, NonZeros,NumNonZero,NumZero, DAG,
7111 if (V.getNode()) return V;
7114 // If element VT is == 32 bits and has 4 elems, try to generate an INSERTPS
7115 if (EVTBits == 32 && NumElems == 4) {
7116 SDValue V = LowerBuildVectorv4x32(Op, DAG, Subtarget, *this);
7121 // If element VT is == 32 bits, turn it into a number of shuffles.
7122 SmallVector<SDValue, 8> V(NumElems);
7123 if (NumElems == 4 && NumZero > 0) {
7124 for (unsigned i = 0; i < 4; ++i) {
7125 bool isZero = !(NonZeros & (1 << i));
7127 V[i] = getZeroVector(VT, Subtarget, DAG, dl);
7129 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
7132 for (unsigned i = 0; i < 2; ++i) {
7133 switch ((NonZeros & (0x3 << i*2)) >> (i*2)) {
7136 V[i] = V[i*2]; // Must be a zero vector.
7139 V[i] = getMOVL(DAG, dl, VT, V[i*2+1], V[i*2]);
7142 V[i] = getMOVL(DAG, dl, VT, V[i*2], V[i*2+1]);
7145 V[i] = getUnpackl(DAG, dl, VT, V[i*2], V[i*2+1]);
7150 bool Reverse1 = (NonZeros & 0x3) == 2;
7151 bool Reverse2 = ((NonZeros & (0x3 << 2)) >> 2) == 2;
7155 static_cast<int>(Reverse2 ? NumElems+1 : NumElems),
7156 static_cast<int>(Reverse2 ? NumElems : NumElems+1)
7158 return DAG.getVectorShuffle(VT, dl, V[0], V[1], &MaskVec[0]);
7161 if (Values.size() > 1 && VT.is128BitVector()) {
7162 // Check for a build vector of consecutive loads.
7163 for (unsigned i = 0; i < NumElems; ++i)
7164 V[i] = Op.getOperand(i);
7166 // Check for elements which are consecutive loads.
7167 SDValue LD = EltsFromConsecutiveLoads(VT, V, dl, DAG, false);
7171 // Check for a build vector from mostly shuffle plus few inserting.
7172 SDValue Sh = buildFromShuffleMostly(Op, DAG);
7176 // For SSE 4.1, use insertps to put the high elements into the low element.
7177 if (Subtarget->hasSSE41()) {
7179 if (Op.getOperand(0).getOpcode() != ISD::UNDEF)
7180 Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0));
7182 Result = DAG.getUNDEF(VT);
7184 for (unsigned i = 1; i < NumElems; ++i) {
7185 if (Op.getOperand(i).getOpcode() == ISD::UNDEF) continue;
7186 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result,
7187 Op.getOperand(i), DAG.getIntPtrConstant(i));
7192 // Otherwise, expand into a number of unpckl*, start by extending each of
7193 // our (non-undef) elements to the full vector width with the element in the
7194 // bottom slot of the vector (which generates no code for SSE).
7195 for (unsigned i = 0; i < NumElems; ++i) {
7196 if (Op.getOperand(i).getOpcode() != ISD::UNDEF)
7197 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
7199 V[i] = DAG.getUNDEF(VT);
7202 // Next, we iteratively mix elements, e.g. for v4f32:
7203 // Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0>
7204 // : unpcklps 1, 3 ==> Y: <?, ?, 3, 1>
7205 // Step 2: unpcklps X, Y ==> <3, 2, 1, 0>
7206 unsigned EltStride = NumElems >> 1;
7207 while (EltStride != 0) {
7208 for (unsigned i = 0; i < EltStride; ++i) {
7209 // If V[i+EltStride] is undef and this is the first round of mixing,
7210 // then it is safe to just drop this shuffle: V[i] is already in the
7211 // right place, the one element (since it's the first round) being
7212 // inserted as undef can be dropped. This isn't safe for successive
7213 // rounds because they will permute elements within both vectors.
7214 if (V[i+EltStride].getOpcode() == ISD::UNDEF &&
7215 EltStride == NumElems/2)
7218 V[i] = getUnpackl(DAG, dl, VT, V[i], V[i + EltStride]);
7227 // LowerAVXCONCAT_VECTORS - 256-bit AVX can use the vinsertf128 instruction
7228 // to create 256-bit vectors from two other 128-bit ones.
7229 static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
7231 MVT ResVT = Op.getSimpleValueType();
7233 assert((ResVT.is256BitVector() ||
7234 ResVT.is512BitVector()) && "Value type must be 256-/512-bit wide");
7236 SDValue V1 = Op.getOperand(0);
7237 SDValue V2 = Op.getOperand(1);
7238 unsigned NumElems = ResVT.getVectorNumElements();
7239 if(ResVT.is256BitVector())
7240 return Concat128BitVectors(V1, V2, ResVT, NumElems, DAG, dl);
7242 if (Op.getNumOperands() == 4) {
7243 MVT HalfVT = MVT::getVectorVT(ResVT.getScalarType(),
7244 ResVT.getVectorNumElements()/2);
7245 SDValue V3 = Op.getOperand(2);
7246 SDValue V4 = Op.getOperand(3);
7247 return Concat256BitVectors(Concat128BitVectors(V1, V2, HalfVT, NumElems/2, DAG, dl),
7248 Concat128BitVectors(V3, V4, HalfVT, NumElems/2, DAG, dl), ResVT, NumElems, DAG, dl);
7250 return Concat256BitVectors(V1, V2, ResVT, NumElems, DAG, dl);
7253 static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
7254 MVT LLVM_ATTRIBUTE_UNUSED VT = Op.getSimpleValueType();
7255 assert((VT.is256BitVector() && Op.getNumOperands() == 2) ||
7256 (VT.is512BitVector() && (Op.getNumOperands() == 2 ||
7257 Op.getNumOperands() == 4)));
7259 // AVX can use the vinsertf128 instruction to create 256-bit vectors
7260 // from two other 128-bit ones.
7262 // 512-bit vector may contain 2 256-bit vectors or 4 128-bit vectors
7263 return LowerAVXCONCAT_VECTORS(Op, DAG);
7267 //===----------------------------------------------------------------------===//
7268 // Vector shuffle lowering
7270 // This is an experimental code path for lowering vector shuffles on x86. It is
7271 // designed to handle arbitrary vector shuffles and blends, gracefully
7272 // degrading performance as necessary. It works hard to recognize idiomatic
7273 // shuffles and lower them to optimal instruction patterns without leaving
7274 // a framework that allows reasonably efficient handling of all vector shuffle patterns.
7276 //===----------------------------------------------------------------------===//
7278 /// \brief Tiny helper function to identify a no-op mask.
7280 /// This is a somewhat boring predicate function. It checks whether the mask
7281 /// array input, which is assumed to be a single-input shuffle mask of the kind
7282 /// used by the X86 shuffle instructions (not a fully general
7283 /// ShuffleVectorSDNode mask), requires any shuffles to occur. Both undef and an
7284 /// in-place shuffle are 'no-op's.
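///
/// For example, the masks <0, 1, 2, 3> and <0, -1, 2, -1> are both no-ops,
/// while <1, 0, 2, 3> is not (element 0 must move).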
7285 static bool isNoopShuffleMask(ArrayRef<int> Mask) {
7286 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7287 if (Mask[i] != -1 && Mask[i] != i)
7292 /// \brief Helper function to classify a mask as a single-input mask.
7294 /// This isn't a generic single-input test because in the vector shuffle
7295 /// lowering we canonicalize single inputs to be the first input operand. This
7296 /// means we can more quickly test for a single input by only checking whether
7297 /// an input from the second operand exists. We also assume that the size of
7298 /// the mask corresponds to the size of the input vectors, which isn't true in the
7299 /// fully general case.
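///
/// For example, for a 4-element mask, <3, 1, -1, 0> is a single-input mask
/// (no element selects from the second operand), while <0, 5, 1, 4> is not.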
7300 static bool isSingleInputShuffleMask(ArrayRef<int> Mask) {
7302 if (M >= (int)Mask.size())
7307 /// \brief Test whether there are elements crossing 128-bit lanes in this
7310 /// X86 divides up its shuffles into in-lane and cross-lane shuffle operations
7311 /// and we routinely test for these.
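///
/// For example, for v8f32 (four elements per 128-bit lane) the mask
/// <0, 1, 2, 3, 4, 5, 6, 7> does not cross lanes, whereas
/// <4, 1, 2, 3, 0, 5, 6, 7> does (elements 0 and 4 swap lanes).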
7312 static bool is128BitLaneCrossingShuffleMask(MVT VT, ArrayRef<int> Mask) {
7313 int LaneSize = 128 / VT.getScalarSizeInBits();
7314 int Size = Mask.size();
7315 for (int i = 0; i < Size; ++i)
7316 if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
7321 /// \brief Test whether a shuffle mask is equivalent within each 128-bit lane.
7323 /// This checks a shuffle mask to see if it is performing the same
7324 /// 128-bit lane-relative shuffle in each 128-bit lane. This trivially implies
7325 /// that it is also not lane-crossing. It may however involve a blend from the
7326 /// same lane of a second vector.
7328 /// The specific repeated shuffle mask is populated in \p RepeatedMask, as it is
7329 /// non-trivial to compute in the face of undef lanes. The representation is
7330 /// *not* suitable for use with existing 128-bit shuffles as it will contain
7331 /// entries from both V1 and V2 inputs to the wider mask.
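///
/// For example, the v8f32 mask <0, 1, 10, 11, 4, 5, 14, 15> performs the same
/// in-lane shuffle in both 128-bit lanes; the computed repeated mask is
/// <0, 1, 10, 11>, where the entries >= 8 (the mask width) denote in-lane
/// elements taken from V2.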
7333 is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
7334 SmallVectorImpl<int> &RepeatedMask) {
7335 int LaneSize = 128 / VT.getScalarSizeInBits();
7336 RepeatedMask.resize(LaneSize, -1);
7337 int Size = Mask.size();
7338 for (int i = 0; i < Size; ++i) {
7341 if ((Mask[i] % Size) / LaneSize != i / LaneSize)
7342 // This entry crosses lanes, so there is no way to model this shuffle.
7345 // Ok, handle the in-lane shuffles by detecting if and when they repeat.
7346 if (RepeatedMask[i % LaneSize] == -1)
7347 // This is the first non-undef entry in this slot of a 128-bit lane.
7348 RepeatedMask[i % LaneSize] =
7349 Mask[i] < Size ? Mask[i] % LaneSize : Mask[i] % LaneSize + Size;
7350 else if (RepeatedMask[i % LaneSize] + (i / LaneSize) * LaneSize != Mask[i])
7351 // Found a mismatch with the repeated mask.
7357 /// \brief Base case helper for testing a single mask element.
7358 static bool isShuffleEquivalentImpl(SDValue V1, SDValue V2,
7359 BuildVectorSDNode *BV1,
7360 BuildVectorSDNode *BV2, ArrayRef<int> Mask,
7362 int Size = Mask.size();
7363 if (Mask[i] != -1 && Mask[i] != Arg) {
7364 auto *MaskBV = Mask[i] < Size ? BV1 : BV2;
7365 auto *ArgsBV = Arg < Size ? BV1 : BV2;
7366 if (!MaskBV || !ArgsBV ||
7367 MaskBV->getOperand(Mask[i] % Size) != ArgsBV->getOperand(Arg % Size))
7373 /// \brief Recursive helper to peel off and test each mask element.
7374 template <typename... Ts>
7375 static bool isShuffleEquivalentImpl(SDValue V1, SDValue V2,
7376 BuildVectorSDNode *BV1,
7377 BuildVectorSDNode *BV2, ArrayRef<int> Mask,
7378 int i, int Arg, Ts... Args) {
7379 if (!isShuffleEquivalentImpl(V1, V2, BV1, BV2, Mask, i, Arg))
7382 return isShuffleEquivalentImpl(V1, V2, BV1, BV2, Mask, i + 1, Args...);
7385 /// \brief Checks whether a shuffle mask is equivalent to an explicit list of
7388 /// This is a fast way to test a shuffle mask against a fixed pattern:
7390 /// if (isShuffleEquivalent(Mask, 3, 2, 1, 0)) { ... }
7392 /// It returns true if the mask is exactly as wide as the argument list, and
7393 /// each element of the mask is either -1 (signifying undef) or the value given
7394 /// in the argument.
7395 template <typename... Ts>
7396 static bool isShuffleEquivalent(SDValue V1, SDValue V2, ArrayRef<int> Mask,
7398 if (Mask.size() != sizeof...(Args))
7401 // If the values are build vectors, we can look through them to find
7402 // equivalent inputs that make the shuffles equivalent.
7403 auto *BV1 = dyn_cast<BuildVectorSDNode>(V1);
7404 auto *BV2 = dyn_cast<BuildVectorSDNode>(V2);
7406 // Recursively peel off arguments and test them against the mask.
7407 return isShuffleEquivalentImpl(V1, V2, BV1, BV2, Mask, 0, Args...);
7410 /// \brief Get a 4-lane 8-bit shuffle immediate for a mask.
7412 /// This helper function produces an 8-bit shuffle immediate corresponding to
7413 /// the ubiquitous shuffle encoding scheme used in x86 instructions for
7414 /// shuffling 4 lanes. It can be used with most of the PSHUF instructions for
7417 /// NB: We rely heavily on "undef" masks preserving the input lane.
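///
/// For example, the mask <3, 2, 1, 0> yields the immediate 0b00011011 (0x1B):
/// bits [1:0] = 3, bits [3:2] = 2, bits [5:4] = 1, and bits [7:6] = 0.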
7418 static SDValue getV4X86ShuffleImm8ForMask(ArrayRef<int> Mask,
7419 SelectionDAG &DAG) {
7420 assert(Mask.size() == 4 && "Only 4-lane shuffle masks");
7421 assert(Mask[0] >= -1 && Mask[0] < 4 && "Out of bound mask element!");
7422 assert(Mask[1] >= -1 && Mask[1] < 4 && "Out of bound mask element!");
7423 assert(Mask[2] >= -1 && Mask[2] < 4 && "Out of bound mask element!");
7424 assert(Mask[3] >= -1 && Mask[3] < 4 && "Out of bound mask element!");
7427 Imm |= (Mask[0] == -1 ? 0 : Mask[0]) << 0;
7428 Imm |= (Mask[1] == -1 ? 1 : Mask[1]) << 2;
7429 Imm |= (Mask[2] == -1 ? 2 : Mask[2]) << 4;
7430 Imm |= (Mask[3] == -1 ? 3 : Mask[3]) << 6;
7431 return DAG.getConstant(Imm, MVT::i8);
7434 /// \brief Try to emit a blend instruction for a shuffle using bit math.
7436 /// This is used as a fallback approach when first class blend instructions are
7437 /// unavailable. Currently it is only suitable for integer vectors, but could
7438 /// be generalized for floating point vectors if desirable.
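///
/// As a sketch of the idea: for a v4i32 shuffle with mask <0, 5, 2, 7> this
/// routine builds V1Mask = <-1, 0, -1, 0> and produces
///   (V1 & V1Mask) | (V2 & ~V1Mask),
/// keeping elements 0 and 2 from V1 and elements 1 and 3 from V2.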
7439 static SDValue lowerVectorShuffleAsBitBlend(SDLoc DL, MVT VT, SDValue V1,
7440 SDValue V2, ArrayRef<int> Mask,
7441 SelectionDAG &DAG) {
7442 assert(VT.isInteger() && "Only supports integer vector types!");
7443 MVT EltVT = VT.getScalarType();
7444 int NumEltBits = EltVT.getSizeInBits();
7445 SDValue Zero = DAG.getConstant(0, EltVT);
7446 SDValue AllOnes = DAG.getConstant(APInt::getAllOnesValue(NumEltBits), EltVT);
7447 SmallVector<SDValue, 16> MaskOps;
7448 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7449 if (Mask[i] != -1 && Mask[i] != i && Mask[i] != i + Size)
7450 return SDValue(); // Shuffled input!
7451 MaskOps.push_back(Mask[i] < Size ? AllOnes : Zero);
7454 SDValue V1Mask = DAG.getNode(ISD::BUILD_VECTOR, DL, VT, MaskOps);
7455 V1 = DAG.getNode(ISD::AND, DL, VT, V1, V1Mask);
7456 // Cast V2 and the mask to an i64 vector type so we can use the X86ISD::ANDNP node.
7457 MVT MaskVT = MVT::getVectorVT(MVT::i64, VT.getSizeInBits() / 64);
7458 V2 = DAG.getNode(ISD::BITCAST, DL, VT,
7459 DAG.getNode(X86ISD::ANDNP, DL, MaskVT,
7460 DAG.getNode(ISD::BITCAST, DL, MaskVT, V1Mask),
7461 DAG.getNode(ISD::BITCAST, DL, MaskVT, V2)));
7462 return DAG.getNode(ISD::OR, DL, VT, V1, V2);
7465 /// \brief Try to emit a blend instruction for a shuffle.
7467 /// This doesn't do any checks for the availability of instructions for blending
7468 /// these values. It relies on the availability of the X86ISD::BLENDI pattern to
7469 /// be matched in the backend with the type given. What it does check for is
7470 /// that the shuffle mask is in fact a blend.
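///
/// For example, a v4f32 shuffle with mask <0, 5, 2, 7> is a blend that takes
/// elements 1 and 3 from V2, so it is lowered to
///   (X86ISD::BLENDI V1, V2, 0b1010).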
7471 static SDValue lowerVectorShuffleAsBlend(SDLoc DL, MVT VT, SDValue V1,
7472 SDValue V2, ArrayRef<int> Mask,
7473 const X86Subtarget *Subtarget,
7474 SelectionDAG &DAG) {
7475 unsigned BlendMask = 0;
7476 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7477 if (Mask[i] >= Size) {
7478 if (Mask[i] != i + Size)
7479 return SDValue(); // Shuffled V2 input!
7480 BlendMask |= 1u << i;
7483 if (Mask[i] >= 0 && Mask[i] != i)
7484 return SDValue(); // Shuffled V1 input!
7486 switch (VT.SimpleTy) {
7491 return DAG.getNode(X86ISD::BLENDI, DL, VT, V1, V2,
7492 DAG.getConstant(BlendMask, MVT::i8));
7496 assert(Subtarget->hasAVX2() && "256-bit integer blends require AVX2!");
7500 // If we have AVX2 it is faster to use VPBLENDD when the shuffle fits into
7501 // that instruction.
7502 if (Subtarget->hasAVX2()) {
7503 // Scale the blend by the number of 32-bit dwords per element.
7504 int Scale = VT.getScalarSizeInBits() / 32;
7506 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7507 if (Mask[i] >= Size)
7508 for (int j = 0; j < Scale; ++j)
7509 BlendMask |= 1u << (i * Scale + j);
7511 MVT BlendVT = VT.getSizeInBits() > 128 ? MVT::v8i32 : MVT::v4i32;
7512 V1 = DAG.getNode(ISD::BITCAST, DL, BlendVT, V1);
7513 V2 = DAG.getNode(ISD::BITCAST, DL, BlendVT, V2);
7514 return DAG.getNode(ISD::BITCAST, DL, VT,
7515 DAG.getNode(X86ISD::BLENDI, DL, BlendVT, V1, V2,
7516 DAG.getConstant(BlendMask, MVT::i8)));
7520 // For integer shuffles we need to expand the mask and cast the inputs to
7521 // v8i16s prior to blending.
7522 int Scale = 8 / VT.getVectorNumElements();
7524 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7525 if (Mask[i] >= Size)
7526 for (int j = 0; j < Scale; ++j)
7527 BlendMask |= 1u << (i * Scale + j);
7529 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1);
7530 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V2);
7531 return DAG.getNode(ISD::BITCAST, DL, VT,
7532 DAG.getNode(X86ISD::BLENDI, DL, MVT::v8i16, V1, V2,
7533 DAG.getConstant(BlendMask, MVT::i8)));
7537 assert(Subtarget->hasAVX2() && "256-bit integer blends require AVX2!");
7538 SmallVector<int, 8> RepeatedMask;
7539 if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
7540 // We can lower these with PBLENDW which is mirrored across 128-bit lanes.
7541 assert(RepeatedMask.size() == 8 && "Repeated mask size doesn't match!");
7543 for (int i = 0; i < 8; ++i)
7544 if (RepeatedMask[i] >= 16)
7545 BlendMask |= 1u << i;
7546 return DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
7547 DAG.getConstant(BlendMask, MVT::i8));
7553 // Scale the blend by the number of bytes per element.
7554 int Scale = VT.getScalarSizeInBits() / 8;
7556 // This form of blend is always done on bytes. Compute the byte vector type.
7558 MVT BlendVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
7560 // Compute the VSELECT mask. Note that VSELECT is really confusing in the
7561 // mix of LLVM's code generator and the x86 backend. We tell the code
7562 // generator that boolean values in the elements of an x86 vector register
7563 // are -1 for true and 0 for false. We then use the LLVM semantics of 'true'
7564 // mapping a select to operand #1, and 'false' mapping to operand #2. The
7565 // reality in x86 is that vector masks (pre-AVX-512) use only the high bit
7566 // of the element (the remaining are ignored) and 0 in that high bit would
7567 // mean operand #1 while 1 in the high bit would mean operand #2. So while
7568 // the LLVM model for boolean values in vector elements gets the relevant
7569 // bit set, it is set backwards and over constrained relative to x86's actual model.
7571 SmallVector<SDValue, 32> VSELECTMask;
7572 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7573 for (int j = 0; j < Scale; ++j)
7574 VSELECTMask.push_back(
7575 Mask[i] < 0 ? DAG.getUNDEF(MVT::i8)
7576 : DAG.getConstant(Mask[i] < Size ? -1 : 0, MVT::i8));
7578 V1 = DAG.getNode(ISD::BITCAST, DL, BlendVT, V1);
7579 V2 = DAG.getNode(ISD::BITCAST, DL, BlendVT, V2);
7581 ISD::BITCAST, DL, VT,
7582 DAG.getNode(ISD::VSELECT, DL, BlendVT,
7583 DAG.getNode(ISD::BUILD_VECTOR, DL, BlendVT, VSELECTMask),
7588 llvm_unreachable("Not a supported integer vector type!");
7592 /// \brief Try to lower as a blend of elements from two inputs followed by
7593 /// a single-input permutation.
7595 /// This matches the pattern where we can blend elements from two inputs and
7596 /// then reduce the shuffle to a single-input permutation.
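///
/// As an illustration, a v4i32 shuffle with mask <1, 4, 3, 6> first blends
/// with mask <4, 1, 6, 3> (pulling each needed element into its "home" slot)
/// and then permutes the blended result with the single-input mask
/// <1, 0, 3, 2>.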
7597 static SDValue lowerVectorShuffleAsBlendAndPermute(SDLoc DL, MVT VT, SDValue V1,
7600 SelectionDAG &DAG) {
7601 // We build up the blend mask while checking whether a blend is a viable way
7602 // to reduce the shuffle.
7603 SmallVector<int, 32> BlendMask(Mask.size(), -1);
7604 SmallVector<int, 32> PermuteMask(Mask.size(), -1);
7606 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7610 assert(Mask[i] < Size * 2 && "Shuffle input is out of bounds.");
7612 if (BlendMask[Mask[i] % Size] == -1)
7613 BlendMask[Mask[i] % Size] = Mask[i];
7614 else if (BlendMask[Mask[i] % Size] != Mask[i])
7615 return SDValue(); // Can't blend in the needed input!
7617 PermuteMask[i] = Mask[i] % Size;
7620 SDValue V = DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
7621 return DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), PermuteMask);
7624 /// \brief Generic routine to decompose a shuffle and blend into independent
7625 /// blends and permutes.
7627 /// This matches the extremely common pattern for handling combined
7628 /// shuffle+blend operations on newer X86 ISAs where we have very fast blend
7629 /// operations. It will try to pick the best arrangement of shuffles and blends.
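///
/// For example, a v4i32 shuffle with mask <1, 5, 2, 6> decomposes into a
/// single-input shuffle of V1 with mask <1, -1, 2, -1>, a single-input
/// shuffle of V2 with mask <-1, 1, -1, 2>, and a final blend with mask
/// <0, 5, 2, 7>.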
7631 static SDValue lowerVectorShuffleAsDecomposedShuffleBlend(SDLoc DL, MVT VT,
7635 SelectionDAG &DAG) {
7636 // Shuffle the input elements into the desired positions in V1 and V2 and
7637 // blend them together.
7638 SmallVector<int, 32> V1Mask(Mask.size(), -1);
7639 SmallVector<int, 32> V2Mask(Mask.size(), -1);
7640 SmallVector<int, 32> BlendMask(Mask.size(), -1);
7641 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7642 if (Mask[i] >= 0 && Mask[i] < Size) {
7643 V1Mask[i] = Mask[i];
7645 } else if (Mask[i] >= Size) {
7646 V2Mask[i] = Mask[i] - Size;
7647 BlendMask[i] = i + Size;
7650 // Try to lower with the simpler initial blend strategy unless one of the
7651 // input shuffles would be a no-op. We prefer to shuffle inputs as the
7652 // shuffle may be able to fold with a load or other benefit. However, when
7653 // we'll have to do 2x as many shuffles in order to achieve this, blending
7654 // first is a better strategy.
7655 if (!isNoopShuffleMask(V1Mask) && !isNoopShuffleMask(V2Mask))
7656 if (SDValue BlendPerm =
7657 lowerVectorShuffleAsBlendAndPermute(DL, VT, V1, V2, Mask, DAG))
7660 V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
7661 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
7662 return DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
7665 /// \brief Try to lower a vector shuffle as a byte rotation.
7667 /// SSSE3 has a generic PALIGNR instruction in x86 that will do an arbitrary
7668 /// byte-rotation of the concatenation of two vectors; pre-SSSE3 can use
7669 /// a PSRLDQ/PSLLDQ/POR pattern to get a similar effect. This routine will
7670 /// try to generically lower a vector shuffle through such a pattern. It
7671 /// does not check for the profitability of lowering either as PALIGNR or
7672 /// PSRLDQ/PSLLDQ/POR, only whether the mask is valid to lower in that form.
7673 /// This matches shuffle vectors that look like:
7675 /// v8i16 [11, 12, 13, 14, 15, 0, 1, 2]
7677 /// Essentially it concatenates V1 and V2, shifts right by some number of
7678 /// elements, and takes the low elements as the result. Note that while this is
7679 /// specified as a *right shift* because x86 is little-endian, it is a *left
7680 /// rotate* of the vector lanes.
7681 static SDValue lowerVectorShuffleAsByteRotate(SDLoc DL, MVT VT, SDValue V1,
7684 const X86Subtarget *Subtarget,
7685 SelectionDAG &DAG) {
7686 assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
7688 int NumElts = Mask.size();
7689 int NumLanes = VT.getSizeInBits() / 128;
7690 int NumLaneElts = NumElts / NumLanes;
7692 // We need to detect various ways of spelling a rotation:
7693 // [11, 12, 13, 14, 15, 0, 1, 2]
7694 // [-1, 12, 13, 14, -1, -1, 1, -1]
7695 // [-1, -1, -1, -1, -1, -1, 1, 2]
7696 // [ 3, 4, 5, 6, 7, 8, 9, 10]
7697 // [-1, 4, 5, 6, -1, -1, 9, -1]
7698 // [-1, 4, 5, 6, -1, -1, -1, -1]
7701 for (int l = 0; l < NumElts; l += NumLaneElts) {
7702 for (int i = 0; i < NumLaneElts; ++i) {
7703 if (Mask[l + i] == -1)
7705 assert(Mask[l + i] >= 0 && "Only -1 is a valid negative mask element!");
7707 // Get the mod-Size index and lane correct it.
7708 int LaneIdx = (Mask[l + i] % NumElts) - l;
7709 // Make sure it was in this lane.
7710 if (LaneIdx < 0 || LaneIdx >= NumLaneElts)
7713 // Determine where a rotated vector would have started.
7714 int StartIdx = i - LaneIdx;
7716 // The identity rotation isn't interesting, stop.
7719 // If we found the tail of a vector the rotation must be the missing
7720 // front. If we found the head of a vector, it must be how much of the
7722 int CandidateRotation = StartIdx < 0 ? -StartIdx : NumLaneElts - StartIdx;
7725 Rotation = CandidateRotation;
7726 else if (Rotation != CandidateRotation)
7727 // The rotations don't match, so we can't match this mask.
7730 // Compute which value this mask is pointing at.
7731 SDValue MaskV = Mask[l + i] < NumElts ? V1 : V2;
7733 // Compute which of the two target values this index should be assigned
7734 // to. This reflects whether the high elements are remaining or the low
7735 // elements are remaining.
7736 SDValue &TargetV = StartIdx < 0 ? Hi : Lo;
7738 // Either set up this value if we've not encountered it before, or check
7739 // that it remains consistent.
7742 else if (TargetV != MaskV)
7743 // This may be a rotation, but it pulls from the inputs in some
7744 // unsupported interleaving.
7749 // Check that we successfully analyzed the mask, and normalize the results.
7750 assert(Rotation != 0 && "Failed to locate a viable rotation!");
7751 assert((Lo || Hi) && "Failed to find a rotated input vector!");
7757 // The actual rotate instruction rotates bytes, so we need to scale the
7758 // rotation based on how many bytes are in the vector lane.
7759 int Scale = 16 / NumLaneElts;
7761 // SSSE3 targets can use the palignr instruction.
7762 if (Subtarget->hasSSSE3()) {
7763 // Cast the inputs to i8 vector of correct length to match PALIGNR.
7764 MVT AlignVT = MVT::getVectorVT(MVT::i8, 16 * NumLanes);
7765 Lo = DAG.getNode(ISD::BITCAST, DL, AlignVT, Lo);
7766 Hi = DAG.getNode(ISD::BITCAST, DL, AlignVT, Hi);
7768 return DAG.getNode(ISD::BITCAST, DL, VT,
7769 DAG.getNode(X86ISD::PALIGNR, DL, AlignVT, Hi, Lo,
7770 DAG.getConstant(Rotation * Scale, MVT::i8)));
7773 assert(VT.getSizeInBits() == 128 &&
7774 "Rotate-based lowering only supports 128-bit lowering!");
7775 assert(Mask.size() <= 16 &&
7776 "Can shuffle at most 16 bytes in a 128-bit vector!");
7778 // Default SSE2 implementation
7779 int LoByteShift = 16 - Rotation * Scale;
7780 int HiByteShift = Rotation * Scale;
7782 // Cast the inputs to v2i64 to match PSLLDQ/PSRLDQ.
7783 Lo = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Lo);
7784 Hi = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Hi);
7786 SDValue LoShift = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v2i64, Lo,
7787 DAG.getConstant(LoByteShift, MVT::i8));
7788 SDValue HiShift = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v2i64, Hi,
7789 DAG.getConstant(HiByteShift, MVT::i8));
7790 return DAG.getNode(ISD::BITCAST, DL, VT,
7791 DAG.getNode(ISD::OR, DL, MVT::v2i64, LoShift, HiShift));
7794 /// \brief Compute whether each element of a shuffle is zeroable.
7796 /// A "zeroable" vector shuffle element is one which can be lowered to zero.
7797 /// Either it is an undef element in the shuffle mask, the element of the input
7798 /// referenced is undef, or the element of the input referenced is known to be
7799 /// zero. Many x86 shuffles can zero lanes cheaply and we often want to handle
7800 /// as many lanes with this technique as possible to simplify the remaining shuffle.
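///
/// For example, if V2 is an all-zeros build_vector, then for the v4i32 mask
/// <0, 4, -1, 6> elements 1, 2, and 3 are zeroable: elements 1 and 3 reference
/// the known-zero V2, and element 2 is undef in the mask.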
7802 static SmallBitVector computeZeroableShuffleElements(ArrayRef<int> Mask,
7803 SDValue V1, SDValue V2) {
7804 SmallBitVector Zeroable(Mask.size(), false);
7806 while (V1.getOpcode() == ISD::BITCAST)
7807 V1 = V1->getOperand(0);
7808 while (V2.getOpcode() == ISD::BITCAST)
7809 V2 = V2->getOperand(0);
7811 bool V1IsZero = ISD::isBuildVectorAllZeros(V1.getNode());
7812 bool V2IsZero = ISD::isBuildVectorAllZeros(V2.getNode());
7814 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7816 // Handle the easy cases.
7817 if (M < 0 || (M >= 0 && M < Size && V1IsZero) || (M >= Size && V2IsZero)) {
7822 // If this is an index into a build_vector node (which has the same number
7823 // of elements), dig out the input value and use it.
7824 SDValue V = M < Size ? V1 : V2;
7825 if (V.getOpcode() != ISD::BUILD_VECTOR || Size != (int)V.getNumOperands())
7828 SDValue Input = V.getOperand(M % Size);
7829 // The UNDEF opcode check really should be dead code here, but not quite
7830 // worth asserting on (it isn't invalid, just unexpected).
7831 if (Input.getOpcode() == ISD::UNDEF || X86::isZeroNode(Input))
7838 /// \brief Try to emit a bitmask instruction for a shuffle.
7840 /// This handles cases where we can model a blend exactly as a bitmask due to
7841 /// one of the inputs being zeroable.
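///
/// For example, if V2 is known to be all zeros, a v4i32 shuffle with mask
/// <0, 1, 6, 7> can be modeled as V1 & <-1, -1, 0, 0>: lanes 2 and 3 only
/// need zeros, so masking V1 is sufficient.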
7842 static SDValue lowerVectorShuffleAsBitMask(SDLoc DL, MVT VT, SDValue V1,
7843 SDValue V2, ArrayRef<int> Mask,
7844 SelectionDAG &DAG) {
7845 MVT EltVT = VT.getScalarType();
7846 int NumEltBits = EltVT.getSizeInBits();
7847 MVT IntEltVT = MVT::getIntegerVT(NumEltBits);
7848 SDValue Zero = DAG.getConstant(0, IntEltVT);
7849 SDValue AllOnes = DAG.getConstant(APInt::getAllOnesValue(NumEltBits), IntEltVT);
7850 if (EltVT.isFloatingPoint()) {
7851 Zero = DAG.getNode(ISD::BITCAST, DL, EltVT, Zero);
7852 AllOnes = DAG.getNode(ISD::BITCAST, DL, EltVT, AllOnes);
7854 SmallVector<SDValue, 16> VMaskOps(Mask.size(), Zero);
7855 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
7857 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7860 if (Mask[i] % Size != i)
7861 return SDValue(); // Not a blend.
7863 V = Mask[i] < Size ? V1 : V2;
7864 else if (V != (Mask[i] < Size ? V1 : V2))
7865 return SDValue(); // Can only let one input through the mask.
7867 VMaskOps[i] = AllOnes;
7870 return SDValue(); // No non-zeroable elements!
7872 SDValue VMask = DAG.getNode(ISD::BUILD_VECTOR, DL, VT, VMaskOps);
7873 V = DAG.getNode(VT.isFloatingPoint()
7874 ? (unsigned) X86ISD::FAND : (unsigned) ISD::AND,
7879 /// \brief Try to lower a vector shuffle as a bit shift (shifts in zeros).
7881 /// Attempts to match a shuffle mask against the PSLL(W/D/Q/DQ) and
7882 /// PSRL(W/D/Q/DQ) SSE2 and AVX2 logical bit-shift instructions. The function
7883 /// matches elements from one of the input vectors shuffled to the left or
7884 /// right with zeroable elements 'shifted in'. It handles both the strictly
7885 /// bit-wise element shifts and the byte shift across an entire 128-bit double quad word lane.
7888 /// PSHL : (little-endian) left bit shift.
7889 /// [ zz, 0, zz, 2 ]
7890 /// [ -1, 4, zz, -1 ]
7891 /// PSRL : (little-endian) right bit shift.
7893 /// [ -1, -1, 7, zz]
7894 /// PSLLDQ : (little-endian) left byte shift
7895 /// [ zz, 0, 1, 2, 3, 4, 5, 6]
7896 /// [ zz, zz, -1, -1, 2, 3, 4, -1]
7897 /// [ zz, zz, zz, zz, zz, zz, -1, 1]
7898 /// PSRLDQ : (little-endian) right byte shift
7899 /// [ 5, 6, 7, zz, zz, zz, zz, zz]
7900 /// [ -1, 5, 6, 7, zz, zz, zz, zz]
7901 /// [ 1, 2, -1, -1, -1, -1, zz, zz]
7902 static SDValue lowerVectorShuffleAsShift(SDLoc DL, MVT VT, SDValue V1,
7903 SDValue V2, ArrayRef<int> Mask,
7904 SelectionDAG &DAG) {
7905 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
7907 int Size = Mask.size();
7908 assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
7910 auto CheckZeros = [&](int Shift, int Scale, bool Left) {
7911 for (int i = 0; i < Size; i += Scale)
7912 for (int j = 0; j < Shift; ++j)
7913 if (!Zeroable[i + j + (Left ? 0 : (Scale - Shift))])
7919 auto MatchShift = [&](int Shift, int Scale, bool Left, SDValue V) {
7920 for (int i = 0; i != Size; i += Scale) {
7921 unsigned Pos = Left ? i + Shift : i;
7922 unsigned Low = Left ? i : i + Shift;
7923 unsigned Len = Scale - Shift;
7924 if (!isSequentialOrUndefInRange(Mask, Pos, Len,
7925 Low + (V == V1 ? 0 : Size)))
7929 int ShiftEltBits = VT.getScalarSizeInBits() * Scale;
7930 bool ByteShift = ShiftEltBits > 64;
7931 unsigned OpCode = Left ? (ByteShift ? X86ISD::VSHLDQ : X86ISD::VSHLI)
7932 : (ByteShift ? X86ISD::VSRLDQ : X86ISD::VSRLI);
7933 int ShiftAmt = Shift * VT.getScalarSizeInBits() / (ByteShift ? 8 : 1);
7935 // Normalize the scale for byte shifts to still produce an i64 element
7937 Scale = ByteShift ? Scale / 2 : Scale;
7939 // We need to round trip through the appropriate type for the shift.
7940 MVT ShiftSVT = MVT::getIntegerVT(VT.getScalarSizeInBits() * Scale);
7941 MVT ShiftVT = MVT::getVectorVT(ShiftSVT, Size / Scale);
7942 assert(DAG.getTargetLoweringInfo().isTypeLegal(ShiftVT) &&
7943 "Illegal integer vector type");
7944 V = DAG.getNode(ISD::BITCAST, DL, ShiftVT, V);
7946 V = DAG.getNode(OpCode, DL, ShiftVT, V, DAG.getConstant(ShiftAmt, MVT::i8));
7947 return DAG.getNode(ISD::BITCAST, DL, VT, V);
7950 // SSE/AVX supports logical shifts up to 64-bit integers - so we can just
7951 // keep doubling the size of the integer elements up to that. We can
7952 // then shift the elements of the integer vector by whole multiples of
7953 // their width within the elements of the larger integer vector. Test each
7954 // multiple to see if we can find a match with the moved element indices
7955 // and that the shifted in elements are all zeroable.
7956 for (int Scale = 2; Scale * VT.getScalarSizeInBits() <= 128; Scale *= 2)
7957 for (int Shift = 1; Shift != Scale; ++Shift)
7958 for (bool Left : {true, false})
7959 if (CheckZeros(Shift, Scale, Left))
7960 for (SDValue V : {V1, V2})
7961 if (SDValue Match = MatchShift(Shift, Scale, Left, V))
7968 /// \brief Lower a vector shuffle as a zero or any extension.
7970 /// Given a specific number of elements, element bit width, and extension
7971 /// stride, produce either a zero or any extension based on the available
7972 /// features of the subtarget.
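///
/// For example, zero-extending the low four bytes of a v16i8 input by a scale
/// of 4 on an SSE4.1 target takes the first branch below and emits a single
/// X86ISD::VZEXT to v4i32 (PMOVZXBD); without SSE4.1 the same extension falls
/// through to the unpack loop, repeatedly interleaving the input with a zero
/// vector (roughly PUNPCKLBW followed by PUNPCKLWD for a scale of 4).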
7973 static SDValue lowerVectorShuffleAsSpecificZeroOrAnyExtend(
7974 SDLoc DL, MVT VT, int Scale, bool AnyExt, SDValue InputV,
7975 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
7976 assert(Scale > 1 && "Need a scale to extend.");
7977 int NumElements = VT.getVectorNumElements();
7978 int EltBits = VT.getScalarSizeInBits();
7979 assert((EltBits == 8 || EltBits == 16 || EltBits == 32) &&
7980 "Only 8, 16, and 32 bit elements can be extended.");
7981 assert(Scale * EltBits <= 64 && "Cannot zero extend past 64 bits.");
7983 // Found a valid zext mask! Try various lowering strategies based on the
7984 // input type and available ISA extensions.
7985 if (Subtarget->hasSSE41()) {
7986 MVT ExtVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits * Scale),
7987 NumElements / Scale);
7988 return DAG.getNode(ISD::BITCAST, DL, VT,
7989 DAG.getNode(X86ISD::VZEXT, DL, ExtVT, InputV));
7992 // For any extends we can cheat for larger element sizes and use shuffle
7993 // instructions that can fold with a load and/or copy.
7994 if (AnyExt && EltBits == 32) {
7995 int PSHUFDMask[4] = {0, -1, 1, -1};
7997 ISD::BITCAST, DL, VT,
7998 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
7999 DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, InputV),
8000 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
8002 if (AnyExt && EltBits == 16 && Scale > 2) {
8003 int PSHUFDMask[4] = {0, -1, 0, -1};
8004 InputV = DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
8005 DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, InputV),
8006 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG));
8007 int PSHUFHWMask[4] = {1, -1, -1, -1};
8009 ISD::BITCAST, DL, VT,
8010 DAG.getNode(X86ISD::PSHUFHW, DL, MVT::v8i16,
8011 DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, InputV),
8012 getV4X86ShuffleImm8ForMask(PSHUFHWMask, DAG)));
8015 // If this would require more than 2 unpack instructions to expand, use
8016 // pshufb when available. We can only use more than 2 unpack instructions
8017 // when zero extending i8 elements which also makes it easier to use pshufb.
8018 if (Scale > 4 && EltBits == 8 && Subtarget->hasSSSE3()) {
8019 assert(NumElements == 16 && "Unexpected byte vector width!");
8020 SDValue PSHUFBMask[16];
8021 for (int i = 0; i < 16; ++i)
8023 DAG.getConstant((i % Scale == 0) ? i / Scale : 0x80, MVT::i8);
8024 InputV = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, InputV);
8025 return DAG.getNode(ISD::BITCAST, DL, VT,
8026 DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, InputV,
8027 DAG.getNode(ISD::BUILD_VECTOR, DL,
8028 MVT::v16i8, PSHUFBMask)));
8031 // Otherwise emit a sequence of unpacks.
8033 MVT InputVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits), NumElements);
8034 SDValue Ext = AnyExt ? DAG.getUNDEF(InputVT)
8035 : getZeroVector(InputVT, Subtarget, DAG, DL);
8036 InputV = DAG.getNode(ISD::BITCAST, DL, InputVT, InputV);
8037 InputV = DAG.getNode(X86ISD::UNPCKL, DL, InputVT, InputV, Ext);
8041 } while (Scale > 1);
8042 return DAG.getNode(ISD::BITCAST, DL, VT, InputV);
8045 /// \brief Try to lower a vector shuffle as a zero extension on any microarch.
8047 /// This routine will try to do everything in its power to cleverly lower
8048 /// a shuffle which happens to match the pattern of a zero extend. It doesn't
/// check for the profitability of this lowering; it tries to aggressively
8050 /// match this pattern. It will use all of the micro-architectural details it
8051 /// can to emit an efficient lowering. It handles both blends with all-zero
8052 /// inputs to explicitly zero-extend and undef-lanes (sometimes undef due to
8053 /// masking out later).
8055 /// The reason we have dedicated lowering for zext-style shuffles is that they
8056 /// are both incredibly common and often quite performance sensitive.
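///
/// For instance, a v4i32 shuffle with mask [ 0, zz, 1, zz ] is recognized here
/// with Scale = 2: the odd elements are zeroable and the even elements are the
/// consecutive low elements of V1, so it is lowered as a zero extension of
/// V1's low two lanes to 64 bits each.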
8057 static SDValue lowerVectorShuffleAsZeroOrAnyExtend(
8058 SDLoc DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
8059 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
8060 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
8062 int Bits = VT.getSizeInBits();
8063 int NumElements = VT.getVectorNumElements();
8064 assert(VT.getScalarSizeInBits() <= 32 &&
8065 "Exceeds 32-bit integer zero extension limit");
8066 assert((int)Mask.size() == NumElements && "Unexpected shuffle mask size");
// Define a helper function to check a particular ext-scale and lower to it if
// valid.
8070 auto Lower = [&](int Scale) -> SDValue {
8073 for (int i = 0; i < NumElements; ++i) {
8075 continue; // Valid anywhere but doesn't tell us anything.
8076 if (i % Scale != 0) {
// Each of the extended elements needs to be zeroable.
8081 // We no longer are in the anyext case.
8086 // Each of the base elements needs to be consecutive indices into the
8087 // same input vector.
8088 SDValue V = Mask[i] < NumElements ? V1 : V2;
8091 else if (InputV != V)
8092 return SDValue(); // Flip-flopping inputs.
8094 if (Mask[i] % NumElements != i / Scale)
8095 return SDValue(); // Non-consecutive strided elements.
8098 // If we fail to find an input, we have a zero-shuffle which should always
8099 // have already been handled.
8100 // FIXME: Maybe handle this here in case during blending we end up with one?
8104 return lowerVectorShuffleAsSpecificZeroOrAnyExtend(
8105 DL, VT, Scale, AnyExt, InputV, Subtarget, DAG);
8108 // The widest scale possible for extending is to a 64-bit integer.
8109 assert(Bits % 64 == 0 &&
8110 "The number of bits in a vector must be divisible by 64 on x86!");
8111 int NumExtElements = Bits / 64;
// Each iteration, try extending the elements half as much, but into twice as
// many elements.
8115 for (; NumExtElements < NumElements; NumExtElements *= 2) {
8116 assert(NumElements % NumExtElements == 0 &&
8117 "The input vector size must be divisible by the extended size.");
8118 if (SDValue V = Lower(NumElements / NumExtElements))
8122 // General extends failed, but 128-bit vectors may be able to use MOVQ.
8126 // Returns one of the source operands if the shuffle can be reduced to a
8127 // MOVQ, copying the lower 64-bits and zero-extending to the upper 64-bits.
8128 auto CanZExtLowHalf = [&]() {
8129 for (int i = NumElements / 2; i != NumElements; ++i)
8132 if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, 0))
8134 if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, NumElements))
8139 if (SDValue V = CanZExtLowHalf()) {
8140 V = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, V);
8141 V = DAG.getNode(X86ISD::VZEXT_MOVL, DL, MVT::v2i64, V);
8142 return DAG.getNode(ISD::BITCAST, DL, VT, V);
8145 // No viable ext lowering found.
8149 /// \brief Try to get a scalar value for a specific element of a vector.
8151 /// Looks through BUILD_VECTOR and SCALAR_TO_VECTOR nodes to find a scalar.
8152 static SDValue getScalarValueForVectorElement(SDValue V, int Idx,
8153 SelectionDAG &DAG) {
8154 MVT VT = V.getSimpleValueType();
8155 MVT EltVT = VT.getVectorElementType();
8156 while (V.getOpcode() == ISD::BITCAST)
8157 V = V.getOperand(0);
8158 // If the bitcasts shift the element size, we can't extract an equivalent
8160 MVT NewVT = V.getSimpleValueType();
8161 if (!NewVT.isVector() || NewVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
8164 if (V.getOpcode() == ISD::BUILD_VECTOR ||
8165 (Idx == 0 && V.getOpcode() == ISD::SCALAR_TO_VECTOR))
8166 return DAG.getNode(ISD::BITCAST, SDLoc(V), EltVT, V.getOperand(Idx));
8171 /// \brief Helper to test for a load that can be folded with x86 shuffles.
8173 /// This is particularly important because the set of instructions varies
8174 /// significantly based on whether the operand is a load or not.
8175 static bool isShuffleFoldableLoad(SDValue V) {
8176 while (V.getOpcode() == ISD::BITCAST)
8177 V = V.getOperand(0);
8179 return ISD::isNON_EXTLoad(V.getNode());
8182 /// \brief Try to lower insertion of a single element into a zero vector.
/// This is a common pattern for which we have especially efficient lowering
/// patterns across all subtarget feature sets.
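///
/// For example, a v4f32 shuffle with mask [ 4, 1, 2, 3 ] inserts the low
/// element of V2 into lane 0 of V1: when V1 is not zeroable and SSE4.1 blends
/// are unavailable this becomes a single MOVSS, and when V1 is zeroable it
/// instead becomes a VZEXT_MOVL of V2 that clears the upper lanes.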
8186 static SDValue lowerVectorShuffleAsElementInsertion(
8187 MVT VT, SDLoc DL, SDValue V1, SDValue V2, ArrayRef<int> Mask,
8188 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
8189 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
8191 MVT EltVT = VT.getVectorElementType();
8193 int V2Index = std::find_if(Mask.begin(), Mask.end(),
8194 [&Mask](int M) { return M >= (int)Mask.size(); }) -
8196 bool IsV1Zeroable = true;
8197 for (int i = 0, Size = Mask.size(); i < Size; ++i)
8198 if (i != V2Index && !Zeroable[i]) {
8199 IsV1Zeroable = false;
8203 // Check for a single input from a SCALAR_TO_VECTOR node.
8204 // FIXME: All of this should be canonicalized into INSERT_VECTOR_ELT and
8205 // all the smarts here sunk into that routine. However, the current
8206 // lowering of BUILD_VECTOR makes that nearly impossible until the old
8207 // vector shuffle lowering is dead.
8208 if (SDValue V2S = getScalarValueForVectorElement(
8209 V2, Mask[V2Index] - Mask.size(), DAG)) {
8210 // We need to zext the scalar if it is smaller than an i32.
8211 V2S = DAG.getNode(ISD::BITCAST, DL, EltVT, V2S);
8212 if (EltVT == MVT::i8 || EltVT == MVT::i16) {
// Using zext to expand a narrow element won't work for non-zero elements.
8218 // Zero-extend directly to i32.
8220 V2S = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, V2S);
8222 V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, ExtVT, V2S);
8223 } else if (Mask[V2Index] != (int)Mask.size() || EltVT == MVT::i8 ||
8224 EltVT == MVT::i16) {
8225 // Either not inserting from the low element of the input or the input
8226 // element size is too small to use VZEXT_MOVL to clear the high bits.
8230 if (!IsV1Zeroable) {
8231 // If V1 can't be treated as a zero vector we have fewer options to lower
8232 // this. We can't support integer vectors or non-zero targets cheaply, and
8233 // the V1 elements can't be permuted in any way.
8234 assert(VT == ExtVT && "Cannot change extended type when non-zeroable!");
8235 if (!VT.isFloatingPoint() || V2Index != 0)
8237 SmallVector<int, 8> V1Mask(Mask.begin(), Mask.end());
8238 V1Mask[V2Index] = -1;
8239 if (!isNoopShuffleMask(V1Mask))
8241 // This is essentially a special case blend operation, but if we have
8242 // general purpose blend operations, they are always faster. Bail and let
8243 // the rest of the lowering handle these as blends.
8244 if (Subtarget->hasSSE41())
8247 // Otherwise, use MOVSD or MOVSS.
8248 assert((EltVT == MVT::f32 || EltVT == MVT::f64) &&
8249 "Only two types of floating point element types to handle!");
8250 return DAG.getNode(EltVT == MVT::f32 ? X86ISD::MOVSS : X86ISD::MOVSD, DL,
8254 // This lowering only works for the low element with floating point vectors.
8255 if (VT.isFloatingPoint() && V2Index != 0)
8258 V2 = DAG.getNode(X86ISD::VZEXT_MOVL, DL, ExtVT, V2);
8260 V2 = DAG.getNode(ISD::BITCAST, DL, VT, V2);
8263 // If we have 4 or fewer lanes we can cheaply shuffle the element into
8264 // the desired position. Otherwise it is more efficient to do a vector
8265 // shift left. We know that we can do a vector shift left because all
8266 // the inputs are zero.
8267 if (VT.isFloatingPoint() || VT.getVectorNumElements() <= 4) {
8268 SmallVector<int, 4> V2Shuffle(Mask.size(), 1);
8269 V2Shuffle[V2Index] = 0;
8270 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Shuffle);
8272 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, V2);
8274 X86ISD::VSHLDQ, DL, MVT::v2i64, V2,
8276 V2Index * EltVT.getSizeInBits()/8,
8277 DAG.getTargetLoweringInfo().getScalarShiftAmountTy(MVT::v2i64)));
8278 V2 = DAG.getNode(ISD::BITCAST, DL, VT, V2);
8284 /// \brief Try to lower broadcast of a single element.
8286 /// For convenience, this code also bundles all of the subtarget feature set
8287 /// filtering. While a little annoying to re-dispatch on type here, there isn't
8288 /// a convenient way to factor it out.
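///
/// For example, a v4f32 splat mask [ 1, 1, 1, 1 ] sets BroadcastIdx to 1; if
/// walking the operand chain (CONCAT_VECTORS, INSERT_SUBVECTOR, BUILD_VECTOR)
/// reaches a loaded scalar, the load folds into a single X86ISD::VBROADCAST
/// (VBROADCASTSS). Broadcasting straight from a vector register requires AVX2
/// and only works from lane zero.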
8289 static SDValue lowerVectorShuffleAsBroadcast(MVT VT, SDLoc DL, SDValue V,
8291 const X86Subtarget *Subtarget,
8292 SelectionDAG &DAG) {
8293 if (!Subtarget->hasAVX())
8295 if (VT.isInteger() && !Subtarget->hasAVX2())
8298 // Check that the mask is a broadcast.
8299 int BroadcastIdx = -1;
8301 if (M >= 0 && BroadcastIdx == -1)
8303 else if (M >= 0 && M != BroadcastIdx)
8306 assert(BroadcastIdx < (int)Mask.size() && "We only expect to be called with "
8307 "a sorted mask where the broadcast "
8310 // Go up the chain of (vector) values to try and find a scalar load that
8311 // we can combine with the broadcast.
8313 switch (V.getOpcode()) {
8314 case ISD::CONCAT_VECTORS: {
8315 int OperandSize = Mask.size() / V.getNumOperands();
8316 V = V.getOperand(BroadcastIdx / OperandSize);
8317 BroadcastIdx %= OperandSize;
8321 case ISD::INSERT_SUBVECTOR: {
8322 SDValue VOuter = V.getOperand(0), VInner = V.getOperand(1);
8323 auto ConstantIdx = dyn_cast<ConstantSDNode>(V.getOperand(2));
8327 int BeginIdx = (int)ConstantIdx->getZExtValue();
8329 BeginIdx + (int)VInner.getValueType().getVectorNumElements();
8330 if (BroadcastIdx >= BeginIdx && BroadcastIdx < EndIdx) {
8331 BroadcastIdx -= BeginIdx;
8342 // Check if this is a broadcast of a scalar. We special case lowering
8343 // for scalars so that we can more effectively fold with loads.
8344 if (V.getOpcode() == ISD::BUILD_VECTOR ||
8345 (V.getOpcode() == ISD::SCALAR_TO_VECTOR && BroadcastIdx == 0)) {
8346 V = V.getOperand(BroadcastIdx);
// If the scalar isn't a load we can't broadcast from it in AVX1, only with
// AVX2.
8350 if (!Subtarget->hasAVX2() && !isShuffleFoldableLoad(V))
8352 } else if (BroadcastIdx != 0 || !Subtarget->hasAVX2()) {
8353 // We can't broadcast from a vector register w/o AVX2, and we can only
8354 // broadcast from the zero-element of a vector register.
8358 return DAG.getNode(X86ISD::VBROADCAST, DL, VT, V);
8361 // Check for whether we can use INSERTPS to perform the shuffle. We only use
8362 // INSERTPS when the V1 elements are already in the correct locations
8363 // because otherwise we can just always use two SHUFPS instructions which
8364 // are much smaller to encode than a SHUFPS and an INSERTPS. We can also
8365 // perform INSERTPS if a single V1 element is out of place and all V2
8366 // elements are zeroable.
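// For example, inserting element 2 of V2 into lane 1 while zeroing lane 3
// (a v4f32 mask of [ 0, 6, 2, zz ]) gives V2SrcIndex = 2, V2DstIndex = 1 and
// ZMask = 0x8, i.e. an INSERTPS immediate of (2 << 6) | (1 << 4) | 0x8 = 0x98.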
8367 static SDValue lowerVectorShuffleAsInsertPS(SDValue Op, SDValue V1, SDValue V2,
8369 SelectionDAG &DAG) {
8370 assert(Op.getSimpleValueType() == MVT::v4f32 && "Bad shuffle type!");
8371 assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
8372 assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
8373 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
8375 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
8378 int V1DstIndex = -1;
8379 int V2DstIndex = -1;
8380 bool V1UsedInPlace = false;
8382 for (int i = 0; i < 4; ++i) {
8383 // Synthesize a zero mask from the zeroable elements (includes undefs).
8389 // Flag if we use any V1 inputs in place.
8391 V1UsedInPlace = true;
8395 // We can only insert a single non-zeroable element.
8396 if (V1DstIndex != -1 || V2DstIndex != -1)
8400 // V1 input out of place for insertion.
8403 // V2 input for insertion.
8408 // Don't bother if we have no (non-zeroable) element for insertion.
8409 if (V1DstIndex == -1 && V2DstIndex == -1)
8412 // Determine element insertion src/dst indices. The src index is from the
8413 // start of the inserted vector, not the start of the concatenated vector.
8414 unsigned V2SrcIndex = 0;
8415 if (V1DstIndex != -1) {
8416 // If we have a V1 input out of place, we use V1 as the V2 element insertion
8417 // and don't use the original V2 at all.
8418 V2SrcIndex = Mask[V1DstIndex];
8419 V2DstIndex = V1DstIndex;
8422 V2SrcIndex = Mask[V2DstIndex] - 4;
8425 // If no V1 inputs are used in place, then the result is created only from
8426 // the zero mask and the V2 insertion - so remove V1 dependency.
8428 V1 = DAG.getUNDEF(MVT::v4f32);
8430 unsigned InsertPSMask = V2SrcIndex << 6 | V2DstIndex << 4 | ZMask;
8431 assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
8433 // Insert the V2 element into the desired position.
8435 return DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
8436 DAG.getConstant(InsertPSMask, MVT::i8));
8439 /// \brief Try to lower a shuffle as a permute of the inputs followed by an
8440 /// UNPCK instruction.
8442 /// This specifically targets cases where we end up with alternating between
8443 /// the two inputs, and so can permute them into something that feeds a single
8444 /// UNPCK instruction. Note that this routine only targets integer vectors
8445 /// because for floating point vectors we have a generalized SHUFPS lowering
8446 /// strategy that handles everything that doesn't *exactly* match an unpack,
8447 /// making this clever lowering unnecessary.
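///
/// For example, a v4i32 mask [ 0, 4, 2, 6 ] cannot be matched by any unpack
/// directly, but after permuting V1 to [ 0, 2, -1, -1 ] and V2 to
/// [ 0, 2, -1, -1 ] a single UNPCKLDQ produces exactly the requested
/// interleaving.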
8448 static SDValue lowerVectorShuffleAsUnpack(MVT VT, SDLoc DL, SDValue V1,
8449 SDValue V2, ArrayRef<int> Mask,
8450 SelectionDAG &DAG) {
8451 assert(!VT.isFloatingPoint() &&
8452 "This routine only supports integer vectors.");
8453 assert(!isSingleInputShuffleMask(Mask) &&
8454 "This routine should only be used when blending two inputs.");
8455 assert(Mask.size() >= 2 && "Single element masks are invalid.");
8457 int Size = Mask.size();
8459 int NumLoInputs = std::count_if(Mask.begin(), Mask.end(), [Size](int M) {
8460 return M >= 0 && M % Size < Size / 2;
8462 int NumHiInputs = std::count_if(
8463 Mask.begin(), Mask.end(), [Size](int M) { return M % Size >= Size / 2; });
8465 bool UnpackLo = NumLoInputs >= NumHiInputs;
8467 auto TryUnpack = [&](MVT UnpackVT, int Scale) {
8468 SmallVector<int, 32> V1Mask(Mask.size(), -1);
8469 SmallVector<int, 32> V2Mask(Mask.size(), -1);
8471 for (int i = 0; i < Size; ++i) {
8475 // Each element of the unpack contains Scale elements from this mask.
8476 int UnpackIdx = i / Scale;
8478 // We only handle the case where V1 feeds the first slots of the unpack.
8479 // We rely on canonicalization to ensure this is the case.
8480 if ((UnpackIdx % 2 == 0) != (Mask[i] < Size))
8483 // Setup the mask for this input. The indexing is tricky as we have to
8484 // handle the unpack stride.
8485 SmallVectorImpl<int> &VMask = (UnpackIdx % 2 == 0) ? V1Mask : V2Mask;
8486 VMask[(UnpackIdx / 2) * Scale + i % Scale + (UnpackLo ? 0 : Size / 2)] =
8490 // If we will have to shuffle both inputs to use the unpack, check whether
8491 // we can just unpack first and shuffle the result. If so, skip this unpack.
8492 if ((NumLoInputs == 0 || NumHiInputs == 0) && !isNoopShuffleMask(V1Mask) &&
8493 !isNoopShuffleMask(V2Mask))
8496 // Shuffle the inputs into place.
8497 V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
8498 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
8500 // Cast the inputs to the type we will use to unpack them.
8501 V1 = DAG.getNode(ISD::BITCAST, DL, UnpackVT, V1);
8502 V2 = DAG.getNode(ISD::BITCAST, DL, UnpackVT, V2);
8504 // Unpack the inputs and cast the result back to the desired type.
8505 return DAG.getNode(ISD::BITCAST, DL, VT,
8506 DAG.getNode(UnpackLo ? X86ISD::UNPCKL : X86ISD::UNPCKH,
8507 DL, UnpackVT, V1, V2));
8510 // We try each unpack from the largest to the smallest to try and find one
8511 // that fits this mask.
8512 int OrigNumElements = VT.getVectorNumElements();
8513 int OrigScalarSize = VT.getScalarSizeInBits();
8514 for (int ScalarSize = 64; ScalarSize >= OrigScalarSize; ScalarSize /= 2) {
8515 int Scale = ScalarSize / OrigScalarSize;
8516 int NumElements = OrigNumElements / Scale;
8517 MVT UnpackVT = MVT::getVectorVT(MVT::getIntegerVT(ScalarSize), NumElements);
8518 if (SDValue Unpack = TryUnpack(UnpackVT, Scale))
8522 // If none of the unpack-rooted lowerings worked (or were profitable) try an
8524 if (NumLoInputs == 0 || NumHiInputs == 0) {
8525 assert((NumLoInputs > 0 || NumHiInputs > 0) &&
8526 "We have to have *some* inputs!");
8527 int HalfOffset = NumLoInputs == 0 ? Size / 2 : 0;
8529 // FIXME: We could consider the total complexity of the permute of each
8530 // possible unpacking. Or at the least we should consider how many
8531 // half-crossings are created.
8532 // FIXME: We could consider commuting the unpacks.
8534 SmallVector<int, 32> PermMask;
8535 PermMask.assign(Size, -1);
8536 for (int i = 0; i < Size; ++i) {
8540 assert(Mask[i] % Size >= HalfOffset && "Found input from wrong half!");
8543 2 * ((Mask[i] % Size) - HalfOffset) + (Mask[i] < Size ? 0 : 1);
8545 return DAG.getVectorShuffle(
8546 VT, DL, DAG.getNode(NumLoInputs == 0 ? X86ISD::UNPCKH : X86ISD::UNPCKL,
8548 DAG.getUNDEF(VT), PermMask);
8554 /// \brief Handle lowering of 2-lane 64-bit floating point shuffles.
8556 /// This is the basis function for the 2-lane 64-bit shuffles as we have full
8557 /// support for floating point shuffles but not integer shuffles. These
8558 /// instructions will incur a domain crossing penalty on some chips though so
/// it is better to avoid lowering through this for integer vectors where
/// possible.
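///
/// The SHUFPD immediate used below packs one selector bit per lane: a
/// two-input mask [ 1, 3 ] selects the high element of V1 and the high element
/// of V2, giving an immediate of 0b11.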
8561 static SDValue lowerV2F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
8562 const X86Subtarget *Subtarget,
8563 SelectionDAG &DAG) {
8565 assert(Op.getSimpleValueType() == MVT::v2f64 && "Bad shuffle type!");
8566 assert(V1.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
8567 assert(V2.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
8568 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8569 ArrayRef<int> Mask = SVOp->getMask();
8570 assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
8572 if (isSingleInputShuffleMask(Mask)) {
8573 // Use low duplicate instructions for masks that match their pattern.
8574 if (Subtarget->hasSSE3())
8575 if (isShuffleEquivalent(V1, V2, Mask, 0, 0))
8576 return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v2f64, V1);
8578 // Straight shuffle of a single input vector. Simulate this by using the
// single input as both of the "inputs" to this instruction.
8580 unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1);
8582 if (Subtarget->hasAVX()) {
// If we have AVX, we can use VPERMILPD which will allow folding a load
8584 // into the shuffle.
8585 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v2f64, V1,
8586 DAG.getConstant(SHUFPDMask, MVT::i8));
8589 return DAG.getNode(X86ISD::SHUFP, SDLoc(Op), MVT::v2f64, V1, V1,
8590 DAG.getConstant(SHUFPDMask, MVT::i8));
8592 assert(Mask[0] >= 0 && Mask[0] < 2 && "Non-canonicalized blend!");
8593 assert(Mask[1] >= 2 && "Non-canonicalized blend!");
8595 // If we have a single input, insert that into V1 if we can do so cheaply.
8596 if ((Mask[0] >= 2) + (Mask[1] >= 2) == 1) {
8597 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
8598 MVT::v2f64, DL, V1, V2, Mask, Subtarget, DAG))
8600 // Try inverting the insertion since for v2 masks it is easy to do and we
8601 // can't reliably sort the mask one way or the other.
8602 int InverseMask[2] = {Mask[0] < 0 ? -1 : (Mask[0] ^ 2),
8603 Mask[1] < 0 ? -1 : (Mask[1] ^ 2)};
8604 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
8605 MVT::v2f64, DL, V2, V1, InverseMask, Subtarget, DAG))
8609 // Try to use one of the special instruction patterns to handle two common
8610 // blend patterns if a zero-blend above didn't work.
8611 if (isShuffleEquivalent(V1, V2, Mask, 0, 3) || isShuffleEquivalent(V1, V2, Mask, 1, 3))
8612 if (SDValue V1S = getScalarValueForVectorElement(V1, Mask[0], DAG))
8613 // We can either use a special instruction to load over the low double or
8614 // to move just the low double.
8616 isShuffleFoldableLoad(V1S) ? X86ISD::MOVLPD : X86ISD::MOVSD,
8618 DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64, V1S));
8620 if (Subtarget->hasSSE41())
8621 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v2f64, V1, V2, Mask,
8625 // Use dedicated unpack instructions for masks that match their pattern.
8626 if (isShuffleEquivalent(V1, V2, Mask, 0, 2))
8627 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2f64, V1, V2);
8628 if (isShuffleEquivalent(V1, V2, Mask, 1, 3))
8629 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v2f64, V1, V2);
8631 unsigned SHUFPDMask = (Mask[0] == 1) | (((Mask[1] - 2) == 1) << 1);
8632 return DAG.getNode(X86ISD::SHUFP, SDLoc(Op), MVT::v2f64, V1, V2,
8633 DAG.getConstant(SHUFPDMask, MVT::i8));
8636 /// \brief Handle lowering of 2-lane 64-bit integer shuffles.
8638 /// Tries to lower a 2-lane 64-bit shuffle using shuffle operations provided by
8639 /// the integer unit to minimize domain crossing penalties. However, for blends
/// it falls back to the floating point shuffle operation with appropriate bit
/// casting.
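///
/// For a single-input case such as mask [ 1, 1 ], the v2i64 mask is widened to
/// the v4i32 mask [ 2, 3, 2, 3 ] and emitted as one PSHUFD, avoiding the
/// floating point domain entirely.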
8642 static SDValue lowerV2I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
8643 const X86Subtarget *Subtarget,
8644 SelectionDAG &DAG) {
8646 assert(Op.getSimpleValueType() == MVT::v2i64 && "Bad shuffle type!");
8647 assert(V1.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
8648 assert(V2.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
8649 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8650 ArrayRef<int> Mask = SVOp->getMask();
8651 assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
8653 if (isSingleInputShuffleMask(Mask)) {
8654 // Check for being able to broadcast a single element.
8655 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v2i64, DL, V1,
8656 Mask, Subtarget, DAG))
8659 // Straight shuffle of a single input vector. For everything from SSE2
8660 // onward this has a single fast instruction with no scary immediates.
8661 // We have to map the mask as it is actually a v4i32 shuffle instruction.
8662 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V1);
8663 int WidenedMask[4] = {
8664 std::max(Mask[0], 0) * 2, std::max(Mask[0], 0) * 2 + 1,
8665 std::max(Mask[1], 0) * 2, std::max(Mask[1], 0) * 2 + 1};
8667 ISD::BITCAST, DL, MVT::v2i64,
8668 DAG.getNode(X86ISD::PSHUFD, SDLoc(Op), MVT::v4i32, V1,
8669 getV4X86ShuffleImm8ForMask(WidenedMask, DAG)));
8671 assert(Mask[0] != -1 && "No undef lanes in multi-input v2 shuffles!");
8672 assert(Mask[1] != -1 && "No undef lanes in multi-input v2 shuffles!");
8673 assert(Mask[0] < 2 && "We sort V1 to be the first input.");
8674 assert(Mask[1] >= 2 && "We sort V2 to be the second input.");
// If we have a blend of two PACKUS operations and the blend aligns with the
// low and high halves, we can just merge the PACKUS operations. This is
// particularly important as it lets us merge shuffles that this routine itself
// creates.
8680 auto GetPackNode = [](SDValue V) {
8681 while (V.getOpcode() == ISD::BITCAST)
8682 V = V.getOperand(0);
8684 return V.getOpcode() == X86ISD::PACKUS ? V : SDValue();
8686 if (SDValue V1Pack = GetPackNode(V1))
8687 if (SDValue V2Pack = GetPackNode(V2))
8688 return DAG.getNode(ISD::BITCAST, DL, MVT::v2i64,
8689 DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8,
8690 Mask[0] == 0 ? V1Pack.getOperand(0)
8691 : V1Pack.getOperand(1),
8692 Mask[1] == 2 ? V2Pack.getOperand(0)
8693 : V2Pack.getOperand(1)));
8695 // Try to use shift instructions.
8697 lowerVectorShuffleAsShift(DL, MVT::v2i64, V1, V2, Mask, DAG))
8700 // When loading a scalar and then shuffling it into a vector we can often do
8701 // the insertion cheaply.
8702 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
8703 MVT::v2i64, DL, V1, V2, Mask, Subtarget, DAG))
8705 // Try inverting the insertion since for v2 masks it is easy to do and we
8706 // can't reliably sort the mask one way or the other.
8707 int InverseMask[2] = {Mask[0] ^ 2, Mask[1] ^ 2};
8708 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
8709 MVT::v2i64, DL, V2, V1, InverseMask, Subtarget, DAG))
8712 // We have different paths for blend lowering, but they all must use the
8713 // *exact* same predicate.
8714 bool IsBlendSupported = Subtarget->hasSSE41();
8715 if (IsBlendSupported)
8716 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v2i64, V1, V2, Mask,
8720 // Use dedicated unpack instructions for masks that match their pattern.
8721 if (isShuffleEquivalent(V1, V2, Mask, 0, 2))
8722 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2i64, V1, V2);
8723 if (isShuffleEquivalent(V1, V2, Mask, 1, 3))
8724 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v2i64, V1, V2);
8726 // Try to use byte rotation instructions.
// It's more profitable for pre-SSSE3 to use shuffles/unpacks.
8728 if (Subtarget->hasSSSE3())
8729 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
8730 DL, MVT::v2i64, V1, V2, Mask, Subtarget, DAG))
8733 // If we have direct support for blends, we should lower by decomposing into
8734 // a permute. That will be faster than the domain cross.
8735 if (IsBlendSupported)
8736 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v2i64, V1, V2,
8739 // We implement this with SHUFPD which is pretty lame because it will likely
8740 // incur 2 cycles of stall for integer vectors on Nehalem and older chips.
8741 // However, all the alternatives are still more cycles and newer chips don't
8742 // have this problem. It would be really nice if x86 had better shuffles here.
8743 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v2f64, V1);
8744 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v2f64, V2);
8745 return DAG.getNode(ISD::BITCAST, DL, MVT::v2i64,
8746 DAG.getVectorShuffle(MVT::v2f64, DL, V1, V2, Mask));
8749 /// \brief Test whether this can be lowered with a single SHUFPS instruction.
8751 /// This is used to disable more specialized lowerings when the shufps lowering
8752 /// will happen to be efficient.
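///
/// For example, [ 0, 1, 6, 7 ] is a single-SHUFPS mask (the low half reads
/// only V1 and the high half reads only V2), while [ 0, 4, 2, 6 ] is not
/// because both halves mix the two inputs.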
8753 static bool isSingleSHUFPSMask(ArrayRef<int> Mask) {
8754 // This routine only handles 128-bit shufps.
8755 assert(Mask.size() == 4 && "Unsupported mask size!");
8757 // To lower with a single SHUFPS we need to have the low half and high half
8758 // each requiring a single input.
8759 if (Mask[0] != -1 && Mask[1] != -1 && (Mask[0] < 4) != (Mask[1] < 4))
8761 if (Mask[2] != -1 && Mask[3] != -1 && (Mask[2] < 4) != (Mask[3] < 4))
8767 /// \brief Lower a vector shuffle using the SHUFPS instruction.
8769 /// This is a helper routine dedicated to lowering vector shuffles using SHUFPS.
/// It makes no assumptions about whether this is the *best* lowering; it
/// simply uses it.
8772 static SDValue lowerVectorShuffleWithSHUFPS(SDLoc DL, MVT VT,
8773 ArrayRef<int> Mask, SDValue V1,
8774 SDValue V2, SelectionDAG &DAG) {
8775 SDValue LowV = V1, HighV = V2;
8776 int NewMask[4] = {Mask[0], Mask[1], Mask[2], Mask[3]};
8779 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
8781 if (NumV2Elements == 1) {
8783 std::find_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; }) -
// Compute the index adjacent to V2Index and in the same half by toggling
// the low bit.
8788 int V2AdjIndex = V2Index ^ 1;
8790 if (Mask[V2AdjIndex] == -1) {
8791 // Handles all the cases where we have a single V2 element and an undef.
8792 // This will only ever happen in the high lanes because we commute the
8793 // vector otherwise.
8795 std::swap(LowV, HighV);
8796 NewMask[V2Index] -= 4;
8798 // Handle the case where the V2 element ends up adjacent to a V1 element.
8799 // To make this work, blend them together as the first step.
8800 int V1Index = V2AdjIndex;
8801 int BlendMask[4] = {Mask[V2Index] - 4, 0, Mask[V1Index], 0};
8802 V2 = DAG.getNode(X86ISD::SHUFP, DL, VT, V2, V1,
8803 getV4X86ShuffleImm8ForMask(BlendMask, DAG));
8805 // Now proceed to reconstruct the final blend as we have the necessary
8806 // high or low half formed.
8813 NewMask[V1Index] = 2; // We put the V1 element in V2[2].
8814 NewMask[V2Index] = 0; // We shifted the V2 element into V2[0].
8816 } else if (NumV2Elements == 2) {
8817 if (Mask[0] < 4 && Mask[1] < 4) {
// Handle the easy case where we have V1 in the low lanes and V2 in the
// high lanes.
8822 } else if (Mask[2] < 4 && Mask[3] < 4) {
8823 // We also handle the reversed case because this utility may get called
8824 // when we detect a SHUFPS pattern but can't easily commute the shuffle to
8825 // arrange things in the right direction.
8831 // We have a mixture of V1 and V2 in both low and high lanes. Rather than
8832 // trying to place elements directly, just blend them and set up the final
8833 // shuffle to place them.
// The first two blend mask elements are for V1, the second two are for V2.
8837 int BlendMask[4] = {Mask[0] < 4 ? Mask[0] : Mask[1],
8838 Mask[2] < 4 ? Mask[2] : Mask[3],
8839 (Mask[0] >= 4 ? Mask[0] : Mask[1]) - 4,
8840 (Mask[2] >= 4 ? Mask[2] : Mask[3]) - 4};
8841 V1 = DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2,
8842 getV4X86ShuffleImm8ForMask(BlendMask, DAG));
// Now we do a normal shuffle of V1 by giving V1 as both operands to
// the shuffle.
8847 NewMask[0] = Mask[0] < 4 ? 0 : 2;
8848 NewMask[1] = Mask[0] < 4 ? 2 : 0;
8849 NewMask[2] = Mask[2] < 4 ? 1 : 3;
8850 NewMask[3] = Mask[2] < 4 ? 3 : 1;
8853 return DAG.getNode(X86ISD::SHUFP, DL, VT, LowV, HighV,
8854 getV4X86ShuffleImm8ForMask(NewMask, DAG));
8857 /// \brief Lower 4-lane 32-bit floating point shuffles.
8859 /// Uses instructions exclusively from the floating point unit to minimize
/// domain crossing penalties, as these are sufficient to implement all v4f32
/// shuffles.
8862 static SDValue lowerV4F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
8863 const X86Subtarget *Subtarget,
8864 SelectionDAG &DAG) {
8866 assert(Op.getSimpleValueType() == MVT::v4f32 && "Bad shuffle type!");
8867 assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
8868 assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
8869 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8870 ArrayRef<int> Mask = SVOp->getMask();
8871 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
8874 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
8876 if (NumV2Elements == 0) {
8877 // Check for being able to broadcast a single element.
8878 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4f32, DL, V1,
8879 Mask, Subtarget, DAG))
8882 // Use even/odd duplicate instructions for masks that match their pattern.
8883 if (Subtarget->hasSSE3()) {
8884 if (isShuffleEquivalent(V1, V2, Mask, 0, 0, 2, 2))
8885 return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v4f32, V1);
8886 if (isShuffleEquivalent(V1, V2, Mask, 1, 1, 3, 3))
8887 return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v4f32, V1);
8890 if (Subtarget->hasAVX()) {
8891 // If we have AVX, we can use VPERMILPS which will allow folding a load
8892 // into the shuffle.
8893 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f32, V1,
8894 getV4X86ShuffleImm8ForMask(Mask, DAG));
8897 // Otherwise, use a straight shuffle of a single input vector. We pass the
8898 // input vector to both operands to simulate this with a SHUFPS.
8899 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f32, V1, V1,
8900 getV4X86ShuffleImm8ForMask(Mask, DAG));
8903 // There are special ways we can lower some single-element blends. However, we
8904 // have custom ways we can lower more complex single-element blends below that
8905 // we defer to if both this and BLENDPS fail to match, so restrict this to
// when the V2 input is targeting element 0 of the mask -- that is the fast
// case here.
8908 if (NumV2Elements == 1 && Mask[0] >= 4)
8909 if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v4f32, DL, V1, V2,
8910 Mask, Subtarget, DAG))
8913 if (Subtarget->hasSSE41()) {
8914 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4f32, V1, V2, Mask,
8918 // Use INSERTPS if we can complete the shuffle efficiently.
8919 if (SDValue V = lowerVectorShuffleAsInsertPS(Op, V1, V2, Mask, DAG))
8922 if (!isSingleSHUFPSMask(Mask))
8923 if (SDValue BlendPerm = lowerVectorShuffleAsBlendAndPermute(
8924 DL, MVT::v4f32, V1, V2, Mask, DAG))
8928 // Use dedicated unpack instructions for masks that match their pattern.
8929 if (isShuffleEquivalent(V1, V2, Mask, 0, 4, 1, 5))
8930 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4f32, V1, V2);
8931 if (isShuffleEquivalent(V1, V2, Mask, 2, 6, 3, 7))
8932 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4f32, V1, V2);
8933 if (isShuffleEquivalent(V1, V2, Mask, 4, 0, 5, 1))
8934 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4f32, V2, V1);
8935 if (isShuffleEquivalent(V1, V2, Mask, 6, 2, 7, 3))
8936 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4f32, V2, V1);
8938 // Otherwise fall back to a SHUFPS lowering strategy.
8939 return lowerVectorShuffleWithSHUFPS(DL, MVT::v4f32, Mask, V1, V2, DAG);
8942 /// \brief Lower 4-lane i32 vector shuffles.
8944 /// We try to handle these with integer-domain shuffles where we can, but for
8945 /// blends we use the floating point domain blend instructions.
8946 static SDValue lowerV4I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
8947 const X86Subtarget *Subtarget,
8948 SelectionDAG &DAG) {
8950 assert(Op.getSimpleValueType() == MVT::v4i32 && "Bad shuffle type!");
8951 assert(V1.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
8952 assert(V2.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
8953 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8954 ArrayRef<int> Mask = SVOp->getMask();
8955 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
8957 // Whenever we can lower this as a zext, that instruction is strictly faster
8958 // than any alternative. It also allows us to fold memory operands into the
8959 // shuffle in many cases.
8960 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v4i32, V1, V2,
8961 Mask, Subtarget, DAG))
8965 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
8967 if (NumV2Elements == 0) {
8968 // Check for being able to broadcast a single element.
8969 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4i32, DL, V1,
8970 Mask, Subtarget, DAG))
8973 // Straight shuffle of a single input vector. For everything from SSE2
8974 // onward this has a single fast instruction with no scary immediates.
8975 // We coerce the shuffle pattern to be compatible with UNPCK instructions
8976 // but we aren't actually going to use the UNPCK instruction because doing
8977 // so prevents folding a load into this instruction or making a copy.
8978 const int UnpackLoMask[] = {0, 0, 1, 1};
8979 const int UnpackHiMask[] = {2, 2, 3, 3};
8980 if (isShuffleEquivalent(V1, V2, Mask, 0, 0, 1, 1))
8981 Mask = UnpackLoMask;
8982 else if (isShuffleEquivalent(V1, V2, Mask, 2, 2, 3, 3))
8983 Mask = UnpackHiMask;
8985 return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
8986 getV4X86ShuffleImm8ForMask(Mask, DAG));
8989 // Try to use shift instructions.
8991 lowerVectorShuffleAsShift(DL, MVT::v4i32, V1, V2, Mask, DAG))
8994 // There are special ways we can lower some single-element blends.
8995 if (NumV2Elements == 1)
8996 if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v4i32, DL, V1, V2,
8997 Mask, Subtarget, DAG))
9000 // We have different paths for blend lowering, but they all must use the
9001 // *exact* same predicate.
9002 bool IsBlendSupported = Subtarget->hasSSE41();
9003 if (IsBlendSupported)
9004 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4i32, V1, V2, Mask,
9008 if (SDValue Masked =
9009 lowerVectorShuffleAsBitMask(DL, MVT::v4i32, V1, V2, Mask, DAG))
9012 // Use dedicated unpack instructions for masks that match their pattern.
9013 if (isShuffleEquivalent(V1, V2, Mask, 0, 4, 1, 5))
9014 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4i32, V1, V2);
9015 if (isShuffleEquivalent(V1, V2, Mask, 2, 6, 3, 7))
9016 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4i32, V1, V2);
9017 if (isShuffleEquivalent(V1, V2, Mask, 4, 0, 5, 1))
9018 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4i32, V2, V1);
9019 if (isShuffleEquivalent(V1, V2, Mask, 6, 2, 7, 3))
9020 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4i32, V2, V1);
9022 // Try to use byte rotation instructions.
// It's more profitable for pre-SSSE3 to use shuffles/unpacks.
9024 if (Subtarget->hasSSSE3())
9025 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
9026 DL, MVT::v4i32, V1, V2, Mask, Subtarget, DAG))
9029 // If we have direct support for blends, we should lower by decomposing into
9030 // a permute. That will be faster than the domain cross.
9031 if (IsBlendSupported)
9032 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4i32, V1, V2,
9035 // Try to lower by permuting the inputs into an unpack instruction.
9036 if (SDValue Unpack =
9037 lowerVectorShuffleAsUnpack(MVT::v4i32, DL, V1, V2, Mask, DAG))
9040 // We implement this with SHUFPS because it can blend from two vectors.
9041 // Because we're going to eventually use SHUFPS, we use SHUFPS even to build
// up the inputs, bypassing domain shift penalties that we would incur if we
// directly used PSHUFD on Nehalem and older. For newer chips, this isn't a
// problem.
9045 return DAG.getNode(ISD::BITCAST, DL, MVT::v4i32,
9046 DAG.getVectorShuffle(
9048 DAG.getNode(ISD::BITCAST, DL, MVT::v4f32, V1),
9049 DAG.getNode(ISD::BITCAST, DL, MVT::v4f32, V2), Mask));
9052 /// \brief Lowering of single-input v8i16 shuffles is the cornerstone of SSE2
9053 /// shuffle lowering, and the most complex part.
9055 /// The lowering strategy is to try to form pairs of input lanes which are
9056 /// targeted at the same half of the final vector, and then use a dword shuffle
9057 /// to place them onto the right half, and finally unpack the paired lanes into
9058 /// their final position.
9060 /// The exact breakdown of how to form these dword pairs and align them on the
9061 /// correct sides is really tricky. See the comments within the function for
9062 /// more of the details.
9063 static SDValue lowerV8I16SingleInputVectorShuffle(
9064 SDLoc DL, SDValue V, MutableArrayRef<int> Mask,
9065 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
9066 assert(V.getSimpleValueType() == MVT::v8i16 && "Bad input type!");
9067 MutableArrayRef<int> LoMask = Mask.slice(0, 4);
9068 MutableArrayRef<int> HiMask = Mask.slice(4, 4);
9070 SmallVector<int, 4> LoInputs;
9071 std::copy_if(LoMask.begin(), LoMask.end(), std::back_inserter(LoInputs),
9072 [](int M) { return M >= 0; });
9073 std::sort(LoInputs.begin(), LoInputs.end());
9074 LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()), LoInputs.end());
9075 SmallVector<int, 4> HiInputs;
9076 std::copy_if(HiMask.begin(), HiMask.end(), std::back_inserter(HiInputs),
9077 [](int M) { return M >= 0; });
9078 std::sort(HiInputs.begin(), HiInputs.end());
9079 HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()), HiInputs.end());
9081 std::lower_bound(LoInputs.begin(), LoInputs.end(), 4) - LoInputs.begin();
9082 int NumHToL = LoInputs.size() - NumLToL;
9084 std::lower_bound(HiInputs.begin(), HiInputs.end(), 4) - HiInputs.begin();
9085 int NumHToH = HiInputs.size() - NumLToH;
9086 MutableArrayRef<int> LToLInputs(LoInputs.data(), NumLToL);
9087 MutableArrayRef<int> LToHInputs(HiInputs.data(), NumLToH);
9088 MutableArrayRef<int> HToLInputs(LoInputs.data() + NumLToL, NumHToL);
9089 MutableArrayRef<int> HToHInputs(HiInputs.data() + NumLToH, NumHToH);
9091 // Check for being able to broadcast a single element.
9092 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v8i16, DL, V,
9093 Mask, Subtarget, DAG))
9096 // Try to use shift instructions.
9098 lowerVectorShuffleAsShift(DL, MVT::v8i16, V, V, Mask, DAG))
9101 // Use dedicated unpack instructions for masks that match their pattern.
9102 if (isShuffleEquivalent(V, V, Mask, 0, 0, 1, 1, 2, 2, 3, 3))
9103 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i16, V, V);
9104 if (isShuffleEquivalent(V, V, Mask, 4, 4, 5, 5, 6, 6, 7, 7))
9105 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i16, V, V);
9107 // Try to use byte rotation instructions.
9108 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
9109 DL, MVT::v8i16, V, V, Mask, Subtarget, DAG))
9112 // Simplify the 1-into-3 and 3-into-1 cases with a single pshufd. For all
9113 // such inputs we can swap two of the dwords across the half mark and end up
9114 // with <=2 inputs to each half in each half. Once there, we can fall through
9115 // to the generic code below. For example:
9117 // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
9118 // Mask: [0, 1, 2, 7, 4, 5, 6, 3] -----------------> [0, 1, 4, 7, 2, 3, 6, 5]
9120 // However in some very rare cases we have a 1-into-3 or 3-into-1 on one half
9121 // and an existing 2-into-2 on the other half. In this case we may have to
9122 // pre-shuffle the 2-into-2 half to avoid turning it into a 3-into-1 or
9123 // 1-into-3 which could cause us to cycle endlessly fixing each side in turn.
9124 // Fortunately, we don't have to handle anything but a 2-into-2 pattern
9125 // because any other situation (including a 3-into-1 or 1-into-3 in the other
9126 // half than the one we target for fixing) will be fixed when we re-enter this
// path. We will also combine away any resulting sequence of PSHUFD
// instructions into a single instruction. Here is an example of the tricky
// case:
9130 // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
9131 // Mask: [3, 7, 1, 0, 2, 7, 3, 5] -THIS-IS-BAD!!!!-> [5, 7, 1, 0, 4, 7, 5, 3]
9133 // This now has a 1-into-3 in the high half! Instead, we do two shuffles:
9135 // Input: [a, b, c, d, e, f, g, h] PSHUFHW[0,2,1,3]-> [a, b, c, d, e, g, f, h]
9136 // Mask: [3, 7, 1, 0, 2, 7, 3, 5] -----------------> [3, 7, 1, 0, 2, 7, 3, 6]
9138 // Input: [a, b, c, d, e, g, f, h] -PSHUFD[0,2,1,3]-> [a, b, e, g, c, d, f, h]
9139 // Mask: [3, 7, 1, 0, 2, 7, 3, 6] -----------------> [5, 7, 1, 0, 4, 7, 5, 6]
9141 // The result is fine to be handled by the generic logic.
9142 auto balanceSides = [&](ArrayRef<int> AToAInputs, ArrayRef<int> BToAInputs,
9143 ArrayRef<int> BToBInputs, ArrayRef<int> AToBInputs,
9144 int AOffset, int BOffset) {
9145 assert((AToAInputs.size() == 3 || AToAInputs.size() == 1) &&
9146 "Must call this with A having 3 or 1 inputs from the A half.");
9147 assert((BToAInputs.size() == 1 || BToAInputs.size() == 3) &&
9148 "Must call this with B having 1 or 3 inputs from the B half.");
9149 assert(AToAInputs.size() + BToAInputs.size() == 4 &&
9150 "Must call this with either 3:1 or 1:3 inputs (summing to 4).");
9152 // Compute the index of dword with only one word among the three inputs in
9153 // a half by taking the sum of the half with three inputs and subtracting
// the sum of the actual three inputs. The difference is the remaining slot.
9157 int &TripleDWord = AToAInputs.size() == 3 ? ADWord : BDWord;
9158 int &OneInputDWord = AToAInputs.size() == 3 ? BDWord : ADWord;
9159 int TripleInputOffset = AToAInputs.size() == 3 ? AOffset : BOffset;
9160 ArrayRef<int> TripleInputs = AToAInputs.size() == 3 ? AToAInputs : BToAInputs;
9161 int OneInput = AToAInputs.size() == 3 ? BToAInputs[0] : AToAInputs[0];
9162 int TripleInputSum = 0 + 1 + 2 + 3 + (4 * TripleInputOffset);
9163 int TripleNonInputIdx =
9164 TripleInputSum - std::accumulate(TripleInputs.begin(), TripleInputs.end(), 0);
9165 TripleDWord = TripleNonInputIdx / 2;
// We use xor with one to compute the adjacent DWord to whichever one the
// OneInput is in.
9169 OneInputDWord = (OneInput / 2) ^ 1;
9171 // Check for one tricky case: We're fixing a 3<-1 or a 1<-3 shuffle for AToA
9172 // and BToA inputs. If there is also such a problem with the BToB and AToB
9173 // inputs, we don't try to fix it necessarily -- we'll recurse and see it in
9174 // the next pass. However, if we have a 2<-2 in the BToB and AToB inputs, it
9175 // is essential that we don't *create* a 3<-1 as then we might oscillate.
9176 if (BToBInputs.size() == 2 && AToBInputs.size() == 2) {
// Compute how many inputs will be flipped by swapping these DWords. We need
// to balance this to ensure we don't form a 3-1 shuffle in the other half.
9181 int NumFlippedAToBInputs =
9182 std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord) +
9183 std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord + 1);
9184 int NumFlippedBToBInputs =
9185 std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord) +
9186 std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord + 1);
9187 if ((NumFlippedAToBInputs == 1 &&
9188 (NumFlippedBToBInputs == 0 || NumFlippedBToBInputs == 2)) ||
9189 (NumFlippedBToBInputs == 1 &&
9190 (NumFlippedAToBInputs == 0 || NumFlippedAToBInputs == 2))) {
9191 // We choose whether to fix the A half or B half based on whether that
9192 // half has zero flipped inputs. At zero, we may not be able to fix it
9193 // with that half. We also bias towards fixing the B half because that
9194 // will more commonly be the high half, and we have to bias one way.
9195 auto FixFlippedInputs = [&V, &DL, &Mask, &DAG](int PinnedIdx, int DWord,
9196 ArrayRef<int> Inputs) {
9197 int FixIdx = PinnedIdx ^ 1; // The adjacent slot to the pinned slot.
9198 bool IsFixIdxInput = std::find(Inputs.begin(), Inputs.end(),
9199 PinnedIdx ^ 1) != Inputs.end();
9200 // Determine whether the free index is in the flipped dword or the
9201 // unflipped dword based on where the pinned index is. We use this bit
9202 // in an xor to conditionally select the adjacent dword.
9203 int FixFreeIdx = 2 * (DWord ^ (PinnedIdx / 2 == DWord));
9204 bool IsFixFreeIdxInput = std::find(Inputs.begin(), Inputs.end(),
9205 FixFreeIdx) != Inputs.end();
9206 if (IsFixIdxInput == IsFixFreeIdxInput)
9208 IsFixFreeIdxInput = std::find(Inputs.begin(), Inputs.end(),
9209 FixFreeIdx) != Inputs.end();
9210 assert(IsFixIdxInput != IsFixFreeIdxInput &&
9211 "We need to be changing the number of flipped inputs!");
9212 int PSHUFHalfMask[] = {0, 1, 2, 3};
9213 std::swap(PSHUFHalfMask[FixFreeIdx % 4], PSHUFHalfMask[FixIdx % 4]);
9214 V = DAG.getNode(FixIdx < 4 ? X86ISD::PSHUFLW : X86ISD::PSHUFHW, DL,
9216 getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DAG));
9219 if (M != -1 && M == FixIdx)
9221 else if (M != -1 && M == FixFreeIdx)
9224 if (NumFlippedBToBInputs != 0) {
9226 BToAInputs.size() == 3 ? TripleNonInputIdx : OneInput;
9227 FixFlippedInputs(BPinnedIdx, BDWord, BToBInputs);
9229 assert(NumFlippedAToBInputs != 0 && "Impossible given predicates!");
9231 AToAInputs.size() == 3 ? TripleNonInputIdx : OneInput;
9232 FixFlippedInputs(APinnedIdx, ADWord, AToBInputs);
9237 int PSHUFDMask[] = {0, 1, 2, 3};
9238 PSHUFDMask[ADWord] = BDWord;
9239 PSHUFDMask[BDWord] = ADWord;
9240 V = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
9241 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
9242 DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V),
9243 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
9245 // Adjust the mask to match the new locations of A and B.
9247 if (M != -1 && M/2 == ADWord)
9248 M = 2 * BDWord + M % 2;
9249 else if (M != -1 && M/2 == BDWord)
9250 M = 2 * ADWord + M % 2;
9252 // Recurse back into this routine to re-compute state now that this isn't
9253 // a 3 and 1 problem.
9254 return DAG.getVectorShuffle(MVT::v8i16, DL, V, DAG.getUNDEF(MVT::v8i16),
9257 if ((NumLToL == 3 && NumHToL == 1) || (NumLToL == 1 && NumHToL == 3))
9258 return balanceSides(LToLInputs, HToLInputs, HToHInputs, LToHInputs, 0, 4);
9259 else if ((NumHToH == 3 && NumLToH == 1) || (NumHToH == 1 && NumLToH == 3))
9260 return balanceSides(HToHInputs, LToHInputs, LToLInputs, HToLInputs, 4, 0);
9262 // At this point there are at most two inputs to the low and high halves from
9263 // each half. That means the inputs can always be grouped into dwords and
9264 // those dwords can then be moved to the correct half with a dword shuffle.
9265 // We use at most one low and one high word shuffle to collect these paired
9266 // inputs into dwords, and finally a dword shuffle to place them.
9267 int PSHUFLMask[4] = {-1, -1, -1, -1};
9268 int PSHUFHMask[4] = {-1, -1, -1, -1};
9269 int PSHUFDMask[4] = {-1, -1, -1, -1};
9271 // First fix the masks for all the inputs that are staying in their
// original halves. This will then dictate the targets of the cross-half
// shuffles.
9274 auto fixInPlaceInputs =
9275 [&PSHUFDMask](ArrayRef<int> InPlaceInputs, ArrayRef<int> IncomingInputs,
9276 MutableArrayRef<int> SourceHalfMask,
9277 MutableArrayRef<int> HalfMask, int HalfOffset) {
9278 if (InPlaceInputs.empty())
9280 if (InPlaceInputs.size() == 1) {
9281 SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
9282 InPlaceInputs[0] - HalfOffset;
9283 PSHUFDMask[InPlaceInputs[0] / 2] = InPlaceInputs[0] / 2;
9286 if (IncomingInputs.empty()) {
9287 // Just fix all of the in place inputs.
9288 for (int Input : InPlaceInputs) {
9289 SourceHalfMask[Input - HalfOffset] = Input - HalfOffset;
9290 PSHUFDMask[Input / 2] = Input / 2;
9295 assert(InPlaceInputs.size() == 2 && "Cannot handle 3 or 4 inputs!");
9296 SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
9297 InPlaceInputs[0] - HalfOffset;
9298 // Put the second input next to the first so that they are packed into
9299 // a dword. We find the adjacent index by toggling the low bit.
9300 int AdjIndex = InPlaceInputs[0] ^ 1;
9301 SourceHalfMask[AdjIndex - HalfOffset] = InPlaceInputs[1] - HalfOffset;
9302 std::replace(HalfMask.begin(), HalfMask.end(), InPlaceInputs[1], AdjIndex);
9303 PSHUFDMask[AdjIndex / 2] = AdjIndex / 2;
9305 fixInPlaceInputs(LToLInputs, HToLInputs, PSHUFLMask, LoMask, 0);
9306 fixInPlaceInputs(HToHInputs, LToHInputs, PSHUFHMask, HiMask, 4);
9308 // Now gather the cross-half inputs and place them into a free dword of
9309 // their target half.
9310 // FIXME: This operation could almost certainly be simplified dramatically to
9311 // look more like the 3-1 fixing operation.
9312 auto moveInputsToRightHalf = [&PSHUFDMask](
9313 MutableArrayRef<int> IncomingInputs, ArrayRef<int> ExistingInputs,
9314 MutableArrayRef<int> SourceHalfMask, MutableArrayRef<int> HalfMask,
9315 MutableArrayRef<int> FinalSourceHalfMask, int SourceOffset,
9317 auto isWordClobbered = [](ArrayRef<int> SourceHalfMask, int Word) {
9318 return SourceHalfMask[Word] != -1 && SourceHalfMask[Word] != Word;
9320 auto isDWordClobbered = [&isWordClobbered](ArrayRef<int> SourceHalfMask,
9322 int LowWord = Word & ~1;
9323 int HighWord = Word | 1;
9324 return isWordClobbered(SourceHalfMask, LowWord) ||
9325 isWordClobbered(SourceHalfMask, HighWord);
9328 if (IncomingInputs.empty())
9331 if (ExistingInputs.empty()) {
9332 // Map any dwords with inputs from them into the right half.
9333 for (int Input : IncomingInputs) {
9334 // If the source half mask maps over the inputs, turn those into
9335 // swaps and use the swapped lane.
9336 if (isWordClobbered(SourceHalfMask, Input - SourceOffset)) {
9337 if (SourceHalfMask[SourceHalfMask[Input - SourceOffset]] == -1) {
9338 SourceHalfMask[SourceHalfMask[Input - SourceOffset]] =
9339 Input - SourceOffset;
9340 // We have to swap the uses in our half mask in one sweep.
9341 for (int &M : HalfMask)
9342 if (M == SourceHalfMask[Input - SourceOffset] + SourceOffset)
9344 else if (M == Input)
9345 M = SourceHalfMask[Input - SourceOffset] + SourceOffset;
9347 assert(SourceHalfMask[SourceHalfMask[Input - SourceOffset]] ==
9348 Input - SourceOffset &&
9349 "Previous placement doesn't match!");
9351 // Note that this correctly re-maps both when we do a swap and when
9352 // we observe the other side of the swap above. We rely on that to
9353 // avoid swapping the members of the input list directly.
9354 Input = SourceHalfMask[Input - SourceOffset] + SourceOffset;
9357 // Map the input's dword into the correct half.
9358 if (PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] == -1)
9359 PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] = Input / 2;
9361 assert(PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] ==
9363 "Previous placement doesn't match!");
9366 // And just directly shift any other-half mask elements to be same-half
9367 // as we will have mirrored the dword containing the element into the
9368 // same position within that half.
9369 for (int &M : HalfMask)
9370 if (M >= SourceOffset && M < SourceOffset + 4) {
9371 M = M - SourceOffset + DestOffset;
9372 assert(M >= 0 && "This should never wrap below zero!");
9377 // Ensure we have the input in a viable dword of its current half. This
9378 // is particularly tricky because the original position may be clobbered
9379 // by inputs being moved and *staying* in that half.
9380 if (IncomingInputs.size() == 1) {
9381 if (isWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
9382 int InputFixed = std::find(std::begin(SourceHalfMask),
9383 std::end(SourceHalfMask), -1) -
9384 std::begin(SourceHalfMask) + SourceOffset;
9385 SourceHalfMask[InputFixed - SourceOffset] =
9386 IncomingInputs[0] - SourceOffset;
9387 std::replace(HalfMask.begin(), HalfMask.end(), IncomingInputs[0],
9389 IncomingInputs[0] = InputFixed;
9391 } else if (IncomingInputs.size() == 2) {
9392 if (IncomingInputs[0] / 2 != IncomingInputs[1] / 2 ||
9393 isDWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
9394 // We have two non-adjacent or clobbered inputs we need to extract from
9395 // the source half. To do this, we need to map them into some adjacent
9396 // dword slot in the source mask.
9397 int InputsFixed[2] = {IncomingInputs[0] - SourceOffset,
9398 IncomingInputs[1] - SourceOffset};
9400 // If there is a free slot in the source half mask adjacent to one of
9401 // the inputs, place the other input in it. We use (Index XOR 1) to
9402 // compute an adjacent index.
9403 if (!isWordClobbered(SourceHalfMask, InputsFixed[0]) &&
9404 SourceHalfMask[InputsFixed[0] ^ 1] == -1) {
9405 SourceHalfMask[InputsFixed[0]] = InputsFixed[0];
9406 SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
9407 InputsFixed[1] = InputsFixed[0] ^ 1;
9408 } else if (!isWordClobbered(SourceHalfMask, InputsFixed[1]) &&
9409 SourceHalfMask[InputsFixed[1] ^ 1] == -1) {
9410 SourceHalfMask[InputsFixed[1]] = InputsFixed[1];
9411 SourceHalfMask[InputsFixed[1] ^ 1] = InputsFixed[0];
9412 InputsFixed[0] = InputsFixed[1] ^ 1;
9413 } else if (SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] == -1 &&
9414 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] == -1) {
9415 // The two inputs are in the same DWord but it is clobbered and the
9416 // adjacent DWord isn't used at all. Move both inputs to the free
9417 // slot.
9418 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] = InputsFixed[0];
9419 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] = InputsFixed[1];
9420 InputsFixed[0] = 2 * ((InputsFixed[0] / 2) ^ 1);
9421 InputsFixed[1] = 2 * ((InputsFixed[0] / 2) ^ 1) + 1;
9423 // The only way we hit this point is if there is no clobbering
9424 // (because there are no off-half inputs to this half) and there is no
9425 // free slot adjacent to one of the inputs. In this case, we have to
9426 // swap an input with a non-input.
9427 for (int i = 0; i < 4; ++i)
9428 assert((SourceHalfMask[i] == -1 || SourceHalfMask[i] == i) &&
9429 "We can't handle any clobbers here!");
9430 assert(InputsFixed[1] != (InputsFixed[0] ^ 1) &&
9431 "Cannot have adjacent inputs here!");
9433 SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
9434 SourceHalfMask[InputsFixed[1]] = InputsFixed[0] ^ 1;
9436 // We also have to update the final source mask in this case because
9437 // it may need to undo the above swap.
9438 for (int &M : FinalSourceHalfMask)
9439 if (M == (InputsFixed[0] ^ 1) + SourceOffset)
9440 M = InputsFixed[1] + SourceOffset;
9441 else if (M == InputsFixed[1] + SourceOffset)
9442 M = (InputsFixed[0] ^ 1) + SourceOffset;
9444 InputsFixed[1] = InputsFixed[0] ^ 1;
9447 // Point everything at the fixed inputs.
9448 for (int &M : HalfMask)
9449 if (M == IncomingInputs[0])
9450 M = InputsFixed[0] + SourceOffset;
9451 else if (M == IncomingInputs[1])
9452 M = InputsFixed[1] + SourceOffset;
9454 IncomingInputs[0] = InputsFixed[0] + SourceOffset;
9455 IncomingInputs[1] = InputsFixed[1] + SourceOffset;
9458 llvm_unreachable("Unhandled input size!");
9461 // Now hoist the DWord down to the right half.
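// The destination half owns PSHUFDMask slots DestOffset/2 and DestOffset/2 + 1;
// pick whichever of those two dword slots is still unassigned.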
9462 int FreeDWord = (PSHUFDMask[DestOffset / 2] == -1 ? 0 : 1) + DestOffset / 2;
9463 assert(PSHUFDMask[FreeDWord] == -1 && "DWord not free");
9464 PSHUFDMask[FreeDWord] = IncomingInputs[0] / 2;
9465 for (int &M : HalfMask)
9466 for (int Input : IncomingInputs)
9467 if (M == Input)
9468 M = FreeDWord * 2 + Input % 2;
9470 moveInputsToRightHalf(HToLInputs, LToLInputs, PSHUFHMask, LoMask, HiMask,
9471 /*SourceOffset*/ 4, /*DestOffset*/ 0);
9472 moveInputsToRightHalf(LToHInputs, HToHInputs, PSHUFLMask, HiMask, LoMask,
9473 /*SourceOffset*/ 0, /*DestOffset*/ 4);
9475 // Now enact all the shuffles we've computed to move the inputs into their
9476 // target half.
9477 if (!isNoopShuffleMask(PSHUFLMask))
9478 V = DAG.getNode(X86ISD::PSHUFLW, DL, MVT::v8i16, V,
9479 getV4X86ShuffleImm8ForMask(PSHUFLMask, DAG));
9480 if (!isNoopShuffleMask(PSHUFHMask))
9481 V = DAG.getNode(X86ISD::PSHUFHW, DL, MVT::v8i16, V,
9482 getV4X86ShuffleImm8ForMask(PSHUFHMask, DAG));
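// PSHUFD moves whole 32-bit dwords, so the dword-level fix-up is applied to a
// v4i32 view of the vector and the result is cast back to v8i16.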
9483 if (!isNoopShuffleMask(PSHUFDMask))
9484 V = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
9485 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
9486 DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V),
9487 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
9489 // At this point, each half should contain all its inputs, and we can then
9490 // just shuffle them into their final position.
9491 assert(std::count_if(LoMask.begin(), LoMask.end(),
9492 [](int M) { return M >= 4; }) == 0 &&
9493 "Failed to lift all the high half inputs to the low mask!");
9494 assert(std::count_if(HiMask.begin(), HiMask.end(),
9495 [](int M) { return M >= 0 && M < 4; }) == 0 &&
9496 "Failed to lift all the low half inputs to the high mask!");
9498 // Do a half shuffle for the low mask.
9499 if (!isNoopShuffleMask(LoMask))
9500 V = DAG.getNode(X86ISD::PSHUFLW, DL, MVT::v8i16, V,
9501 getV4X86ShuffleImm8ForMask(LoMask, DAG));
9503 // Do a half shuffle with the high mask after shifting its values down.
9504 for (int &M : HiMask)
9507 if (!isNoopShuffleMask(HiMask))
9508 V = DAG.getNode(X86ISD::PSHUFHW, DL, MVT::v8i16, V,
9509 getV4X86ShuffleImm8ForMask(HiMask, DAG));
9514 /// \brief Helper to form a PSHUFB-based shuffle+blend.
9515 static SDValue lowerVectorShuffleAsPSHUFB(SDLoc DL, MVT VT, SDValue V1,
9516 SDValue V2, ArrayRef<int> Mask,
9517 SelectionDAG &DAG, bool &V1InUse,
9519 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
9525 int Size = Mask.size();
9526 int Scale = 16 / Size;
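// Each logical mask element expands to Scale consecutive control bytes in the
// 16-byte PSHUFB masks built below.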
9527 for (int i = 0; i < 16; ++i) {
9528 if (Mask[i / Scale] == -1) {
9529 V1Mask[i] = V2Mask[i] = DAG.getUNDEF(MVT::i8);
9531 const int ZeroMask = 0x80;
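// A PSHUFB control byte with bit 7 set zeroes the corresponding result byte,
// which is how unused and zeroable lanes are expressed.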
9532 int V1Idx = Mask[i / Scale] < Size ? Mask[i / Scale] * Scale + i % Scale
9534 int V2Idx = Mask[i / Scale] < Size
9536 : (Mask[i / Scale] - Size) * Scale + i % Scale;
9537 if (Zeroable[i / Scale])
9538 V1Idx = V2Idx = ZeroMask;
9539 V1Mask[i] = DAG.getConstant(V1Idx, MVT::i8);
9540 V2Mask[i] = DAG.getConstant(V2Idx, MVT::i8);
9541 V1InUse |= (ZeroMask != V1Idx);
9542 V2InUse |= (ZeroMask != V2Idx);
9547 V1 = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8,
9548 DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, V1),
9549 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, V1Mask));
9551 V2 = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8,
9552 DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, V2),
9553 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, V2Mask));
9555 // If we need shuffled inputs from both, blend the two.
9557 if (V1InUse && V2InUse)
9558 V = DAG.getNode(ISD::OR, DL, MVT::v16i8, V1, V2);
9560 V = V1InUse ? V1 : V2;
9562 // Cast the result back to the correct type.
9563 return DAG.getNode(ISD::BITCAST, DL, VT, V);
9566 /// \brief Generic lowering of 8-lane i16 shuffles.
9568 /// This handles both single-input shuffles and combined shuffle/blends with
9569 /// two inputs. The single input shuffles are immediately delegated to
9570 /// a dedicated lowering routine.
9572 /// The blends are lowered in one of three fundamental ways. If there are few
9573 /// enough inputs, it delegates to a basic UNPCK-based strategy. If the shuffle
9574 /// of the input is significantly cheaper when lowered as an interleaving of
9575 /// the two inputs, try to interleave them. Otherwise, blend the low and high
9576 /// halves of the inputs separately (making them have relatively few inputs)
9577 /// and then concatenate them.
9578 static SDValue lowerV8I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
9579 const X86Subtarget *Subtarget,
9580 SelectionDAG &DAG) {
9582 assert(Op.getSimpleValueType() == MVT::v8i16 && "Bad shuffle type!");
9583 assert(V1.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
9584 assert(V2.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
9585 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
9586 ArrayRef<int> OrigMask = SVOp->getMask();
9587 int MaskStorage[8] = {OrigMask[0], OrigMask[1], OrigMask[2], OrigMask[3],
9588 OrigMask[4], OrigMask[5], OrigMask[6], OrigMask[7]};
9589 MutableArrayRef<int> Mask(MaskStorage);
9591 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
9593 // Whenever we can lower this as a zext, that instruction is strictly faster
9594 // than any alternative.
9595 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
9596 DL, MVT::v8i16, V1, V2, OrigMask, Subtarget, DAG))
9599 auto isV1 = [](int M) { return M >= 0 && M < 8; };
9600 auto isV2 = [](int M) { return M >= 8; };
9602 int NumV1Inputs = std::count_if(Mask.begin(), Mask.end(), isV1);
9603 int NumV2Inputs = std::count_if(Mask.begin(), Mask.end(), isV2);
9605 if (NumV2Inputs == 0)
9606 return lowerV8I16SingleInputVectorShuffle(DL, V1, Mask, Subtarget, DAG);
9608 assert(NumV1Inputs > 0 && "All single-input shuffles should be canonicalized "
9609 "to be V1-input shuffles.");
9611 // Try to use shift instructions.
9612 if (SDValue Shift =
9613 lowerVectorShuffleAsShift(DL, MVT::v8i16, V1, V2, Mask, DAG))
9614 return Shift;
9616 // There are special ways we can lower some single-element blends.
9617 if (NumV2Inputs == 1)
9618 if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v8i16, DL, V1, V2,
9619 Mask, Subtarget, DAG))
9622 // We have different paths for blend lowering, but they all must use the
9623 // *exact* same predicate.
9624 bool IsBlendSupported = Subtarget->hasSSE41();
9625 if (IsBlendSupported)
9626 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8i16, V1, V2, Mask,
9630 if (SDValue Masked =
9631 lowerVectorShuffleAsBitMask(DL, MVT::v8i16, V1, V2, Mask, DAG))
9634 // Use dedicated unpack instructions for masks that match their pattern.
9635 if (isShuffleEquivalent(V1, V2, Mask, 0, 8, 1, 9, 2, 10, 3, 11))
9636 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i16, V1, V2);
9637 if (isShuffleEquivalent(V1, V2, Mask, 4, 12, 5, 13, 6, 14, 7, 15))
9638 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i16, V1, V2);
9640 // Try to use byte rotation instructions.
9641 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
9642 DL, MVT::v8i16, V1, V2, Mask, Subtarget, DAG))
9645 if (SDValue BitBlend =
9646 lowerVectorShuffleAsBitBlend(DL, MVT::v8i16, V1, V2, Mask, DAG))
9649 if (SDValue Unpack =
9650 lowerVectorShuffleAsUnpack(MVT::v8i16, DL, V1, V2, Mask, DAG))
9653 // If we can't directly blend but can use PSHUFB, that will be better as it
9654 // can both shuffle and set up the inefficient blend.
9655 if (!IsBlendSupported && Subtarget->hasSSSE3()) {
9656 bool V1InUse, V2InUse;
9657 return lowerVectorShuffleAsPSHUFB(DL, MVT::v8i16, V1, V2, Mask, DAG,
9661 // We can always bit-blend if we have to so the fallback strategy is to
9662 // decompose into single-input permutes and blends.
9663 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8i16, V1, V2,
9667 /// \brief Check whether a compaction lowering can be done by dropping even
9668 /// elements and compute how many times even elements must be dropped.
9670 /// This handles shuffles which take every Nth element where N is a power of
9671 /// two. Example shuffle masks:
9673 /// N = 1: 0, 2, 4, 6, 8, 10, 12, 14, 0, 2, 4, 6, 8, 10, 12, 14
9674 /// N = 1: 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
9675 /// N = 2: 0, 4, 8, 12, 0, 4, 8, 12, 0, 4, 8, 12, 0, 4, 8, 12
9676 /// N = 2: 0, 4, 8, 12, 16, 20, 24, 28, 0, 4, 8, 12, 16, 20, 24, 28
9677 /// N = 3: 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8
9678 /// N = 3: 0, 8, 16, 24, 0, 8, 16, 24, 0, 8, 16, 24, 0, 8, 16, 24
9680 /// Any of these lanes can of course be undef.
9682 /// This routine only supports N <= 3.
9683 /// FIXME: Evaluate whether either AVX or AVX-512 have any opportunities here
9684 /// for improvement.
9686 /// \returns N above, or the number of times even elements must be dropped if
9687 /// there is such a number. Otherwise returns zero.
9688 static int canLowerByDroppingEvenElements(ArrayRef<int> Mask) {
9689 // Figure out whether we're looping over two inputs or just one.
9690 bool IsSingleInput = isSingleInputShuffleMask(Mask);
9692 // The modulus for the shuffle vector entries is based on whether this is
9693 // a single input or not.
9694 int ShuffleModulus = Mask.size() * (IsSingleInput ? 1 : 2);
9695 assert(isPowerOf2_32((uint32_t)ShuffleModulus) &&
9696 "We should only be called with masks with a power-of-2 size!");
9698 uint64_t ModMask = (uint64_t)ShuffleModulus - 1;
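// Since the modulus is a power of two, masking with ModMask below implements
// the '% M' from the documented pattern.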
9700 // We track whether the input is viable for all power-of-2 strides 2^1, 2^2,
9701 // and 2^3 simultaneously. This is because we may have ambiguity with
9702 // partially undef inputs.
9703 bool ViableForN[3] = {true, true, true};
9705 for (int i = 0, e = Mask.size(); i < e; ++i) {
9706 // Ignore undef lanes, we'll optimistically collapse them to the pattern we
9707 // find.
9708 if (Mask[i] == -1)
9709 continue;
9711 bool IsAnyViable = false;
9712 for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
9713 if (ViableForN[j]) {
9714 uint64_t N = j + 1;
9716 // The shuffle mask must be equal to (i * 2^N) % M.
9717 if ((uint64_t)Mask[i] == (((uint64_t)i << N) & ModMask))
9718 IsAnyViable = true;
9719 else
9720 ViableForN[j] = false;
9722 // Early exit if we exhaust the possible powers of two.
9723 if (!IsAnyViable)
9724 break;
9727 for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
9728 if (ViableForN[j])
9729 return j + 1;
9731 // Return 0 as there is no viable power of two.
9732 return 0;
9735 /// \brief Generic lowering of v16i8 shuffles.
9737 /// This is a hybrid strategy to lower v16i8 vectors. It first attempts to
9738 /// detect any complexity reducing interleaving. If that doesn't help, it uses
9739 /// UNPCK to spread the i8 elements across two i16-element vectors, and uses
9740 /// the existing lowering for v8i16 blends on each half, finally PACK-ing them
9741 /// back together.
9742 static SDValue lowerV16I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
9743 const X86Subtarget *Subtarget,
9744 SelectionDAG &DAG) {
9746 assert(Op.getSimpleValueType() == MVT::v16i8 && "Bad shuffle type!");
9747 assert(V1.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
9748 assert(V2.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
9749 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
9750 ArrayRef<int> Mask = SVOp->getMask();
9751 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
9753 // Try to use shift instructions.
9754 if (SDValue Shift =
9755 lowerVectorShuffleAsShift(DL, MVT::v16i8, V1, V2, Mask, DAG))
9756 return Shift;
9758 // Try to use byte rotation instructions.
9759 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
9760 DL, MVT::v16i8, V1, V2, Mask, Subtarget, DAG))
9763 // Try to use a zext lowering.
9764 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
9765 DL, MVT::v16i8, V1, V2, Mask, Subtarget, DAG))
9766 return ZExt;
9768 int NumV2Elements =
9769 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 16; });
9771 // For single-input shuffles, there are some nicer lowering tricks we can use.
9772 if (NumV2Elements == 0) {
9773 // Check for being able to broadcast a single element.
9774 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v16i8, DL, V1,
9775 Mask, Subtarget, DAG))
9778 // Check whether we can widen this to an i16 shuffle by duplicating bytes.
9779 // Notably, this handles splat and partial-splat shuffles more efficiently.
9780 // However, it only makes sense if the pre-duplication shuffle simplifies
9781 // things significantly. Currently, this means we need to be able to
9782 // express the pre-duplication shuffle as an i16 shuffle.
9784 // FIXME: We should check for other patterns which can be widened into an
9785 // i16 shuffle as well.
9786 auto canWidenViaDuplication = [](ArrayRef<int> Mask) {
9787 for (int i = 0; i < 16; i += 2)
9788 if (Mask[i] != -1 && Mask[i + 1] != -1 && Mask[i] != Mask[i + 1])
9789 return false;
9791 return true;
9793 auto tryToWidenViaDuplication = [&]() -> SDValue {
9794 if (!canWidenViaDuplication(Mask))
9795 return SDValue();
9796 SmallVector<int, 4> LoInputs;
9797 std::copy_if(Mask.begin(), Mask.end(), std::back_inserter(LoInputs),
9798 [](int M) { return M >= 0 && M < 8; });
9799 std::sort(LoInputs.begin(), LoInputs.end());
9800 LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()),
9802 SmallVector<int, 4> HiInputs;
9803 std::copy_if(Mask.begin(), Mask.end(), std::back_inserter(HiInputs),
9804 [](int M) { return M >= 8; });
9805 std::sort(HiInputs.begin(), HiInputs.end());
9806 HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()),
9809 bool TargetLo = LoInputs.size() >= HiInputs.size();
9810 ArrayRef<int> InPlaceInputs = TargetLo ? LoInputs : HiInputs;
9811 ArrayRef<int> MovingInputs = TargetLo ? HiInputs : LoInputs;
9813 int PreDupI16Shuffle[] = {-1, -1, -1, -1, -1, -1, -1, -1};
9814 SmallDenseMap<int, int, 8> LaneMap;
9815 for (int I : InPlaceInputs) {
9816 PreDupI16Shuffle[I/2] = I/2;
9819 int j = TargetLo ? 0 : 4, je = j + 4;
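// Walk the four i16 slots of the target half looking for a place to park each
// input that has to move across halves.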
9820 for (int i = 0, ie = MovingInputs.size(); i < ie; ++i) {
9821 // Check if j is already a shuffle of this input. This happens when
9822 // there are two adjacent bytes after we move the low one.
9823 if (PreDupI16Shuffle[j] != MovingInputs[i] / 2) {
9824 // If we haven't yet mapped the input, search for a slot into which
9825 // we can map it.
9826 while (j < je && PreDupI16Shuffle[j] != -1)
9830 // We can't place the inputs into a single half with a simple i16 shuffle, so bail.
9833 // Map this input with the i16 shuffle.
9834 PreDupI16Shuffle[j] = MovingInputs[i] / 2;
9837 // Update the lane map based on the mapping we ended up with.
9838 LaneMap[MovingInputs[i]] = 2 * j + MovingInputs[i] % 2;
9840 V1 = DAG.getNode(
9841 ISD::BITCAST, DL, MVT::v16i8,
9842 DAG.getVectorShuffle(MVT::v8i16, DL,
9843 DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1),
9844 DAG.getUNDEF(MVT::v8i16), PreDupI16Shuffle));
9846 // Unpack the bytes to form the i16s that will be shuffled into place.
9847 V1 = DAG.getNode(TargetLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
9848 MVT::v16i8, V1, V1);
9850 int PostDupI16Shuffle[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9851 for (int i = 0; i < 16; ++i)
9852 if (Mask[i] != -1) {
9853 int MappedMask = LaneMap[Mask[i]] - (TargetLo ? 0 : 8);
9854 assert(MappedMask < 8 && "Invalid v8 shuffle mask!");
9855 if (PostDupI16Shuffle[i / 2] == -1)
9856 PostDupI16Shuffle[i / 2] = MappedMask;
9858 assert(PostDupI16Shuffle[i / 2] == MappedMask &&
9859 "Conflicting entrties in the original shuffle!");
9861 return DAG.getNode(
9862 ISD::BITCAST, DL, MVT::v16i8,
9863 DAG.getVectorShuffle(MVT::v8i16, DL,
9864 DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1),
9865 DAG.getUNDEF(MVT::v8i16), PostDupI16Shuffle));
9867 if (SDValue V = tryToWidenViaDuplication())
9871 // Use dedicated unpack instructions for masks that match their pattern.
9872 if (isShuffleEquivalent(V1, V2, Mask,
9873 0, 16, 1, 17, 2, 18, 3, 19,
9874 4, 20, 5, 21, 6, 22, 7, 23))
9875 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i8, V1, V2);
9876 if (isShuffleEquivalent(V1, V2, Mask,
9877 8, 24, 9, 25, 10, 26, 11, 27,
9878 12, 28, 13, 29, 14, 30, 15, 31))
9879 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i8, V1, V2);
9881 // Check for SSSE3 which lets us lower all v16i8 shuffles much more directly
9882 // with PSHUFB. It is important to do this before we attempt to generate any
9883 // blends but after all of the single-input lowerings. If the single input
9884 // lowerings can find an instruction sequence that is faster than a PSHUFB, we
9885 // want to preserve that and we can DAG combine any longer sequences into
9886 // a PSHUFB in the end. But once we start blending from multiple inputs,
9887 // the complexity of DAG combining bad patterns back into PSHUFB is too high,
9888 // and there are *very* few patterns that would actually be faster than the
9889 // PSHUFB approach because of its ability to zero lanes.
9891 // FIXME: The only exceptions to the above are blends which are exact
9892 // interleavings with direct instructions supporting them. We currently don't
9893 // handle those well here.
9894 if (Subtarget->hasSSSE3()) {
9895 bool V1InUse = false;
9896 bool V2InUse = false;
9898 SDValue PSHUFB = lowerVectorShuffleAsPSHUFB(DL, MVT::v16i8, V1, V2, Mask,
9899 DAG, V1InUse, V2InUse);
9901 // If both V1 and V2 are in use and we can use a direct blend or an unpack,
9902 // do so. This avoids using them to handle blends-with-zero which is
9903 // important as a single pshufb is significantly faster for that.
9904 if (V1InUse && V2InUse) {
9905 if (Subtarget->hasSSE41())
9906 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v16i8, V1, V2,
9907 Mask, Subtarget, DAG))
9910 // We can use an unpack to do the blending rather than an or in some
9911 // cases. Even though the or may be (very minorly) more efficient, we
9912 // prefer this lowering because there are common cases where part of
9913 // the complexity of the shuffles goes away when we do the final blend as
9914 // an unpack.
9915 // FIXME: It might be worth trying to detect if the unpack-feeding
9916 // shuffles will both be pshufb, in which case we shouldn't bother with
9917 // this.
9918 if (SDValue Unpack =
9919 lowerVectorShuffleAsUnpack(MVT::v16i8, DL, V1, V2, Mask, DAG))
9926 // There are special ways we can lower some single-element blends.
9927 if (NumV2Elements == 1)
9928 if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v16i8, DL, V1, V2,
9929 Mask, Subtarget, DAG))
9932 if (SDValue BitBlend =
9933 lowerVectorShuffleAsBitBlend(DL, MVT::v16i8, V1, V2, Mask, DAG))
9936 // Check whether a compaction lowering can be done. This handles shuffles
9937 // which take every Nth element for some even N. See the helper function for
9940 // We special case these as they can be particularly efficiently handled with
9941 // the PACKUSWB instruction on x86 and they show up in common patterns of
9942 // rearranging bytes to truncate wide elements.
9943 if (int NumEvenDrops = canLowerByDroppingEvenElements(Mask)) {
9944 // NumEvenDrops is the power of two stride of the elements. Another way of
9945 // thinking about it is that we need to drop the even elements this many
9946 // times to get the original input.
9947 bool IsSingleInput = isSingleInputShuffleMask(Mask);
9949 // First we need to zero all the dropped bytes.
9950 assert(NumEvenDrops <= 3 &&
9951 "No support for dropping even elements more than 3 times.");
9952 // We use the mask type to pick which bytes are preserved based on how many
9953 // elements are dropped.
9954 MVT MaskVTs[] = { MVT::v8i16, MVT::v4i32, MVT::v2i64 };
9955 SDValue ByteClearMask =
9956 DAG.getNode(ISD::BITCAST, DL, MVT::v16i8,
9957 DAG.getConstant(0xFF, MaskVTs[NumEvenDrops - 1]));
9958 V1 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V1, ByteClearMask);
9960 V2 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V2, ByteClearMask);
9962 // Now pack things back together.
9963 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1);
9964 V2 = IsSingleInput ? V1 : DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V2);
9965 SDValue Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, V1, V2);
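// Each additional drop level needs another pack; since everything now lives in
// one vector we simply pack the result with itself.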
9966 for (int i = 1; i < NumEvenDrops; ++i) {
9967 Result = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, Result);
9968 Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, Result, Result);
9974 // Handle multi-input cases by blending single-input shuffles.
9975 if (NumV2Elements > 0)
9976 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v16i8, V1, V2,
9979 // The fallback path for single-input shuffles widens this into two v8i16
9980 // vectors with unpacks, shuffles those, and then pulls them back together
9981 // with a pack.
9983 SDValue V = V1;
9984 int LoBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9985 int HiBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9986 for (int i = 0; i < 16; ++i)
9988 (i < 8 ? LoBlendMask[i] : HiBlendMask[i % 8]) = Mask[i];
9990 SDValue Zero = getZeroVector(MVT::v8i16, Subtarget, DAG, DL);
9992 SDValue VLoHalf, VHiHalf;
9993 // Check if any of the odd lanes in the v16i8 are used. If not, we can mask
9994 // them out and avoid using UNPCK{L,H} to extract the elements of V as
9995 // i16s.
9996 if (std::none_of(std::begin(LoBlendMask), std::end(LoBlendMask),
9997 [](int M) { return M >= 0 && M % 2 == 1; }) &&
9998 std::none_of(std::begin(HiBlendMask), std::end(HiBlendMask),
9999 [](int M) { return M >= 0 && M % 2 == 1; })) {
10000 // Use a mask to drop the high bytes.
10001 VLoHalf = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V);
10002 VLoHalf = DAG.getNode(ISD::AND, DL, MVT::v8i16, VLoHalf,
10003 DAG.getConstant(0x00FF, MVT::v8i16));
10005 // This will be a single vector shuffle instead of a blend so nuke VHiHalf.
10006 VHiHalf = DAG.getUNDEF(MVT::v8i16);
10008 // Squash the masks to point directly into VLoHalf.
10009 for (int &M : LoBlendMask)
10012 for (int &M : HiBlendMask)
10016 // Otherwise just unpack the low half of V into VLoHalf and the high half into
10017 // VHiHalf so that we can blend them as i16s.
10018 VLoHalf = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
10019 DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i8, V, Zero));
10020 VHiHalf = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
10021 DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i8, V, Zero));
10024 SDValue LoV = DAG.getVectorShuffle(MVT::v8i16, DL, VLoHalf, VHiHalf, LoBlendMask);
10025 SDValue HiV = DAG.getVectorShuffle(MVT::v8i16, DL, VLoHalf, VHiHalf, HiBlendMask);
10027 return DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, LoV, HiV);
10030 /// \brief Dispatching routine to lower various 128-bit x86 vector shuffles.
10032 /// This routine breaks down the specific type of 128-bit shuffle and
10033 /// dispatches to the lowering routines accordingly.
10034 static SDValue lower128BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10035 MVT VT, const X86Subtarget *Subtarget,
10036 SelectionDAG &DAG) {
10037 switch (VT.SimpleTy) {
10039 return lowerV2I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
10041 return lowerV2F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
10043 return lowerV4I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
10045 return lowerV4F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
10047 return lowerV8I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
10049 return lowerV16I8VectorShuffle(Op, V1, V2, Subtarget, DAG);
10052 llvm_unreachable("Unimplemented!");
10056 /// \brief Helper function to test whether a shuffle mask could be
10057 /// simplified by widening the elements being shuffled.
10059 /// Appends the mask for wider elements in WidenedMask if valid. Otherwise
10060 /// leaves it in an unspecified state.
10062 /// NOTE: This must handle normal vector shuffle masks and *target* vector
10063 /// shuffle masks. The latter have the special property of a '-2' representing
10064 /// a zero-ed lane of a vector.
10065 static bool canWidenShuffleElements(ArrayRef<int> Mask,
10066 SmallVectorImpl<int> &WidenedMask) {
10067 for (int i = 0, Size = Mask.size(); i < Size; i += 2) {
10068 // If both elements are undef, it's trivial.
10069 if (Mask[i] == SM_SentinelUndef && Mask[i + 1] == SM_SentinelUndef) {
10070 WidenedMask.push_back(SM_SentinelUndef);
10074 // Check for an undef mask and a mask value properly aligned to fit with
10075 // a pair of values. If we find such a case, use the non-undef mask's value.
10076 if (Mask[i] == SM_SentinelUndef && Mask[i + 1] >= 0 && Mask[i + 1] % 2 == 1) {
10077 WidenedMask.push_back(Mask[i + 1] / 2);
10080 if (Mask[i + 1] == SM_SentinelUndef && Mask[i] >= 0 && Mask[i] % 2 == 0) {
10081 WidenedMask.push_back(Mask[i] / 2);
10085 // When zeroing, we need to spread the zeroing across both lanes to widen.
10086 if (Mask[i] == SM_SentinelZero || Mask[i + 1] == SM_SentinelZero) {
10087 if ((Mask[i] == SM_SentinelZero || Mask[i] == SM_SentinelUndef) &&
10088 (Mask[i + 1] == SM_SentinelZero || Mask[i + 1] == SM_SentinelUndef)) {
10089 WidenedMask.push_back(SM_SentinelZero);
10095 // Finally check if the two mask values are adjacent and aligned with
10097 if (Mask[i] != SM_SentinelUndef && Mask[i] % 2 == 0 && Mask[i] + 1 == Mask[i + 1]) {
10098 WidenedMask.push_back(Mask[i] / 2);
10102 // Otherwise we can't safely widen the elements used in this shuffle.
10105 assert(WidenedMask.size() == Mask.size() / 2 &&
10106 "Incorrect size of mask after widening the elements!");
10111 /// \brief Generic routine to split vector shuffle into half-sized shuffles.
10113 /// This routine just extracts two subvectors, shuffles them independently, and
10114 /// then concatenates them back together. This should work effectively with all
10115 /// AVX vector shuffle types.
10116 static SDValue splitAndLowerVectorShuffle(SDLoc DL, MVT VT, SDValue V1,
10117 SDValue V2, ArrayRef<int> Mask,
10118 SelectionDAG &DAG) {
10119 assert(VT.getSizeInBits() >= 256 &&
10120 "Only for 256-bit or wider vector shuffles!");
10121 assert(V1.getSimpleValueType() == VT && "Bad operand type!");
10122 assert(V2.getSimpleValueType() == VT && "Bad operand type!");
10124 ArrayRef<int> LoMask = Mask.slice(0, Mask.size() / 2);
10125 ArrayRef<int> HiMask = Mask.slice(Mask.size() / 2);
10127 int NumElements = VT.getVectorNumElements();
10128 int SplitNumElements = NumElements / 2;
10129 MVT ScalarVT = VT.getScalarType();
10130 MVT SplitVT = MVT::getVectorVT(ScalarVT, NumElements / 2);
10132 // Rather than splitting build-vectors, just build two narrower build
10133 // vectors. This helps shuffling with splats and zeros.
10134 auto SplitVector = [&](SDValue V) {
10135 while (V.getOpcode() == ISD::BITCAST)
10136 V = V->getOperand(0);
10138 MVT OrigVT = V.getSimpleValueType();
10139 int OrigNumElements = OrigVT.getVectorNumElements();
10140 int OrigSplitNumElements = OrigNumElements / 2;
10141 MVT OrigScalarVT = OrigVT.getScalarType();
10142 MVT OrigSplitVT = MVT::getVectorVT(OrigScalarVT, OrigNumElements / 2);
10146 auto *BV = dyn_cast<BuildVectorSDNode>(V);
10148 LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigSplitVT, V,
10149 DAG.getIntPtrConstant(0));
10150 HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigSplitVT, V,
10151 DAG.getIntPtrConstant(OrigSplitNumElements));
10154 SmallVector<SDValue, 16> LoOps, HiOps;
10155 for (int i = 0; i < OrigSplitNumElements; ++i) {
10156 LoOps.push_back(BV->getOperand(i));
10157 HiOps.push_back(BV->getOperand(i + OrigSplitNumElements));
10159 LoV = DAG.getNode(ISD::BUILD_VECTOR, DL, OrigSplitVT, LoOps);
10160 HiV = DAG.getNode(ISD::BUILD_VECTOR, DL, OrigSplitVT, HiOps);
10162 return std::make_pair(DAG.getNode(ISD::BITCAST, DL, SplitVT, LoV),
10163 DAG.getNode(ISD::BITCAST, DL, SplitVT, HiV));
10166 SDValue LoV1, HiV1, LoV2, HiV2;
10167 std::tie(LoV1, HiV1) = SplitVector(V1);
10168 std::tie(LoV2, HiV2) = SplitVector(V2);
10170 // Now create two 4-way blends of these half-width vectors.
10171 auto HalfBlend = [&](ArrayRef<int> HalfMask) {
10172 bool UseLoV1 = false, UseHiV1 = false, UseLoV2 = false, UseHiV2 = false;
10173 SmallVector<int, 32> V1BlendMask, V2BlendMask, BlendMask;
10174 for (int i = 0; i < SplitNumElements; ++i) {
10175 int M = HalfMask[i];
10176 if (M >= NumElements) {
10177 if (M >= NumElements + SplitNumElements)
10181 V2BlendMask.push_back(M - NumElements);
10182 V1BlendMask.push_back(-1);
10183 BlendMask.push_back(SplitNumElements + i);
10184 } else if (M >= 0) {
10185 if (M >= SplitNumElements)
10189 V2BlendMask.push_back(-1);
10190 V1BlendMask.push_back(M);
10191 BlendMask.push_back(i);
10193 V2BlendMask.push_back(-1);
10194 V1BlendMask.push_back(-1);
10195 BlendMask.push_back(-1);
10199 // Because the lowering happens after all combining takes place, we need to
10200 // manually combine these blend masks as much as possible so that we create
10201 // a minimal number of high-level vector shuffle nodes.
10203 // First try just blending the halves of V1 or V2.
10204 if (!UseLoV1 && !UseHiV1 && !UseLoV2 && !UseHiV2)
10205 return DAG.getUNDEF(SplitVT);
10206 if (!UseLoV2 && !UseHiV2)
10207 return DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
10208 if (!UseLoV1 && !UseHiV1)
10209 return DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
10211 SDValue V1Blend, V2Blend;
10212 if (UseLoV1 && UseHiV1) {
10214 DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
10216 // We only use half of V1 so map the usage down into the final blend mask.
10217 V1Blend = UseLoV1 ? LoV1 : HiV1;
10218 for (int i = 0; i < SplitNumElements; ++i)
10219 if (BlendMask[i] >= 0 && BlendMask[i] < SplitNumElements)
10220 BlendMask[i] = V1BlendMask[i] - (UseLoV1 ? 0 : SplitNumElements);
10222 if (UseLoV2 && UseHiV2) {
10224 DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
10226 // We only use half of V2 so map the usage down into the final blend mask.
10227 V2Blend = UseLoV2 ? LoV2 : HiV2;
10228 for (int i = 0; i < SplitNumElements; ++i)
10229 if (BlendMask[i] >= SplitNumElements)
10230 BlendMask[i] = V2BlendMask[i] + (UseLoV2 ? SplitNumElements : 0);
10232 return DAG.getVectorShuffle(SplitVT, DL, V1Blend, V2Blend, BlendMask);
10234 SDValue Lo = HalfBlend(LoMask);
10235 SDValue Hi = HalfBlend(HiMask);
10236 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
10239 /// \brief Either split a vector in halves or decompose the shuffles and the
10240 /// blends.
10242 /// This is provided as a good fallback for many lowerings of non-single-input
10243 /// shuffles with more than one 128-bit lane. In those cases, we want to select
10244 /// between splitting the shuffle into 128-bit components and stitching those
10245 /// back together vs. extracting the single-input shuffles and blending those
10246 /// results.
10247 static SDValue lowerVectorShuffleAsSplitOrBlend(SDLoc DL, MVT VT, SDValue V1,
10248 SDValue V2, ArrayRef<int> Mask,
10249 SelectionDAG &DAG) {
10250 assert(!isSingleInputShuffleMask(Mask) && "This routine must not be used to "
10251 "lower single-input shuffles as it "
10252 "could then recurse on itself.");
10253 int Size = Mask.size();
10255 // If this can be modeled as a broadcast of two elements followed by a blend,
10256 // prefer that lowering. This is especially important because broadcasts can
10257 // often fold with memory operands.
10258 auto DoBothBroadcast = [&] {
10259 int V1BroadcastIdx = -1, V2BroadcastIdx = -1;
10262 if (V2BroadcastIdx == -1)
10263 V2BroadcastIdx = M - Size;
10264 else if (M - Size != V2BroadcastIdx)
10266 } else if (M >= 0) {
10267 if (V1BroadcastIdx == -1)
10268 V1BroadcastIdx = M;
10269 else if (M != V1BroadcastIdx)
10274 if (DoBothBroadcast())
10275 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask,
10278 // If the inputs all stem from a single 128-bit lane of each input, then we
10279 // split them rather than blending because the split will decompose to
10280 // unusually few instructions.
10281 int LaneCount = VT.getSizeInBits() / 128;
10282 int LaneSize = Size / LaneCount;
10283 SmallBitVector LaneInputs[2];
10284 LaneInputs[0].resize(LaneCount, false);
10285 LaneInputs[1].resize(LaneCount, false);
10286 for (int i = 0; i < Size; ++i)
10288 LaneInputs[Mask[i] / Size][(Mask[i] % Size) / LaneSize] = true;
10289 if (LaneInputs[0].count() <= 1 && LaneInputs[1].count() <= 1)
10290 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
10292 // Otherwise, just fall back to decomposed shuffles and a blend. This requires
10293 // that the decomposed single-input shuffles don't end up here.
10294 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask, DAG);
10297 /// \brief Lower a vector shuffle crossing multiple 128-bit lanes as
10298 /// a permutation and blend of those lanes.
10300 /// This essentially blends the out-of-lane inputs to each lane into the lane
10301 /// from a permuted copy of the vector. This lowering strategy results in four
10302 /// instructions in the worst case for a single-input cross lane shuffle which
10303 /// is lower than any other fully general cross-lane shuffle strategy I'm aware
10304 /// of. Special cases for each particular shuffle pattern should be handled
10305 /// prior to trying this lowering.
10306 static SDValue lowerVectorShuffleAsLanePermuteAndBlend(SDLoc DL, MVT VT,
10307 SDValue V1, SDValue V2,
10308 ArrayRef<int> Mask,
10309 SelectionDAG &DAG) {
10310 // FIXME: This should probably be generalized for 512-bit vectors as well.
10311 assert(VT.getSizeInBits() == 256 && "Only for 256-bit vector shuffles!");
10312 int LaneSize = Mask.size() / 2;
10314 // If there are only inputs from one 128-bit lane, splitting will in fact be
10315 // less expensive. The flags track whether the given lane contains an element
10316 // that crosses to another lane.
10317 bool LaneCrossing[2] = {false, false};
10318 for (int i = 0, Size = Mask.size(); i < Size; ++i)
10319 if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
10320 LaneCrossing[(Mask[i] % Size) / LaneSize] = true;
10321 if (!LaneCrossing[0] || !LaneCrossing[1])
10322 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
10324 if (isSingleInputShuffleMask(Mask)) {
10325 SmallVector<int, 32> FlippedBlendMask;
10326 for (int i = 0, Size = Mask.size(); i < Size; ++i)
10327 FlippedBlendMask.push_back(
10328 Mask[i] < 0 ? -1 : (((Mask[i] % Size) / LaneSize == i / LaneSize)
10330 : Mask[i] % LaneSize +
10331 (i / LaneSize) * LaneSize + Size));
10333 // Flip the vector, and blend the results which should now be in-lane. The
10334 // VPERM2X128 mask uses the low 2 bits for the low source and bits 4 and
10335 // 5 for the high source. The value 3 selects the high half of source 2 and
10336 // the value 2 selects the low half of source 2. We only use source 2 to
10337 // allow folding it into a memory operand.
10338 unsigned PERMMask = 3 | 2 << 4;
10339 SDValue Flipped = DAG.getNode(X86ISD::VPERM2X128, DL, VT, DAG.getUNDEF(VT),
10340 V1, DAG.getConstant(PERMMask, MVT::i8));
10341 return DAG.getVectorShuffle(VT, DL, V1, Flipped, FlippedBlendMask);
10344 // This now reduces to two single-input shuffles of V1 and V2 which at worst
10345 // will be handled by the above logic and a blend of the results, much like
10346 // other patterns in AVX.
10347 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask, DAG);
10350 /// \brief Handle lowering 2-lane 128-bit shuffles.
10351 static SDValue lowerV2X128VectorShuffle(SDLoc DL, MVT VT, SDValue V1,
10352 SDValue V2, ArrayRef<int> Mask,
10353 const X86Subtarget *Subtarget,
10354 SelectionDAG &DAG) {
10355 // Blends are faster and handle all the non-lane-crossing cases.
10356 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, VT, V1, V2, Mask,
10360 MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(),
10361 VT.getVectorNumElements() / 2);
10362 // Check for patterns which can be matched with a single insert of a 128-bit
10363 // subvector.
10364 if (isShuffleEquivalent(V1, V2, Mask, 0, 1, 0, 1) ||
10365 isShuffleEquivalent(V1, V2, Mask, 0, 1, 4, 5)) {
10366 SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
10367 DAG.getIntPtrConstant(0));
10368 SDValue HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT,
10369 Mask[2] < 4 ? V1 : V2, DAG.getIntPtrConstant(0));
10370 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LoV, HiV);
10372 if (isShuffleEquivalent(V1, V2, Mask, 0, 1, 6, 7)) {
10373 SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
10374 DAG.getIntPtrConstant(0));
10375 SDValue HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V2,
10376 DAG.getIntPtrConstant(2));
10377 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LoV, HiV);
10380 // Otherwise form a 128-bit permutation.
10381 // FIXME: Detect zero-vector inputs and use the VPERM2X128 to zero that half.
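// Bits [1:0] of the VPERM2X128 immediate select the source 128-bit half for
// the low result lane and bits [5:4] select it for the high lane, which is
// exactly what the widened mask elements encode once divided by two.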
10382 unsigned PermMask = Mask[0] / 2 | (Mask[2] / 2) << 4;
10383 return DAG.getNode(X86ISD::VPERM2X128, DL, VT, V1, V2,
10384 DAG.getConstant(PermMask, MVT::i8));
10387 /// \brief Lower a vector shuffle by first fixing the 128-bit lanes and then
10388 /// shuffling each lane.
10390 /// This will only succeed when the result of fixing the 128-bit lanes results
10391 /// in a single-input non-lane-crossing shuffle with a repeating shuffle mask in
10392 /// each 128-bit lane. This handles many cases where we can quickly blend away
10393 /// the lane crosses early and then use simpler shuffles within each lane.
10395 /// FIXME: It might be worthwhile at some point to support this without
10396 /// requiring the 128-bit lane-relative shuffles to be repeating, but currently
10397 /// in x86 only floating point has interesting non-repeating shuffles, and even
10398 /// those are still *marginally* more expensive.
10399 static SDValue lowerVectorShuffleByMerging128BitLanes(
10400 SDLoc DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
10401 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
10402 assert(!isSingleInputShuffleMask(Mask) &&
10403 "This is only useful with multiple inputs.");
10405 int Size = Mask.size();
10406 int LaneSize = 128 / VT.getScalarSizeInBits();
10407 int NumLanes = Size / LaneSize;
10408 assert(NumLanes > 1 && "Only handles 256-bit and wider shuffles.");
10410 // See if we can build a hypothetical 128-bit lane-fixing shuffle mask. Also
10411 // check whether the in-128-bit lane shuffles share a repeating pattern.
10412 SmallVector<int, 4> Lanes;
10413 Lanes.resize(NumLanes, -1);
10414 SmallVector<int, 4> InLaneMask;
10415 InLaneMask.resize(LaneSize, -1);
10416 for (int i = 0; i < Size; ++i) {
10420 int j = i / LaneSize;
10422 if (Lanes[j] < 0) {
10423 // First entry we've seen for this lane.
10424 Lanes[j] = Mask[i] / LaneSize;
10425 } else if (Lanes[j] != Mask[i] / LaneSize) {
10426 // This doesn't match the lane selected previously!
10430 // Check that within each lane we have a consistent shuffle mask.
10431 int k = i % LaneSize;
10432 if (InLaneMask[k] < 0) {
10433 InLaneMask[k] = Mask[i] % LaneSize;
10434 } else if (InLaneMask[k] != Mask[i] % LaneSize) {
10435 // This doesn't fit a repeating in-lane mask.
10440 // First shuffle the lanes into place.
10441 MVT LaneVT = MVT::getVectorVT(VT.isFloatingPoint() ? MVT::f64 : MVT::i64,
10442 VT.getSizeInBits() / 64);
10443 SmallVector<int, 8> LaneMask;
10444 LaneMask.resize(NumLanes * 2, -1);
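// In the 64-bit element view each 128-bit lane is a pair of elements, so lane
// L maps to elements 2*L and 2*L+1.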
10445 for (int i = 0; i < NumLanes; ++i)
10446 if (Lanes[i] >= 0) {
10447 LaneMask[2 * i + 0] = 2*Lanes[i] + 0;
10448 LaneMask[2 * i + 1] = 2*Lanes[i] + 1;
10451 V1 = DAG.getNode(ISD::BITCAST, DL, LaneVT, V1);
10452 V2 = DAG.getNode(ISD::BITCAST, DL, LaneVT, V2);
10453 SDValue LaneShuffle = DAG.getVectorShuffle(LaneVT, DL, V1, V2, LaneMask);
10455 // Cast it back to the type we actually want.
10456 LaneShuffle = DAG.getNode(ISD::BITCAST, DL, VT, LaneShuffle);
10458 // Now do a simple shuffle that isn't lane crossing.
10459 SmallVector<int, 8> NewMask;
10460 NewMask.resize(Size, -1);
10461 for (int i = 0; i < Size; ++i)
10463 NewMask[i] = (i / LaneSize) * LaneSize + Mask[i] % LaneSize;
10464 assert(!is128BitLaneCrossingShuffleMask(VT, NewMask) &&
10465 "Must not introduce lane crosses at this point!");
10467 return DAG.getVectorShuffle(VT, DL, LaneShuffle, DAG.getUNDEF(VT), NewMask);
10470 /// \brief Test whether the specified input (0 or 1) is in-place blended by the
10471 /// given mask.
10473 /// This returns true if the elements from a particular input are already in the
10474 /// slot required by the given mask and require no permutation.
10475 static bool isShuffleMaskInputInPlace(int Input, ArrayRef<int> Mask) {
10476 assert((Input == 0 || Input == 1) && "Only two inputs to shuffles.");
10477 int Size = Mask.size();
10478 for (int i = 0; i < Size; ++i)
10479 if (Mask[i] >= 0 && Mask[i] / Size == Input && Mask[i] % Size != i)
10485 /// \brief Handle lowering of 4-lane 64-bit floating point shuffles.
10487 /// Also ends up handling lowering of 4-lane 64-bit integer shuffles when AVX2
10488 /// isn't available.
10489 static SDValue lowerV4F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10490 const X86Subtarget *Subtarget,
10491 SelectionDAG &DAG) {
10493 assert(V1.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
10494 assert(V2.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
10495 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10496 ArrayRef<int> Mask = SVOp->getMask();
10497 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
10499 SmallVector<int, 4> WidenedMask;
10500 if (canWidenShuffleElements(Mask, WidenedMask))
10501 return lowerV2X128VectorShuffle(DL, MVT::v4f64, V1, V2, Mask, Subtarget,
10504 if (isSingleInputShuffleMask(Mask)) {
10505 // Check for being able to broadcast a single element.
10506 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4f64, DL, V1,
10507 Mask, Subtarget, DAG))
10510 // Use low duplicate instructions for masks that match their pattern.
10511 if (isShuffleEquivalent(V1, V2, Mask, 0, 0, 2, 2))
10512 return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v4f64, V1);
10514 if (!is128BitLaneCrossingShuffleMask(MVT::v4f64, Mask)) {
10515 // Non-half-crossing single input shuffles can be lowered with an
10516 // interleaved permutation.
10517 unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
10518 ((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3);
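// For VPERMILPD each immediate bit picks the low or high double within the
// 128-bit lane that holds the corresponding result element.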
10519 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f64, V1,
10520 DAG.getConstant(VPERMILPMask, MVT::i8));
10523 // With AVX2 we have direct support for this permutation.
10524 if (Subtarget->hasAVX2())
10525 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4f64, V1,
10526 getV4X86ShuffleImm8ForMask(Mask, DAG));
10528 // Otherwise, fall back.
10529 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v4f64, V1, V2, Mask,
10533 // X86 has dedicated unpack instructions that can handle specific blend
10534 // operations: UNPCKH and UNPCKL.
10535 if (isShuffleEquivalent(V1, V2, Mask, 0, 4, 2, 6))
10536 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4f64, V1, V2);
10537 if (isShuffleEquivalent(V1, V2, Mask, 1, 5, 3, 7))
10538 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4f64, V1, V2);
10539 if (isShuffleEquivalent(V1, V2, Mask, 4, 0, 6, 2))
10540 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4f64, V2, V1);
10541 if (isShuffleEquivalent(V1, V2, Mask, 5, 1, 7, 3))
10542 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4f64, V2, V1);
10544 // If we have a single input to the zero element, insert that into V1 if we
10545 // can do so cheaply.
10546 int NumV2Elements =
10547 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
10548 if (NumV2Elements == 1 && Mask[0] >= 4)
10549 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
10550 MVT::v4f64, DL, V1, V2, Mask, Subtarget, DAG))
10553 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4f64, V1, V2, Mask,
10557 // Check if the blend happens to exactly fit that of SHUFPD.
10558 if ((Mask[0] == -1 || Mask[0] < 2) &&
10559 (Mask[1] == -1 || (Mask[1] >= 4 && Mask[1] < 6)) &&
10560 (Mask[2] == -1 || (Mask[2] >= 2 && Mask[2] < 4)) &&
10561 (Mask[3] == -1 || Mask[3] >= 6)) {
10562 unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 5) << 1) |
10563 ((Mask[2] == 3) << 2) | ((Mask[3] == 7) << 3);
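// SHUFPD pulls even result elements from the first operand and odd ones from
// the second; each immediate bit selects the low or high double within that
// operand's 128-bit lane.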
10564 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f64, V1, V2,
10565 DAG.getConstant(SHUFPDMask, MVT::i8));
10567 if ((Mask[0] == -1 || (Mask[0] >= 4 && Mask[0] < 6)) &&
10568 (Mask[1] == -1 || Mask[1] < 2) &&
10569 (Mask[2] == -1 || Mask[2] >= 6) &&
10570 (Mask[3] == -1 || (Mask[3] >= 2 && Mask[3] < 4))) {
10571 unsigned SHUFPDMask = (Mask[0] == 5) | ((Mask[1] == 1) << 1) |
10572 ((Mask[2] == 7) << 2) | ((Mask[3] == 3) << 3);
10573 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f64, V2, V1,
10574 DAG.getConstant(SHUFPDMask, MVT::i8));
10577 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10578 // shuffle. However, if we have AVX2 and either input is already in place,
10579 // we will be able to shuffle the other input even across lanes in a single
10580 // instruction, so skip this pattern.
10581 if (!(Subtarget->hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) ||
10582 isShuffleMaskInputInPlace(1, Mask))))
10583 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10584 DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
10587 // If we have AVX2 then we always want to lower with a blend because at v4 we
10588 // can fully permute the elements.
10589 if (Subtarget->hasAVX2())
10590 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4f64, V1, V2,
10593 // Otherwise fall back on generic lowering.
10594 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v4f64, V1, V2, Mask, DAG);
10597 /// \brief Handle lowering of 4-lane 64-bit integer shuffles.
10599 /// This routine is only called when we have AVX2 and thus a reasonable
10600 /// instruction set for v4i64 shuffling.
10601 static SDValue lowerV4I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10602 const X86Subtarget *Subtarget,
10603 SelectionDAG &DAG) {
10605 assert(V1.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
10606 assert(V2.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
10607 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10608 ArrayRef<int> Mask = SVOp->getMask();
10609 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
10610 assert(Subtarget->hasAVX2() && "We can only lower v4i64 with AVX2!");
10612 SmallVector<int, 4> WidenedMask;
10613 if (canWidenShuffleElements(Mask, WidenedMask))
10614 return lowerV2X128VectorShuffle(DL, MVT::v4i64, V1, V2, Mask, Subtarget,
10617 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4i64, V1, V2, Mask,
10621 // Check for being able to broadcast a single element.
10622 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4i64, DL, V1,
10623 Mask, Subtarget, DAG))
10626 // When the shuffle is mirrored between the 128-bit lanes of the unit, we can
10627 // use lower latency instructions that will operate on both 128-bit lanes.
10628 SmallVector<int, 2> RepeatedMask;
10629 if (is128BitLaneRepeatedShuffleMask(MVT::v4i64, Mask, RepeatedMask)) {
10630 if (isSingleInputShuffleMask(Mask)) {
10631 int PSHUFDMask[] = {-1, -1, -1, -1};
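// Expand each repeated 64-bit mask entry into the pair of 32-bit elements it
// covers so the whole shuffle can be done as a PSHUFD on a v8i32 view.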
10632 for (int i = 0; i < 2; ++i)
10633 if (RepeatedMask[i] >= 0) {
10634 PSHUFDMask[2 * i] = 2 * RepeatedMask[i];
10635 PSHUFDMask[2 * i + 1] = 2 * RepeatedMask[i] + 1;
10637 return DAG.getNode(
10638 ISD::BITCAST, DL, MVT::v4i64,
10639 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32,
10640 DAG.getNode(ISD::BITCAST, DL, MVT::v8i32, V1),
10641 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
10645 // AVX2 provides a direct instruction for permuting a single input across
10646 // lanes.
10647 if (isSingleInputShuffleMask(Mask))
10648 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4i64, V1,
10649 getV4X86ShuffleImm8ForMask(Mask, DAG));
10651 // Try to use shift instructions.
10652 if (SDValue Shift =
10653 lowerVectorShuffleAsShift(DL, MVT::v4i64, V1, V2, Mask, DAG))
10656 // Use dedicated unpack instructions for masks that match their pattern.
10657 if (isShuffleEquivalent(V1, V2, Mask, 0, 4, 2, 6))
10658 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4i64, V1, V2);
10659 if (isShuffleEquivalent(V1, V2, Mask, 1, 5, 3, 7))
10660 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4i64, V1, V2);
10661 if (isShuffleEquivalent(V1, V2, Mask, 4, 0, 6, 2))
10662 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4i64, V2, V1);
10663 if (isShuffleEquivalent(V1, V2, Mask, 5, 1, 7, 3))
10664 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4i64, V2, V1);
10666 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10667 // shuffle. However, if we have AVX2 and either input is already in place,
10668 // we will be able to shuffle the other input even across lanes in a single
10669 // instruction, so skip this pattern.
10670 if (!(Subtarget->hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) ||
10671 isShuffleMaskInputInPlace(1, Mask))))
10672 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10673 DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG))
10676 // Otherwise fall back on generic blend lowering.
10677 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4i64, V1, V2,
10681 /// \brief Handle lowering of 8-lane 32-bit floating point shuffles.
10683 /// Also ends up handling lowering of 8-lane 32-bit integer shuffles when AVX2
10684 /// isn't available.
10685 static SDValue lowerV8F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10686 const X86Subtarget *Subtarget,
10687 SelectionDAG &DAG) {
10689 assert(V1.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
10690 assert(V2.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
10691 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10692 ArrayRef<int> Mask = SVOp->getMask();
10693 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
10695 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8f32, V1, V2, Mask,
10699 // Check for being able to broadcast a single element.
10700 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v8f32, DL, V1,
10701 Mask, Subtarget, DAG))
10704 // If the shuffle mask is repeated in each 128-bit lane, we have many more
10705 // options to efficiently lower the shuffle.
10706 SmallVector<int, 4> RepeatedMask;
10707 if (is128BitLaneRepeatedShuffleMask(MVT::v8f32, Mask, RepeatedMask)) {
10708 assert(RepeatedMask.size() == 4 &&
10709 "Repeated masks must be half the mask width!");
10711 // Use even/odd duplicate instructions for masks that match their pattern.
10712 if (isShuffleEquivalent(V1, V2, Mask, 0, 0, 2, 2, 4, 4, 6, 6))
10713 return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v8f32, V1);
10714 if (isShuffleEquivalent(V1, V2, Mask, 1, 1, 3, 3, 5, 5, 7, 7))
10715 return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v8f32, V1);
10717 if (isSingleInputShuffleMask(Mask))
10718 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f32, V1,
10719 getV4X86ShuffleImm8ForMask(RepeatedMask, DAG));
10721 // Use dedicated unpack instructions for masks that match their pattern.
10722 if (isShuffleEquivalent(V1, V2, Mask, 0, 8, 1, 9, 4, 12, 5, 13))
10723 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8f32, V1, V2);
10724 if (isShuffleEquivalent(V1, V2, Mask, 2, 10, 3, 11, 6, 14, 7, 15))
10725 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8f32, V1, V2);
10726 if (isShuffleEquivalent(V1, V2, Mask, 8, 0, 9, 1, 12, 4, 13, 5))
10727 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8f32, V2, V1);
10728 if (isShuffleEquivalent(V1, V2, Mask, 10, 2, 11, 3, 14, 6, 15, 7))
10729 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8f32, V2, V1);
10731 // Otherwise, fall back to a SHUFPS sequence. Here it is important that we
10732 // have already handled any direct blends. We also need to squash the
10733 // repeated mask into a simulated v4f32 mask.
10734 for (int i = 0; i < 4; ++i)
10735 if (RepeatedMask[i] >= 8)
10736 RepeatedMask[i] -= 4;
10737 return lowerVectorShuffleWithSHUFPS(DL, MVT::v8f32, RepeatedMask, V1, V2, DAG);
10740 // If we have a single input shuffle with different shuffle patterns in the
10741 // two 128-bit lanes use the variable mask to VPERMILPS.
10742 if (isSingleInputShuffleMask(Mask)) {
10743 SDValue VPermMask[8];
10744 for (int i = 0; i < 8; ++i)
10745 VPermMask[i] = Mask[i] < 0 ? DAG.getUNDEF(MVT::i32)
10746 : DAG.getConstant(Mask[i], MVT::i32);
10747 if (!is128BitLaneCrossingShuffleMask(MVT::v8f32, Mask))
10748 return DAG.getNode(
10749 X86ISD::VPERMILPV, DL, MVT::v8f32, V1,
10750 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i32, VPermMask));
10752 if (Subtarget->hasAVX2())
10753 return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8f32,
10754 DAG.getNode(ISD::BITCAST, DL, MVT::v8f32,
10755 DAG.getNode(ISD::BUILD_VECTOR, DL,
10756 MVT::v8i32, VPermMask)),
10759 // Otherwise, fall back.
10760 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v8f32, V1, V2, Mask,
10764 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10765 // shuffle.
10766 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10767 DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
10770 // If we have AVX2 then we always want to lower with a blend because at v8 we
10771 // can fully permute the elements.
10772 if (Subtarget->hasAVX2())
10773 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8f32, V1, V2,
10776 // Otherwise fall back on generic lowering.
10777 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v8f32, V1, V2, Mask, DAG);
10780 /// \brief Handle lowering of 8-lane 32-bit integer shuffles.
10782 /// This routine is only called when we have AVX2 and thus a reasonable
10783 /// instruction set for v8i32 shuffling.
10784 static SDValue lowerV8I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10785 const X86Subtarget *Subtarget,
10786 SelectionDAG &DAG) {
10788 assert(V1.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
10789 assert(V2.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
10790 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10791 ArrayRef<int> Mask = SVOp->getMask();
10792 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
10793 assert(Subtarget->hasAVX2() && "We can only lower v8i32 with AVX2!");
10795 // Whenever we can lower this as a zext, that instruction is strictly faster
10796 // than any alternative. It also allows us to fold memory operands into the
10797 // shuffle in many cases.
10798 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v8i32, V1, V2,
10799 Mask, Subtarget, DAG))
10802 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8i32, V1, V2, Mask,
10806 // Check for being able to broadcast a single element.
10807 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v8i32, DL, V1,
10808 Mask, Subtarget, DAG))
10811 // If the shuffle mask is repeated in each 128-bit lane we can use more
10812 // efficient instructions that mirror the shuffles across the two 128-bit
10813 // lanes.
10814 SmallVector<int, 4> RepeatedMask;
10815 if (is128BitLaneRepeatedShuffleMask(MVT::v8i32, Mask, RepeatedMask)) {
10816 assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
10817 if (isSingleInputShuffleMask(Mask))
10818 return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32, V1,
10819 getV4X86ShuffleImm8ForMask(RepeatedMask, DAG));
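// (A 256-bit VPSHUFD applies the same 8-bit immediate to both 128-bit lanes,
// which is exactly the repeated-mask situation checked above.)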
10821 // Use dedicated unpack instructions for masks that match their pattern.
10822 if (isShuffleEquivalent(V1, V2, Mask, 0, 8, 1, 9, 4, 12, 5, 13))
10823 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i32, V1, V2);
10824 if (isShuffleEquivalent(V1, V2, Mask, 2, 10, 3, 11, 6, 14, 7, 15))
10825 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i32, V1, V2);
10826 if (isShuffleEquivalent(V1, V2, Mask, 8, 0, 9, 1, 12, 4, 13, 5))
10827 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i32, V2, V1);
10828 if (isShuffleEquivalent(V1, V2, Mask, 10, 2, 11, 3, 14, 6, 15, 7))
10829 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i32, V2, V1);
10832 // Try to use shift instructions.
10833 if (SDValue Shift =
10834 lowerVectorShuffleAsShift(DL, MVT::v8i32, V1, V2, Mask, DAG))
10837 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
10838 DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
10841 // If the shuffle patterns aren't repeated but it is a single input, directly
10842 // generate a cross-lane VPERMD instruction.
10843 if (isSingleInputShuffleMask(Mask)) {
10844 SDValue VPermMask[8];
10845 for (int i = 0; i < 8; ++i)
10846 VPermMask[i] = Mask[i] < 0 ? DAG.getUNDEF(MVT::i32)
10847 : DAG.getConstant(Mask[i], MVT::i32);
10848 return DAG.getNode(
10849 X86ISD::VPERMV, DL, MVT::v8i32,
10850 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i32, VPermMask), V1);
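// (VPERMD is the AVX2 cross-lane dword permute: every destination element can
// pick an arbitrary source element, so no repeated per-lane pattern is
// required here.)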
10853 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10854 // shuffle.
10855 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10856 DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
10859 // Otherwise fall back on generic blend lowering.
10860 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8i32, V1, V2,
10864 /// \brief Handle lowering of 16-lane 16-bit integer shuffles.
10866 /// This routine is only called when we have AVX2 and thus a reasonable
10867 /// instruction set for v16i16 shuffling.
10868 static SDValue lowerV16I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10869 const X86Subtarget *Subtarget,
10870 SelectionDAG &DAG) {
10872 assert(V1.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
10873 assert(V2.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
10874 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10875 ArrayRef<int> Mask = SVOp->getMask();
10876 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
10877 assert(Subtarget->hasAVX2() && "We can only lower v16i16 with AVX2!");
10879 // Whenever we can lower this as a zext, that instruction is strictly faster
10880 // than any alternative. It also allows us to fold memory operands into the
10881 // shuffle in many cases.
10882 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v16i16, V1, V2,
10883 Mask, Subtarget, DAG))
10886 // Check for being able to broadcast a single element.
10887 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v16i16, DL, V1,
10888 Mask, Subtarget, DAG))
10891 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v16i16, V1, V2, Mask,
10895 // Use dedicated unpack instructions for masks that match their pattern.
10896 if (isShuffleEquivalent(V1, V2, Mask,
10897 // First 128-bit lane:
10898 0, 16, 1, 17, 2, 18, 3, 19,
10899 // Second 128-bit lane:
10900 8, 24, 9, 25, 10, 26, 11, 27))
10901 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i16, V1, V2);
10902 if (isShuffleEquivalent(V1, V2, Mask,
10903 // First 128-bit lane:
10904 4, 20, 5, 21, 6, 22, 7, 23,
10905 // Second 128-bit lane:
10906 12, 28, 13, 29, 14, 30, 15, 31))
10907 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i16, V1, V2);
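// (The lane annotations above reflect that 256-bit VPUNPCKLWD/VPUNPCKHWD
// interleave within each 128-bit lane separately rather than across the whole
// vector.)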
10909 // Try to use shift instructions.
10910 if (SDValue Shift =
10911 lowerVectorShuffleAsShift(DL, MVT::v16i16, V1, V2, Mask, DAG))
10914 // Try to use byte rotation instructions.
10915 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
10916 DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
10919 if (isSingleInputShuffleMask(Mask)) {
10920 // There are no generalized cross-lane shuffle operations available on i16
10921 // element types.
10922 if (is128BitLaneCrossingShuffleMask(MVT::v16i16, Mask))
10923 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v16i16, V1, V2,
10926 SDValue PSHUFBMask[32];
10927 for (int i = 0; i < 16; ++i) {
10928 if (Mask[i] == -1) {
10929 PSHUFBMask[2 * i] = PSHUFBMask[2 * i + 1] = DAG.getUNDEF(MVT::i8);
10933 int M = i < 8 ? Mask[i] : Mask[i] - 8;
10934 assert(M >= 0 && M < 8 && "Invalid single-input mask!");
10935 PSHUFBMask[2 * i] = DAG.getConstant(2 * M, MVT::i8);
10936 PSHUFBMask[2 * i + 1] = DAG.getConstant(2 * M + 1, MVT::i8);
10938 return DAG.getNode(
10939 ISD::BITCAST, DL, MVT::v16i16,
10941 X86ISD::PSHUFB, DL, MVT::v32i8,
10942 DAG.getNode(ISD::BITCAST, DL, MVT::v32i8, V1),
10943 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, PSHUFBMask)));
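// (Each i16 element expands to two byte selectors, and within a 128-bit lane
// VPSHUFB can only address bytes 0-15; that is why the mask element is first
// reduced to a lane-relative word index before being scaled to byte offsets.)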
10946 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10947 // shuffle.
10948 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10949 DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
10952 // Otherwise fall back on generic lowering.
10953 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v16i16, V1, V2, Mask, DAG);
10956 /// \brief Handle lowering of 32-lane 8-bit integer shuffles.
10958 /// This routine is only called when we have AVX2 and thus a reasonable
10959 /// instruction set for v32i8 shuffling.
10960 static SDValue lowerV32I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10961 const X86Subtarget *Subtarget,
10962 SelectionDAG &DAG) {
10964 assert(V1.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
10965 assert(V2.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
10966 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10967 ArrayRef<int> Mask = SVOp->getMask();
10968 assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
10969 assert(Subtarget->hasAVX2() && "We can only lower v32i8 with AVX2!");
10971 // Whenever we can lower this as a zext, that instruction is strictly faster
10972 // than any alternative. It also allows us to fold memory operands into the
10973 // shuffle in many cases.
10974 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v32i8, V1, V2,
10975 Mask, Subtarget, DAG))
10978 // Check for being able to broadcast a single element.
10979 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v32i8, DL, V1,
10980 Mask, Subtarget, DAG))
10983 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v32i8, V1, V2, Mask,
10987 // Use dedicated unpack instructions for masks that match their pattern.
10988 // Note that these are repeated 128-bit lane unpacks, not unpacks across all
10989 // 256-bit lanes.
10990 if (isShuffleEquivalent(
10992 // First 128-bit lane:
10993 0, 32, 1, 33, 2, 34, 3, 35, 4, 36, 5, 37, 6, 38, 7, 39,
10994 // Second 128-bit lane:
10995 16, 48, 17, 49, 18, 50, 19, 51, 20, 52, 21, 53, 22, 54, 23, 55))
10996 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v32i8, V1, V2);
10997 if (isShuffleEquivalent(
10999 // First 128-bit lane:
11000 8, 40, 9, 41, 10, 42, 11, 43, 12, 44, 13, 45, 14, 46, 15, 47,
11001 // Second 128-bit lane:
11002 24, 56, 25, 57, 26, 58, 27, 59, 28, 60, 29, 61, 30, 62, 31, 63))
11003 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v32i8, V1, V2);
11005 // Try to use shift instructions.
11006 if (SDValue Shift =
11007 lowerVectorShuffleAsShift(DL, MVT::v32i8, V1, V2, Mask, DAG))
11010 // Try to use byte rotation instructions.
11011 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
11012 DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
11015 if (isSingleInputShuffleMask(Mask)) {
11016 // There are no generalized cross-lane shuffle operations available on i8
11017 // element types.
11018 if (is128BitLaneCrossingShuffleMask(MVT::v32i8, Mask))
11019 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v32i8, V1, V2,
11022 SDValue PSHUFBMask[32];
11023 for (int i = 0; i < 32; ++i)
11026 ? DAG.getUNDEF(MVT::i8)
11027 : DAG.getConstant(Mask[i] < 16 ? Mask[i] : Mask[i] - 16, MVT::i8);
11029 return DAG.getNode(
11030 X86ISD::PSHUFB, DL, MVT::v32i8, V1,
11031 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, PSHUFBMask));
11034 // Try to simplify this by merging 128-bit lanes to enable a lane-based
11035 // shuffle.
11036 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
11037 DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
11040 // Otherwise fall back on generic lowering.
11041 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v32i8, V1, V2, Mask, DAG);
11044 /// \brief High-level routine to lower various 256-bit x86 vector shuffles.
11046 /// This routine either breaks down the specific type of a 256-bit x86 vector
11047 /// shuffle or splits it into two 128-bit shuffles and fuses the results back
11048 /// together based on the available instructions.
11049 static SDValue lower256BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11050 MVT VT, const X86Subtarget *Subtarget,
11051 SelectionDAG &DAG) {
11053 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11054 ArrayRef<int> Mask = SVOp->getMask();
11056 // There is a really nice hard cut-over between AVX1 and AVX2 that means we can
11057 // check for those subtargets here and avoid much of the subtarget querying in
11058 // the per-vector-type lowering routines. With AVX1 we have essentially *zero*
11059 // ability to manipulate a 256-bit vector with integer types. Since we'll use
11060 // floating point types there eventually, just immediately cast everything to
11061 // a float and operate entirely in that domain.
11062 if (VT.isInteger() && !Subtarget->hasAVX2()) {
11063 int ElementBits = VT.getScalarSizeInBits();
11064 if (ElementBits < 32)
11065 // No floating point type available, decompose into 128-bit vectors.
11066 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
11068 MVT FpVT = MVT::getVectorVT(MVT::getFloatingPointVT(ElementBits),
11069 VT.getVectorNumElements());
11070 V1 = DAG.getNode(ISD::BITCAST, DL, FpVT, V1);
11071 V2 = DAG.getNode(ISD::BITCAST, DL, FpVT, V2);
11072 return DAG.getNode(ISD::BITCAST, DL, VT,
11073 DAG.getVectorShuffle(FpVT, DL, V1, V2, Mask));
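// (For example, a v8i32 shuffle on an AVX1-only target is re-issued as a
// v8f32 shuffle here, while v16i16 and v32i8 have no matching 256-bit FP
// element type and are split into two 128-bit halves instead.)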
11076 switch (VT.SimpleTy) {
11077 case MVT::v4f64:
11078   return lowerV4F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
11079 case MVT::v4i64:
11080   return lowerV4I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
11081 case MVT::v8f32:
11082   return lowerV8F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
11083 case MVT::v8i32:
11084   return lowerV8I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
11085 case MVT::v16i16:
11086   return lowerV16I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
11087 case MVT::v32i8:
11088   return lowerV32I8VectorShuffle(Op, V1, V2, Subtarget, DAG);
11090 default:
11091   llvm_unreachable("Not a valid 256-bit x86 vector type!");
11092 }
11095 /// \brief Handle lowering of 8-lane 64-bit floating point shuffles.
11096 static SDValue lowerV8F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11097 const X86Subtarget *Subtarget,
11098 SelectionDAG &DAG) {
11100 assert(V1.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
11101 assert(V2.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
11102 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11103 ArrayRef<int> Mask = SVOp->getMask();
11104 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
11106 // X86 has dedicated unpack instructions that can handle specific blend
11107 // operations: UNPCKH and UNPCKL.
11108 if (isShuffleEquivalent(V1, V2, Mask, 0, 8, 2, 10, 4, 12, 6, 14))
11109 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8f64, V1, V2);
11110 if (isShuffleEquivalent(V1, V2, Mask, 1, 9, 3, 11, 5, 13, 7, 15))
11111 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8f64, V1, V2);
11113 // FIXME: Implement direct support for this type!
11114 return splitAndLowerVectorShuffle(DL, MVT::v8f64, V1, V2, Mask, DAG);
11117 /// \brief Handle lowering of 16-lane 32-bit floating point shuffles.
11118 static SDValue lowerV16F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11119 const X86Subtarget *Subtarget,
11120 SelectionDAG &DAG) {
11122 assert(V1.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
11123 assert(V2.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
11124 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11125 ArrayRef<int> Mask = SVOp->getMask();
11126 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
11128 // Use dedicated unpack instructions for masks that match their pattern.
11129 if (isShuffleEquivalent(V1, V2, Mask,
11130 0, 16, 1, 17, 4, 20, 5, 21,
11131 8, 24, 9, 25, 12, 28, 13, 29))
11132 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16f32, V1, V2);
11133 if (isShuffleEquivalent(V1, V2, Mask,
11134 2, 18, 3, 19, 6, 22, 7, 23,
11135 10, 26, 11, 27, 14, 30, 15, 31))
11136 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16f32, V1, V2);
11138 // FIXME: Implement direct support for this type!
11139 return splitAndLowerVectorShuffle(DL, MVT::v16f32, V1, V2, Mask, DAG);
11142 /// \brief Handle lowering of 8-lane 64-bit integer shuffles.
11143 static SDValue lowerV8I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11144 const X86Subtarget *Subtarget,
11145 SelectionDAG &DAG) {
11147 assert(V1.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
11148 assert(V2.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
11149 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11150 ArrayRef<int> Mask = SVOp->getMask();
11151 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
11153 // X86 has dedicated unpack instructions that can handle specific blend
11154 // operations: UNPCKH and UNPCKL.
11155 if (isShuffleEquivalent(V1, V2, Mask, 0, 8, 2, 10, 4, 12, 6, 14))
11156 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i64, V1, V2);
11157 if (isShuffleEquivalent(V1, V2, Mask, 1, 9, 3, 11, 5, 13, 7, 15))
11158 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i64, V1, V2);
11160 // FIXME: Implement direct support for this type!
11161 return splitAndLowerVectorShuffle(DL, MVT::v8i64, V1, V2, Mask, DAG);
11164 /// \brief Handle lowering of 16-lane 32-bit integer shuffles.
11165 static SDValue lowerV16I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11166 const X86Subtarget *Subtarget,
11167 SelectionDAG &DAG) {
11169 assert(V1.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
11170 assert(V2.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
11171 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11172 ArrayRef<int> Mask = SVOp->getMask();
11173 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
11175 // Use dedicated unpack instructions for masks that match their pattern.
11176 if (isShuffleEquivalent(V1, V2, Mask,
11177 0, 16, 1, 17, 4, 20, 5, 21,
11178 8, 24, 9, 25, 12, 28, 13, 29))
11179 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i32, V1, V2);
11180 if (isShuffleEquivalent(V1, V2, Mask,
11181 2, 18, 3, 19, 6, 22, 7, 23,
11182 10, 26, 11, 27, 14, 30, 15, 31))
11183 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i32, V1, V2);
11185 // FIXME: Implement direct support for this type!
11186 return splitAndLowerVectorShuffle(DL, MVT::v16i32, V1, V2, Mask, DAG);
11189 /// \brief Handle lowering of 32-lane 16-bit integer shuffles.
11190 static SDValue lowerV32I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11191 const X86Subtarget *Subtarget,
11192 SelectionDAG &DAG) {
11194 assert(V1.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
11195 assert(V2.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
11196 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11197 ArrayRef<int> Mask = SVOp->getMask();
11198 assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
11199 assert(Subtarget->hasBWI() && "We can only lower v32i16 with AVX-512-BWI!");
11201 // FIXME: Implement direct support for this type!
11202 return splitAndLowerVectorShuffle(DL, MVT::v32i16, V1, V2, Mask, DAG);
11205 /// \brief Handle lowering of 64-lane 8-bit integer shuffles.
11206 static SDValue lowerV64I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11207 const X86Subtarget *Subtarget,
11208 SelectionDAG &DAG) {
11210 assert(V1.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
11211 assert(V2.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
11212 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11213 ArrayRef<int> Mask = SVOp->getMask();
11214 assert(Mask.size() == 64 && "Unexpected mask size for v64 shuffle!");
11215 assert(Subtarget->hasBWI() && "We can only lower v64i8 with AVX-512-BWI!");
11217 // FIXME: Implement direct support for this type!
11218 return splitAndLowerVectorShuffle(DL, MVT::v64i8, V1, V2, Mask, DAG);
11221 /// \brief High-level routine to lower various 512-bit x86 vector shuffles.
11223 /// This routine either breaks down the specific type of a 512-bit x86 vector
11224 /// shuffle or splits it into two 256-bit shuffles and fuses the results back
11225 /// together based on the available instructions.
11226 static SDValue lower512BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11227 MVT VT, const X86Subtarget *Subtarget,
11228 SelectionDAG &DAG) {
11230 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11231 ArrayRef<int> Mask = SVOp->getMask();
11232 assert(Subtarget->hasAVX512() &&
11233 "Cannot lower 512-bit vectors w/ basic ISA!");
11235 // Check for being able to broadcast a single element.
11236 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(VT.SimpleTy, DL, V1,
11237 Mask, Subtarget, DAG))
11240 // Dispatch to each element type for lowering. If we don't have support for
11241 // specific element type shuffles at 512 bits, immediately split them and
11242 // lower them. Each lowering routine of a given type is allowed to assume that
11243 // the requisite ISA extensions for that element type are available.
11244 switch (VT.SimpleTy) {
11245 case MVT::v8f64:
11246   return lowerV8F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
11247 case MVT::v16f32:
11248   return lowerV16F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
11249 case MVT::v8i64:
11250   return lowerV8I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
11251 case MVT::v16i32:
11252   return lowerV16I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
11253 case MVT::v32i16:
11254   if (Subtarget->hasBWI())
11255     return lowerV32I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
11256   break;
11257 case MVT::v64i8:
11258   if (Subtarget->hasBWI())
11259     return lowerV64I8VectorShuffle(Op, V1, V2, Subtarget, DAG);
11260   break;
11262 default:
11263   llvm_unreachable("Not a valid 512-bit x86 vector type!");
11264 }
11266 // Otherwise fall back on splitting.
11267 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
11270 /// \brief Top-level lowering for x86 vector shuffles.
11272 /// This handles decomposition, canonicalization, and lowering of all x86
11273 /// vector shuffles. Most of the specific lowering strategies are encapsulated
11274 /// above in helper routines. The canonicalization attempts to widen shuffles
11275 /// to involve fewer lanes of wider elements, consolidate symmetric patterns
11276 /// s.t. only one of the two inputs needs to be tested, etc.
11277 static SDValue lowerVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
11278 SelectionDAG &DAG) {
11279 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11280 ArrayRef<int> Mask = SVOp->getMask();
11281 SDValue V1 = Op.getOperand(0);
11282 SDValue V2 = Op.getOperand(1);
11283 MVT VT = Op.getSimpleValueType();
11284 int NumElements = VT.getVectorNumElements();
11287 assert(VT.getSizeInBits() != 64 && "Can't lower MMX shuffles");
11289 bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
11290 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
11291 if (V1IsUndef && V2IsUndef)
11292 return DAG.getUNDEF(VT);
11294 // When we create a shuffle node we put the UNDEF node to second operand,
11295 // but in some cases the first operand may be transformed to UNDEF.
11296 // In this case we should just commute the node.
11298 return DAG.getCommutedVectorShuffle(*SVOp);
11300 // Check for non-undef masks pointing at an undef vector and make the masks
11301 // undef as well. This makes it easier to match the shuffle based solely on
11302 // the mask.
11303 if (V2IsUndef)
11304 for (int M : Mask)
11305 if (M >= NumElements) {
11306 SmallVector<int, 8> NewMask(Mask.begin(), Mask.end());
11307 for (int &M : NewMask)
11308 if (M >= NumElements)
11309 M = -1;
11310 return DAG.getVectorShuffle(VT, dl, V1, V2, NewMask);
11313 // We actually see shuffles that are entirely re-arrangements of a set of
11314 // zero inputs. This mostly happens while decomposing complex shuffles into
11315 // simple ones. Directly lower these as a buildvector of zeros.
11316 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
11317 if (Zeroable.all())
11318 return getZeroVector(VT, Subtarget, DAG, dl);
11320 // Try to collapse shuffles into using a vector type with fewer elements but
11321 // wider element types. We cap this to not form integers or floating point
11322 // elements wider than 64 bits, but it might be interesting to form i128
11323 // integers to handle flipping the low and high halves of AVX 256-bit vectors.
11324 SmallVector<int, 16> WidenedMask;
11325 if (VT.getScalarSizeInBits() < 64 &&
11326 canWidenShuffleElements(Mask, WidenedMask)) {
11327 MVT NewEltVT = VT.isFloatingPoint()
11328 ? MVT::getFloatingPointVT(VT.getScalarSizeInBits() * 2)
11329 : MVT::getIntegerVT(VT.getScalarSizeInBits() * 2);
11330 MVT NewVT = MVT::getVectorVT(NewEltVT, VT.getVectorNumElements() / 2);
11331 // Make sure that the new vector type is legal. For example, v2f64 isn't
11332 // legal on SSE1.
11333 if (DAG.getTargetLoweringInfo().isTypeLegal(NewVT)) {
11334 V1 = DAG.getNode(ISD::BITCAST, dl, NewVT, V1);
11335 V2 = DAG.getNode(ISD::BITCAST, dl, NewVT, V2);
11336 return DAG.getNode(ISD::BITCAST, dl, VT,
11337 DAG.getVectorShuffle(NewVT, dl, V1, V2, WidenedMask));
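// (For example, the v4i32 mask <0, 1, 6, 7> widens to the v2i64 mask <0, 3>:
// each index pair that addresses one whole wider element collapses to a
// single index.)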
11341 int NumV1Elements = 0, NumUndefElements = 0, NumV2Elements = 0;
11342 for (int M : SVOp->getMask())
11344 ++NumUndefElements;
11345 else if (M < NumElements)
11350 // Commute the shuffle as needed such that more elements come from V1 than
11351 // V2. This allows us to match the shuffle pattern strictly on how many
11352 // elements come from V1 without handling the symmetric cases.
11353 if (NumV2Elements > NumV1Elements)
11354 return DAG.getCommutedVectorShuffle(*SVOp);
11356 // When the number of V1 and V2 elements are the same, try to minimize the
11357 // number of uses of V2 in the low half of the vector. When that is tied,
11358 // ensure that the sum of indices for V1 is equal to or lower than the sum of
11359 // indices for V2. When those are equal, try to ensure that the number of odd
11360 // indices for V1 is lower than the number of odd indices for V2.
11361 if (NumV1Elements == NumV2Elements) {
11362 int LowV1Elements = 0, LowV2Elements = 0;
11363 for (int M : SVOp->getMask().slice(0, NumElements / 2))
11364 if (M >= NumElements)
11368 if (LowV2Elements > LowV1Elements) {
11369 return DAG.getCommutedVectorShuffle(*SVOp);
11370 } else if (LowV2Elements == LowV1Elements) {
11371 int SumV1Indices = 0, SumV2Indices = 0;
11372 for (int i = 0, Size = SVOp->getMask().size(); i < Size; ++i)
11373 if (SVOp->getMask()[i] >= NumElements)
11375 else if (SVOp->getMask()[i] >= 0)
11377 if (SumV2Indices < SumV1Indices) {
11378 return DAG.getCommutedVectorShuffle(*SVOp);
11379 } else if (SumV2Indices == SumV1Indices) {
11380 int NumV1OddIndices = 0, NumV2OddIndices = 0;
11381 for (int i = 0, Size = SVOp->getMask().size(); i < Size; ++i)
11382 if (SVOp->getMask()[i] >= NumElements)
11383 NumV2OddIndices += i % 2;
11384 else if (SVOp->getMask()[i] >= 0)
11385 NumV1OddIndices += i % 2;
11386 if (NumV2OddIndices < NumV1OddIndices)
11387 return DAG.getCommutedVectorShuffle(*SVOp);
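// (These tie-breakers are pure canonicalization: they pick one of the two
// commuted forms of the same shuffle, so the per-width lowering below only
// has to pattern-match that one form.)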
11392 // For each vector width, delegate to a specialized lowering routine.
11393 if (VT.getSizeInBits() == 128)
11394 return lower128BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);
11396 if (VT.getSizeInBits() == 256)
11397 return lower256BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);
11399 // Force AVX-512 vectors to be scalarized for now.
11400 // FIXME: Implement AVX-512 support!
11401 if (VT.getSizeInBits() == 512)
11402 return lower512BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);
11404 llvm_unreachable("Unimplemented!");
11408 //===----------------------------------------------------------------------===//
11409 // Legacy vector shuffle lowering
11411 // This code is the legacy code handling vector shuffles until the above
11412 // replaces its functionality and performance.
11413 //===----------------------------------------------------------------------===//
11415 static bool isBlendMask(ArrayRef<int> MaskVals, MVT VT, bool hasSSE41,
11416 bool hasInt256, unsigned *MaskOut = nullptr) {
11417 MVT EltVT = VT.getVectorElementType();
11419 // There is no blend with immediate in AVX-512.
11420 if (VT.is512BitVector())
11423 if (!hasSSE41 || EltVT == MVT::i8)
11425 if (!hasInt256 && VT == MVT::v16i16)
11428 unsigned MaskValue = 0;
11429 unsigned NumElems = VT.getVectorNumElements();
11430 // There are 2 lanes if (NumElems > 8), and 1 lane otherwise.
11431 unsigned NumLanes = (NumElems - 1) / 8 + 1;
11432 unsigned NumElemsInLane = NumElems / NumLanes;
11434 // Blend for v16i16 should be symmetric for both lanes.
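// (The immediate collects one bit per in-lane position: bit i set means
// "take element i from the second input". For v16i16 the hardware applies
// the same 8-bit immediate to both 128-bit lanes, which is why the matching
// element of the second lane is checked against the same bit below.)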
11435 for (unsigned i = 0; i < NumElemsInLane; ++i) {
11437 int SndLaneEltIdx = (NumLanes == 2) ? MaskVals[i + NumElemsInLane] : -1;
11438 int EltIdx = MaskVals[i];
11440 if ((EltIdx < 0 || EltIdx == (int)i) &&
11441 (SndLaneEltIdx < 0 || SndLaneEltIdx == (int)(i + NumElemsInLane)))
11444 if (((unsigned)EltIdx == (i + NumElems)) &&
11445 (SndLaneEltIdx < 0 ||
11446 (unsigned)SndLaneEltIdx == i + NumElems + NumElemsInLane))
11447 MaskValue |= (1 << i);
11453 *MaskOut = MaskValue;
11457 // Try to lower a shuffle node into a simple blend instruction.
11458 // This function assumes isBlendMask returns true for this
11459 // ShuffleVectorSDNode.
11460 static SDValue LowerVECTOR_SHUFFLEtoBlend(ShuffleVectorSDNode *SVOp,
11461 unsigned MaskValue,
11462 const X86Subtarget *Subtarget,
11463 SelectionDAG &DAG) {
11464 MVT VT = SVOp->getSimpleValueType(0);
11465 MVT EltVT = VT.getVectorElementType();
11466 assert(isBlendMask(SVOp->getMask(), VT, Subtarget->hasSSE41(),
11467                    Subtarget->hasInt256()) &&
11468        "Trying to lower a VECTOR_SHUFFLE to a Blend but with the wrong mask");
11470 SDValue V1 = SVOp->getOperand(0);
11471 SDValue V2 = SVOp->getOperand(1);
11473 unsigned NumElems = VT.getVectorNumElements();
11475 // Convert i32 vectors to floating point if it is not AVX2.
11476 // AVX2 introduced VPBLENDD instruction for 128 and 256-bit vectors.
11478 if (EltVT == MVT::i64 || (EltVT == MVT::i32 && !Subtarget->hasInt256())) {
11479 BlendVT = MVT::getVectorVT(MVT::getFloatingPointVT(EltVT.getSizeInBits()),
11480                            NumElems);
11481 V1 = DAG.getNode(ISD::BITCAST, dl, BlendVT, V1);
11482 V2 = DAG.getNode(ISD::BITCAST, dl, BlendVT, V2);
11485 SDValue Ret = DAG.getNode(X86ISD::BLENDI, dl, BlendVT, V1, V2,
11486 DAG.getConstant(MaskValue, MVT::i32));
11487 return DAG.getNode(ISD::BITCAST, dl, VT, Ret);
11490 /// In vector type \p VT, return true if the element at index \p InputIdx
11491 /// falls on a different 128-bit lane than \p OutputIdx.
11492 static bool ShuffleCrosses128bitLane(MVT VT, unsigned InputIdx,
11493 unsigned OutputIdx) {
11494 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
11495 return InputIdx * EltSize / 128 != OutputIdx * EltSize / 128;
11498 /// Generate a PSHUFB if possible. Selects elements from \p V1 according to
11499 /// \p MaskVals. MaskVals[OutputIdx] = InputIdx specifies that we want to
11500 /// shuffle the element at InputIdx in V1 to OutputIdx in the result. If \p
11501 /// MaskVals refers to elements outside of \p V1 or is undef (-1), insert a
11502 /// zero.
11503 static SDValue getPSHUFB(ArrayRef<int> MaskVals, SDValue V1, SDLoc &dl,
11504 SelectionDAG &DAG) {
11505 MVT VT = V1.getSimpleValueType();
11506 assert(VT.is128BitVector() || VT.is256BitVector());
11508 MVT EltVT = VT.getVectorElementType();
11509 unsigned EltSizeInBytes = EltVT.getSizeInBits() / 8;
11510 unsigned NumElts = VT.getVectorNumElements();
11512 SmallVector<SDValue, 32> PshufbMask;
11513 for (unsigned OutputIdx = 0; OutputIdx < NumElts; ++OutputIdx) {
11514 int InputIdx = MaskVals[OutputIdx];
11515 unsigned InputByteIdx;
11517 if (InputIdx < 0 || NumElts <= (unsigned)InputIdx)
11518 InputByteIdx = 0x80;
11520 // Cross lane is not allowed.
11521 if (ShuffleCrosses128bitLane(VT, InputIdx, OutputIdx))
11523 InputByteIdx = InputIdx * EltSizeInBytes;
11524 // Index is a byte offset within the 128-bit lane.
11525 InputByteIdx &= 0xf;
11528 for (unsigned j = 0; j < EltSizeInBytes; ++j) {
11529 PshufbMask.push_back(DAG.getConstant(InputByteIdx, MVT::i8));
11530 if (InputByteIdx != 0x80)
11535 MVT ShufVT = MVT::getVectorVT(MVT::i8, PshufbMask.size());
11537 V1 = DAG.getNode(ISD::BITCAST, dl, ShufVT, V1);
11538 return DAG.getNode(X86ISD::PSHUFB, dl, ShufVT, V1,
11539 DAG.getNode(ISD::BUILD_VECTOR, dl, ShufVT, PshufbMask));
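// (PSHUFB zeroes any destination byte whose mask byte has the high bit set,
// so 0x80 is used for undef and out-of-range elements above; on 256-bit
// vectors each mask byte can only address bytes within its own 128-bit lane,
// hence the & 0xf wrap.)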
11542 // v8i16 shuffles - Prefer shuffles in the following order:
11543 // 1. [all] pshuflw, pshufhw, optional move
11544 // 2. [ssse3] 1 x pshufb
11545 // 3. [ssse3] 2 x pshufb + 1 x por
11546 // 4. [all] mov + pshuflw + pshufhw + N x (pextrw + pinsrw)
11547 static SDValue
11548 LowerVECTOR_SHUFFLEv8i16(SDValue Op, const X86Subtarget *Subtarget,
11549 SelectionDAG &DAG) {
11550 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11551 SDValue V1 = SVOp->getOperand(0);
11552 SDValue V2 = SVOp->getOperand(1);
11554 SmallVector<int, 8> MaskVals;
11556 // Determine if more than 1 of the words in each of the low and high quadwords
11557 // of the result come from the same quadword of one of the two inputs. Undef
11558 // mask values count as coming from any quadword, for better codegen.
11560 // Lo/HiQuad[i] = j indicates how many words from the ith quad of the input
11561 // feeds this quad. For i, 0 and 1 refer to V1, 2 and 3 refer to V2.
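// (For example, the mask <0, 1, 2, 3, 8, 9, 10, 11> feeds the low half of the
// result entirely from quad 0 (the low quad of V1) and the high half entirely
// from quad 2 (the low quad of V2).)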
11562 unsigned LoQuad[] = { 0, 0, 0, 0 };
11563 unsigned HiQuad[] = { 0, 0, 0, 0 };
11564 // Indices of quads used.
11565 std::bitset<4> InputQuads;
11566 for (unsigned i = 0; i < 8; ++i) {
11567 unsigned *Quad = i < 4 ? LoQuad : HiQuad;
11568 int EltIdx = SVOp->getMaskElt(i);
11569 MaskVals.push_back(EltIdx);
11577 ++Quad[EltIdx / 4];
11578 InputQuads.set(EltIdx / 4);
11581 int BestLoQuad = -1;
11582 unsigned MaxQuad = 1;
11583 for (unsigned i = 0; i < 4; ++i) {
11584 if (LoQuad[i] > MaxQuad) {
11586 MaxQuad = LoQuad[i];
11590 int BestHiQuad = -1;
11592 for (unsigned i = 0; i < 4; ++i) {
11593 if (HiQuad[i] > MaxQuad) {
11595 MaxQuad = HiQuad[i];
11599 // For SSSE3, if all 8 words of the result come from only 1 quadword of each
11600 // of the two input vectors, shuffle them into one input vector so only a
11601 // single pshufb instruction is necessary. If there are more than 2 input
11602 // quads, disable the next transformation since it does not help SSSE3.
11603 bool V1Used = InputQuads[0] || InputQuads[1];
11604 bool V2Used = InputQuads[2] || InputQuads[3];
11605 if (Subtarget->hasSSSE3()) {
11606 if (InputQuads.count() == 2 && V1Used && V2Used) {
11607 BestLoQuad = InputQuads[0] ? 0 : 1;
11608 BestHiQuad = InputQuads[2] ? 2 : 3;
11610 if (InputQuads.count() > 2) {
11616 // If BestLoQuad or BestHiQuad are set, shuffle the quads together and update
11617 // the shuffle mask. If a quad is scored as -1, that means that it contains
11618 // words from all 4 input quadwords.
11620 if (BestLoQuad >= 0 || BestHiQuad >= 0) {
11622 BestLoQuad < 0 ? 0 : BestLoQuad,
11623 BestHiQuad < 0 ? 1 : BestHiQuad
11625 NewV = DAG.getVectorShuffle(MVT::v2i64, dl,
11626 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1),
11627 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2), &MaskV[0]);
11628 NewV = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, NewV);
11630 // Rewrite the MaskVals and assign NewV to V1 if NewV now contains all the
11631 // source words for the shuffle, to aid later transformations.
11632 bool AllWordsInNewV = true;
11633 bool InOrder[2] = { true, true };
11634 for (unsigned i = 0; i != 8; ++i) {
11635 int idx = MaskVals[i];
11637 InOrder[i/4] = false;
11638 if (idx < 0 || (idx/4) == BestLoQuad || (idx/4) == BestHiQuad)
11640 AllWordsInNewV = false;
11644 bool pshuflw = AllWordsInNewV, pshufhw = AllWordsInNewV;
11645 if (AllWordsInNewV) {
11646 for (int i = 0; i != 8; ++i) {
11647 int idx = MaskVals[i];
11650 idx = MaskVals[i] = (idx / 4) == BestLoQuad ? (idx & 3) : (idx & 3) + 4;
11651 if ((idx != i) && idx < 4)
11653 if ((idx != i) && idx > 3)
11662 // If we've eliminated the use of V2, and the new mask is a pshuflw or
11663 // pshufhw, that's as cheap as it gets. Return the new shuffle.
11664 if ((pshufhw && InOrder[0]) || (pshuflw && InOrder[1])) {
11665 unsigned Opc = pshufhw ? X86ISD::PSHUFHW : X86ISD::PSHUFLW;
11666 unsigned TargetMask = 0;
11667 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV,
11668 DAG.getUNDEF(MVT::v8i16), &MaskVals[0]);
11669 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode());
11670 TargetMask = pshufhw ? getShufflePSHUFHWImmediate(SVOp):
11671 getShufflePSHUFLWImmediate(SVOp);
11672 V1 = NewV.getOperand(0);
11673 return getTargetShuffleNode(Opc, dl, MVT::v8i16, V1, TargetMask, DAG);
11677 // Promote splats to a larger type which usually leads to more efficient code.
11678 // FIXME: Is this true if pshufb is available?
11679 if (SVOp->isSplat())
11680 return PromoteSplat(SVOp, DAG);
11682 // If we have SSSE3, and all words of the result are from 1 input vector,
11683 // case 2 is generated, otherwise case 3 is generated. If no SSSE3
11684 // is present, fall back to case 4.
11685 if (Subtarget->hasSSSE3()) {
11686 SmallVector<SDValue,16> pshufbMask;
11688 // If we have elements from both input vectors, set the high bit of the
11689 // shuffle mask element to zero out elements that come from V2 in the V1
11690 // mask, and elements that come from V1 in the V2 mask, so that the two
11691 // results can be OR'd together.
11692 bool TwoInputs = V1Used && V2Used;
11693 V1 = getPSHUFB(MaskVals, V1, dl, DAG);
11695 return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
11697 // Calculate the shuffle mask for the second input, shuffle it, and
11698 // OR it with the first shuffled input.
11699 CommuteVectorShuffleMask(MaskVals, 8);
11700 V2 = getPSHUFB(MaskVals, V2, dl, DAG);
11701 V1 = DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2);
11702 return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
11705 // If BestLoQuad >= 0, generate a pshuflw to put the low elements in order,
11706 // and update MaskVals with new element order.
11707 std::bitset<8> InOrder;
11708 if (BestLoQuad >= 0) {
11709 int MaskV[] = { -1, -1, -1, -1, 4, 5, 6, 7 };
11710 for (int i = 0; i != 4; ++i) {
11711 int idx = MaskVals[i];
11714 } else if ((idx / 4) == BestLoQuad) {
11715 MaskV[i] = idx & 3;
11719 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16),
11722 if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSE2()) {
11723 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode());
11724 NewV = getTargetShuffleNode(X86ISD::PSHUFLW, dl, MVT::v8i16,
11725 NewV.getOperand(0),
11726 getShufflePSHUFLWImmediate(SVOp), DAG);
11730 // If BestHi >= 0, generate a pshufhw to put the high elements in order,
11731 // and update MaskVals with the new element order.
11732 if (BestHiQuad >= 0) {
11733 int MaskV[] = { 0, 1, 2, 3, -1, -1, -1, -1 };
11734 for (unsigned i = 4; i != 8; ++i) {
11735 int idx = MaskVals[i];
11738 } else if ((idx / 4) == BestHiQuad) {
11739 MaskV[i] = (idx & 3) + 4;
11743 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16),
11746 if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSE2()) {
11747 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode());
11748 NewV = getTargetShuffleNode(X86ISD::PSHUFHW, dl, MVT::v8i16,
11749 NewV.getOperand(0),
11750 getShufflePSHUFHWImmediate(SVOp), DAG);
11754 // In case BestHi & BestLo were both -1, which means each quadword has a word
11755 // from each of the four input quadwords, calculate the InOrder bitvector now
11756 // before falling through to the insert/extract cleanup.
11757 if (BestLoQuad == -1 && BestHiQuad == -1) {
11759 for (int i = 0; i != 8; ++i)
11760 if (MaskVals[i] < 0 || MaskVals[i] == i)
11764 // The other elements are put in the right place using pextrw and pinsrw.
11765 for (unsigned i = 0; i != 8; ++i) {
11768 int EltIdx = MaskVals[i];
11771 SDValue ExtOp = (EltIdx < 8) ?
11772 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V1,
11773 DAG.getIntPtrConstant(EltIdx)) :
11774 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V2,
11775 DAG.getIntPtrConstant(EltIdx - 8));
11776 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, ExtOp,
11777 DAG.getIntPtrConstant(i));
11782 /// \brief v16i16 shuffles
11784 /// FIXME: We only support generation of a single pshufb currently. We can
11785 /// generalize the other applicable cases from LowerVECTOR_SHUFFLEv8i16 as
11786 /// well (e.g. 2 x pshufb + 1 x por).
11787 static SDValue
11788 LowerVECTOR_SHUFFLEv16i16(SDValue Op, SelectionDAG &DAG) {
11789 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11790 SDValue V1 = SVOp->getOperand(0);
11791 SDValue V2 = SVOp->getOperand(1);
11794 if (V2.getOpcode() != ISD::UNDEF)
11797 SmallVector<int, 16> MaskVals(SVOp->getMask().begin(), SVOp->getMask().end());
11798 return getPSHUFB(MaskVals, V1, dl, DAG);
11801 // v16i8 shuffles - Prefer shuffles in the following order:
11802 // 1. [ssse3] 1 x pshufb
11803 // 2. [ssse3] 2 x pshufb + 1 x por
11804 // 3. [all] v8i16 shuffle + N x pextrw + rotate + pinsrw
11805 static SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp,
11806 const X86Subtarget* Subtarget,
11807 SelectionDAG &DAG) {
11808 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11809 SDValue V1 = SVOp->getOperand(0);
11810 SDValue V2 = SVOp->getOperand(1);
11812 ArrayRef<int> MaskVals = SVOp->getMask();
11814 // Promote splats to a larger type which usually leads to more efficient code.
11815 // FIXME: Is this true if pshufb is available?
11816 if (SVOp->isSplat())
11817 return PromoteSplat(SVOp, DAG);
11819 // If we have SSSE3, case 1 is generated when all result bytes come from
11820 // one of the inputs. Otherwise, case 2 is generated. If no SSSE3 is
11821 // present, fall back to case 3.
11823 // If SSSE3, use 1 pshufb instruction per vector with elements in the result.
11824 if (Subtarget->hasSSSE3()) {
11825 SmallVector<SDValue,16> pshufbMask;
11827 // If all result elements are from one input vector, then only translate
11828 // undef mask values to 0x80 (zero out result) in the pshufb mask.
11830 // Otherwise, we have elements from both input vectors, and must zero out
11831 // elements that come from V2 in the first mask, and V1 in the second mask
11832 // so that we can OR them together.
11833 for (unsigned i = 0; i != 16; ++i) {
11834 int EltIdx = MaskVals[i];
11835 if (EltIdx < 0 || EltIdx >= 16)
11837 pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8));
11839 V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1,
11840 DAG.getNode(ISD::BUILD_VECTOR, dl,
11841 MVT::v16i8, pshufbMask));
11843 // As PSHUFB will zero elements with negative indices, it's safe to ignore
11844 // the 2nd operand if it's undefined or zero.
11845 if (V2.getOpcode() == ISD::UNDEF ||
11846 ISD::isBuildVectorAllZeros(V2.getNode()))
11849 // Calculate the shuffle mask for the second input, shuffle it, and
11850 // OR it with the first shuffled input.
11851 pshufbMask.clear();
11852 for (unsigned i = 0; i != 16; ++i) {
11853 int EltIdx = MaskVals[i];
11854 EltIdx = (EltIdx < 16) ? 0x80 : EltIdx - 16;
11855 pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8));
11857 V2 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V2,
11858 DAG.getNode(ISD::BUILD_VECTOR, dl,
11859 MVT::v16i8, pshufbMask));
11860 return DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2);
11863 // No SSSE3 - Calculate in place words and then fix all out of place words
11864 // with 0-16 extracts & inserts. Worst case is 16 bytes out of order from
11865 // the 16 different words that comprise the two doublequadword input vectors.
11866 V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
11867 V2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2);
11869 for (int i = 0; i != 8; ++i) {
11870 int Elt0 = MaskVals[i*2];
11871 int Elt1 = MaskVals[i*2+1];
11873 // This word of the result is all undef, skip it.
11874 if (Elt0 < 0 && Elt1 < 0)
11877 // This word of the result is already in the correct place, skip it.
11878 if ((Elt0 == i*2) && (Elt1 == i*2+1))
11881 SDValue Elt0Src = Elt0 < 16 ? V1 : V2;
11882 SDValue Elt1Src = Elt1 < 16 ? V1 : V2;
11885 // If Elt0 and Elt1 are defined, are consecutive, and can be loaded
11886 // using a single extract together, load it and store it.
11887 if ((Elt0 >= 0) && ((Elt0 + 1) == Elt1) && ((Elt0 & 1) == 0)) {
11888 InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src,
11889 DAG.getIntPtrConstant(Elt1 / 2));
11890 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt,
11891 DAG.getIntPtrConstant(i));
11895 // If Elt1 is defined, extract it from the appropriate source. If the
11896 // source byte is not also odd, shift the extracted word left 8 bits
11897 // otherwise clear the bottom 8 bits if we need to do an or.
11899 InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src,
11900 DAG.getIntPtrConstant(Elt1 / 2));
11901 if ((Elt1 & 1) == 0)
11902 InsElt = DAG.getNode(ISD::SHL, dl, MVT::i16, InsElt,
11904 TLI.getShiftAmountTy(InsElt.getValueType())));
11905 else if (Elt0 >= 0)
11906 InsElt = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt,
11907 DAG.getConstant(0xFF00, MVT::i16));
11909 // If Elt0 is defined, extract it from the appropriate source. If the
11910 // source byte is not also even, shift the extracted word right 8 bits. If
11911 // Elt1 was also defined, OR the extracted values together before
11912 // inserting them in the result.
11914 SDValue InsElt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16,
11915 Elt0Src, DAG.getIntPtrConstant(Elt0 / 2));
11916 if ((Elt0 & 1) != 0)
11917 InsElt0 = DAG.getNode(ISD::SRL, dl, MVT::i16, InsElt0,
11919 TLI.getShiftAmountTy(InsElt0.getValueType())));
11920 else if (Elt1 >= 0)
11921 InsElt0 = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt0,
11922 DAG.getConstant(0x00FF, MVT::i16));
11923 InsElt = Elt1 >= 0 ? DAG.getNode(ISD::OR, dl, MVT::i16, InsElt, InsElt0)
11926 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt,
11927 DAG.getIntPtrConstant(i));
11929 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, NewV);
11932 // v32i8 shuffles - Translate to VPSHUFB if possible.
11934 SDValue LowerVECTOR_SHUFFLEv32i8(ShuffleVectorSDNode *SVOp,
11935 const X86Subtarget *Subtarget,
11936 SelectionDAG &DAG) {
11937 MVT VT = SVOp->getSimpleValueType(0);
11938 SDValue V1 = SVOp->getOperand(0);
11939 SDValue V2 = SVOp->getOperand(1);
11941 SmallVector<int, 32> MaskVals(SVOp->getMask().begin(), SVOp->getMask().end());
11943 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
11944 bool V1IsAllZero = ISD::isBuildVectorAllZeros(V1.getNode());
11945 bool V2IsAllZero = ISD::isBuildVectorAllZeros(V2.getNode());
11947 // VPSHUFB may be generated if
11948 // (1) one of input vector is undefined or zeroinitializer.
11949 // The mask value 0x80 puts 0 in the corresponding slot of the vector.
11950 // And (2) the mask indexes don't cross the 128-bit lane.
11951 if (VT != MVT::v32i8 || !Subtarget->hasInt256() ||
11952 (!V2IsUndef && !V2IsAllZero && !V1IsAllZero))
11955 if (V1IsAllZero && !V2IsAllZero) {
11956 CommuteVectorShuffleMask(MaskVals, 32);
11959 return getPSHUFB(MaskVals, V1, dl, DAG);
11962 /// RewriteAsNarrowerShuffle - Try rewriting v8i16 and v16i8 shuffles as 4 wide
11963 /// ones, or rewriting v4i32 / v4f32 as 2 wide ones if possible. This can be
11964 /// done when every pair / quad of shuffle mask elements points to elements in
11965 /// the right sequence. e.g.
11966 /// vector_shuffle X, Y, <2, 3, | 10, 11, | 0, 1, | 14, 15>
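/// (In that example every marked pair is a pair of consecutive i16 elements
/// belonging to a single i32, so the v8i16 mask collapses to the v4i32 mask
/// <1, 5, 0, 7>.)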
11968 SDValue RewriteAsNarrowerShuffle(ShuffleVectorSDNode *SVOp,
11969 SelectionDAG &DAG) {
11970 MVT VT = SVOp->getSimpleValueType(0);
11972 unsigned NumElems = VT.getVectorNumElements();
11975 switch (VT.SimpleTy) {
11976 default: llvm_unreachable("Unexpected!");
11979 return SDValue(SVOp, 0);
11980 case MVT::v4f32: NewVT = MVT::v2f64; Scale = 2; break;
11981 case MVT::v4i32: NewVT = MVT::v2i64; Scale = 2; break;
11982 case MVT::v8i16: NewVT = MVT::v4i32; Scale = 2; break;
11983 case MVT::v16i8: NewVT = MVT::v4i32; Scale = 4; break;
11984 case MVT::v16i16: NewVT = MVT::v8i32; Scale = 2; break;
11985 case MVT::v32i8: NewVT = MVT::v8i32; Scale = 4; break;
11988 SmallVector<int, 8> MaskVec;
11989 for (unsigned i = 0; i != NumElems; i += Scale) {
11991 for (unsigned j = 0; j != Scale; ++j) {
11992 int EltIdx = SVOp->getMaskElt(i+j);
11996 StartIdx = (EltIdx / Scale);
11997 if (EltIdx != (int)(StartIdx*Scale + j))
12000 MaskVec.push_back(StartIdx);
12003 SDValue V1 = DAG.getNode(ISD::BITCAST, dl, NewVT, SVOp->getOperand(0));
12004 SDValue V2 = DAG.getNode(ISD::BITCAST, dl, NewVT, SVOp->getOperand(1));
12005 return DAG.getVectorShuffle(NewVT, dl, V1, V2, &MaskVec[0]);
12008 /// getVZextMovL - Return a zero-extending vector move low node.
12010 static SDValue getVZextMovL(MVT VT, MVT OpVT,
12011 SDValue SrcOp, SelectionDAG &DAG,
12012 const X86Subtarget *Subtarget, SDLoc dl) {
12013 if (VT == MVT::v2f64 || VT == MVT::v4f32) {
12014 LoadSDNode *LD = nullptr;
12015 if (!isScalarLoadToVector(SrcOp.getNode(), &LD))
12016 LD = dyn_cast<LoadSDNode>(SrcOp);
12018 // movssrr and movsdrr do not clear top bits. Try to use movd, movq
12019 // instead.
12020 MVT ExtVT = (OpVT == MVT::v2f64) ? MVT::i64 : MVT::i32;
12021 if ((ExtVT != MVT::i64 || Subtarget->is64Bit()) &&
12022 SrcOp.getOpcode() == ISD::SCALAR_TO_VECTOR &&
12023 SrcOp.getOperand(0).getOpcode() == ISD::BITCAST &&
12024 SrcOp.getOperand(0).getOperand(0).getValueType() == ExtVT) {
12026 OpVT = (OpVT == MVT::v2f64) ? MVT::v2i64 : MVT::v4i32;
12027 return DAG.getNode(ISD::BITCAST, dl, VT,
12028 DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT,
12029 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
12031 SrcOp.getOperand(0)
12037 return DAG.getNode(ISD::BITCAST, dl, VT,
12038 DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT,
12039 DAG.getNode(ISD::BITCAST, dl,
12043 /// LowerVECTOR_SHUFFLE_256 - Handle all 256-bit wide vectors shuffles
12044 /// which could not be matched by any known target specific shuffle
12046 LowerVECTOR_SHUFFLE_256(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
12048 SDValue NewOp = Compact8x32ShuffleNode(SVOp, DAG);
12049 if (NewOp.getNode())
12052 MVT VT = SVOp->getSimpleValueType(0);
12054 unsigned NumElems = VT.getVectorNumElements();
12055 unsigned NumLaneElems = NumElems / 2;
12058 MVT EltVT = VT.getVectorElementType();
12059 MVT NVT = MVT::getVectorVT(EltVT, NumLaneElems);
12062 SmallVector<int, 16> Mask;
12063 for (unsigned l = 0; l < 2; ++l) {
12064 // Build a shuffle mask for the output, discovering on the fly which
12065 // input vectors to use as shuffle operands (recorded in InputUsed).
12066 // If building a suitable shuffle vector proves too hard, then bail
12067 // out with UseBuildVector set.
12068 bool UseBuildVector = false;
12069 int InputUsed[2] = { -1, -1 }; // Not yet discovered.
12070 unsigned LaneStart = l * NumLaneElems;
12071 for (unsigned i = 0; i != NumLaneElems; ++i) {
12072 // The mask element. This indexes into the input.
12073 int Idx = SVOp->getMaskElt(i+LaneStart);
12075 // the mask element does not index into any input vector.
12076 Mask.push_back(-1);
12080 // The input vector this mask element indexes into.
12081 int Input = Idx / NumLaneElems;
12083 // Turn the index into an offset from the start of the input vector.
12084 Idx -= Input * NumLaneElems;
12086 // Find or create a shuffle vector operand to hold this input.
12088 for (OpNo = 0; OpNo < array_lengthof(InputUsed); ++OpNo) {
12089 if (InputUsed[OpNo] == Input)
12090 // This input vector is already an operand.
12092 if (InputUsed[OpNo] < 0) {
12093 // Create a new operand for this input vector.
12094 InputUsed[OpNo] = Input;
12099 if (OpNo >= array_lengthof(InputUsed)) {
12100 // More than two input vectors used! Give up on trying to create a
12101 // shuffle vector. Insert all elements into a BUILD_VECTOR instead.
12102 UseBuildVector = true;
12106 // Add the mask index for the new shuffle vector.
12107 Mask.push_back(Idx + OpNo * NumLaneElems);
12110 if (UseBuildVector) {
12111 SmallVector<SDValue, 16> SVOps;
12112 for (unsigned i = 0; i != NumLaneElems; ++i) {
12113 // The mask element. This indexes into the input.
12114 int Idx = SVOp->getMaskElt(i+LaneStart);
12116 SVOps.push_back(DAG.getUNDEF(EltVT));
12120 // The input vector this mask element indexes into.
12121 int Input = Idx / NumElems;
12123 // Turn the index into an offset from the start of the input vector.
12124 Idx -= Input * NumElems;
12126 // Extract the vector element by hand.
12127 SVOps.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
12128 SVOp->getOperand(Input),
12129 DAG.getIntPtrConstant(Idx)));
12132 // Construct the output using a BUILD_VECTOR.
12133 Output[l] = DAG.getNode(ISD::BUILD_VECTOR, dl, NVT, SVOps);
12134 } else if (InputUsed[0] < 0) {
12135 // No input vectors were used! The result is undefined.
12136 Output[l] = DAG.getUNDEF(NVT);
12138 SDValue Op0 = Extract128BitVector(SVOp->getOperand(InputUsed[0] / 2),
12139 (InputUsed[0] % 2) * NumLaneElems,
12141 // If only one input was used, use an undefined vector for the other.
12142 SDValue Op1 = (InputUsed[1] < 0) ? DAG.getUNDEF(NVT) :
12143 Extract128BitVector(SVOp->getOperand(InputUsed[1] / 2),
12144 (InputUsed[1] % 2) * NumLaneElems, DAG, dl);
12145 // At least one input vector was used. Create a new shuffle vector.
12146 Output[l] = DAG.getVectorShuffle(NVT, dl, Op0, Op1, &Mask[0]);
12152 // Concatenate the result back
12153 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Output[0], Output[1]);
12156 /// LowerVECTOR_SHUFFLE_128v4 - Handle all 128-bit wide vectors with
12157 /// 4 elements, and match them with several different shuffle types.
12159 LowerVECTOR_SHUFFLE_128v4(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
12160 SDValue V1 = SVOp->getOperand(0);
12161 SDValue V2 = SVOp->getOperand(1);
12163 MVT VT = SVOp->getSimpleValueType(0);
12165 assert(VT.is128BitVector() && "Unsupported vector size");
12167 std::pair<int, int> Locs[4];
12168 int Mask1[] = { -1, -1, -1, -1 };
12169 SmallVector<int, 8> PermMask(SVOp->getMask().begin(), SVOp->getMask().end());
12171 unsigned NumHi = 0;
12172 unsigned NumLo = 0;
12173 for (unsigned i = 0; i != 4; ++i) {
12174 int Idx = PermMask[i];
12176 Locs[i] = std::make_pair(-1, -1);
12178 assert(Idx < 8 && "Invalid VECTOR_SHUFFLE index!");
12180 Locs[i] = std::make_pair(0, NumLo);
12181 Mask1[NumLo] = Idx;
12184 Locs[i] = std::make_pair(1, NumHi);
12186 Mask1[2+NumHi] = Idx;
12192 if (NumLo <= 2 && NumHi <= 2) {
12193 // No more than two elements come from either vector. This can be
12194 // implemented with two shuffles. The first shuffle gathers the elements.
12195 // The second shuffle, which takes the first shuffle as both of its
12196 // vector operands, puts the elements into the right order.
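// (For example, for the mask <0, 4, 1, 5> the first shuffle gathers the used
// elements as <0, 1, 4, 5>, i.e. a0,a1,b0,b1, and the second shuffle of that
// result with itself, mask <0, 2, 5, 7>, puts them back in the requested
// order.)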
12197 V1 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);
12199 int Mask2[] = { -1, -1, -1, -1 };
12201 for (unsigned i = 0; i != 4; ++i)
12202 if (Locs[i].first != -1) {
12203 unsigned Idx = (i < 2) ? 0 : 4;
12204 Idx += Locs[i].first * 2 + Locs[i].second;
12205 Mask2[i] = Idx;
12208 return DAG.getVectorShuffle(VT, dl, V1, V1, &Mask2[0]);
12211 if (NumLo == 3 || NumHi == 3) {
12212 // Otherwise, we must have three elements from one vector, call it X, and
12213 // one element from the other, call it Y. First, use a shufps to build an
12214 // intermediate vector with the one element from Y and the element from X
12215 // that will be in the same half in the final destination (the indexes don't
12216 // matter). Then, use a shufps to build the final vector, taking the half
12217 // containing the element from Y from the intermediate, and the other half
12218 // from X.
12220 // Normalize it so the 3 elements come from V1.
12221 CommuteVectorShuffleMask(PermMask, 4);
12225 // Find the element from V2.
12227 for (HiIndex = 0; HiIndex < 3; ++HiIndex) {
12228 int Val = PermMask[HiIndex];
12235 Mask1[0] = PermMask[HiIndex];
12237 Mask1[2] = PermMask[HiIndex^1];
12239 V2 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);
12241 if (HiIndex >= 2) {
12242 Mask1[0] = PermMask[0];
12243 Mask1[1] = PermMask[1];
12244 Mask1[2] = HiIndex & 1 ? 6 : 4;
12245 Mask1[3] = HiIndex & 1 ? 4 : 6;
12246 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);
12249 Mask1[0] = HiIndex & 1 ? 2 : 0;
12250 Mask1[1] = HiIndex & 1 ? 0 : 2;
12251 Mask1[2] = PermMask[2];
12252 Mask1[3] = PermMask[3];
12257 return DAG.getVectorShuffle(VT, dl, V2, V1, &Mask1[0]);
12260 // Break it into (shuffle shuffle_hi, shuffle_lo).
12261 int LoMask[] = { -1, -1, -1, -1 };
12262 int HiMask[] = { -1, -1, -1, -1 };
12264 int *MaskPtr = LoMask;
12265 unsigned MaskIdx = 0;
12266 unsigned LoIdx = 0;
12267 unsigned HiIdx = 2;
12268 for (unsigned i = 0; i != 4; ++i) {
12275 int Idx = PermMask[i];
12277 Locs[i] = std::make_pair(-1, -1);
12278 } else if (Idx < 4) {
12279 Locs[i] = std::make_pair(MaskIdx, LoIdx);
12280 MaskPtr[LoIdx] = Idx;
12283 Locs[i] = std::make_pair(MaskIdx, HiIdx);
12284 MaskPtr[HiIdx] = Idx;
12289 SDValue LoShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &LoMask[0]);
12290 SDValue HiShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &HiMask[0]);
12291 int MaskOps[] = { -1, -1, -1, -1 };
12292 for (unsigned i = 0; i != 4; ++i)
12293 if (Locs[i].first != -1)
12294 MaskOps[i] = Locs[i].first * 4 + Locs[i].second;
12295 return DAG.getVectorShuffle(VT, dl, LoShuffle, HiShuffle, &MaskOps[0]);
12298 static bool MayFoldVectorLoad(SDValue V) {
12299 while (V.hasOneUse() && V.getOpcode() == ISD::BITCAST)
12300 V = V.getOperand(0);
12302 if (V.hasOneUse() && V.getOpcode() == ISD::SCALAR_TO_VECTOR)
12303 V = V.getOperand(0);
12304 if (V.hasOneUse() && V.getOpcode() == ISD::BUILD_VECTOR &&
12305 V.getNumOperands() == 2 && V.getOperand(1).getOpcode() == ISD::UNDEF)
12306 // BUILD_VECTOR (load), undef
12307 V = V.getOperand(0);
12309 return MayFoldLoad(V);
12313 SDValue getMOVDDup(SDValue &Op, SDLoc &dl, SDValue V1, SelectionDAG &DAG) {
12314 MVT VT = Op.getSimpleValueType();
12316 // Canonicalize to v2f64.
12317 V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1);
12318 return DAG.getNode(ISD::BITCAST, dl, VT,
12319 getTargetShuffleNode(X86ISD::MOVDDUP, dl, MVT::v2f64,
12324 SDValue getMOVLowToHigh(SDValue &Op, SDLoc &dl, SelectionDAG &DAG,
12326 SDValue V1 = Op.getOperand(0);
12327 SDValue V2 = Op.getOperand(1);
12328 MVT VT = Op.getSimpleValueType();
12330 assert(VT != MVT::v2i64 && "unsupported shuffle type");
12332 if (HasSSE2 && VT == MVT::v2f64)
12333 return getTargetShuffleNode(X86ISD::MOVLHPD, dl, VT, V1, V2, DAG);
12335 // v4f32 or v4i32: canonicalize to v4f32 (which is legal for SSE1)
12336 return DAG.getNode(ISD::BITCAST, dl, VT,
12337 getTargetShuffleNode(X86ISD::MOVLHPS, dl, MVT::v4f32,
12338 DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V1),
12339 DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V2), DAG));
12343 SDValue getMOVHighToLow(SDValue &Op, SDLoc &dl, SelectionDAG &DAG) {
12344 SDValue V1 = Op.getOperand(0);
12345 SDValue V2 = Op.getOperand(1);
12346 MVT VT = Op.getSimpleValueType();
12348 assert((VT == MVT::v4i32 || VT == MVT::v4f32) &&
12349 "unsupported shuffle type");
12351 if (V2.getOpcode() == ISD::UNDEF)
12355 return getTargetShuffleNode(X86ISD::MOVHLPS, dl, VT, V1, V2, DAG);
12359 SDValue getMOVLP(SDValue &Op, SDLoc &dl, SelectionDAG &DAG, bool HasSSE2) {
12360 SDValue V1 = Op.getOperand(0);
12361 SDValue V2 = Op.getOperand(1);
12362 MVT VT = Op.getSimpleValueType();
12363 unsigned NumElems = VT.getVectorNumElements();
12365 // Use MOVLPS and MOVLPD in case V1 or V2 are loads. During isel, the second
12366 // operand of these instructions is only memory, so check if there's a
12367 // potential load folding here, otherwise use SHUFPS or MOVSD to match the
12369 bool CanFoldLoad = false;
12371 // Trivial case, when V2 comes from a load.
12372 if (MayFoldVectorLoad(V2))
12373 CanFoldLoad = true;
12375 // When V1 is a load, it can be folded later into a store in isel, example:
12376 // (store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)), addr:$src1)
12378 // (MOVLPSmr addr:$src1, VR128:$src2)
12379 // So, recognize this potential and also use MOVLPS or MOVLPD
12380 else if (MayFoldVectorLoad(V1) && MayFoldIntoStore(Op))
12381 CanFoldLoad = true;
12383 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
12385 if (HasSSE2 && NumElems == 2)
12386 return getTargetShuffleNode(X86ISD::MOVLPD, dl, VT, V1, V2, DAG);
12389 // If the second element matters, use movlps; otherwise fall through to movsd/movss below.
12390 if (SVOp->getMaskElt(1) != -1)
12391 return getTargetShuffleNode(X86ISD::MOVLPS, dl, VT, V1, V2, DAG);
12394 // movl and movlp will both match v2i64, but v2i64 is never matched by
12395 // movl earlier because we keep it strict to avoid interfering with the movlp
12396 // load folding logic (see the code above the getMOVLP call). Match it here
12397 // instead; this is horrible, but it will stay like this until we move all
12398 // shuffle matching to x86 specific nodes. Note that for the 1st condition
12399 // all types are matched with movsd.
12401 // FIXME: isMOVLMask should be checked and matched before getMOVLP,
12402 // as to remove this logic from here, as much as possible
12403 if (NumElems == 2 || !isMOVLMask(SVOp->getMask(), VT))
12404 return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG);
12405 return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG);
12408 assert(VT != MVT::v4i32 && "unsupported shuffle type");
12410 // Invert the operand order and use SHUFPS to match it.
12411 return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V2, V1,
12412 getShuffleSHUFImmediate(SVOp), DAG);
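// Given a vector load, build a scalar load of just element 'Index' by
// offsetting the address by Index * element store size and narrowing the
// memory operand to the element width.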
12415 static SDValue NarrowVectorLoadToElement(LoadSDNode *Load, unsigned Index,
12416 SelectionDAG &DAG) {
12418 MVT VT = Load->getSimpleValueType(0);
12419 MVT EVT = VT.getVectorElementType();
12420 SDValue Addr = Load->getOperand(1);
12421 SDValue NewAddr = DAG.getNode(
12422 ISD::ADD, dl, Addr.getSimpleValueType(), Addr,
12423 DAG.getConstant(Index * EVT.getStoreSize(), Addr.getSimpleValueType()));
12426 DAG.getLoad(EVT, dl, Load->getChain(), NewAddr,
12427 DAG.getMachineFunction().getMachineMemOperand(
12428 Load->getMemOperand(), 0, EVT.getStoreSize()));
12432 // It is only safe to call this function if isINSERTPSMask is true for
12433 // this shufflevector mask.
12434 static SDValue getINSERTPS(ShuffleVectorSDNode *SVOp, SDLoc &dl,
12435 SelectionDAG &DAG) {
12436 // Generate an insertps instruction when inserting an f32 from memory onto a
12437 // v4f32 or when copying a member from one v4f32 to another.
12438 // We also use it for transferring i32 from one register to another,
12439 // since it simply copies the same bits.
12440 // If we're transferring an i32 from memory to a specific element in a
12441 // register, we output a generic DAG that will match the PINSRD
12443 MVT VT = SVOp->getSimpleValueType(0);
12444 MVT EVT = VT.getVectorElementType();
12445 SDValue V1 = SVOp->getOperand(0);
12446 SDValue V2 = SVOp->getOperand(1);
12447 auto Mask = SVOp->getMask();
12448 assert((VT == MVT::v4f32 || VT == MVT::v4i32) &&
12449 "unsupported vector type for insertps/pinsrd");
12451 auto FromV1Predicate = [](const int &i) { return i < 4 && i > -1; };
12452 auto FromV2Predicate = [](const int &i) { return i >= 4; };
12453 int FromV1 = std::count_if(Mask.begin(), Mask.end(), FromV1Predicate);
12457 unsigned DestIndex;
12461 DestIndex = std::find_if(Mask.begin(), Mask.end(), FromV1Predicate) -
12464 // If we have 1 element from each vector, we have to check if we're
12465 // changing V1's element's place. If so, we're done. Otherwise, we
12466 // should assume we're changing V2's element's place and behave
12468 int FromV2 = std::count_if(Mask.begin(), Mask.end(), FromV2Predicate);
12469 assert(DestIndex <= INT32_MAX && "truncated destination index");
12470 if (FromV1 == FromV2 &&
12471 static_cast<int>(DestIndex) == Mask[DestIndex] % 4) {
12475 std::find_if(Mask.begin(), Mask.end(), FromV2Predicate) - Mask.begin();
12478 assert(std::count_if(Mask.begin(), Mask.end(), FromV2Predicate) == 1 &&
12479 "More than one element from V1 and from V2, or no elements from one "
12480 "of the vectors. This case should not have returned true from "
12485 std::find_if(Mask.begin(), Mask.end(), FromV2Predicate) - Mask.begin();
12488 // Get an index into the source vector in the range [0,4) (the mask is
12489 // in the range [0,8) because it can address V1 and V2)
12490 unsigned SrcIndex = Mask[DestIndex] % 4;
12491 if (MayFoldLoad(From)) {
12492 // Trivial case, when From comes from a load and is only used by the
12493 // shuffle. Make it use insertps from the vector that we need from that
12496 NarrowVectorLoadToElement(cast<LoadSDNode>(From), SrcIndex, DAG);
12497 if (!NewLoad.getNode())
12500 if (EVT == MVT::f32) {
12501 // Create this as a scalar to vector to match the instruction pattern.
12502 SDValue LoadScalarToVector =
12503 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, NewLoad);
12504 SDValue InsertpsMask = DAG.getIntPtrConstant(DestIndex << 4);
12505 return DAG.getNode(X86ISD::INSERTPS, dl, VT, To, LoadScalarToVector,
12507 } else { // EVT == MVT::i32
12508 // If we're getting an i32 from memory, use an INSERT_VECTOR_ELT
12509 // instruction, to match the PINSRD instruction, which loads an i32 to a
12510 // certain vector element.
12511 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, To, NewLoad,
12512 DAG.getConstant(DestIndex, MVT::i32));
12516 // Vector-element-to-vector
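// The insertps immediate encodes the source element in bits [7:6] and the
// destination element in bits [5:4]; e.g. copying element 2 of From into
// element 1 of To yields (1 << 4) | (2 << 6) == 0x90.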
12517 SDValue InsertpsMask = DAG.getIntPtrConstant(DestIndex << 4 | SrcIndex << 6);
12518 return DAG.getNode(X86ISD::INSERTPS, dl, VT, To, From, InsertpsMask);
12521 // Reduce a vector shuffle to zext.
12522 static SDValue LowerVectorIntExtend(SDValue Op, const X86Subtarget *Subtarget,
12523 SelectionDAG &DAG) {
12524 // PMOVZX is only available from SSE41.
12525 if (!Subtarget->hasSSE41())
12528 MVT VT = Op.getSimpleValueType();
12530 // Only AVX2 supports 256-bit vector integer extension.
12531 if (!Subtarget->hasInt256() && VT.is256BitVector())
12534 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
12536 SDValue V1 = Op.getOperand(0);
12537 SDValue V2 = Op.getOperand(1);
12538 unsigned NumElems = VT.getVectorNumElements();
12540 // Extending is a unary operation, and the element type of the source vector
12541 // cannot be i64 or larger.
12542 if (V2.getOpcode() != ISD::UNDEF || !VT.isInteger() ||
12543 VT.getVectorElementType() == MVT::i64)
12546 // Find the expansion ratio, e.g. expanding from i8 to i32 has a ratio of 4.
12547 unsigned Shift = 1; // Start from 2, i.e. 1 << 1.
12548 while ((1U << Shift) < NumElems) {
12549 if (SVOp->getMaskElt(1U << Shift) == 1)
12552 // The maximal ratio is 8, i.e. from i8 to i64.
12557 // Check the shuffle mask.
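// e.g. with NumElems == 8 and Shift == 1 the only accepted mask is
// <0,u,1,u,2,u,3,u>: element (i << Shift) must be source element i and every
// other position must be undef.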
12558 unsigned Mask = (1U << Shift) - 1;
12559 for (unsigned i = 0; i != NumElems; ++i) {
12560 int EltIdx = SVOp->getMaskElt(i);
12561 if ((i & Mask) != 0 && EltIdx != -1)
12563 if ((i & Mask) == 0 && (unsigned)EltIdx != (i >> Shift))
12567 unsigned NBits = VT.getVectorElementType().getSizeInBits() << Shift;
12568 MVT NeVT = MVT::getIntegerVT(NBits);
12569 MVT NVT = MVT::getVectorVT(NeVT, NumElems >> Shift);
12571 if (!DAG.getTargetLoweringInfo().isTypeLegal(NVT))
12574 return DAG.getNode(ISD::BITCAST, DL, VT,
12575 DAG.getNode(X86ISD::VZEXT, DL, NVT, V1));
12578 static SDValue NormalizeVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
12579 SelectionDAG &DAG) {
12580 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
12581 MVT VT = Op.getSimpleValueType();
12583 SDValue V1 = Op.getOperand(0);
12584 SDValue V2 = Op.getOperand(1);
12586 if (isZeroShuffle(SVOp))
12587 return getZeroVector(VT, Subtarget, DAG, dl);
12589 // Handle splat operations
12590 if (SVOp->isSplat()) {
12591 // Use vbroadcast whenever the splat comes from a foldable load
12592 SDValue Broadcast = LowerVectorBroadcast(Op, Subtarget, DAG);
12593 if (Broadcast.getNode())
12597 // Check integer expanding shuffles.
12598 SDValue NewOp = LowerVectorIntExtend(Op, Subtarget, DAG);
12599 if (NewOp.getNode())
12602 // If the shuffle can be profitably rewritten as a narrower shuffle, then
12604 if (VT == MVT::v8i16 || VT == MVT::v16i8 || VT == MVT::v16i16 ||
12605 VT == MVT::v32i8) {
12606 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG);
12607 if (NewOp.getNode())
12608 return DAG.getNode(ISD::BITCAST, dl, VT, NewOp);
12609 } else if (VT.is128BitVector() && Subtarget->hasSSE2()) {
12610 // FIXME: Figure out a cleaner way to do this.
12611 if (ISD::isBuildVectorAllZeros(V2.getNode())) {
12612 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG);
12613 if (NewOp.getNode()) {
12614 MVT NewVT = NewOp.getSimpleValueType();
12615 if (isCommutedMOVLMask(cast<ShuffleVectorSDNode>(NewOp)->getMask(),
12616 NewVT, true, false))
12617 return getVZextMovL(VT, NewVT, NewOp.getOperand(0), DAG, Subtarget,
12620 } else if (ISD::isBuildVectorAllZeros(V1.getNode())) {
12621 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG);
12622 if (NewOp.getNode()) {
12623 MVT NewVT = NewOp.getSimpleValueType();
12624 if (isMOVLMask(cast<ShuffleVectorSDNode>(NewOp)->getMask(), NewVT))
12625 return getVZextMovL(VT, NewVT, NewOp.getOperand(1), DAG, Subtarget,
12634 X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
12635 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
12636 SDValue V1 = Op.getOperand(0);
12637 SDValue V2 = Op.getOperand(1);
12638 MVT VT = Op.getSimpleValueType();
12640 unsigned NumElems = VT.getVectorNumElements();
12641 bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
12642 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
12643 bool V1IsSplat = false;
12644 bool V2IsSplat = false;
12645 bool HasSSE2 = Subtarget->hasSSE2();
12646 bool HasFp256 = Subtarget->hasFp256();
12647 bool HasInt256 = Subtarget->hasInt256();
12648 MachineFunction &MF = DAG.getMachineFunction();
12650 MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize);
12652 // Check if we should use the experimental vector shuffle lowering. If so,
12653 // delegate completely to that code path.
12654 if (ExperimentalVectorShuffleLowering)
12655 return lowerVectorShuffle(Op, Subtarget, DAG);
12657 assert(VT.getSizeInBits() != 64 && "Can't lower MMX shuffles");
12659 if (V1IsUndef && V2IsUndef)
12660 return DAG.getUNDEF(VT);
12662 // When we create a shuffle node we put the UNDEF node in the second operand,
12663 // but in some cases the first operand may be transformed to UNDEF.
12664 // In this case we should just commute the node.
12666 return DAG.getCommutedVectorShuffle(*SVOp);
12668 // Vector shuffle lowering takes 3 steps:
12670 // 1) Normalize the input vectors. Here splats, zeroed vectors, profitable
12671 // narrowing and commutation of operands should be handled.
12672 // 2) Matching of shuffles with known shuffle masks to x86 target specific
12674 // 3) Rewriting of unmatched masks into new generic shuffle operations,
12675 // so the shuffle can be broken into other shuffles and the legalizer can
12676 // try the lowering again.
12678 // The general idea is that no vector_shuffle operation should be left to
12679 // be matched during isel; all of them must be converted to a target specific
12682 // Normalize the input vectors. Here splats, zeroed vectors, profitable
12683 // narrowing and commutation of operands should be handled. The actual code
12684 // doesn't include all of those, work in progress...
12685 SDValue NewOp = NormalizeVectorShuffle(Op, Subtarget, DAG);
12686 if (NewOp.getNode())
12689 SmallVector<int, 8> M(SVOp->getMask().begin(), SVOp->getMask().end());
12691 // NOTE: isPSHUFDMask can also match both masks below (unpckl_undef and
12692 // unpckh_undef). Only use pshufd if speed is more important than size.
12693 if (OptForSize && isUNPCKL_v_undef_Mask(M, VT, HasInt256))
12694 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
12695 if (OptForSize && isUNPCKH_v_undef_Mask(M, VT, HasInt256))
12696 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG);
12698 if (isMOVDDUPMask(M, VT) && Subtarget->hasSSE3() &&
12699 V2IsUndef && MayFoldVectorLoad(V1))
12700 return getMOVDDup(Op, dl, V1, DAG);
12702 if (isMOVHLPS_v_undef_Mask(M, VT))
12703 return getMOVHighToLow(Op, dl, DAG);
12705 // Used to match splats.
12706 if (HasSSE2 && isUNPCKHMask(M, VT, HasInt256) && V2IsUndef &&
12707 (VT == MVT::v2f64 || VT == MVT::v2i64))
12708 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG);
12710 if (isPSHUFDMask(M, VT)) {
12711 // The actual implementation will match the mask in the if above, and then
12712 // during isel it can match several different instructions, not only pshufd
12713 // as its name suggests. Sad but true; emulate the behavior for now...
12714 if (isMOVDDUPMask(M, VT) && ((VT == MVT::v4f32 || VT == MVT::v2i64)))
12715 return getTargetShuffleNode(X86ISD::MOVLHPS, dl, VT, V1, V1, DAG);
12717 unsigned TargetMask = getShuffleSHUFImmediate(SVOp);
12719 if (HasSSE2 && (VT == MVT::v4f32 || VT == MVT::v4i32))
12720 return getTargetShuffleNode(X86ISD::PSHUFD, dl, VT, V1, TargetMask, DAG);
12722 if (HasFp256 && (VT == MVT::v4f32 || VT == MVT::v2f64))
12723 return getTargetShuffleNode(X86ISD::VPERMILPI, dl, VT, V1, TargetMask,
12726 return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V1, V1,
12730 if (isPALIGNRMask(M, VT, Subtarget))
12731 return getTargetShuffleNode(X86ISD::PALIGNR, dl, VT, V1, V2,
12732 getShufflePALIGNRImmediate(SVOp),
12735 if (isVALIGNMask(M, VT, Subtarget))
12736 return getTargetShuffleNode(X86ISD::VALIGN, dl, VT, V1, V2,
12737 getShuffleVALIGNImmediate(SVOp),
12740 // Check if this can be converted into a logical shift.
12741 bool isLeft = false;
12742 unsigned ShAmt = 0;
12744 bool isShift = HasSSE2 && isVectorShift(SVOp, DAG, isLeft, ShVal, ShAmt);
12745 if (isShift && ShVal.hasOneUse()) {
12746 // If the shifted value has multiple uses, it may be cheaper to use
12747 // v_set0 + movlhps or movhlps, etc.
12748 MVT EltVT = VT.getVectorElementType();
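// isVectorShift reports the shift amount in elements; scale it to bits
// before handing it to getVShift.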
12749 ShAmt *= EltVT.getSizeInBits();
12750 return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl);
12753 if (isMOVLMask(M, VT)) {
12754 if (ISD::isBuildVectorAllZeros(V1.getNode()))
12755 return getVZextMovL(VT, VT, V2, DAG, Subtarget, dl);
12756 if (!isMOVLPMask(M, VT)) {
12757 if (HasSSE2 && (VT == MVT::v2i64 || VT == MVT::v2f64))
12758 return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG);
12760 if (VT == MVT::v4i32 || VT == MVT::v4f32)
12761 return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG);
12765 // FIXME: fold these into legal mask.
12766 if (isMOVLHPSMask(M, VT) && !isUNPCKLMask(M, VT, HasInt256))
12767 return getMOVLowToHigh(Op, dl, DAG, HasSSE2);
12769 if (isMOVHLPSMask(M, VT))
12770 return getMOVHighToLow(Op, dl, DAG);
12772 if (V2IsUndef && isMOVSHDUPMask(M, VT, Subtarget))
12773 return getTargetShuffleNode(X86ISD::MOVSHDUP, dl, VT, V1, DAG);
12775 if (V2IsUndef && isMOVSLDUPMask(M, VT, Subtarget))
12776 return getTargetShuffleNode(X86ISD::MOVSLDUP, dl, VT, V1, DAG);
12778 if (isMOVLPMask(M, VT))
12779 return getMOVLP(Op, dl, DAG, HasSSE2);
12781 if (ShouldXformToMOVHLPS(M, VT) ||
12782 ShouldXformToMOVLP(V1.getNode(), V2.getNode(), M, VT))
12783 return DAG.getCommutedVectorShuffle(*SVOp);
12786 // No better options. Use a vshldq / vsrldq.
12787 MVT EltVT = VT.getVectorElementType();
12788 ShAmt *= EltVT.getSizeInBits();
12789 return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl);
12792 bool Commuted = false;
12793 // FIXME: This should also accept a bitcast of a splat? Be careful, not
12794 // 1,1,1,1 -> v8i16 though.
12795 BitVector UndefElements;
12796 if (auto *BVOp = dyn_cast<BuildVectorSDNode>(V1.getNode()))
12797 if (BVOp->getConstantSplatNode(&UndefElements) && UndefElements.none())
12799 if (auto *BVOp = dyn_cast<BuildVectorSDNode>(V2.getNode()))
12800 if (BVOp->getConstantSplatNode(&UndefElements) && UndefElements.none())
12803 // Canonicalize the splat or undef, if present, to be on the RHS.
12804 if (!V2IsUndef && V1IsSplat && !V2IsSplat) {
12805 CommuteVectorShuffleMask(M, NumElems);
12807 std::swap(V1IsSplat, V2IsSplat);
12811 if (isCommutedMOVLMask(M, VT, V2IsSplat, V2IsUndef)) {
12812 // Shuffling low element of v1 into undef, just return v1.
12815 // If V2 is a splat, the mask may be malformed, such as <4,3,3,3>, which
12816 // the instruction selector will not match, so get a canonical MOVL with
12817 // swapped operands to undo the commute.
12818 return getMOVL(DAG, dl, VT, V2, V1);
12821 if (isUNPCKLMask(M, VT, HasInt256))
12822 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);
12824 if (isUNPCKHMask(M, VT, HasInt256))
12825 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);
12828 // Normalize the mask so all entries that point to V2 point to its first
12829 // element, then try to match unpck{h|l} again. If it matches, return a
12830 // new vector_shuffle with the corrected mask.
12831 SmallVector<int, 8> NewMask(M.begin(), M.end());
12832 NormalizeMask(NewMask, NumElems);
12833 if (isUNPCKLMask(NewMask, VT, HasInt256, true))
12834 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);
12835 if (isUNPCKHMask(NewMask, VT, HasInt256, true))
12836 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);
12840 // Commute it back and try unpck* again.
12841 // FIXME: this seems wrong.
12842 CommuteVectorShuffleMask(M, NumElems);
12844 std::swap(V1IsSplat, V2IsSplat);
12846 if (isUNPCKLMask(M, VT, HasInt256))
12847 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);
12849 if (isUNPCKHMask(M, VT, HasInt256))
12850 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);
12853 // Normalize the node to match x86 shuffle ops if needed
12854 if (!V2IsUndef && (isSHUFPMask(M, VT, /* Commuted */ true)))
12855 return DAG.getCommutedVectorShuffle(*SVOp);
12857 // The checks below are all present in isShuffleMaskLegal, but they are
12858 // inlined here right now to enable us to directly emit target specific
12859 // nodes, and will be removed one by one until they no longer return Op.
12861 if (ShuffleVectorSDNode::isSplatMask(&M[0], VT) &&
12862 SVOp->getSplatIndex() == 0 && V2IsUndef) {
12863 if (VT == MVT::v2f64 || VT == MVT::v2i64)
12864 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
12867 if (isPSHUFHWMask(M, VT, HasInt256))
12868 return getTargetShuffleNode(X86ISD::PSHUFHW, dl, VT, V1,
12869 getShufflePSHUFHWImmediate(SVOp),
12872 if (isPSHUFLWMask(M, VT, HasInt256))
12873 return getTargetShuffleNode(X86ISD::PSHUFLW, dl, VT, V1,
12874 getShufflePSHUFLWImmediate(SVOp),
12877 unsigned MaskValue;
12878 if (isBlendMask(M, VT, Subtarget->hasSSE41(), HasInt256, &MaskValue))
12879 return LowerVECTOR_SHUFFLEtoBlend(SVOp, MaskValue, Subtarget, DAG);
12881 if (isSHUFPMask(M, VT))
12882 return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V1, V2,
12883 getShuffleSHUFImmediate(SVOp), DAG);
12885 if (isUNPCKL_v_undef_Mask(M, VT, HasInt256))
12886 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
12887 if (isUNPCKH_v_undef_Mask(M, VT, HasInt256))
12888 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG);
12890 //===--------------------------------------------------------------------===//
12891 // Generate target specific nodes for 128- or 256-bit shuffles that are
12892 // only supported in the AVX instruction set.
12895 // Handle VMOVDDUPY permutations
12896 if (V2IsUndef && isMOVDDUPYMask(M, VT, HasFp256))
12897 return getTargetShuffleNode(X86ISD::MOVDDUP, dl, VT, V1, DAG);
12899 // Handle VPERMILPS/D* permutations
12900 if (isVPERMILPMask(M, VT)) {
12901 if ((HasInt256 && VT == MVT::v8i32) || VT == MVT::v16i32)
12902 return getTargetShuffleNode(X86ISD::PSHUFD, dl, VT, V1,
12903 getShuffleSHUFImmediate(SVOp), DAG);
12904 return getTargetShuffleNode(X86ISD::VPERMILPI, dl, VT, V1,
12905 getShuffleSHUFImmediate(SVOp), DAG);
12909 if (VT.is512BitVector() && isINSERT64x4Mask(M, VT, &Idx))
12910 return Insert256BitVector(V1, Extract256BitVector(V2, 0, DAG, dl),
12911 Idx*(NumElems/2), DAG, dl);
12913 // Handle VPERM2F128/VPERM2I128 permutations
12914 if (isVPERM2X128Mask(M, VT, HasFp256))
12915 return getTargetShuffleNode(X86ISD::VPERM2X128, dl, VT, V1,
12916 V2, getShuffleVPERM2X128Immediate(SVOp), DAG);
12918 if (Subtarget->hasSSE41() && isINSERTPSMask(M, VT))
12919 return getINSERTPS(SVOp, dl, DAG);
12922 if (V2IsUndef && HasInt256 && isPermImmMask(M, VT, Imm8))
12923 return getTargetShuffleNode(X86ISD::VPERMI, dl, VT, V1, Imm8, DAG);
12925 if ((V2IsUndef && HasInt256 && VT.is256BitVector() && NumElems == 8) ||
12926 VT.is512BitVector()) {
12927 MVT MaskEltVT = MVT::getIntegerVT(VT.getVectorElementType().getSizeInBits());
12928 MVT MaskVectorVT = MVT::getVectorVT(MaskEltVT, NumElems);
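// Materialize the shuffle mask as a constant vector of element indices
// (undef entries become 0) so it can be used as the VPERMV/VPERMV3 control
// operand.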
12929 SmallVector<SDValue, 16> permclMask;
12930 for (unsigned i = 0; i != NumElems; ++i) {
12931 permclMask.push_back(DAG.getConstant((M[i]>=0) ? M[i] : 0, MaskEltVT));
12934 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVectorVT, permclMask);
12936 // Bitcast is for VPERMPS since mask is v8i32 but node takes v8f32
12937 return DAG.getNode(X86ISD::VPERMV, dl, VT,
12938 DAG.getNode(ISD::BITCAST, dl, VT, Mask), V1);
12939 return DAG.getNode(X86ISD::VPERMV3, dl, VT, V1,
12940 DAG.getNode(ISD::BITCAST, dl, VT, Mask), V2);
12943 //===--------------------------------------------------------------------===//
12944 // Since no target specific shuffle was selected for this generic one,
12945 // lower it into other known shuffles. FIXME: this isn't true yet, but
12946 // this is the plan.
12949 // Handle v8i16 specifically since SSE can do byte extraction and insertion.
12950 if (VT == MVT::v8i16) {
12951 SDValue NewOp = LowerVECTOR_SHUFFLEv8i16(Op, Subtarget, DAG);
12952 if (NewOp.getNode())
12956 if (VT == MVT::v16i16 && HasInt256) {
12957 SDValue NewOp = LowerVECTOR_SHUFFLEv16i16(Op, DAG);
12958 if (NewOp.getNode())
12962 if (VT == MVT::v16i8) {
12963 SDValue NewOp = LowerVECTOR_SHUFFLEv16i8(SVOp, Subtarget, DAG);
12964 if (NewOp.getNode())
12968 if (VT == MVT::v32i8) {
12969 SDValue NewOp = LowerVECTOR_SHUFFLEv32i8(SVOp, Subtarget, DAG);
12970 if (NewOp.getNode())
12974 // Handle all 128-bit wide vectors with 4 elements, and match them with
12975 // several different shuffle types.
12976 if (NumElems == 4 && VT.is128BitVector())
12977 return LowerVECTOR_SHUFFLE_128v4(SVOp, DAG);
12979 // Handle general 256-bit shuffles
12980 if (VT.is256BitVector())
12981 return LowerVECTOR_SHUFFLE_256(SVOp, DAG);
12986 // This function assumes its argument is a BUILD_VECTOR of constants or
12987 // undef SDNodes, i.e. ISD::isBuildVectorOfConstantSDNodes(BuildVector) is
12989 static bool BUILD_VECTORtoBlendMask(BuildVectorSDNode *BuildVector,
12990 unsigned &MaskValue) {
12992 unsigned NumElems = BuildVector->getNumOperands();
12993 // There are 2 lanes if (NumElems > 8), and 1 lane otherwise.
12994 unsigned NumLanes = (NumElems - 1) / 8 + 1;
12995 unsigned NumElemsInLane = NumElems / NumLanes;
12997 // Blend for v16i16 should be symmetric for both lanes.
12998 for (unsigned i = 0; i < NumElemsInLane; ++i) {
12999 SDValue EltCond = BuildVector->getOperand(i);
13000 SDValue SndLaneEltCond =
13001 (NumLanes == 2) ? BuildVector->getOperand(i + NumElemsInLane) : EltCond;
13003 int Lane1Cond = -1, Lane2Cond = -1;
13004 if (isa<ConstantSDNode>(EltCond))
13005 Lane1Cond = !isZero(EltCond);
13006 if (isa<ConstantSDNode>(SndLaneEltCond))
13007 Lane2Cond = !isZero(SndLaneEltCond);
13009 if (Lane1Cond == Lane2Cond || Lane2Cond < 0)
13010 // Lane1Cond != 0 means we want the first argument.
13011 // Lane1Cond == 0 means we want the second argument.
13012 // The encoding of this argument is 0 for the first argument, 1
13013 // for the second. Therefore, invert the condition.
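// e.g. a v4i32 condition <-1,0,-1,0> selects <LHS[0],RHS[1],LHS[2],RHS[3]>
// and produces MaskValue 0b1010.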
13014 MaskValue |= !Lane1Cond << i;
13015 else if (Lane1Cond < 0)
13016 MaskValue |= !Lane2Cond << i;
13023 /// \brief Try to lower a VSELECT instruction to a vector shuffle.
13024 static SDValue lowerVSELECTtoVectorShuffle(SDValue Op,
13025 const X86Subtarget *Subtarget,
13026 SelectionDAG &DAG) {
13027 SDValue Cond = Op.getOperand(0);
13028 SDValue LHS = Op.getOperand(1);
13029 SDValue RHS = Op.getOperand(2);
13031 MVT VT = Op.getSimpleValueType();
13033 if (!ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
13035 auto *CondBV = cast<BuildVectorSDNode>(Cond);
13037 // Only non-legal VSELECTs reach this lowering; convert those into generic
13038 // shuffles and re-use the shuffle lowering path for blends.
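// Each constant-true condition element selects lane i of LHS (mask index i),
// each constant-false element selects lane i of RHS (mask index i + Size),
// and a non-constant element becomes undef (-1).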
13039 SmallVector<int, 32> Mask;
13040 for (int i = 0, Size = VT.getVectorNumElements(); i < Size; ++i) {
13041 SDValue CondElt = CondBV->getOperand(i);
13043 isa<ConstantSDNode>(CondElt) ? i + (isZero(CondElt) ? Size : 0) : -1);
13045 return DAG.getVectorShuffle(VT, dl, LHS, RHS, Mask);
13048 SDValue X86TargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG) const {
13049 // A vselect where all conditions and data are constants can be optimized into
13050 // a single vector load by SelectionDAGLegalize::ExpandBUILD_VECTOR().
13051 if (ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(0).getNode()) &&
13052 ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(1).getNode()) &&
13053 ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(2).getNode()))
13056 // Try to lower this to a blend-style vector shuffle. This can handle all
13057 // constant condition cases.
13058 SDValue BlendOp = lowerVSELECTtoVectorShuffle(Op, Subtarget, DAG);
13059 if (BlendOp.getNode())
13062 // Variable blends are only legal from SSE4.1 onward.
13063 if (!Subtarget->hasSSE41())
13066 // Some types for vselect were previously set to Expand, not Legal or
13067 // Custom. Return an empty SDValue so we fall through to Expand, after
13068 // the Custom lowering phase.
13069 MVT VT = Op.getSimpleValueType();
13070 switch (VT.SimpleTy) {
13075 if (Subtarget->hasBWI() && Subtarget->hasVLX())
13080 // We couldn't create a "Blend with immediate" node.
13081 // This node should still be legal, but we'll have to emit a blendv*
13086 static SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) {
13087 MVT VT = Op.getSimpleValueType();
13090 if (!Op.getOperand(0).getSimpleValueType().is128BitVector())
13093 if (VT.getSizeInBits() == 8) {
13094 SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, MVT::i32,
13095 Op.getOperand(0), Op.getOperand(1));
13096 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract,
13097 DAG.getValueType(VT));
13098 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
13101 if (VT.getSizeInBits() == 16) {
13102 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
13103 // If Idx is 0, it's cheaper to do a move instead of a pextrw.
13105 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
13106 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
13107 DAG.getNode(ISD::BITCAST, dl,
13110 Op.getOperand(1)));
13111 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, MVT::i32,
13112 Op.getOperand(0), Op.getOperand(1));
13113 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract,
13114 DAG.getValueType(VT));
13115 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
13118 if (VT == MVT::f32) {
13119 // EXTRACTPS outputs to a GPR32 register, which will require a movd to copy
13120 // the result back to an FR32 register. It's only worth matching if the
13121 // result has a single use which is a store or a bitcast to i32. And in
13122 // the case of a store, it's not worth it if the index is a constant 0,
13123 // because a MOVSSmr can be used instead, which is smaller and faster.
13124 if (!Op.hasOneUse())
13126 SDNode *User = *Op.getNode()->use_begin();
13127 if ((User->getOpcode() != ISD::STORE ||
13128 (isa<ConstantSDNode>(Op.getOperand(1)) &&
13129 cast<ConstantSDNode>(Op.getOperand(1))->isNullValue())) &&
13130 (User->getOpcode() != ISD::BITCAST ||
13131 User->getValueType(0) != MVT::i32))
13133 SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
13134 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32,
13137 return DAG.getNode(ISD::BITCAST, dl, MVT::f32, Extract);
13140 if (VT == MVT::i32 || VT == MVT::i64) {
13141 // ExtractPS/pextrq works with constant index.
13142 if (isa<ConstantSDNode>(Op.getOperand(1)))
13148 /// Extract one bit from mask vector, like v16i1 or v8i1.
13149 /// AVX-512 feature.
13151 X86TargetLowering::ExtractBitFromMaskVector(SDValue Op, SelectionDAG &DAG) const {
13152 SDValue Vec = Op.getOperand(0);
13154 MVT VecVT = Vec.getSimpleValueType();
13155 SDValue Idx = Op.getOperand(1);
13156 MVT EltVT = Op.getSimpleValueType();
13158 assert((EltVT == MVT::i1) && "Unexpected operands in ExtractBitFromMaskVector");
13159 assert((VecVT.getVectorNumElements() <= 16 || Subtarget->hasBWI()) &&
13160 "Unexpected vector type in ExtractBitFromMaskVector");
13162 // A variable index can't be handled in mask registers;
13163 // extend the vector to VR512.
13164 if (!isa<ConstantSDNode>(Idx)) {
13165 MVT ExtVT = (VecVT == MVT::v8i1 ? MVT::v8i64 : MVT::v16i32);
13166 SDValue Ext = DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVT, Vec);
13167 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
13168 ExtVT.getVectorElementType(), Ext, Idx);
13169 return DAG.getNode(ISD::TRUNCATE, dl, EltVT, Elt);
13172 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13173 const TargetRegisterClass* rc = getRegClassFor(VecVT);
13174 if (!Subtarget->hasDQI() && (VecVT.getVectorNumElements() <= 8))
13175 rc = getRegClassFor(MVT::v16i1);
13176 unsigned MaxSift = rc->getSize()*8 - 1;
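// Shift the requested bit up to the MSB of the mask register and then back
// down to bit 0 so the extracted i1 ends up in the low bit.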
13177 Vec = DAG.getNode(X86ISD::VSHLI, dl, VecVT, Vec,
13178 DAG.getConstant(MaxSift - IdxVal, MVT::i8));
13179 Vec = DAG.getNode(X86ISD::VSRLI, dl, VecVT, Vec,
13180 DAG.getConstant(MaxSift, MVT::i8));
13181 return DAG.getNode(X86ISD::VEXTRACT, dl, MVT::i1, Vec,
13182 DAG.getIntPtrConstant(0));
13186 X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
13187 SelectionDAG &DAG) const {
13189 SDValue Vec = Op.getOperand(0);
13190 MVT VecVT = Vec.getSimpleValueType();
13191 SDValue Idx = Op.getOperand(1);
13193 if (Op.getSimpleValueType() == MVT::i1)
13194 return ExtractBitFromMaskVector(Op, DAG);
13196 if (!isa<ConstantSDNode>(Idx)) {
13197 if (VecVT.is512BitVector() ||
13198 (VecVT.is256BitVector() && Subtarget->hasInt256() &&
13199 VecVT.getVectorElementType().getSizeInBits() == 32)) {
13202 MVT::getIntegerVT(VecVT.getVectorElementType().getSizeInBits());
13203 MVT MaskVT = MVT::getVectorVT(MaskEltVT, VecVT.getSizeInBits() /
13204 MaskEltVT.getSizeInBits());
13206 Idx = DAG.getZExtOrTrunc(Idx, dl, MaskEltVT);
13207 SDValue Mask = DAG.getNode(X86ISD::VINSERT, dl, MaskVT,
13208 getZeroVector(MaskVT, Subtarget, DAG, dl),
13209 Idx, DAG.getConstant(0, getPointerTy()));
13210 SDValue Perm = DAG.getNode(X86ISD::VPERMV, dl, VecVT, Mask, Vec);
13211 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(),
13212 Perm, DAG.getConstant(0, getPointerTy()));
13217 // If this is a 256-bit vector result, first extract the 128-bit vector and
13218 // then extract the element from the 128-bit vector.
13219 if (VecVT.is256BitVector() || VecVT.is512BitVector()) {
13221 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13222 // Get the 128-bit vector.
13223 Vec = Extract128BitVector(Vec, IdxVal, DAG, dl);
13224 MVT EltVT = VecVT.getVectorElementType();
13226 unsigned ElemsPerChunk = 128 / EltVT.getSizeInBits();
13228 //if (IdxVal >= NumElems/2)
13229 // IdxVal -= NumElems/2;
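// Reduce the index modulo the number of elements in a 128-bit chunk.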
13230 IdxVal -= (IdxVal/ElemsPerChunk)*ElemsPerChunk;
13231 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
13232 DAG.getConstant(IdxVal, MVT::i32));
13235 assert(VecVT.is128BitVector() && "Unexpected vector length");
13237 if (Subtarget->hasSSE41()) {
13238 SDValue Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG);
13243 MVT VT = Op.getSimpleValueType();
13244 // TODO: handle v16i8.
13245 if (VT.getSizeInBits() == 16) {
13246 SDValue Vec = Op.getOperand(0);
13247 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
13249 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
13250 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
13251 DAG.getNode(ISD::BITCAST, dl,
13253 Op.getOperand(1)));
13254 // Transform it so it matches pextrw, which produces a 32-bit result.
13255 MVT EltVT = MVT::i32;
13256 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, EltVT,
13257 Op.getOperand(0), Op.getOperand(1));
13258 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, EltVT, Extract,
13259 DAG.getValueType(VT));
13260 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
13263 if (VT.getSizeInBits() == 32) {
13264 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
13268 // SHUFPS the element to the lowest double word, then movss.
13269 int Mask[4] = { static_cast<int>(Idx), -1, -1, -1 };
13270 MVT VVT = Op.getOperand(0).getSimpleValueType();
13271 SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0),
13272 DAG.getUNDEF(VVT), Mask);
13273 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
13274 DAG.getIntPtrConstant(0));
13277 if (VT.getSizeInBits() == 64) {
13278 // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b
13279 // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught
13280 // to match extract_elt for f64.
13281 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
13285 // UNPCKHPD the element to the lowest double word, then movsd.
13286 // Note if the lower 64 bits of the result of the UNPCKHPD is then stored
13287 // to a f64mem, the whole operation is folded into a single MOVHPDmr.
13288 int Mask[2] = { 1, -1 };
13289 MVT VVT = Op.getOperand(0).getSimpleValueType();
13290 SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0),
13291 DAG.getUNDEF(VVT), Mask);
13292 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
13293 DAG.getIntPtrConstant(0));
13299 /// Insert one bit to mask vector, like v16i1 or v8i1.
13300 /// AVX-512 feature.
13302 X86TargetLowering::InsertBitToMaskVector(SDValue Op, SelectionDAG &DAG) const {
13304 SDValue Vec = Op.getOperand(0);
13305 SDValue Elt = Op.getOperand(1);
13306 SDValue Idx = Op.getOperand(2);
13307 MVT VecVT = Vec.getSimpleValueType();
13309 if (!isa<ConstantSDNode>(Idx)) {
13310 // Non-constant index. Extend source and destination,
13311 // insert element and then truncate the result.
13312 MVT ExtVecVT = (VecVT == MVT::v8i1 ? MVT::v8i64 : MVT::v16i32);
13313 MVT ExtEltVT = (VecVT == MVT::v8i1 ? MVT::i64 : MVT::i32);
13314 SDValue ExtOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ExtVecVT,
13315 DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVecVT, Vec),
13316 DAG.getNode(ISD::ZERO_EXTEND, dl, ExtEltVT, Elt), Idx);
13317 return DAG.getNode(ISD::TRUNCATE, dl, VecVT, ExtOp);
13320 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13321 SDValue EltInVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Elt);
13322 if (Vec.getOpcode() == ISD::UNDEF)
13323 return DAG.getNode(X86ISD::VSHLI, dl, VecVT, EltInVec,
13324 DAG.getConstant(IdxVal, MVT::i8));
13325 const TargetRegisterClass* rc = getRegClassFor(VecVT);
13326 unsigned MaxSift = rc->getSize()*8 - 1;
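// Shift the new bit to the MSB and back down to position IdxVal so only that
// single bit of EltInVec can be set before it is ORed into Vec.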
13327 EltInVec = DAG.getNode(X86ISD::VSHLI, dl, VecVT, EltInVec,
13328 DAG.getConstant(MaxSift, MVT::i8));
13329 EltInVec = DAG.getNode(X86ISD::VSRLI, dl, VecVT, EltInVec,
13330 DAG.getConstant(MaxSift - IdxVal, MVT::i8));
13331 return DAG.getNode(ISD::OR, dl, VecVT, Vec, EltInVec);
13334 SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
13335 SelectionDAG &DAG) const {
13336 MVT VT = Op.getSimpleValueType();
13337 MVT EltVT = VT.getVectorElementType();
13339 if (EltVT == MVT::i1)
13340 return InsertBitToMaskVector(Op, DAG);
13343 SDValue N0 = Op.getOperand(0);
13344 SDValue N1 = Op.getOperand(1);
13345 SDValue N2 = Op.getOperand(2);
13346 if (!isa<ConstantSDNode>(N2))
13348 auto *N2C = cast<ConstantSDNode>(N2);
13349 unsigned IdxVal = N2C->getZExtValue();
13351 // If the vector is wider than 128 bits, extract the 128-bit subvector, insert
13352 // into that, and then insert the subvector back into the result.
13353 if (VT.is256BitVector() || VT.is512BitVector()) {
13354 // Get the desired 128-bit vector half.
13355 SDValue V = Extract128BitVector(N0, IdxVal, DAG, dl);
13357 // Insert the element into the desired half.
13358 unsigned NumEltsIn128 = 128 / EltVT.getSizeInBits();
13359 unsigned IdxIn128 = IdxVal - (IdxVal / NumEltsIn128) * NumEltsIn128;
13361 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, V.getValueType(), V, N1,
13362 DAG.getConstant(IdxIn128, MVT::i32));
13364 // Insert the changed part back to the 256-bit vector
13365 return Insert128BitVector(N0, V, IdxVal, DAG, dl);
13367 assert(VT.is128BitVector() && "Only 128-bit vector types should be left!");
13369 if (Subtarget->hasSSE41()) {
13370 if (EltVT.getSizeInBits() == 8 || EltVT.getSizeInBits() == 16) {
13372 if (VT == MVT::v8i16) {
13373 Opc = X86ISD::PINSRW;
13375 assert(VT == MVT::v16i8);
13376 Opc = X86ISD::PINSRB;
13379 // Transform it so it matches pinsr{b,w}, which expects a GR32 as its second
13381 if (N1.getValueType() != MVT::i32)
13382 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
13383 if (N2.getValueType() != MVT::i32)
13384 N2 = DAG.getIntPtrConstant(IdxVal);
13385 return DAG.getNode(Opc, dl, VT, N0, N1, N2);
13388 if (EltVT == MVT::f32) {
13389 // Bits [7:6] of the constant are the source select. This will always be
13390 // zero here. The DAG Combiner may combine an extract_elt index into
13392 // bits. For example (insert (extract, 3), 2) could be matched by
13394 // the '3' into bits [7:6] of X86ISD::INSERTPS.
13395 // Bits [5:4] of the constant are the destination select. This is the
13396 // value of the incoming immediate.
13397 // Bits [3:0] of the constant are the zero mask. The DAG Combiner may
13398 // combine either bitwise AND or insert of float 0.0 to set these bits.
13399 N2 = DAG.getIntPtrConstant(IdxVal << 4);
13400 // Create this as a scalar to vector.
13401 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
13402 return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1, N2);
13405 if (EltVT == MVT::i32 || EltVT == MVT::i64) {
13406 // PINSR* works with constant index.
13411 if (EltVT == MVT::i8)
13414 if (EltVT.getSizeInBits() == 16) {
13415 // Transform it so it matches pinsrw, which expects a 16-bit value in a GR32
13416 // as its second argument.
13417 if (N1.getValueType() != MVT::i32)
13418 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
13419 if (N2.getValueType() != MVT::i32)
13420 N2 = DAG.getIntPtrConstant(IdxVal);
13421 return DAG.getNode(X86ISD::PINSRW, dl, VT, N0, N1, N2);
13426 static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) {
13428 MVT OpVT = Op.getSimpleValueType();
13430 // If this is a 256-bit vector result, first insert into a 128-bit
13431 // vector and then insert into the 256-bit vector.
13432 if (!OpVT.is128BitVector()) {
13433 // Insert into a 128-bit vector.
13434 unsigned SizeFactor = OpVT.getSizeInBits()/128;
13435 MVT VT128 = MVT::getVectorVT(OpVT.getVectorElementType(),
13436 OpVT.getVectorNumElements() / SizeFactor);
13438 Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT128, Op.getOperand(0));
13440 // Insert the 128-bit vector.
13441 return Insert128BitVector(DAG.getUNDEF(OpVT), Op, 0, DAG, dl);
13444 if (OpVT == MVT::v1i64 &&
13445 Op.getOperand(0).getValueType() == MVT::i64)
13446 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i64, Op.getOperand(0));
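// Otherwise any-extend the scalar to i32, form a v4i32, and bitcast to the
// requested 128-bit type; only element 0 of a SCALAR_TO_VECTOR is defined,
// so the extra high bits introduced by the extension are harmless.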
13448 SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0));
13449 assert(OpVT.is128BitVector() && "Expected an SSE type!");
13450 return DAG.getNode(ISD::BITCAST, dl, OpVT,
13451 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,AnyExt));
13454 // Lower a node with an EXTRACT_SUBVECTOR opcode. This may result in
13455 // a simple subregister reference or explicit instructions to grab
13456 // upper bits of a vector.
13457 static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
13458 SelectionDAG &DAG) {
13460 SDValue In = Op.getOperand(0);
13461 SDValue Idx = Op.getOperand(1);
13462 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13463 MVT ResVT = Op.getSimpleValueType();
13464 MVT InVT = In.getSimpleValueType();
13466 if (Subtarget->hasFp256()) {
13467 if (ResVT.is128BitVector() &&
13468 (InVT.is256BitVector() || InVT.is512BitVector()) &&
13469 isa<ConstantSDNode>(Idx)) {
13470 return Extract128BitVector(In, IdxVal, DAG, dl);
13472 if (ResVT.is256BitVector() && InVT.is512BitVector() &&
13473 isa<ConstantSDNode>(Idx)) {
13474 return Extract256BitVector(In, IdxVal, DAG, dl);
13480 // Lower a node with an INSERT_SUBVECTOR opcode. This may result in a
13481 // simple superregister reference or explicit instructions to insert
13482 // the upper bits of a vector.
13483 static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
13484 SelectionDAG &DAG) {
13485 if (!Subtarget->hasAVX())
13489 SDValue Vec = Op.getOperand(0);
13490 SDValue SubVec = Op.getOperand(1);
13491 SDValue Idx = Op.getOperand(2);
13493 if (!isa<ConstantSDNode>(Idx))
13496 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13497 MVT OpVT = Op.getSimpleValueType();
13498 MVT SubVecVT = SubVec.getSimpleValueType();
13500 // Fold two 16-byte subvector loads into one 32-byte load:
13501 // (insert_subvector (insert_subvector undef, (load addr), 0),
13502 // (load addr + 16), Elts/2)
13504 if ((IdxVal == OpVT.getVectorNumElements() / 2) &&
13505 Vec.getOpcode() == ISD::INSERT_SUBVECTOR &&
13506 OpVT.is256BitVector() && SubVecVT.is128BitVector() &&
13507 !Subtarget->isUnalignedMem32Slow()) {
13508 SDValue SubVec2 = Vec.getOperand(1);
13509 if (auto *Idx2 = dyn_cast<ConstantSDNode>(Vec.getOperand(2))) {
13510 if (Idx2->getZExtValue() == 0) {
13511 SDValue Ops[] = { SubVec2, SubVec };
13512 SDValue LD = EltsFromConsecutiveLoads(OpVT, Ops, dl, DAG, false);
13519 if ((OpVT.is256BitVector() || OpVT.is512BitVector()) &&
13520 SubVecVT.is128BitVector())
13521 return Insert128BitVector(Vec, SubVec, IdxVal, DAG, dl);
13523 if (OpVT.is512BitVector() && SubVecVT.is256BitVector())
13524 return Insert256BitVector(Vec, SubVec, IdxVal, DAG, dl);
13529 // ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
13530 // their target counterpart wrapped in the X86ISD::Wrapper node. Suppose N is
13531 // one of the above-mentioned nodes. It has to be wrapped because otherwise
13532 // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
13533 // be used to form an addressing mode. These wrapped nodes will be selected
13536 X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
13537 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
13539 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
13540 // global base reg.
13541 unsigned char OpFlag = 0;
13542 unsigned WrapperKind = X86ISD::Wrapper;
13543 CodeModel::Model M = DAG.getTarget().getCodeModel();
13545 if (Subtarget->isPICStyleRIPRel() &&
13546 (M == CodeModel::Small || M == CodeModel::Kernel))
13547 WrapperKind = X86ISD::WrapperRIP;
13548 else if (Subtarget->isPICStyleGOT())
13549 OpFlag = X86II::MO_GOTOFF;
13550 else if (Subtarget->isPICStyleStubPIC())
13551 OpFlag = X86II::MO_PIC_BASE_OFFSET;
13553 SDValue Result = DAG.getTargetConstantPool(CP->getConstVal(), getPointerTy(),
13554 CP->getAlignment(),
13555 CP->getOffset(), OpFlag);
13557 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
13558 // With PIC, the address is actually $g + Offset.
13560 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
13561 DAG.getNode(X86ISD::GlobalBaseReg,
13562 SDLoc(), getPointerTy()),
13569 SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
13570 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
13572 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
13573 // global base reg.
13574 unsigned char OpFlag = 0;
13575 unsigned WrapperKind = X86ISD::Wrapper;
13576 CodeModel::Model M = DAG.getTarget().getCodeModel();
13578 if (Subtarget->isPICStyleRIPRel() &&
13579 (M == CodeModel::Small || M == CodeModel::Kernel))
13580 WrapperKind = X86ISD::WrapperRIP;
13581 else if (Subtarget->isPICStyleGOT())
13582 OpFlag = X86II::MO_GOTOFF;
13583 else if (Subtarget->isPICStyleStubPIC())
13584 OpFlag = X86II::MO_PIC_BASE_OFFSET;
13586 SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy(),
13589 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
13591 // With PIC, the address is actually $g + Offset.
13593 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
13594 DAG.getNode(X86ISD::GlobalBaseReg,
13595 SDLoc(), getPointerTy()),
13602 X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const {
13603 const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();
13605 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
13606 // global base reg.
13607 unsigned char OpFlag = 0;
13608 unsigned WrapperKind = X86ISD::Wrapper;
13609 CodeModel::Model M = DAG.getTarget().getCodeModel();
13611 if (Subtarget->isPICStyleRIPRel() &&
13612 (M == CodeModel::Small || M == CodeModel::Kernel)) {
13613 if (Subtarget->isTargetDarwin() || Subtarget->isTargetELF())
13614 OpFlag = X86II::MO_GOTPCREL;
13615 WrapperKind = X86ISD::WrapperRIP;
13616 } else if (Subtarget->isPICStyleGOT()) {
13617 OpFlag = X86II::MO_GOT;
13618 } else if (Subtarget->isPICStyleStubPIC()) {
13619 OpFlag = X86II::MO_DARWIN_NONLAZY_PIC_BASE;
13620 } else if (Subtarget->isPICStyleStubNoDynamic()) {
13621 OpFlag = X86II::MO_DARWIN_NONLAZY;
13624 SDValue Result = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlag);
13627 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
13629 // With PIC, the address is actually $g + Offset.
13630 if (DAG.getTarget().getRelocationModel() == Reloc::PIC_ &&
13631 !Subtarget->is64Bit()) {
13632 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
13633 DAG.getNode(X86ISD::GlobalBaseReg,
13634 SDLoc(), getPointerTy()),
13638 // For symbols that require a load from a stub to get the address, emit the
13640 if (isGlobalStubReference(OpFlag))
13641 Result = DAG.getLoad(getPointerTy(), DL, DAG.getEntryNode(), Result,
13642 MachinePointerInfo::getGOT(), false, false, false, 0);
13648 X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
13649 // Create the TargetBlockAddressAddress node.
13650 unsigned char OpFlags =
13651 Subtarget->ClassifyBlockAddressReference();
13652 CodeModel::Model M = DAG.getTarget().getCodeModel();
13653 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
13654 int64_t Offset = cast<BlockAddressSDNode>(Op)->getOffset();
13656 SDValue Result = DAG.getTargetBlockAddress(BA, getPointerTy(), Offset,
13659 if (Subtarget->isPICStyleRIPRel() &&
13660 (M == CodeModel::Small || M == CodeModel::Kernel))
13661 Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result);
13663 Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result);
13665 // With PIC, the address is actually $g + Offset.
13666 if (isGlobalRelativeToPICBase(OpFlags)) {
13667 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(),
13668 DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()),
13676 X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV, SDLoc dl,
13677 int64_t Offset, SelectionDAG &DAG) const {
13678 // Create the TargetGlobalAddress node, folding in the constant
13679 // offset if it is legal.
13680 unsigned char OpFlags =
13681 Subtarget->ClassifyGlobalReference(GV, DAG.getTarget());
13682 CodeModel::Model M = DAG.getTarget().getCodeModel();
13684 if (OpFlags == X86II::MO_NO_FLAG &&
13685 X86::isOffsetSuitableForCodeModel(Offset, M)) {
13686 // A direct static reference to a global.
13687 Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), Offset);
13690 Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 0, OpFlags);
13693 if (Subtarget->isPICStyleRIPRel() &&
13694 (M == CodeModel::Small || M == CodeModel::Kernel))
13695 Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result);
13697 Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result);
13699 // With PIC, the address is actually $g + Offset.
13700 if (isGlobalRelativeToPICBase(OpFlags)) {
13701 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(),
13702 DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()),
13706 // For globals that require a load from a stub to get the address, emit the
13708 if (isGlobalStubReference(OpFlags))
13709 Result = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Result,
13710 MachinePointerInfo::getGOT(), false, false, false, 0);
13712 // If there was a non-zero offset that we didn't fold, create an explicit
13713 // addition for it.
13715 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(), Result,
13716 DAG.getConstant(Offset, getPointerTy()));
13722 X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
13723 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
13724 int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
13725 return LowerGlobalAddress(GV, SDLoc(Op), Offset, DAG);
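// Emit an X86ISD::TLSADDR (or TLSBASEADDR when LocalDynamic is true) node
// with the given operand flags and copy the computed TLS address out of
// ReturnReg; the node is codegen'ed as a call, hence the MFI bookkeeping.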
13729 GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA,
13730 SDValue *InFlag, const EVT PtrVT, unsigned ReturnReg,
13731 unsigned char OperandFlags, bool LocalDynamic = false) {
13732 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
13733 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
13735 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
13736 GA->getValueType(0),
13740 X86ISD::NodeType CallType = LocalDynamic ? X86ISD::TLSBASEADDR
13744 SDValue Ops[] = { Chain, TGA, *InFlag };
13745 Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
13747 SDValue Ops[] = { Chain, TGA };
13748 Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
13751 // TLSADDR will be codegen'ed as a call. Inform MFI that the function has calls.
13752 MFI->setAdjustsStack(true);
13753 MFI->setHasCalls(true);
13755 SDValue Flag = Chain.getValue(1);
13756 return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Flag);
13759 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit
13761 LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
13764 SDLoc dl(GA); // ? function entry point might be better
13765 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
13766 DAG.getNode(X86ISD::GlobalBaseReg,
13767 SDLoc(), PtrVT), InFlag);
13768 InFlag = Chain.getValue(1);
13770 return GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, X86II::MO_TLSGD);
13773 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit
13775 LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG,
13777 return GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT,
13778 X86::RAX, X86II::MO_TLSGD);
13781 static SDValue LowerToTLSLocalDynamicModel(GlobalAddressSDNode *GA,
13787 // Get the start address of the TLS block for this module.
13788 X86MachineFunctionInfo* MFI = DAG.getMachineFunction()
13789 .getInfo<X86MachineFunctionInfo>();
13790 MFI->incNumLocalDynamicTLSAccesses();
13794 Base = GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT, X86::RAX,
13795 X86II::MO_TLSLD, /*LocalDynamic=*/true);
13798 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
13799 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), InFlag);
13800 InFlag = Chain.getValue(1);
13801 Base = GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX,
13802 X86II::MO_TLSLDM, /*LocalDynamic=*/true);
13805 // Note: the CleanupLocalDynamicTLSPass will remove redundant computations
13809 unsigned char OperandFlags = X86II::MO_DTPOFF;
13810 unsigned WrapperKind = X86ISD::Wrapper;
13811 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
13812 GA->getValueType(0),
13813 GA->getOffset(), OperandFlags);
13814 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
13816 // Add x@dtpoff with the base.
13817 return DAG.getNode(ISD::ADD, dl, PtrVT, Offset, Base);
13820 // Lower ISD::GlobalTLSAddress using the "initial exec" or "local exec" model.
13821 static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
13822 const EVT PtrVT, TLSModel::Model model,
13823 bool is64Bit, bool isPIC) {
13826 // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit).
13827 Value *Ptr = Constant::getNullValue(Type::getInt8PtrTy(*DAG.getContext(),
13828 is64Bit ? 257 : 256));
13830 SDValue ThreadPointer =
13831 DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), DAG.getIntPtrConstant(0),
13832 MachinePointerInfo(Ptr), false, false, false, 0);
13834 unsigned char OperandFlags = 0;
13835 // Most TLS accesses are not RIP relative, even on x86-64. One exception is
13837 unsigned WrapperKind = X86ISD::Wrapper;
13838 if (model == TLSModel::LocalExec) {
13839 OperandFlags = is64Bit ? X86II::MO_TPOFF : X86II::MO_NTPOFF;
13840 } else if (model == TLSModel::InitialExec) {
13842 OperandFlags = X86II::MO_GOTTPOFF;
13843 WrapperKind = X86ISD::WrapperRIP;
13845 OperandFlags = isPIC ? X86II::MO_GOTNTPOFF : X86II::MO_INDNTPOFF;
13848 llvm_unreachable("Unexpected model");
13851 // emit "addl x@ntpoff,%eax" (local exec)
13852 // or "addl x@indntpoff,%eax" (initial exec)
13853 // or "addl x@gotntpoff(%ebx) ,%eax" (initial exec, 32-bit pic)
13855 DAG.getTargetGlobalAddress(GA->getGlobal(), dl, GA->getValueType(0),
13856 GA->getOffset(), OperandFlags);
13857 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
13859 if (model == TLSModel::InitialExec) {
13860 if (isPIC && !is64Bit) {
13861 Offset = DAG.getNode(ISD::ADD, dl, PtrVT,
13862 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT),
13866 Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset,
13867 MachinePointerInfo::getGOT(), false, false, false, 0);
13870 // The address of the thread local variable is the sum of the thread
13871 // pointer and the offset of the variable.
13872 return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
13876 X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
13878 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
13879 const GlobalValue *GV = GA->getGlobal();
13881 if (Subtarget->isTargetELF()) {
13882 TLSModel::Model model = DAG.getTarget().getTLSModel(GV);
13885 case TLSModel::GeneralDynamic:
13886 if (Subtarget->is64Bit())
13887 return LowerToTLSGeneralDynamicModel64(GA, DAG, getPointerTy());
13888 return LowerToTLSGeneralDynamicModel32(GA, DAG, getPointerTy());
13889 case TLSModel::LocalDynamic:
13890 return LowerToTLSLocalDynamicModel(GA, DAG, getPointerTy(),
13891 Subtarget->is64Bit());
13892 case TLSModel::InitialExec:
13893 case TLSModel::LocalExec:
13894 return LowerToTLSExecModel(
13895 GA, DAG, getPointerTy(), model, Subtarget->is64Bit(),
13896 DAG.getTarget().getRelocationModel() == Reloc::PIC_);
13898 llvm_unreachable("Unknown TLS model.");
13901 if (Subtarget->isTargetDarwin()) {
13902 // Darwin only has one model of TLS. Lower to that.
13903 unsigned char OpFlag = 0;
13904 unsigned WrapperKind = Subtarget->isPICStyleRIPRel() ?
13905 X86ISD::WrapperRIP : X86ISD::Wrapper;
13907 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
13908 // global base reg.
13909 bool PIC32 = (DAG.getTarget().getRelocationModel() == Reloc::PIC_) &&
13910 !Subtarget->is64Bit();
13912 OpFlag = X86II::MO_TLVP_PIC_BASE;
13914 OpFlag = X86II::MO_TLVP;
13916 SDValue Result = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
13917 GA->getValueType(0),
13918 GA->getOffset(), OpFlag);
13919 SDValue Offset = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
13921 // With PIC32, the address is actually $g + Offset.
13923 Offset = DAG.getNode(ISD::ADD, DL, getPointerTy(),
13924 DAG.getNode(X86ISD::GlobalBaseReg,
13925 SDLoc(), getPointerTy()),
13928 // Lowering the machine isd will make sure everything is in the right
13930 SDValue Chain = DAG.getEntryNode();
13931 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
13932 SDValue Args[] = { Chain, Offset };
13933 Chain = DAG.getNode(X86ISD::TLSCALL, DL, NodeTys, Args);
13935 // TLSCALL will be codegen'ed as a call. Inform MFI that the function has calls.
13936 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
13937 MFI->setAdjustsStack(true);
13939 // And our return value (tls address) is in the standard call return value
13941 unsigned Reg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
13942 return DAG.getCopyFromReg(Chain, DL, Reg, getPointerTy(),
13943 Chain.getValue(1));
13946 if (Subtarget->isTargetKnownWindowsMSVC() ||
13947 Subtarget->isTargetWindowsGNU()) {
13948 // Just use the implicit TLS architecture
13949 // Need to generate something similar to:
13950 // mov rdx, qword [gs:abs 58H]; Load pointer to ThreadLocalStorage
13952 // mov ecx, dword [rel _tls_index]: Load index (from C runtime)
13953 // mov rcx, qword [rdx+rcx*8]
13954 // mov eax, .tls$:tlsvar
13955 // [rax+rcx] contains the address
13956 // Windows 64bit: gs:0x58
13957 // Windows 32bit: fs:__tls_array
13960 SDValue Chain = DAG.getEntryNode();
13962 // Get the Thread Pointer, which is %fs:__tls_array (32-bit) or
13963 // %gs:0x58 (64-bit). On MinGW, __tls_array is not available, so directly
13964 // use its literal value of 0x2C.
13965 Value *Ptr = Constant::getNullValue(Subtarget->is64Bit()
13966 ? Type::getInt8PtrTy(*DAG.getContext(),
13968 : Type::getInt32PtrTy(*DAG.getContext(),
13972 Subtarget->is64Bit()
13973 ? DAG.getIntPtrConstant(0x58)
13974 : (Subtarget->isTargetWindowsGNU()
13975 ? DAG.getIntPtrConstant(0x2C)
13976 : DAG.getExternalSymbol("_tls_array", getPointerTy()));
13978 SDValue ThreadPointer =
13979 DAG.getLoad(getPointerTy(), dl, Chain, TlsArray,
13980 MachinePointerInfo(Ptr), false, false, false, 0);
13982 // Load the _tls_index variable
13983 SDValue IDX = DAG.getExternalSymbol("_tls_index", getPointerTy());
13984 if (Subtarget->is64Bit())
13985 IDX = DAG.getExtLoad(ISD::ZEXTLOAD, dl, getPointerTy(), Chain,
13986 IDX, MachinePointerInfo(), MVT::i32,
13987 false, false, false, 0);
13989 IDX = DAG.getLoad(getPointerTy(), dl, Chain, IDX, MachinePointerInfo(),
13990 false, false, false, 0);
13992 SDValue Scale = DAG.getConstant(Log2_64_Ceil(TD->getPointerSize()),
13994 IDX = DAG.getNode(ISD::SHL, dl, getPointerTy(), IDX, Scale);
13996 SDValue res = DAG.getNode(ISD::ADD, dl, getPointerTy(), ThreadPointer, IDX);
13997 res = DAG.getLoad(getPointerTy(), dl, Chain, res, MachinePointerInfo(),
13998 false, false, false, 0);
14000 // Get the offset of start of .tls section
14001 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
14002 GA->getValueType(0),
14003 GA->getOffset(), X86II::MO_SECREL);
14004 SDValue Offset = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), TGA);
14006 // The address of the thread local variable is the add of the thread
14007 // pointer with the offset of the variable.
14008 return DAG.getNode(ISD::ADD, dl, getPointerTy(), res, Offset);
14011 llvm_unreachable("TLS not implemented for this target.");
14014 /// LowerShiftParts - Lower SRA_PARTS and friends, which return two i32 values
14015 /// and take a 2 x i32 value to shift plus a shift amount.
14016 static SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) {
14017 assert(Op.getNumOperands() == 3 && "Not a double-shift!");
14018 MVT VT = Op.getSimpleValueType();
14019 unsigned VTBits = VT.getSizeInBits();
14021 bool isSRA = Op.getOpcode() == ISD::SRA_PARTS;
14022 SDValue ShOpLo = Op.getOperand(0);
14023 SDValue ShOpHi = Op.getOperand(1);
14024 SDValue ShAmt = Op.getOperand(2);
14025 // X86ISD::SHLD and X86ISD::SHRD have defined overflow behavior but the
14026 // generic ISD nodes haven't. Insert an AND to be safe; it's optimized away anyway.
14028 SDValue SafeShAmt = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
14029 DAG.getConstant(VTBits - 1, MVT::i8));
14030 SDValue Tmp1 = isSRA ? DAG.getNode(ISD::SRA, dl, VT, ShOpHi,
14031 DAG.getConstant(VTBits - 1, MVT::i8))
14032 : DAG.getConstant(0, VT);
14034 SDValue Tmp2, Tmp3;
14035 if (Op.getOpcode() == ISD::SHL_PARTS) {
14036 Tmp2 = DAG.getNode(X86ISD::SHLD, dl, VT, ShOpHi, ShOpLo, ShAmt);
14037 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, SafeShAmt);
14039 Tmp2 = DAG.getNode(X86ISD::SHRD, dl, VT, ShOpLo, ShOpHi, ShAmt);
14040 Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, dl, VT, ShOpHi, SafeShAmt);
14043 // If the shift amount is greater than or equal to the width of a part, we can't
14044 // rely on the results of shld/shrd. Insert a test and select the appropriate
14045 // values for large shift amounts.
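// For example, a 64-bit SRL_PARTS of (Lo,Hi) by Amt reduces to (sketch):
//   if ((Amt & 32) == 0) { Lo = shrd(Lo, Hi, Amt); Hi = Hi >> Amt; }
//   else                 { Lo = Hi >> (Amt & 31);  Hi = 0; /* sign-fill for SRA */ }
// The CMOVs built below select between these two cases based on Amt & 32.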
14046 SDValue AndNode = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
14047 DAG.getConstant(VTBits, MVT::i8));
14048 SDValue Cond = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
14049 AndNode, DAG.getConstant(0, MVT::i8));
14052 SDValue CC = DAG.getConstant(X86::COND_NE, MVT::i8);
14053 SDValue Ops0[4] = { Tmp2, Tmp3, CC, Cond };
14054 SDValue Ops1[4] = { Tmp3, Tmp1, CC, Cond };
14056 if (Op.getOpcode() == ISD::SHL_PARTS) {
14057 Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0);
14058 Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1);
14060 Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0);
14061 Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1);
14064 SDValue Ops[2] = { Lo, Hi };
14065 return DAG.getMergeValues(Ops, dl);
14068 SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op,
14069 SelectionDAG &DAG) const {
14070 MVT SrcVT = Op.getOperand(0).getSimpleValueType();
14073 if (SrcVT.isVector()) {
14074 if (SrcVT.getVectorElementType() == MVT::i1) {
14075 MVT IntegerVT = MVT::getVectorVT(MVT::i32, SrcVT.getVectorNumElements());
14076 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(),
14077 DAG.getNode(ISD::SIGN_EXTEND, dl, IntegerVT,
14078 Op.getOperand(0)));
14083 assert(SrcVT <= MVT::i64 && SrcVT >= MVT::i16 &&
14084 "Unknown SINT_TO_FP to lower!");
14086 // These are really Legal; return the operand so the caller accepts it as Legal.
14088 if (SrcVT == MVT::i32 && isScalarFPTypeInSSEReg(Op.getValueType()))
14090 if (SrcVT == MVT::i64 && isScalarFPTypeInSSEReg(Op.getValueType()) &&
14091 Subtarget->is64Bit()) {
14095 unsigned Size = SrcVT.getSizeInBits()/8;
14096 MachineFunction &MF = DAG.getMachineFunction();
14097 int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size, false);
14098 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
14099 SDValue Chain = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
14101 MachinePointerInfo::getFixedStack(SSFI),
14103 return BuildFILD(Op, SrcVT, Chain, StackSlot, DAG);
14106 SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain,
14108 SelectionDAG &DAG) const {
14112 bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType());
14114 Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Glue);
14116 Tys = DAG.getVTList(Op.getValueType(), MVT::Other);
14118 unsigned ByteSize = SrcVT.getSizeInBits()/8;
14120 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(StackSlot);
14121 MachineMemOperand *MMO;
14123 int SSFI = FI->getIndex();
14125 DAG.getMachineFunction()
14126 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14127 MachineMemOperand::MOLoad, ByteSize, ByteSize);
14129 MMO = cast<LoadSDNode>(StackSlot)->getMemOperand();
14130 StackSlot = StackSlot.getOperand(1);
14132 SDValue Ops[] = { Chain, StackSlot, DAG.getValueType(SrcVT) };
14133 SDValue Result = DAG.getMemIntrinsicNode(useSSE ? X86ISD::FILD_FLAG :
14135 Tys, Ops, SrcVT, MMO);
14138 Chain = Result.getValue(1);
14139 SDValue InFlag = Result.getValue(2);
14141 // FIXME: Currently the FST is flagged to the FILD_FLAG. This
14142 // shouldn't be necessary except that RFP cannot be live across
14143 // multiple blocks. When stackifier is fixed, they can be uncoupled.
14144 MachineFunction &MF = DAG.getMachineFunction();
14145 unsigned SSFISize = Op.getValueType().getSizeInBits()/8;
14146 int SSFI = MF.getFrameInfo()->CreateStackObject(SSFISize, SSFISize, false);
14147 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
14148 Tys = DAG.getVTList(MVT::Other);
14150 Chain, Result, StackSlot, DAG.getValueType(Op.getValueType()), InFlag
14152 MachineMemOperand *MMO =
14153 DAG.getMachineFunction()
14154 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14155 MachineMemOperand::MOStore, SSFISize, SSFISize);
14157 Chain = DAG.getMemIntrinsicNode(X86ISD::FST, DL, Tys,
14158 Ops, Op.getValueType(), MMO);
14159 Result = DAG.getLoad(Op.getValueType(), DL, Chain, StackSlot,
14160 MachinePointerInfo::getFixedStack(SSFI),
14161 false, false, false, 0);
14167 // LowerUINT_TO_FP_i64 - 64-bit unsigned integer to double expansion.
14168 SDValue X86TargetLowering::LowerUINT_TO_FP_i64(SDValue Op,
14169 SelectionDAG &DAG) const {
14170 // This algorithm is not obvious. Here is what we're trying to output:
14173 //   punpckldq (c0), %xmm0  // c0: (uint4){ 0x43300000U, 0x45300000U, 0U, 0U }
14174 //   subpd     (c1), %xmm0  // c1: (double2){ 0x1.0p52, 0x1.0p52 * 0x1.0p32 }
14176 //   haddpd    %xmm0, %xmm0           // if SSE3 is available,
14178 //   pshufd    $0x4e, %xmm0, %xmm1    // otherwise: shuffle and addpd
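// In essence: interleaving the two 32-bit halves of the input with the high
// words of 2^52 and 2^84 yields the doubles (2^52 + lo) and (2^84 + hi*2^32).
// Subtracting {2^52, 2^84} leaves exactly lo and hi*2^32, and the final
// (horizontal) add produces the unsigned value with a single rounding.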
14184 LLVMContext *Context = DAG.getContext();
14186 // Build some magic constants.
14187 static const uint32_t CV0[] = { 0x43300000, 0x45300000, 0, 0 };
14188 Constant *C0 = ConstantDataVector::get(*Context, CV0);
14189 SDValue CPIdx0 = DAG.getConstantPool(C0, getPointerTy(), 16);
14191 SmallVector<Constant*,2> CV1;
14193 ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble,
14194 APInt(64, 0x4330000000000000ULL))));
14196 ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble,
14197 APInt(64, 0x4530000000000000ULL))));
14198 Constant *C1 = ConstantVector::get(CV1);
14199 SDValue CPIdx1 = DAG.getConstantPool(C1, getPointerTy(), 16);
14201 // Load the 64-bit value into an XMM register.
14202 SDValue XR1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
14204 SDValue CLod0 = DAG.getLoad(MVT::v4i32, dl, DAG.getEntryNode(), CPIdx0,
14205 MachinePointerInfo::getConstantPool(),
14206 false, false, false, 16);
14207 SDValue Unpck1 = getUnpackl(DAG, dl, MVT::v4i32,
14208 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, XR1),
14211 SDValue CLod1 = DAG.getLoad(MVT::v2f64, dl, CLod0.getValue(1), CPIdx1,
14212 MachinePointerInfo::getConstantPool(),
14213 false, false, false, 16);
14214 SDValue XR2F = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Unpck1);
14215 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, XR2F, CLod1);
14218 if (Subtarget->hasSSE3()) {
14219 // FIXME: The 'haddpd' instruction may be slower than 'movhlps + addsd'.
14220 Result = DAG.getNode(X86ISD::FHADD, dl, MVT::v2f64, Sub, Sub);
14222 SDValue S2F = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Sub);
14223 SDValue Shuffle = getTargetShuffleNode(X86ISD::PSHUFD, dl, MVT::v4i32,
14225 Result = DAG.getNode(ISD::FADD, dl, MVT::v2f64,
14226 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Shuffle),
14230 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Result,
14231 DAG.getIntPtrConstant(0));
14234 // LowerUINT_TO_FP_i32 - 32-bit unsigned integer to float expansion.
14235 SDValue X86TargetLowering::LowerUINT_TO_FP_i32(SDValue Op,
14236 SelectionDAG &DAG) const {
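// The trick used here, in outline: construct the double whose bit pattern is
//   0x4330000000000000 | zext(x)     // i.e. exactly 2^52 + x
// and subtract 2^52 from it; the difference is exactly (double)x, which is
// then rounded or extended to the destination type below.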
14238 // FP constant to bias correct the final result.
14239 SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL),
14242 // Load the 32-bit value into an XMM register.
14243 SDValue Load = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,
14246 // Zero out the upper parts of the register.
14247 Load = getShuffleVectorZeroOrUndef(Load, 0, true, Subtarget, DAG);
14249 Load = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
14250 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Load),
14251 DAG.getIntPtrConstant(0));
14253 // Or the load with the bias.
14254 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64,
14255 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
14256 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
14257 MVT::v2f64, Load)),
14258 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
14259 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
14260 MVT::v2f64, Bias)));
14261 Or = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
14262 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Or),
14263 DAG.getIntPtrConstant(0));
14265 // Subtract the bias.
14266 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Or, Bias);
14268 // Handle final rounding.
14269 EVT DestVT = Op.getValueType();
14271 if (DestVT.bitsLT(MVT::f64))
14272 return DAG.getNode(ISD::FP_ROUND, dl, DestVT, Sub,
14273 DAG.getIntPtrConstant(0));
14274 if (DestVT.bitsGT(MVT::f64))
14275 return DAG.getNode(ISD::FP_EXTEND, dl, DestVT, Sub);
14277 // Handle final rounding.
14281 static SDValue lowerUINT_TO_FP_vXi32(SDValue Op, SelectionDAG &DAG,
14282 const X86Subtarget &Subtarget) {
14283 // The algorithm is the following:
14284 // #ifdef __SSE4_1__
14285 // uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
14286 // uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
14287 // (uint4) 0x53000000, 0xaa);
14289 // uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
14290 // uint4 hi = (v >> 16) | (uint4) 0x53000000;
14292 // float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
14293 // return (float4) lo + fhi;
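// Why this works (sketch): 0x4b000000 is 2^23 as a float and 0x53000000 is
// 2^39, so the lo/hi values above are exactly 2^23 + (v & 0xffff) and
// 2^39 + (v >> 16) * 2^16.  The constant added to hi below is -(2^39 + 2^23),
// so lo + fhi collapses back to the original unsigned value v.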
14296 SDValue V = Op->getOperand(0);
14297 EVT VecIntVT = V.getValueType();
14298 bool Is128 = VecIntVT == MVT::v4i32;
14299 EVT VecFloatVT = Is128 ? MVT::v4f32 : MVT::v8f32;
14300 // If we convert to something other than the supported type, e.g., to v4f64, bail out early.
14302 if (VecFloatVT != Op->getValueType(0))
14305 unsigned NumElts = VecIntVT.getVectorNumElements();
14306 assert((VecIntVT == MVT::v4i32 || VecIntVT == MVT::v8i32) &&
14307 "Unsupported custom type");
14308 assert(NumElts <= 8 && "The size of the constant array must be fixed");
14310 // In the #ifdef/#else code, we have in common:
14311 // - The vector of constants:
14317 // Create the splat vector for 0x4b000000.
14318 SDValue CstLow = DAG.getConstant(0x4b000000, MVT::i32);
14319 SDValue CstLowArray[] = {CstLow, CstLow, CstLow, CstLow,
14320 CstLow, CstLow, CstLow, CstLow};
14321 SDValue VecCstLow = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
14322 makeArrayRef(&CstLowArray[0], NumElts));
14323 // Create the splat vector for 0x53000000.
14324 SDValue CstHigh = DAG.getConstant(0x53000000, MVT::i32);
14325 SDValue CstHighArray[] = {CstHigh, CstHigh, CstHigh, CstHigh,
14326 CstHigh, CstHigh, CstHigh, CstHigh};
14327 SDValue VecCstHigh = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
14328 makeArrayRef(&CstHighArray[0], NumElts));
14330 // Create the right shift.
14331 SDValue CstShift = DAG.getConstant(16, MVT::i32);
14332 SDValue CstShiftArray[] = {CstShift, CstShift, CstShift, CstShift,
14333 CstShift, CstShift, CstShift, CstShift};
14334 SDValue VecCstShift = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
14335 makeArrayRef(&CstShiftArray[0], NumElts));
14336 SDValue HighShift = DAG.getNode(ISD::SRL, DL, VecIntVT, V, VecCstShift);
14339 if (Subtarget.hasSSE41()) {
14340 EVT VecI16VT = Is128 ? MVT::v8i16 : MVT::v16i16;
14341 // uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
14342 SDValue VecCstLowBitcast =
14343 DAG.getNode(ISD::BITCAST, DL, VecI16VT, VecCstLow);
14344 SDValue VecBitcast = DAG.getNode(ISD::BITCAST, DL, VecI16VT, V);
14345 // Low will be bitcasted right away, so do not bother bitcasting back to its original type.
14347 Low = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecBitcast,
14348 VecCstLowBitcast, DAG.getConstant(0xaa, MVT::i32));
14349 // uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
14350 // (uint4) 0x53000000, 0xaa);
14351 SDValue VecCstHighBitcast =
14352 DAG.getNode(ISD::BITCAST, DL, VecI16VT, VecCstHigh);
14353 SDValue VecShiftBitcast =
14354 DAG.getNode(ISD::BITCAST, DL, VecI16VT, HighShift);
14355 // High will be bitcasted right away, so do not bother bitcasting back to
14356 // its original type.
14357 High = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecShiftBitcast,
14358 VecCstHighBitcast, DAG.getConstant(0xaa, MVT::i32));
14360 SDValue CstMask = DAG.getConstant(0xffff, MVT::i32);
14361 SDValue VecCstMask = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT, CstMask,
14362 CstMask, CstMask, CstMask);
14363 // uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
14364 SDValue LowAnd = DAG.getNode(ISD::AND, DL, VecIntVT, V, VecCstMask);
14365 Low = DAG.getNode(ISD::OR, DL, VecIntVT, LowAnd, VecCstLow);
14367 // uint4 hi = (v >> 16) | (uint4) 0x53000000;
14368 High = DAG.getNode(ISD::OR, DL, VecIntVT, HighShift, VecCstHigh);
14371 // Create the vector constant for -(0x1.0p39f + 0x1.0p23f).
14372 SDValue CstFAdd = DAG.getConstantFP(
14373 APFloat(APFloat::IEEEsingle, APInt(32, 0xD3000080)), MVT::f32);
14374 SDValue CstFAddArray[] = {CstFAdd, CstFAdd, CstFAdd, CstFAdd,
14375 CstFAdd, CstFAdd, CstFAdd, CstFAdd};
14376 SDValue VecCstFAdd = DAG.getNode(ISD::BUILD_VECTOR, DL, VecFloatVT,
14377 makeArrayRef(&CstFAddArray[0], NumElts));
14379 // float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
14380 SDValue HighBitcast = DAG.getNode(ISD::BITCAST, DL, VecFloatVT, High);
14382 DAG.getNode(ISD::FADD, DL, VecFloatVT, HighBitcast, VecCstFAdd);
14383 // return (float4) lo + fhi;
14384 SDValue LowBitcast = DAG.getNode(ISD::BITCAST, DL, VecFloatVT, Low);
14385 return DAG.getNode(ISD::FADD, DL, VecFloatVT, LowBitcast, FHigh);
14388 SDValue X86TargetLowering::lowerUINT_TO_FP_vec(SDValue Op,
14389 SelectionDAG &DAG) const {
14390 SDValue N0 = Op.getOperand(0);
14391 MVT SVT = N0.getSimpleValueType();
14394 switch (SVT.SimpleTy) {
14396 llvm_unreachable("Custom UINT_TO_FP is not supported!");
14401 MVT NVT = MVT::getVectorVT(MVT::i32, SVT.getVectorNumElements());
14402 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(),
14403 DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, N0));
14407 return lowerUINT_TO_FP_vXi32(Op, DAG, *Subtarget);
14409 llvm_unreachable(nullptr);
14412 SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
14413 SelectionDAG &DAG) const {
14414 SDValue N0 = Op.getOperand(0);
14417 if (Op.getValueType().isVector())
14418 return lowerUINT_TO_FP_vec(Op, DAG);
14420 // Since UINT_TO_FP is legal (it's marked custom), dag combiner won't
14421 // optimize it to a SINT_TO_FP when the sign bit is known zero. Perform
14422 // the optimization here.
14423 if (DAG.SignBitIsZero(N0))
14424 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(), N0);
14426 MVT SrcVT = N0.getSimpleValueType();
14427 MVT DstVT = Op.getSimpleValueType();
14428 if (SrcVT == MVT::i64 && DstVT == MVT::f64 && X86ScalarSSEf64)
14429 return LowerUINT_TO_FP_i64(Op, DAG);
14430 if (SrcVT == MVT::i32 && X86ScalarSSEf64)
14431 return LowerUINT_TO_FP_i32(Op, DAG);
14432 if (Subtarget->is64Bit() && SrcVT == MVT::i64 && DstVT == MVT::f32)
14435 // Make a 64-bit buffer, and use it to build an FILD.
14436 SDValue StackSlot = DAG.CreateStackTemporary(MVT::i64);
14437 if (SrcVT == MVT::i32) {
14438 SDValue WordOff = DAG.getConstant(4, getPointerTy());
14439 SDValue OffsetSlot = DAG.getNode(ISD::ADD, dl,
14440 getPointerTy(), StackSlot, WordOff);
14441 SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
14442 StackSlot, MachinePointerInfo(),
14444 SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, MVT::i32),
14445 OffsetSlot, MachinePointerInfo(),
14447 SDValue Fild = BuildFILD(Op, MVT::i64, Store2, StackSlot, DAG);
14451 assert(SrcVT == MVT::i64 && "Unexpected type in UINT_TO_FP");
14452 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
14453 StackSlot, MachinePointerInfo(),
14455 // For i64 source, we need to add the appropriate power of 2 if the input
14456 // was negative. This is the same as the optimization in
14457 // DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP, and for it to be safe here,
14458 // we must be careful to do the computation in x87 extended precision, not
14459 // in SSE. (The generic code can't know it's OK to do this, or how to.)
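// Concretely: FILD reads the slot as a signed i64, so an input with the top
// bit set is converted as (x - 2^64).  The 0x5F800000 constant below is 2^64
// as an IEEE single; it is added back (exactly, in x87 f80 precision) only
// when the sign bit of the original value was set.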
14460 int SSFI = cast<FrameIndexSDNode>(StackSlot)->getIndex();
14461 MachineMemOperand *MMO =
14462 DAG.getMachineFunction()
14463 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14464 MachineMemOperand::MOLoad, 8, 8);
14466 SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
14467 SDValue Ops[] = { Store, StackSlot, DAG.getValueType(MVT::i64) };
14468 SDValue Fild = DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, Ops,
14471 APInt FF(32, 0x5F800000ULL);
14473 // Check whether the sign bit is set.
14474 SDValue SignSet = DAG.getSetCC(dl,
14475 getSetCCResultType(*DAG.getContext(), MVT::i64),
14476 Op.getOperand(0), DAG.getConstant(0, MVT::i64),
14479 // Build a 64 bit pair (0, FF) in the constant pool, with FF in the lo bits.
14480 SDValue FudgePtr = DAG.getConstantPool(
14481 ConstantInt::get(*DAG.getContext(), FF.zext(64)),
14484 // Get a pointer to FF if the sign bit was set, or to 0 otherwise.
14485 SDValue Zero = DAG.getIntPtrConstant(0);
14486 SDValue Four = DAG.getIntPtrConstant(4);
14487 SDValue Offset = DAG.getNode(ISD::SELECT, dl, Zero.getValueType(), SignSet,
14489 FudgePtr = DAG.getNode(ISD::ADD, dl, getPointerTy(), FudgePtr, Offset);
14491 // Load the value out, extending it from f32 to f80.
14492 // FIXME: Avoid the extend by constructing the right constant pool?
14493 SDValue Fudge = DAG.getExtLoad(ISD::EXTLOAD, dl, MVT::f80, DAG.getEntryNode(),
14494 FudgePtr, MachinePointerInfo::getConstantPool(),
14495 MVT::f32, false, false, false, 4);
14496 // Extend everything to 80 bits to force it to be done on x87.
14497 SDValue Add = DAG.getNode(ISD::FADD, dl, MVT::f80, Fild, Fudge);
14498 return DAG.getNode(ISD::FP_ROUND, dl, DstVT, Add, DAG.getIntPtrConstant(0));
14501 std::pair<SDValue,SDValue>
14502 X86TargetLowering:: FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
14503 bool IsSigned, bool IsReplace) const {
14506 EVT DstTy = Op.getValueType();
14508 if (!IsSigned && !isIntegerTypeFTOL(DstTy)) {
14509 assert(DstTy == MVT::i32 && "Unexpected FP_TO_UINT");
14513 assert(DstTy.getSimpleVT() <= MVT::i64 &&
14514 DstTy.getSimpleVT() >= MVT::i16 &&
14515 "Unknown FP_TO_INT to lower!");
14517 // These are really Legal.
14518 if (DstTy == MVT::i32 &&
14519 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
14520 return std::make_pair(SDValue(), SDValue());
14521 if (Subtarget->is64Bit() &&
14522 DstTy == MVT::i64 &&
14523 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
14524 return std::make_pair(SDValue(), SDValue());
14526 // We lower FP->int64 either into FISTP64 followed by a load from a temporary
14527 // stack slot, or into the FTOL runtime function.
14528 MachineFunction &MF = DAG.getMachineFunction();
14529 unsigned MemSize = DstTy.getSizeInBits()/8;
14530 int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false);
14531 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
14534 if (!IsSigned && isIntegerTypeFTOL(DstTy))
14535 Opc = X86ISD::WIN_FTOL;
14537 switch (DstTy.getSimpleVT().SimpleTy) {
14538 default: llvm_unreachable("Invalid FP_TO_SINT to lower!");
14539 case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break;
14540 case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break;
14541 case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break;
14544 SDValue Chain = DAG.getEntryNode();
14545 SDValue Value = Op.getOperand(0);
14546 EVT TheVT = Op.getOperand(0).getValueType();
14547 // FIXME This causes a redundant load/store if the SSE-class value is already
14548 // in memory, such as if it is on the callstack.
14549 if (isScalarFPTypeInSSEReg(TheVT)) {
14550 assert(DstTy == MVT::i64 && "Invalid FP_TO_SINT to lower!");
14551 Chain = DAG.getStore(Chain, DL, Value, StackSlot,
14552 MachinePointerInfo::getFixedStack(SSFI),
14554 SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other);
14556 Chain, StackSlot, DAG.getValueType(TheVT)
14559 MachineMemOperand *MMO =
14560 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14561 MachineMemOperand::MOLoad, MemSize, MemSize);
14562 Value = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, DstTy, MMO);
14563 Chain = Value.getValue(1);
14564 SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false);
14565 StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
14568 MachineMemOperand *MMO =
14569 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14570 MachineMemOperand::MOStore, MemSize, MemSize);
14572 if (Opc != X86ISD::WIN_FTOL) {
14573 // Build the FP_TO_INT*_IN_MEM
14574 SDValue Ops[] = { Chain, Value, StackSlot };
14575 SDValue FIST = DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::Other),
14577 return std::make_pair(FIST, StackSlot);
14579 SDValue ftol = DAG.getNode(X86ISD::WIN_FTOL, DL,
14580 DAG.getVTList(MVT::Other, MVT::Glue),
14582 SDValue eax = DAG.getCopyFromReg(ftol, DL, X86::EAX,
14583 MVT::i32, ftol.getValue(1));
14584 SDValue edx = DAG.getCopyFromReg(eax.getValue(1), DL, X86::EDX,
14585 MVT::i32, eax.getValue(2));
14586 SDValue Ops[] = { eax, edx };
14587 SDValue pair = IsReplace
14588 ? DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops)
14589 : DAG.getMergeValues(Ops, DL);
14590 return std::make_pair(pair, SDValue());
14594 static SDValue LowerAVXExtend(SDValue Op, SelectionDAG &DAG,
14595 const X86Subtarget *Subtarget) {
14596 MVT VT = Op->getSimpleValueType(0);
14597 SDValue In = Op->getOperand(0);
14598 MVT InVT = In.getSimpleValueType();
14601 // Optimize vectors in AVX mode:
14604 // Use vpunpcklwd for 4 lower elements v8i16 -> v4i32.
14605 // Use vpunpckhwd for 4 upper elements v8i16 -> v4i32.
14606 // Concat upper and lower parts.
14609 // Use vpunpckldq for 4 lower elements v4i32 -> v2i64.
14610 // Use vpunpckhdq for 4 upper elements v4i32 -> v2i64.
14611 // Concat upper and lower parts.
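// For example, zero-extending v8i16 -> v8i32 without AVX2 (sketch):
//   OpLo = vpunpcklwd(In, Zero)   // low  four i16 elements, each paired with 0
//   OpHi = vpunpckhwd(In, Zero)   // high four i16 elements, each paired with 0
// Bitcasting each half to v4i32 and concatenating gives the v8i32 result;
// ANY_EXTEND interleaves an undef vector instead of zeros.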
14614 if (((VT != MVT::v16i16) || (InVT != MVT::v16i8)) &&
14615 ((VT != MVT::v8i32) || (InVT != MVT::v8i16)) &&
14616 ((VT != MVT::v4i64) || (InVT != MVT::v4i32)))
14619 if (Subtarget->hasInt256())
14620 return DAG.getNode(X86ISD::VZEXT, dl, VT, In);
14622 SDValue ZeroVec = getZeroVector(InVT, Subtarget, DAG, dl);
14623 SDValue Undef = DAG.getUNDEF(InVT);
14624 bool NeedZero = Op.getOpcode() == ISD::ZERO_EXTEND;
14625 SDValue OpLo = getUnpackl(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);
14626 SDValue OpHi = getUnpackh(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);
14628 MVT HVT = MVT::getVectorVT(VT.getVectorElementType(),
14629 VT.getVectorNumElements()/2);
14631 OpLo = DAG.getNode(ISD::BITCAST, dl, HVT, OpLo);
14632 OpHi = DAG.getNode(ISD::BITCAST, dl, HVT, OpHi);
14634 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
14637 static SDValue LowerZERO_EXTEND_AVX512(SDValue Op,
14638 SelectionDAG &DAG) {
14639 MVT VT = Op->getSimpleValueType(0);
14640 SDValue In = Op->getOperand(0);
14641 MVT InVT = In.getSimpleValueType();
14643 unsigned int NumElts = VT.getVectorNumElements();
14644 if (NumElts != 8 && NumElts != 16)
14647 if (VT.is512BitVector() && InVT.getVectorElementType() != MVT::i1)
14648 return DAG.getNode(X86ISD::VZEXT, DL, VT, In);
14650 EVT ExtVT = (NumElts == 8)? MVT::v8i64 : MVT::v16i32;
14651 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
14652 // Now we have only mask extension
14653 assert(InVT.getVectorElementType() == MVT::i1);
14654 SDValue Cst = DAG.getTargetConstant(1, ExtVT.getScalarType());
14655 const Constant *C = (dyn_cast<ConstantSDNode>(Cst))->getConstantIntValue();
14656 SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy());
14657 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
14658 SDValue Ld = DAG.getLoad(Cst.getValueType(), DL, DAG.getEntryNode(), CP,
14659 MachinePointerInfo::getConstantPool(),
14660 false, false, false, Alignment);
14662 SDValue Brcst = DAG.getNode(X86ISD::VBROADCASTM, DL, ExtVT, In, Ld);
14663 if (VT.is512BitVector())
14665 return DAG.getNode(X86ISD::VTRUNC, DL, VT, Brcst);
14668 static SDValue LowerANY_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
14669 SelectionDAG &DAG) {
14670 if (Subtarget->hasFp256()) {
14671 SDValue Res = LowerAVXExtend(Op, DAG, Subtarget);
14679 static SDValue LowerZERO_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
14680 SelectionDAG &DAG) {
14682 MVT VT = Op.getSimpleValueType();
14683 SDValue In = Op.getOperand(0);
14684 MVT SVT = In.getSimpleValueType();
14686 if (VT.is512BitVector() || SVT.getVectorElementType() == MVT::i1)
14687 return LowerZERO_EXTEND_AVX512(Op, DAG);
14689 if (Subtarget->hasFp256()) {
14690 SDValue Res = LowerAVXExtend(Op, DAG, Subtarget);
14695 assert(!VT.is256BitVector() || !SVT.is128BitVector() ||
14696 VT.getVectorNumElements() != SVT.getVectorNumElements());
14700 SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
14702 MVT VT = Op.getSimpleValueType();
14703 SDValue In = Op.getOperand(0);
14704 MVT InVT = In.getSimpleValueType();
14706 if (VT == MVT::i1) {
14707 assert((InVT.isInteger() && (InVT.getSizeInBits() <= 64)) &&
14708 "Invalid scalar TRUNCATE operation");
14709 if (InVT.getSizeInBits() >= 32)
14711 In = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, In);
14712 return DAG.getNode(ISD::TRUNCATE, DL, VT, In);
14714 assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
14715 "Invalid TRUNCATE operation");
14717 if (InVT.is512BitVector() || VT.getVectorElementType() == MVT::i1) {
14718 if (VT.getVectorElementType().getSizeInBits() >=8)
14719 return DAG.getNode(X86ISD::VTRUNC, DL, VT, In);
14721 assert(VT.getVectorElementType() == MVT::i1 && "Unexpected vector type");
14722 unsigned NumElts = InVT.getVectorNumElements();
14723 assert ((NumElts == 8 || NumElts == 16) && "Unexpected vector type");
14724 if (InVT.getSizeInBits() < 512) {
14725 MVT ExtVT = (NumElts == 16)? MVT::v16i32 : MVT::v8i64;
14726 In = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, In);
14730 SDValue Cst = DAG.getTargetConstant(1, InVT.getVectorElementType());
14731 const Constant *C = (dyn_cast<ConstantSDNode>(Cst))->getConstantIntValue();
14732 SDValue CP = DAG.getConstantPool(C, getPointerTy());
14733 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
14734 SDValue Ld = DAG.getLoad(Cst.getValueType(), DL, DAG.getEntryNode(), CP,
14735 MachinePointerInfo::getConstantPool(),
14736 false, false, false, Alignment);
14737 SDValue OneV = DAG.getNode(X86ISD::VBROADCAST, DL, InVT, Ld);
14738 SDValue And = DAG.getNode(ISD::AND, DL, InVT, OneV, In);
14739 return DAG.getNode(X86ISD::TESTM, DL, VT, And, And);
14742 if ((VT == MVT::v4i32) && (InVT == MVT::v4i64)) {
14743 // On AVX2, v4i64 -> v4i32 becomes VPERMD.
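// Viewed as v8i32, the low 32 bits of each i64 live at the even indices, so a
// VPERMD with indices {0,2,4,6} gathers the four truncated values into the
// low 128 bits, which are then extracted below.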
14744 if (Subtarget->hasInt256()) {
14745 static const int ShufMask[] = {0, 2, 4, 6, -1, -1, -1, -1};
14746 In = DAG.getNode(ISD::BITCAST, DL, MVT::v8i32, In);
14747 In = DAG.getVectorShuffle(MVT::v8i32, DL, In, DAG.getUNDEF(MVT::v8i32),
14749 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, In,
14750 DAG.getIntPtrConstant(0));
14753 SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
14754 DAG.getIntPtrConstant(0));
14755 SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
14756 DAG.getIntPtrConstant(2));
14757 OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpLo);
14758 OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpHi);
14759 static const int ShufMask[] = {0, 2, 4, 6};
14760 return DAG.getVectorShuffle(VT, DL, OpLo, OpHi, ShufMask);
14763 if ((VT == MVT::v8i16) && (InVT == MVT::v8i32)) {
14764 // On AVX2, v8i32 -> v8i16 becomes PSHUFB.
14765 if (Subtarget->hasInt256()) {
14766 In = DAG.getNode(ISD::BITCAST, DL, MVT::v32i8, In);
14768 SmallVector<SDValue,32> pshufbMask;
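// Build a per-128-bit-lane PSHUFB mask: within each lane, bytes
// {0,1,4,5,8,9,12,13} move the low 16 bits of each i32 into the lane's low
// 8 bytes, and 0x80 zeroes the remaining bytes.  The v4i64 shuffle {0,2}
// afterwards packs both lanes' results into the low 128 bits.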
14769 for (unsigned i = 0; i < 2; ++i) {
14770 pshufbMask.push_back(DAG.getConstant(0x0, MVT::i8));
14771 pshufbMask.push_back(DAG.getConstant(0x1, MVT::i8));
14772 pshufbMask.push_back(DAG.getConstant(0x4, MVT::i8));
14773 pshufbMask.push_back(DAG.getConstant(0x5, MVT::i8));
14774 pshufbMask.push_back(DAG.getConstant(0x8, MVT::i8));
14775 pshufbMask.push_back(DAG.getConstant(0x9, MVT::i8));
14776 pshufbMask.push_back(DAG.getConstant(0xc, MVT::i8));
14777 pshufbMask.push_back(DAG.getConstant(0xd, MVT::i8));
14778 for (unsigned j = 0; j < 8; ++j)
14779 pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8));
14781 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, pshufbMask);
14782 In = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v32i8, In, BV);
14783 In = DAG.getNode(ISD::BITCAST, DL, MVT::v4i64, In);
14785 static const int ShufMask[] = {0, 2, -1, -1};
14786 In = DAG.getVectorShuffle(MVT::v4i64, DL, In, DAG.getUNDEF(MVT::v4i64),
14788 In = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
14789 DAG.getIntPtrConstant(0));
14790 return DAG.getNode(ISD::BITCAST, DL, VT, In);
14793 SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
14794 DAG.getIntPtrConstant(0));
14796 SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
14797 DAG.getIntPtrConstant(4));
14799 OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, OpLo);
14800 OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, OpHi);
14802 // The PSHUFB mask:
14803 static const int ShufMask1[] = {0, 1, 4, 5, 8, 9, 12, 13,
14804 -1, -1, -1, -1, -1, -1, -1, -1};
14806 SDValue Undef = DAG.getUNDEF(MVT::v16i8);
14807 OpLo = DAG.getVectorShuffle(MVT::v16i8, DL, OpLo, Undef, ShufMask1);
14808 OpHi = DAG.getVectorShuffle(MVT::v16i8, DL, OpHi, Undef, ShufMask1);
14810 OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpLo);
14811 OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpHi);
14813 // The MOVLHPS Mask:
14814 static const int ShufMask2[] = {0, 1, 4, 5};
14815 SDValue res = DAG.getVectorShuffle(MVT::v4i32, DL, OpLo, OpHi, ShufMask2);
14816 return DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, res);
14819 // Handle truncation of V256 to V128 using shuffles.
14820 if (!VT.is128BitVector() || !InVT.is256BitVector())
14823 assert(Subtarget->hasFp256() && "256-bit vector without AVX!");
14825 unsigned NumElems = VT.getVectorNumElements();
14826 MVT NVT = MVT::getVectorVT(VT.getVectorElementType(), NumElems * 2);
14828 SmallVector<int, 16> MaskVec(NumElems * 2, -1);
14829 // Prepare truncation shuffle mask
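// For example, for v8i32 -> v8i16 the input is bitcast to v16i16 and the mask
// {0,2,...,14} keeps the low half of every i32; the low 128 bits of the
// shuffled result then hold the truncated v8i16 value.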
14830 for (unsigned i = 0; i != NumElems; ++i)
14831 MaskVec[i] = i * 2;
14832 SDValue V = DAG.getVectorShuffle(NVT, DL,
14833 DAG.getNode(ISD::BITCAST, DL, NVT, In),
14834 DAG.getUNDEF(NVT), &MaskVec[0]);
14835 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V,
14836 DAG.getIntPtrConstant(0));
14839 SDValue X86TargetLowering::LowerFP_TO_SINT(SDValue Op,
14840 SelectionDAG &DAG) const {
14841 assert(!Op.getSimpleValueType().isVector());
14843 std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG,
14844 /*IsSigned=*/ true, /*IsReplace=*/ false);
14845 SDValue FIST = Vals.first, StackSlot = Vals.second;
14846 // If FP_TO_INTHelper failed, the node is actually supposed to be Legal.
14847 if (!FIST.getNode()) return Op;
14849 if (StackSlot.getNode())
14850 // Load the result.
14851 return DAG.getLoad(Op.getValueType(), SDLoc(Op),
14852 FIST, StackSlot, MachinePointerInfo(),
14853 false, false, false, 0);
14855 // The node is the result.
14859 SDValue X86TargetLowering::LowerFP_TO_UINT(SDValue Op,
14860 SelectionDAG &DAG) const {
14861 std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG,
14862 /*IsSigned=*/ false, /*IsReplace=*/ false);
14863 SDValue FIST = Vals.first, StackSlot = Vals.second;
14864 assert(FIST.getNode() && "Unexpected failure");
14866 if (StackSlot.getNode())
14867 // Load the result.
14868 return DAG.getLoad(Op.getValueType(), SDLoc(Op),
14869 FIST, StackSlot, MachinePointerInfo(),
14870 false, false, false, 0);
14872 // The node is the result.
14876 static SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) {
14878 MVT VT = Op.getSimpleValueType();
14879 SDValue In = Op.getOperand(0);
14880 MVT SVT = In.getSimpleValueType();
14882 assert(SVT == MVT::v2f32 && "Only customize MVT::v2f32 type legalization!");
14884 return DAG.getNode(X86ISD::VFPEXT, DL, VT,
14885 DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4f32,
14886 In, DAG.getUNDEF(SVT)));
14889 /// The only differences between FABS and FNEG are the mask and the logic op.
14890 /// FNEG also has a folding opportunity for FNEG(FABS(x)).
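/// For f64, for example, the three forms reduce to a bitwise operation with a
/// splatted 16-byte mask:
///   FABS(x)                  = x & 0x7FFFFFFFFFFFFFFF
///   FNEG(x)                  = x ^ 0x8000000000000000
///   FNABS(x) = FNEG(FABS(x)) = x | 0x8000000000000000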
14891 static SDValue LowerFABSorFNEG(SDValue Op, SelectionDAG &DAG) {
14892 assert((Op.getOpcode() == ISD::FABS || Op.getOpcode() == ISD::FNEG) &&
14893 "Wrong opcode for lowering FABS or FNEG.");
14895 bool IsFABS = (Op.getOpcode() == ISD::FABS);
14897 // If this is a FABS and it has an FNEG user, bail out to fold the combination
14898 // into an FNABS. We'll lower the FABS after that if it is still in use.
14900 for (SDNode *User : Op->uses())
14901 if (User->getOpcode() == ISD::FNEG)
14904 SDValue Op0 = Op.getOperand(0);
14905 bool IsFNABS = !IsFABS && (Op0.getOpcode() == ISD::FABS);
14908 MVT VT = Op.getSimpleValueType();
14909 // Assume scalar op for initialization; update for vector if needed.
14910 // Note that there are no scalar bitwise logical SSE/AVX instructions, so we
14911 // generate a 16-byte vector constant and logic op even for the scalar case.
14912 // Using a 16-byte mask allows folding the load of the mask with
14913 // the logic op, so it can save (~4 bytes) on code size.
14915 unsigned NumElts = VT == MVT::f64 ? 2 : 4;
14916 // FIXME: Use function attribute "OptimizeForSize" and/or CodeGenOpt::Level to
14917 // decide if we should generate a 16-byte constant mask when we only need 4 or
14918 // 8 bytes for the scalar case.
14919 if (VT.isVector()) {
14920 EltVT = VT.getVectorElementType();
14921 NumElts = VT.getVectorNumElements();
14924 unsigned EltBits = EltVT.getSizeInBits();
14925 LLVMContext *Context = DAG.getContext();
14926 // For FABS, mask is 0x7f...; for FNEG, mask is 0x80...
14928 APInt MaskElt = IsFABS ? APInt::getSignedMaxValue(EltBits) : APInt::getSignBit(EltBits);
14929 Constant *C = ConstantInt::get(*Context, MaskElt);
14930 C = ConstantVector::getSplat(NumElts, C);
14931 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
14932 SDValue CPIdx = DAG.getConstantPool(C, TLI.getPointerTy());
14933 unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
14934 SDValue Mask = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
14935 MachinePointerInfo::getConstantPool(),
14936 false, false, false, Alignment);
14938 if (VT.isVector()) {
14939 // For a vector, cast operands to a vector type, perform the logic op,
14940 // and cast the result back to the original value type.
14941 MVT VecVT = MVT::getVectorVT(MVT::i64, VT.getSizeInBits() / 64);
14942 SDValue MaskCasted = DAG.getNode(ISD::BITCAST, dl, VecVT, Mask);
14943 SDValue Operand = IsFNABS ?
14944 DAG.getNode(ISD::BITCAST, dl, VecVT, Op0.getOperand(0)) :
14945 DAG.getNode(ISD::BITCAST, dl, VecVT, Op0);
14946 unsigned BitOp = IsFABS ? ISD::AND : IsFNABS ? ISD::OR : ISD::XOR;
14947 return DAG.getNode(ISD::BITCAST, dl, VT,
14948 DAG.getNode(BitOp, dl, VecVT, Operand, MaskCasted));
14951 // If not vector, then scalar.
14952 unsigned BitOp = IsFABS ? X86ISD::FAND : IsFNABS ? X86ISD::FOR : X86ISD::FXOR;
14953 SDValue Operand = IsFNABS ? Op0.getOperand(0) : Op0;
14954 return DAG.getNode(BitOp, dl, VT, Operand, Mask);
14957 static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
14958 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
14959 LLVMContext *Context = DAG.getContext();
14960 SDValue Op0 = Op.getOperand(0);
14961 SDValue Op1 = Op.getOperand(1);
14963 MVT VT = Op.getSimpleValueType();
14964 MVT SrcVT = Op1.getSimpleValueType();
14966 // If second operand is smaller, extend it first.
14967 if (SrcVT.bitsLT(VT)) {
14968 Op1 = DAG.getNode(ISD::FP_EXTEND, dl, VT, Op1);
14971 // And if it is bigger, shrink it first.
14972 if (SrcVT.bitsGT(VT)) {
14973 Op1 = DAG.getNode(ISD::FP_ROUND, dl, VT, Op1, DAG.getIntPtrConstant(1));
14977 // At this point the operands and the result should have the same
14978 // type, and that won't be f80 since that is not custom lowered.
14980 const fltSemantics &Sem =
14981 VT == MVT::f64 ? APFloat::IEEEdouble : APFloat::IEEEsingle;
14982 const unsigned SizeInBits = VT.getSizeInBits();
14984 SmallVector<Constant *, 4> CV(
14985 VT == MVT::f64 ? 2 : 4,
14986 ConstantFP::get(*Context, APFloat(Sem, APInt(SizeInBits, 0))));
14988 // First, clear all bits but the sign bit from the second operand (sign).
14989 CV[0] = ConstantFP::get(*Context,
14990 APFloat(Sem, APInt::getHighBitsSet(SizeInBits, 1)));
14991 Constant *C = ConstantVector::get(CV);
14992 SDValue CPIdx = DAG.getConstantPool(C, TLI.getPointerTy(), 16);
14993 SDValue Mask1 = DAG.getLoad(SrcVT, dl, DAG.getEntryNode(), CPIdx,
14994 MachinePointerInfo::getConstantPool(),
14995 false, false, false, 16);
14996 SDValue SignBit = DAG.getNode(X86ISD::FAND, dl, SrcVT, Op1, Mask1);
14998 // Next, clear the sign bit from the first operand (magnitude).
14999 // If it's a constant, we can clear it here.
15000 if (ConstantFPSDNode *Op0CN = dyn_cast<ConstantFPSDNode>(Op0)) {
15001 APFloat APF = Op0CN->getValueAPF();
15002 // If the magnitude is a positive zero, the sign bit alone is enough.
15003 if (APF.isPosZero())
15006 CV[0] = ConstantFP::get(*Context, APF);
15008 CV[0] = ConstantFP::get(
15010 APFloat(Sem, APInt::getLowBitsSet(SizeInBits, SizeInBits - 1)));
15012 C = ConstantVector::get(CV);
15013 CPIdx = DAG.getConstantPool(C, TLI.getPointerTy(), 16);
15014 SDValue Val = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
15015 MachinePointerInfo::getConstantPool(),
15016 false, false, false, 16);
15017 // If the magnitude operand wasn't a constant, we need to AND out the sign.
15018 if (!isa<ConstantFPSDNode>(Op0))
15019 Val = DAG.getNode(X86ISD::FAND, dl, VT, Op0, Val);
15021 // OR the magnitude value with the sign bit.
15022 return DAG.getNode(X86ISD::FOR, dl, VT, Val, SignBit);
15025 static SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) {
15026 SDValue N0 = Op.getOperand(0);
15028 MVT VT = Op.getSimpleValueType();
15030 // Lower ISD::FGETSIGN to (AND (X86ISD::FGETSIGNx86 ...) 1).
15031 SDValue xFGETSIGN = DAG.getNode(X86ISD::FGETSIGNx86, dl, VT, N0,
15032 DAG.getConstant(1, VT));
15033 return DAG.getNode(ISD::AND, dl, VT, xFGETSIGN, DAG.getConstant(1, VT));
15036 // Check whether an OR'd tree is PTEST-able.
15037 static SDValue LowerVectorAllZeroTest(SDValue Op, const X86Subtarget *Subtarget,
15038 SelectionDAG &DAG) {
15039 assert(Op.getOpcode() == ISD::OR && "Only check OR'd tree.");
15041 if (!Subtarget->hasSSE41())
15044 if (!Op->hasOneUse())
15047 SDNode *N = Op.getNode();
15050 SmallVector<SDValue, 8> Opnds;
15051 DenseMap<SDValue, unsigned> VecInMap;
15052 SmallVector<SDValue, 8> VecIns;
15053 EVT VT = MVT::Other;
15055 // Recognize a special case where a vector is cast into a wide integer to test all 0s.
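// For example, with a v2i64 vector V,
//   ((i64)extractelt(V, 0) | (i64)extractelt(V, 1)) == 0
// can be lowered to "ptest V, V", since PTEST sets ZF exactly when the AND of
// its operands is all zeros.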
15057 Opnds.push_back(N->getOperand(0));
15058 Opnds.push_back(N->getOperand(1));
15060 for (unsigned Slot = 0, e = Opnds.size(); Slot < e; ++Slot) {
15061 SmallVectorImpl<SDValue>::const_iterator I = Opnds.begin() + Slot;
15062 // BFS traverse all OR'd operands.
15063 if (I->getOpcode() == ISD::OR) {
15064 Opnds.push_back(I->getOperand(0));
15065 Opnds.push_back(I->getOperand(1));
15066 // Re-evaluate the number of nodes to be traversed.
15067 e += 2; // 2 more nodes (LHS and RHS) are pushed.
15071 // Quit if this is not an EXTRACT_VECTOR_ELT.
15072 if (I->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
15075 // Quit if the index is not a constant.
15076 SDValue Idx = I->getOperand(1);
15077 if (!isa<ConstantSDNode>(Idx))
15080 SDValue ExtractedFromVec = I->getOperand(0);
15081 DenseMap<SDValue, unsigned>::iterator M = VecInMap.find(ExtractedFromVec);
15082 if (M == VecInMap.end()) {
15083 VT = ExtractedFromVec.getValueType();
15084 // Quit if not 128/256-bit vector.
15085 if (!VT.is128BitVector() && !VT.is256BitVector())
15087 // Quit if not the same type.
15088 if (VecInMap.begin() != VecInMap.end() &&
15089 VT != VecInMap.begin()->first.getValueType())
15091 M = VecInMap.insert(std::make_pair(ExtractedFromVec, 0)).first;
15092 VecIns.push_back(ExtractedFromVec);
15094 M->second |= 1U << cast<ConstantSDNode>(Idx)->getZExtValue();
15097 assert((VT.is128BitVector() || VT.is256BitVector()) &&
15098 "Not extracted from 128-/256-bit vector.");
15100 unsigned FullMask = (1U << VT.getVectorNumElements()) - 1U;
15102 for (DenseMap<SDValue, unsigned>::const_iterator
15103 I = VecInMap.begin(), E = VecInMap.end(); I != E; ++I) {
15104 // Quit if not all elements are used.
15105 if (I->second != FullMask)
15109 EVT TestVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64;
15111 // Cast all vectors into TestVT for PTEST.
15112 for (unsigned i = 0, e = VecIns.size(); i < e; ++i)
15113 VecIns[i] = DAG.getNode(ISD::BITCAST, DL, TestVT, VecIns[i]);
15115 // If more than one full vector is evaluated, OR them together before the PTEST.
15116 for (unsigned Slot = 0, e = VecIns.size(); e - Slot > 1; Slot += 2, e += 1) {
15117 // Each iteration will OR 2 nodes and append the result until there is only
15118 // 1 node left, i.e. the final OR'd value of all vectors.
15119 SDValue LHS = VecIns[Slot];
15120 SDValue RHS = VecIns[Slot + 1];
15121 VecIns.push_back(DAG.getNode(ISD::OR, DL, TestVT, LHS, RHS));
15124 return DAG.getNode(X86ISD::PTEST, DL, MVT::i32,
15125 VecIns.back(), VecIns.back());
15128 /// \brief return true if \c Op has a use that doesn't just read flags.
15129 static bool hasNonFlagsUse(SDValue Op) {
15130 for (SDNode::use_iterator UI = Op->use_begin(), UE = Op->use_end(); UI != UE;
15132 SDNode *User = *UI;
15133 unsigned UOpNo = UI.getOperandNo();
15134 if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) {
15135 // Look past the truncate.
15136 UOpNo = User->use_begin().getOperandNo();
15137 User = *User->use_begin();
15140 if (User->getOpcode() != ISD::BRCOND && User->getOpcode() != ISD::SETCC &&
15141 !(User->getOpcode() == ISD::SELECT && UOpNo == 0))
15147 /// Emit nodes that will be selected as "test Op0,Op0", or something
15149 SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, SDLoc dl,
15150 SelectionDAG &DAG) const {
15151 if (Op.getValueType() == MVT::i1) {
15152 SDValue ExtOp = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i8, Op);
15153 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, ExtOp,
15154 DAG.getConstant(0, MVT::i8));
15156 // CF and OF aren't always set the way we want. Determine which
15157 // of these we need.
15158 bool NeedCF = false;
15159 bool NeedOF = false;
15162 case X86::COND_A: case X86::COND_AE:
15163 case X86::COND_B: case X86::COND_BE:
15166 case X86::COND_G: case X86::COND_GE:
15167 case X86::COND_L: case X86::COND_LE:
15168 case X86::COND_O: case X86::COND_NO: {
15169 // Check if we really need to set the
15170 // Overflow flag. If NoSignedWrap is present
15171 // that is not actually needed.
15172 switch (Op->getOpcode()) {
15177 const BinaryWithFlagsSDNode *BinNode =
15178 cast<BinaryWithFlagsSDNode>(Op.getNode());
15179 if (BinNode->hasNoSignedWrap())
15189 // See if we can use the EFLAGS value from the operand instead of
15190 // doing a separate TEST. TEST always sets OF and CF to 0, so unless
15191 // we prove that the arithmetic won't overflow, we can't use OF or CF.
15192 if (Op.getResNo() != 0 || NeedOF || NeedCF) {
15193 // Emit a CMP with 0, which is the TEST pattern.
15194 //if (Op.getValueType() == MVT::i1)
15195 // return DAG.getNode(X86ISD::CMP, dl, MVT::i1, Op,
15196 // DAG.getConstant(0, MVT::i1));
15197 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
15198 DAG.getConstant(0, Op.getValueType()));
15200 unsigned Opcode = 0;
15201 unsigned NumOperands = 0;
15203 // Truncate operations may prevent the merge of the SETCC instruction
15204 // and the arithmetic instruction before it. Attempt to truncate the operands
15205 // of the arithmetic instruction and use a reduced bit-width instruction.
15206 bool NeedTruncation = false;
15207 SDValue ArithOp = Op;
15208 if (Op->getOpcode() == ISD::TRUNCATE && Op->hasOneUse()) {
15209 SDValue Arith = Op->getOperand(0);
15210 // Both the trunc and the arithmetic op need to have one user each.
15211 if (Arith->hasOneUse())
15212 switch (Arith.getOpcode()) {
15219 NeedTruncation = true;
15225 // NOTICE: In the code below we use ArithOp to hold the arithmetic operation
15226 // which may be the result of a CAST. We use the variable 'Op', which is the
15227 // non-casted variable when we check for possible users.
15228 switch (ArithOp.getOpcode()) {
15230 // Due to an isel shortcoming, be conservative if this add is likely to be
15231 // selected as part of a load-modify-store instruction. When the root node
15232 // in a match is a store, isel doesn't know how to remap non-chain non-flag
15233 // uses of other nodes in the match, such as the ADD in this case. This
15234 // leads to the ADD being left around and reselected, with the result being
15235 // two adds in the output. Alas, even if none of our users are stores, that
15236 // doesn't prove we're O.K. Ergo, if we have any parents that aren't
15237 // CopyToReg or SETCC, eschew INC/DEC. A better fix seems to require
15238 // climbing the DAG back to the root, and it doesn't seem to be worth the
15240 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
15241 UE = Op.getNode()->use_end(); UI != UE; ++UI)
15242 if (UI->getOpcode() != ISD::CopyToReg &&
15243 UI->getOpcode() != ISD::SETCC &&
15244 UI->getOpcode() != ISD::STORE)
15247 if (ConstantSDNode *C =
15248 dyn_cast<ConstantSDNode>(ArithOp.getNode()->getOperand(1))) {
15249 // An add of one will be selected as an INC.
15250 if (C->getAPIntValue() == 1 && !Subtarget->slowIncDec()) {
15251 Opcode = X86ISD::INC;
15256 // An add of negative one (subtract of one) will be selected as a DEC.
15257 if (C->getAPIntValue().isAllOnesValue() && !Subtarget->slowIncDec()) {
15258 Opcode = X86ISD::DEC;
15264 // Otherwise use a regular EFLAGS-setting add.
15265 Opcode = X86ISD::ADD;
15270 // If we have a constant logical shift that's only used in a comparison
15271 // against zero turn it into an equivalent AND. This allows turning it into
15272 // a TEST instruction later.
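// For example (i32): (X >> 3) == 0  <=>  (X & 0xFFFFFFF8) == 0, and
//                    (X << 3) == 0  <=>  (X & 0x1FFFFFFF) == 0.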
15273 if ((X86CC == X86::COND_E || X86CC == X86::COND_NE) && Op->hasOneUse() &&
15274 isa<ConstantSDNode>(Op->getOperand(1)) && !hasNonFlagsUse(Op)) {
15275 EVT VT = Op.getValueType();
15276 unsigned BitWidth = VT.getSizeInBits();
15277 unsigned ShAmt = Op->getConstantOperandVal(1);
15278 if (ShAmt >= BitWidth) // Avoid undefined shifts.
15280 APInt Mask = ArithOp.getOpcode() == ISD::SRL
15281 ? APInt::getHighBitsSet(BitWidth, BitWidth - ShAmt)
15282 : APInt::getLowBitsSet(BitWidth, BitWidth - ShAmt);
15283 if (!Mask.isSignedIntN(32)) // Avoid large immediates.
15285 SDValue New = DAG.getNode(ISD::AND, dl, VT, Op->getOperand(0),
15286 DAG.getConstant(Mask, VT));
15287 DAG.ReplaceAllUsesWith(Op, New);
15293 // If the primary 'and' result isn't used, don't bother using X86ISD::AND,
15294 // because a TEST instruction will be better.
15295 if (!hasNonFlagsUse(Op))
15301 // Due to the ISEL shortcoming noted above, be conservative if this op is
15302 // likely to be selected as part of a load-modify-store instruction.
15303 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
15304 UE = Op.getNode()->use_end(); UI != UE; ++UI)
15305 if (UI->getOpcode() == ISD::STORE)
15308 // Otherwise use a regular EFLAGS-setting instruction.
15309 switch (ArithOp.getOpcode()) {
15310 default: llvm_unreachable("unexpected operator!");
15311 case ISD::SUB: Opcode = X86ISD::SUB; break;
15312 case ISD::XOR: Opcode = X86ISD::XOR; break;
15313 case ISD::AND: Opcode = X86ISD::AND; break;
15315 if (!NeedTruncation && (X86CC == X86::COND_E || X86CC == X86::COND_NE)) {
15316 SDValue EFLAGS = LowerVectorAllZeroTest(Op, Subtarget, DAG);
15317 if (EFLAGS.getNode())
15320 Opcode = X86ISD::OR;
15334 return SDValue(Op.getNode(), 1);
15340 // If we found that truncation is beneficial, perform the truncation and
15342 if (NeedTruncation) {
15343 EVT VT = Op.getValueType();
15344 SDValue WideVal = Op->getOperand(0);
15345 EVT WideVT = WideVal.getValueType();
15346 unsigned ConvertedOp = 0;
15347 // Use a target machine opcode to prevent further DAGCombine
15348 // optimizations that may separate the arithmetic operations
15349 // from the setcc node.
15350 switch (WideVal.getOpcode()) {
15352 case ISD::ADD: ConvertedOp = X86ISD::ADD; break;
15353 case ISD::SUB: ConvertedOp = X86ISD::SUB; break;
15354 case ISD::AND: ConvertedOp = X86ISD::AND; break;
15355 case ISD::OR: ConvertedOp = X86ISD::OR; break;
15356 case ISD::XOR: ConvertedOp = X86ISD::XOR; break;
15360 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
15361 if (TLI.isOperationLegal(WideVal.getOpcode(), WideVT)) {
15362 SDValue V0 = DAG.getNode(ISD::TRUNCATE, dl, VT, WideVal.getOperand(0));
15363 SDValue V1 = DAG.getNode(ISD::TRUNCATE, dl, VT, WideVal.getOperand(1));
15364 Op = DAG.getNode(ConvertedOp, dl, VT, V0, V1);
15370 // Emit a CMP with 0, which is the TEST pattern.
15371 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
15372 DAG.getConstant(0, Op.getValueType()));
15374 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
15375 SmallVector<SDValue, 4> Ops(Op->op_begin(), Op->op_begin() + NumOperands);
15377 SDValue New = DAG.getNode(Opcode, dl, VTs, Ops);
15378 DAG.ReplaceAllUsesWith(Op, New);
15379 return SDValue(New.getNode(), 1);
15382 /// Emit nodes that will be selected as "cmp Op0,Op1", or something
15384 SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
15385 SDLoc dl, SelectionDAG &DAG) const {
15386 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op1)) {
15387 if (C->getAPIntValue() == 0)
15388 return EmitTest(Op0, X86CC, dl, DAG);
15390 if (Op0.getValueType() == MVT::i1)
15391 llvm_unreachable("Unexpected comparison operation for MVT::i1 operands");
15394 if ((Op0.getValueType() == MVT::i8 || Op0.getValueType() == MVT::i16 ||
15395 Op0.getValueType() == MVT::i32 || Op0.getValueType() == MVT::i64)) {
15396 // Do the comparison at i32 if it's smaller, besides the Atom case.
15397 // This avoids subregister aliasing issues. Keep the smaller reference
15398 // if we're optimizing for size, however, as that'll allow better folding
15399 // of memory operations.
15400 if (Op0.getValueType() != MVT::i32 && Op0.getValueType() != MVT::i64 &&
15401 !DAG.getMachineFunction().getFunction()->hasFnAttribute(
15402 Attribute::MinSize) &&
15403 !Subtarget->isAtom()) {
15404 unsigned ExtendOp =
15405 isX86CCUnsigned(X86CC) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
15406 Op0 = DAG.getNode(ExtendOp, dl, MVT::i32, Op0);
15407 Op1 = DAG.getNode(ExtendOp, dl, MVT::i32, Op1);
15409 // Use SUB instead of CMP to enable CSE between SUB and CMP.
15410 SDVTList VTs = DAG.getVTList(Op0.getValueType(), MVT::i32);
15411 SDValue Sub = DAG.getNode(X86ISD::SUB, dl, VTs,
15413 return SDValue(Sub.getNode(), 1);
15415 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op0, Op1);
15418 /// Convert a comparison if required by the subtarget.
15419 SDValue X86TargetLowering::ConvertCmpIfNecessary(SDValue Cmp,
15420 SelectionDAG &DAG) const {
15421 // If the subtarget does not support the FUCOMI instruction, floating-point
15422 // comparisons have to be converted.
15423 if (Subtarget->hasCMov() ||
15424 Cmp.getOpcode() != X86ISD::CMP ||
15425 !Cmp.getOperand(0).getValueType().isFloatingPoint() ||
15426 !Cmp.getOperand(1).getValueType().isFloatingPoint())
15429 // The instruction selector will select an FUCOM instruction instead of
15430 // FUCOMI, which writes the comparison result to FPSW instead of EFLAGS. Hence
15431 // build an SDNode sequence that transfers the result from FPSW into EFLAGS:
15432 // (X86sahf (trunc (srl (X86fp_stsw (trunc (X86cmp ...)), 8))))
15434 SDValue TruncFPSW = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, Cmp);
15435 SDValue FNStSW = DAG.getNode(X86ISD::FNSTSW16r, dl, MVT::i16, TruncFPSW);
15436 SDValue Srl = DAG.getNode(ISD::SRL, dl, MVT::i16, FNStSW,
15437 DAG.getConstant(8, MVT::i8));
15438 SDValue TruncSrl = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Srl);
15439 return DAG.getNode(X86ISD::SAHF, dl, MVT::i32, TruncSrl);
15442 /// The minimum architected relative accuracy is 2^-12. We need one
15443 /// Newton-Raphson step to have a good float result (24 bits of precision).
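/// Each Newton-Raphson step refines the estimate elementwise as
///   Est' = Est * (1.5 - 0.5 * Op * Est * Est)
/// which roughly doubles the number of correct bits per iteration.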
15444 SDValue X86TargetLowering::getRsqrtEstimate(SDValue Op,
15445 DAGCombinerInfo &DCI,
15446 unsigned &RefinementSteps,
15447 bool &UseOneConstNR) const {
15448 // FIXME: We should use instruction latency models to calculate the cost of
15449 // each potential sequence, but this is very hard to do reliably because
15450 // at least Intel's Core* chips have variable timing based on the number of
15451 // significant digits in the divisor and/or sqrt operand.
15452 if (!Subtarget->useSqrtEst())
15455 EVT VT = Op.getValueType();
15457 // SSE1 has rsqrtss and rsqrtps.
15458 // TODO: Add support for AVX512 (v16f32).
15459 // It is likely not profitable to do this for f64 because a double-precision
15460 // rsqrt estimate with refinement on x86 prior to FMA requires at least 16
15461 // instructions: convert to single, rsqrtss, convert back to double, refine
15462 // (3 steps = at least 13 insts). If an 'rsqrtsd' variant was added to the ISA
15463 // along with FMA, this could be a throughput win.
15464 if ((Subtarget->hasSSE1() && (VT == MVT::f32 || VT == MVT::v4f32)) ||
15465 (Subtarget->hasAVX() && VT == MVT::v8f32)) {
15466 RefinementSteps = 1;
15467 UseOneConstNR = false;
15468 return DCI.DAG.getNode(X86ISD::FRSQRT, SDLoc(Op), VT, Op);
15473 /// The minimum architected relative accuracy is 2^-12. We need one
15474 /// Newton-Raphson step to have a good float result (24 bits of precision).
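/// Each Newton-Raphson step refines the estimate elementwise as
///   Est' = Est * (2.0 - Op * Est)
/// which roughly doubles the number of correct bits per iteration.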
15475 SDValue X86TargetLowering::getRecipEstimate(SDValue Op,
15476 DAGCombinerInfo &DCI,
15477 unsigned &RefinementSteps) const {
15478 // FIXME: We should use instruction latency models to calculate the cost of
15479 // each potential sequence, but this is very hard to do reliably because
15480 // at least Intel's Core* chips have variable timing based on the number of
15481 // significant digits in the divisor.
15482 if (!Subtarget->useReciprocalEst())
15485 EVT VT = Op.getValueType();
15487 // SSE1 has rcpss and rcpps. AVX adds a 256-bit variant for rcpps.
15488 // TODO: Add support for AVX512 (v16f32).
15489 // It is likely not profitable to do this for f64 because a double-precision
15490 // reciprocal estimate with refinement on x86 prior to FMA requires
15491 // 15 instructions: convert to single, rcpss, convert back to double, refine
15492 // (3 steps = 12 insts). If an 'rcpsd' variant was added to the ISA
15493 // along with FMA, this could be a throughput win.
15494 if ((Subtarget->hasSSE1() && (VT == MVT::f32 || VT == MVT::v4f32)) ||
15495 (Subtarget->hasAVX() && VT == MVT::v8f32)) {
15496 RefinementSteps = ReciprocalEstimateRefinementSteps;
15497 return DCI.DAG.getNode(X86ISD::FRCP, SDLoc(Op), VT, Op);
15502 static bool isAllOnes(SDValue V) {
15503 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V);
15504 return C && C->isAllOnesValue();
15507 /// LowerToBT - Result of 'and' is compared against zero. Turn it into a BT node
15508 /// if possible.
15509 SDValue X86TargetLowering::LowerToBT(SDValue And, ISD::CondCode CC,
15510 SDLoc dl, SelectionDAG &DAG) const {
15511 SDValue Op0 = And.getOperand(0);
15512 SDValue Op1 = And.getOperand(1);
15513 if (Op0.getOpcode() == ISD::TRUNCATE)
15514 Op0 = Op0.getOperand(0);
15515 if (Op1.getOpcode() == ISD::TRUNCATE)
15516 Op1 = Op1.getOperand(0);
15519 if (Op1.getOpcode() == ISD::SHL)
15520 std::swap(Op0, Op1);
15521 if (Op0.getOpcode() == ISD::SHL) {
15522 if (ConstantSDNode *And00C = dyn_cast<ConstantSDNode>(Op0.getOperand(0)))
15523 if (And00C->getZExtValue() == 1) {
15524 // If we looked past a truncate, check that it's only truncating away
15526 unsigned BitWidth = Op0.getValueSizeInBits();
15527 unsigned AndBitWidth = And.getValueSizeInBits();
15528 if (BitWidth > AndBitWidth) {
15530 DAG.computeKnownBits(Op0, Zeros, Ones);
15531 if (Zeros.countLeadingOnes() < BitWidth - AndBitWidth)
15535 RHS = Op0.getOperand(1);
15537 } else if (Op1.getOpcode() == ISD::Constant) {
15538 ConstantSDNode *AndRHS = cast<ConstantSDNode>(Op1);
15539 uint64_t AndRHSVal = AndRHS->getZExtValue();
15540 SDValue AndLHS = Op0;
15542 if (AndRHSVal == 1 && AndLHS.getOpcode() == ISD::SRL) {
15543 LHS = AndLHS.getOperand(0);
15544 RHS = AndLHS.getOperand(1);
15547 // Use BT if the immediate can't be encoded in a TEST instruction.
15548 if (!isUInt<32>(AndRHSVal) && isPowerOf2_64(AndRHSVal)) {
15550 RHS = DAG.getConstant(Log2_64_Ceil(AndRHSVal), LHS.getValueType());
15554 if (LHS.getNode()) {
15555 // If LHS is i8, promote it to i32 with any_extend. There is no i8 BT
15556 // instruction. Since the shift amount is in-range-or-undefined, we know
15557 // that doing a bittest on the i32 value is ok. We extend to i32 because
15558 // the encoding for the i16 version is larger than the i32 version.
15559 // Also promote i16 to i32 for performance / code size reasons.
15560 if (LHS.getValueType() == MVT::i8 ||
15561 LHS.getValueType() == MVT::i16)
15562 LHS = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, LHS);
15564 // If the operand types disagree, extend the shift amount to match. Since
15565 // BT ignores high bits (like shifts) we can use anyextend.
15566 if (LHS.getValueType() != RHS.getValueType())
15567 RHS = DAG.getNode(ISD::ANY_EXTEND, dl, LHS.getValueType(), RHS);
15569 SDValue BT = DAG.getNode(X86ISD::BT, dl, MVT::i32, LHS, RHS);
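// BT leaves the value of the tested bit in CF, so 'bit == 0' corresponds to
// COND_AE (CF clear) and 'bit != 0' to COND_B (CF set).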
15570 X86::CondCode Cond = CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B;
15571 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
15572 DAG.getConstant(Cond, MVT::i8), BT);
15578 /// \brief - Turns an ISD::CondCode into a value suitable for SSE floating point
15580 static int translateX86FSETCC(ISD::CondCode SetCCOpcode, SDValue &Op0,
15585 // SSE Condition code mapping:
15594 switch (SetCCOpcode) {
15595 default: llvm_unreachable("Unexpected SETCC condition");
15597 case ISD::SETEQ: SSECC = 0; break;
15599 case ISD::SETGT: Swap = true; // Fallthrough
15601 case ISD::SETOLT: SSECC = 1; break;
15603 case ISD::SETGE: Swap = true; // Fallthrough
15605 case ISD::SETOLE: SSECC = 2; break;
15606 case ISD::SETUO: SSECC = 3; break;
15608 case ISD::SETNE: SSECC = 4; break;
15609 case ISD::SETULE: Swap = true; // Fallthrough
15610 case ISD::SETUGE: SSECC = 5; break;
15611 case ISD::SETULT: Swap = true; // Fallthrough
15612 case ISD::SETUGT: SSECC = 6; break;
15613 case ISD::SETO: SSECC = 7; break;
15615 case ISD::SETONE: SSECC = 8; break;
15618 std::swap(Op0, Op1);
15623 // Lower256IntVSETCC - Break a 256-bit integer VSETCC into two new 128-bit
15624 // ones, and then concatenate the result back.
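// For example, on AVX1 (no AVX2) a v8i32 compare becomes two v4i32 compares
// whose results are rejoined with a CONCAT_VECTORS.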
15625 static SDValue Lower256IntVSETCC(SDValue Op, SelectionDAG &DAG) {
15626 MVT VT = Op.getSimpleValueType();
15628 assert(VT.is256BitVector() && Op.getOpcode() == ISD::SETCC &&
15629 "Unsupported value type for operation");
15631 unsigned NumElems = VT.getVectorNumElements();
15633 SDValue CC = Op.getOperand(2);
15635 // Extract the LHS vectors
15636 SDValue LHS = Op.getOperand(0);
15637 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
15638 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);
15640 // Extract the RHS vectors
15641 SDValue RHS = Op.getOperand(1);
15642 SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, dl);
15643 SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, dl);
15645 // Issue the operation on the smaller types and concatenate the result back
15646 MVT EltVT = VT.getVectorElementType();
15647 MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
15648 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
15649 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1, CC),
15650 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2, CC));
15653 static SDValue LowerIntVSETCC_AVX512(SDValue Op, SelectionDAG &DAG,
15654 const X86Subtarget *Subtarget) {
15655 SDValue Op0 = Op.getOperand(0);
15656 SDValue Op1 = Op.getOperand(1);
15657 SDValue CC = Op.getOperand(2);
15658 MVT VT = Op.getSimpleValueType();
15661 assert(Op0.getValueType().getVectorElementType().getSizeInBits() >= 8 &&
15662 Op.getValueType().getScalarType() == MVT::i1 &&
15663 "Cannot set masked compare for this operation");
15665 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
15667 bool Unsigned = false;
15670 switch (SetCCOpcode) {
15671 default: llvm_unreachable("Unexpected SETCC condition");
15672 case ISD::SETNE: SSECC = 4; break;
15673 case ISD::SETEQ: Opc = X86ISD::PCMPEQM; break;
15674 case ISD::SETUGT: SSECC = 6; Unsigned = true; break;
15675 case ISD::SETLT: Swap = true; //fall-through
15676 case ISD::SETGT: Opc = X86ISD::PCMPGTM; break;
15677 case ISD::SETULT: SSECC = 1; Unsigned = true; break;
15678 case ISD::SETUGE: SSECC = 5; Unsigned = true; break; //NLT
15679 case ISD::SETGE: Swap = true; SSECC = 2; break; // LE + swap
15680 case ISD::SETULE: Unsigned = true; //fall-through
15681 case ISD::SETLE: SSECC = 2; break;
15685 std::swap(Op0, Op1);
15687 return DAG.getNode(Opc, dl, VT, Op0, Op1);
15688 Opc = Unsigned ? X86ISD::CMPMU: X86ISD::CMPM;
15689 return DAG.getNode(Opc, dl, VT, Op0, Op1,
15690 DAG.getConstant(SSECC, MVT::i8));
15693 /// \brief Try to turn a VSETULT into a VSETULE by modifying its second
15694 /// operand \p Op1. If non-trivial (for example because it's not constant)
15695 /// return an empty value.
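/// The rewrite relies on x <u C being equivalent to x <=u (C - 1), which only
/// holds for nonzero C (hence the underflow check below).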
15696 static SDValue ChangeVSETULTtoVSETULE(SDLoc dl, SDValue Op1, SelectionDAG &DAG)
15698 BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op1.getNode());
15702 MVT VT = Op1.getSimpleValueType();
15703 MVT EVT = VT.getVectorElementType();
15704 unsigned n = VT.getVectorNumElements();
15705 SmallVector<SDValue, 8> ULTOp1;
15707 for (unsigned i = 0; i < n; ++i) {
15708 ConstantSDNode *Elt = dyn_cast<ConstantSDNode>(BV->getOperand(i));
15709 if (!Elt || Elt->isOpaque() || Elt->getValueType(0) != EVT)
15712 // Avoid underflow.
15713 APInt Val = Elt->getAPIntValue();
15717 ULTOp1.push_back(DAG.getConstant(Val - 1, EVT));
15720 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, ULTOp1);
15723 static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget,
15724 SelectionDAG &DAG) {
15725 SDValue Op0 = Op.getOperand(0);
15726 SDValue Op1 = Op.getOperand(1);
15727 SDValue CC = Op.getOperand(2);
15728 MVT VT = Op.getSimpleValueType();
15729 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
15730 bool isFP = Op.getOperand(1).getSimpleValueType().isFloatingPoint();
15735 MVT EltVT = Op0.getSimpleValueType().getVectorElementType();
15736 assert(EltVT == MVT::f32 || EltVT == MVT::f64);
15739 unsigned SSECC = translateX86FSETCC(SetCCOpcode, Op0, Op1);
15740 unsigned Opc = X86ISD::CMPP;
15741 if (Subtarget->hasAVX512() && VT.getVectorElementType() == MVT::i1) {
15742 assert(VT.getVectorNumElements() <= 16);
15743 Opc = X86ISD::CMPM;
15745 // In the two special cases we can't handle, emit two comparisons.
15748 unsigned CombineOpc;
15749 if (SetCCOpcode == ISD::SETUEQ) {
15750 CC0 = 3; CC1 = 0; CombineOpc = ISD::OR;
15752 assert(SetCCOpcode == ISD::SETONE);
15753 CC0 = 7; CC1 = 4; CombineOpc = ISD::AND;
15756 SDValue Cmp0 = DAG.getNode(Opc, dl, VT, Op0, Op1,
15757 DAG.getConstant(CC0, MVT::i8));
15758 SDValue Cmp1 = DAG.getNode(Opc, dl, VT, Op0, Op1,
15759 DAG.getConstant(CC1, MVT::i8));
15760 return DAG.getNode(CombineOpc, dl, VT, Cmp0, Cmp1);
15762 // Handle all other FP comparisons here.
15763 return DAG.getNode(Opc, dl, VT, Op0, Op1,
15764 DAG.getConstant(SSECC, MVT::i8));
15767 // Break 256-bit integer vector compare into smaller ones.
15768 if (VT.is256BitVector() && !Subtarget->hasInt256())
15769 return Lower256IntVSETCC(Op, DAG);
15771 bool MaskResult = (VT.getVectorElementType() == MVT::i1);
15772 EVT OpVT = Op1.getValueType();
15773 if (Subtarget->hasAVX512()) {
15774 if (Op1.getValueType().is512BitVector() ||
15775 (Subtarget->hasBWI() && Subtarget->hasVLX()) ||
15776 (MaskResult && OpVT.getVectorElementType().getSizeInBits() >= 32))
15777 return LowerIntVSETCC_AVX512(Op, DAG, Subtarget);
15779 // In the AVX-512 architecture setcc returns a mask with i1 elements,
15780 // but there is no compare instruction for i8 and i16 elements in KNL.
15781 // We are not dealing with 512-bit operands in this case; those
15782 // types are illegal.
15784 (OpVT.getVectorElementType().getSizeInBits() < 32 &&
15785 OpVT.getVectorElementType().getSizeInBits() >= 8))
15786 return DAG.getNode(ISD::TRUNCATE, dl, VT,
15787 DAG.getNode(ISD::SETCC, dl, OpVT, Op0, Op1, CC));
15790 // We are handling one of the integer comparisons here. Since SSE only has
15791 // GT and EQ comparisons for integers, swapping the operands and using
15792 // multiple operations may be required for some comparisons.
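// For example, x <= y is built as NOT(x > y) (Invert), x < y swaps the
// operands of PCMPGT (Swap), and the unsigned forms first XOR both operands
// with the sign bit (FlipSigns) so that signed PCMPGT computes the unsigned
// ordering.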
15794 bool Swap = false, Invert = false, FlipSigns = false, MinMax = false;
15795 bool Subus = false;
15797 switch (SetCCOpcode) {
15798 default: llvm_unreachable("Unexpected SETCC condition");
15799 case ISD::SETNE: Invert = true;
15800 case ISD::SETEQ: Opc = X86ISD::PCMPEQ; break;
15801 case ISD::SETLT: Swap = true;
15802 case ISD::SETGT: Opc = X86ISD::PCMPGT; break;
15803 case ISD::SETGE: Swap = true;
15804 case ISD::SETLE: Opc = X86ISD::PCMPGT;
15805 Invert = true; break;
15806 case ISD::SETULT: Swap = true;
15807 case ISD::SETUGT: Opc = X86ISD::PCMPGT;
15808 FlipSigns = true; break;
15809 case ISD::SETUGE: Swap = true;
15810 case ISD::SETULE: Opc = X86ISD::PCMPGT;
15811 FlipSigns = true; Invert = true; break;
15814 // Special case: Use min/max operations for SETULE/SETUGE
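// x <=u y holds exactly when umin(x, y) == x (and dually for umax), so the
// compare can be lowered as UMIN/UMAX followed by a PCMPEQ against the
// original operand (see the MinMax handling further down).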
15815 MVT VET = VT.getVectorElementType();
15817 (Subtarget->hasSSE41() && (VET >= MVT::i8 && VET <= MVT::i32))
15818 || (Subtarget->hasSSE2() && (VET == MVT::i8));
15821 switch (SetCCOpcode) {
15823 case ISD::SETULE: Opc = X86ISD::UMIN; MinMax = true; break;
15824 case ISD::SETUGE: Opc = X86ISD::UMAX; MinMax = true; break;
15827 if (MinMax) { Swap = false; Invert = false; FlipSigns = false; }
15830 bool hasSubus = Subtarget->hasSSE2() && (VET == MVT::i8 || VET == MVT::i16);
15831 if (!MinMax && hasSubus) {
15832 // As another special case, use PSUBUS[BW] when it's profitable. E.g. for
15834 // t = psubus Op0, Op1
15835 // pcmpeq t, <0..0>
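// Unsigned saturating subtraction yields zero exactly when Op0 <=u Op1, so
// comparing the PSUBUS result against zero implements the unsigned 'ule'.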
15836 switch (SetCCOpcode) {
15838 case ISD::SETULT: {
15839 // If the comparison is against a constant we can turn this into a
15840 // setule. With psubus, setule does not require a swap. This is
15841 // beneficial because the constant in the register is no longer
15842 // clobbered as the destination, so it can be hoisted out of a loop.
15843 // Only do this pre-AVX since vpcmp* is no longer destructive.
15844 if (Subtarget->hasAVX())
15846 SDValue ULEOp1 = ChangeVSETULTtoVSETULE(dl, Op1, DAG);
15847 if (ULEOp1.getNode()) {
15849 Subus = true; Invert = false; Swap = false;
15853 // Psubus is better than flip-sign because it requires no inversion.
15854 case ISD::SETUGE: Subus = true; Invert = false; Swap = true; break;
15855 case ISD::SETULE: Subus = true; Invert = false; Swap = false; break;
15859 Opc = X86ISD::SUBUS;
15865 std::swap(Op0, Op1);
15867 // Check that the operation in question is available (most are plain SSE2,
15868 // but PCMPGTQ and PCMPEQQ have different requirements).
15869 if (VT == MVT::v2i64) {
15870 if (Opc == X86ISD::PCMPGT && !Subtarget->hasSSE42()) {
15871 assert(Subtarget->hasSSE2() && "Don't know how to lower!");
15873 // First cast everything to the right type.
15874 Op0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op0);
15875 Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op1);
15877 // Since SSE has no unsigned integer comparisons, we need to flip the sign
15878 // bits of the inputs before performing those operations. The lower
15879 // compare is always unsigned.
15882 SB = DAG.getConstant(0x80000000U, MVT::v4i32);
15884 SDValue Sign = DAG.getConstant(0x80000000U, MVT::i32);
15885 SDValue Zero = DAG.getConstant(0x00000000U, MVT::i32);
15886 SB = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
15887 Sign, Zero, Sign, Zero);
15889 Op0 = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Op0, SB);
15890 Op1 = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Op1, SB);
15892 // Emulate PCMPGTQ with (hi1 > hi2) | ((hi1 == hi2) & (lo1 > lo2))
15893 SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
15894 SDValue EQ = DAG.getNode(X86ISD::PCMPEQ, dl, MVT::v4i32, Op0, Op1);
15896 // Create masks for only the low parts/high parts of the 64 bit integers.
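// MaskHi = {1,1,3,3} replicates each 64-bit element's high 32-bit half into
// both lanes of that element, and MaskLo = {0,0,2,2} does the same with the
// low half, so the 32-bit AND/OR below yields the full 64-bit predicate in
// every lane.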
15897 static const int MaskHi[] = { 1, 1, 3, 3 };
15898 static const int MaskLo[] = { 0, 0, 2, 2 };
15899 SDValue EQHi = DAG.getVectorShuffle(MVT::v4i32, dl, EQ, EQ, MaskHi);
15900 SDValue GTLo = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskLo);
15901 SDValue GTHi = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);
15903 SDValue Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, EQHi, GTLo);
15904 Result = DAG.getNode(ISD::OR, dl, MVT::v4i32, Result, GTHi);
15907 Result = DAG.getNOT(dl, Result, MVT::v4i32);
15909 return DAG.getNode(ISD::BITCAST, dl, VT, Result);
15912 if (Opc == X86ISD::PCMPEQ && !Subtarget->hasSSE41()) {
15913 // If pcmpeqq is missing but pcmpeqd is available, synthesize pcmpeqq with
15914 // pcmpeqd + pshufd + pand.
15915 assert(Subtarget->hasSSE2() && !FlipSigns && "Don't know how to lower!");
15917 // First cast everything to the right type.
15918 Op0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op0);
15919 Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op1);
15922 SDValue Result = DAG.getNode(Opc, dl, MVT::v4i32, Op0, Op1);
15924 // Make sure the lower and upper halves are both all-ones.
15925 static const int Mask[] = { 1, 0, 3, 2 };
15926 SDValue Shuf = DAG.getVectorShuffle(MVT::v4i32, dl, Result, Result, Mask);
15927 Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, Result, Shuf);
15930 Result = DAG.getNOT(dl, Result, MVT::v4i32);
15932 return DAG.getNode(ISD::BITCAST, dl, VT, Result);
15936 // Since SSE has no unsigned integer comparisons, we need to flip the sign
15937 // bits of the inputs before performing those operations.
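// XORing both operands with the sign bit is an order-preserving mapping from
// unsigned to signed space: x <u y iff (x ^ SignBit) <s (y ^ SignBit).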
15939 EVT EltVT = VT.getVectorElementType();
15940 SDValue SB = DAG.getConstant(APInt::getSignBit(EltVT.getSizeInBits()), VT);
15941 Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SB);
15942 Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SB);
15945 SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
15947 // If the logical-not of the result is required, perform that now.
15949 Result = DAG.getNOT(dl, Result, VT);
15952 Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Op0, Result);
15955 Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Result,
15956 getZeroVector(VT, Subtarget, DAG, dl));
15961 SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
15963 MVT VT = Op.getSimpleValueType();
15965 if (VT.isVector()) return LowerVSETCC(Op, Subtarget, DAG);
15967 assert(((!Subtarget->hasAVX512() && VT == MVT::i8) || (VT == MVT::i1))
15968 && "SetCC type must be 8-bit or 1-bit integer");
15969 SDValue Op0 = Op.getOperand(0);
15970 SDValue Op1 = Op.getOperand(1);
15972 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
15974 // Optimize to BT if possible.
15975 // Lower (X & (1 << N)) == 0 to BT(X, N).
15976 // Lower ((X >>u N) & 1) != 0 to BT(X, N).
15977 // Lower ((X >>s N) & 1) != 0 to BT(X, N).
15978 if (Op0.getOpcode() == ISD::AND && Op0.hasOneUse() &&
15979 Op1.getOpcode() == ISD::Constant &&
15980 cast<ConstantSDNode>(Op1)->isNullValue() &&
15981 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
15982 SDValue NewSetCC = LowerToBT(Op0, CC, dl, DAG);
15983 if (NewSetCC.getNode()) {
15985 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewSetCC);
15990 // Look for X == 0, X == 1, X != 0, or X != 1. We can simplify some forms of
15992 if (Op1.getOpcode() == ISD::Constant &&
15993 (cast<ConstantSDNode>(Op1)->getZExtValue() == 1 ||
15994 cast<ConstantSDNode>(Op1)->isNullValue()) &&
15995 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
15997 // If the input is a setcc, then reuse the input setcc or use a new one with
15998 // the inverted condition.
15999 if (Op0.getOpcode() == X86ISD::SETCC) {
16000 X86::CondCode CCode = (X86::CondCode)Op0.getConstantOperandVal(0);
16001 bool Invert = (CC == ISD::SETNE) ^
16002 cast<ConstantSDNode>(Op1)->isNullValue();
16006 CCode = X86::GetOppositeBranchCondition(CCode);
16007 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
16008 DAG.getConstant(CCode, MVT::i8),
16009 Op0.getOperand(1));
16011 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, SetCC);
16015 if ((Op0.getValueType() == MVT::i1) && (Op1.getOpcode() == ISD::Constant) &&
16016 (cast<ConstantSDNode>(Op1)->getZExtValue() == 1) &&
16017 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
16019 ISD::CondCode NewCC = ISD::getSetCCInverse(CC, true);
16020 return DAG.getSetCC(dl, VT, Op0, DAG.getConstant(0, MVT::i1), NewCC);
16023 bool isFP = Op1.getSimpleValueType().isFloatingPoint();
16024 unsigned X86CC = TranslateX86CC(CC, isFP, Op0, Op1, DAG);
16025 if (X86CC == X86::COND_INVALID)
16028 SDValue EFLAGS = EmitCmp(Op0, Op1, X86CC, dl, DAG);
16029 EFLAGS = ConvertCmpIfNecessary(EFLAGS, DAG);
16030 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
16031 DAG.getConstant(X86CC, MVT::i8), EFLAGS);
16033 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, SetCC);
16037 // isX86LogicalCmp - Return true if the opcode is an X86 logical comparison.
16038 static bool isX86LogicalCmp(SDValue Op) {
16039 unsigned Opc = Op.getNode()->getOpcode();
16040 if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI ||
16041 Opc == X86ISD::SAHF)
16043 if (Op.getResNo() == 1 &&
16044 (Opc == X86ISD::ADD ||
16045 Opc == X86ISD::SUB ||
16046 Opc == X86ISD::ADC ||
16047 Opc == X86ISD::SBB ||
16048 Opc == X86ISD::SMUL ||
16049 Opc == X86ISD::UMUL ||
16050 Opc == X86ISD::INC ||
16051 Opc == X86ISD::DEC ||
16052 Opc == X86ISD::OR ||
16053 Opc == X86ISD::XOR ||
16054 Opc == X86ISD::AND))
16057 if (Op.getResNo() == 2 && Opc == X86ISD::UMUL)
16063 static bool isTruncWithZeroHighBitsInput(SDValue V, SelectionDAG &DAG) {
16064 if (V.getOpcode() != ISD::TRUNCATE)
16067 SDValue VOp0 = V.getOperand(0);
16068 unsigned InBits = VOp0.getValueSizeInBits();
16069 unsigned Bits = V.getValueSizeInBits();
16070 return DAG.MaskedValueIsZero(VOp0, APInt::getHighBitsSet(InBits,InBits-Bits));
16073 SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
16074 bool addTest = true;
16075 SDValue Cond = Op.getOperand(0);
16076 SDValue Op1 = Op.getOperand(1);
16077 SDValue Op2 = Op.getOperand(2);
16079 EVT VT = Op1.getValueType();
16082 // Lower fp selects into a CMP/AND/ANDN/OR sequence when the necessary SSE ops
16083 // are available. Otherwise fp cmovs get lowered into a less efficient branch
16084 // sequence later on.
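// Outside of AVX-512 the FSETCC below produces an all-ones/all-zeros mask,
// so the select is materialized as (mask & Op1) | (~mask & Op2) using
// FAND/FANDN/FOR.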
16085 if (Cond.getOpcode() == ISD::SETCC &&
16086 ((Subtarget->hasSSE2() && (VT == MVT::f32 || VT == MVT::f64)) ||
16087 (Subtarget->hasSSE1() && VT == MVT::f32)) &&
16088 VT == Cond.getOperand(0).getValueType() && Cond->hasOneUse()) {
16089 SDValue CondOp0 = Cond.getOperand(0), CondOp1 = Cond.getOperand(1);
16090 int SSECC = translateX86FSETCC(
16091 cast<CondCodeSDNode>(Cond.getOperand(2))->get(), CondOp0, CondOp1);
16094 if (Subtarget->hasAVX512()) {
16095 SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, MVT::i1, CondOp0, CondOp1,
16096 DAG.getConstant(SSECC, MVT::i8));
16097 return DAG.getNode(X86ISD::SELECT, DL, VT, Cmp, Op1, Op2);
16099 SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, VT, CondOp0, CondOp1,
16100 DAG.getConstant(SSECC, MVT::i8));
16101 SDValue AndN = DAG.getNode(X86ISD::FANDN, DL, VT, Cmp, Op2);
16102 SDValue And = DAG.getNode(X86ISD::FAND, DL, VT, Cmp, Op1);
16103 return DAG.getNode(X86ISD::FOR, DL, VT, AndN, And);
16107 if (Cond.getOpcode() == ISD::SETCC) {
16108 SDValue NewCond = LowerSETCC(Cond, DAG);
16109 if (NewCond.getNode())
16113 // (select (x == 0), -1, y) -> (sign_bit (x - 1)) | y
16114 // (select (x == 0), y, -1) -> ~(sign_bit (x - 1)) | y
16115 // (select (x != 0), y, -1) -> (sign_bit (x - 1)) | y
16116 // (select (x != 0), -1, y) -> ~(sign_bit (x - 1)) | y
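// These work because comparing x against 1 computes x - 1, which sets the
// carry flag exactly when x == 0; SETCC_CARRY (an sbb) then broadcasts that
// carry into an all-zeros or all-ones value without a branch.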
16117 if (Cond.getOpcode() == X86ISD::SETCC &&
16118 Cond.getOperand(1).getOpcode() == X86ISD::CMP &&
16119 isZero(Cond.getOperand(1).getOperand(1))) {
16120 SDValue Cmp = Cond.getOperand(1);
16122 unsigned CondCode =cast<ConstantSDNode>(Cond.getOperand(0))->getZExtValue();
16124 if ((isAllOnes(Op1) || isAllOnes(Op2)) &&
16125 (CondCode == X86::COND_E || CondCode == X86::COND_NE)) {
16126 SDValue Y = isAllOnes(Op2) ? Op1 : Op2;
16128 SDValue CmpOp0 = Cmp.getOperand(0);
16129 // Apply further optimizations for special cases
16130 // (select (x != 0), -1, 0) -> neg & sbb
16131 // (select (x == 0), 0, -1) -> neg & sbb
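// NEG sets CF precisely when its operand is nonzero, so a following
// SETCC_CARRY (sbb reg, reg) produces -1 for x != 0 and 0 for x == 0.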
16132 if (ConstantSDNode *YC = dyn_cast<ConstantSDNode>(Y))
16133 if (YC->isNullValue() &&
16134 (isAllOnes(Op1) == (CondCode == X86::COND_NE))) {
16135 SDVTList VTs = DAG.getVTList(CmpOp0.getValueType(), MVT::i32);
16136 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, VTs,
16137 DAG.getConstant(0, CmpOp0.getValueType()),
16139 SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
16140 DAG.getConstant(X86::COND_B, MVT::i8),
16141 SDValue(Neg.getNode(), 1));
16145 Cmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32,
16146 CmpOp0, DAG.getConstant(1, CmpOp0.getValueType()));
16147 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
16149 SDValue Res = // Res = 0 or -1.
16150 DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
16151 DAG.getConstant(X86::COND_B, MVT::i8), Cmp);
16153 if (isAllOnes(Op1) != (CondCode == X86::COND_E))
16154 Res = DAG.getNOT(DL, Res, Res.getValueType());
16156 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(Op2);
16157 if (!N2C || !N2C->isNullValue())
16158 Res = DAG.getNode(ISD::OR, DL, Res.getValueType(), Res, Y);
16163 // Look past (and (setcc_carry (cmp ...)), 1).
16164 if (Cond.getOpcode() == ISD::AND &&
16165 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
16166 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
16167 if (C && C->getAPIntValue() == 1)
16168 Cond = Cond.getOperand(0);
16171 // If the condition flag is set by an X86ISD::CMP, then use it as the condition
16172 // setting operand in place of the X86ISD::SETCC.
16173 unsigned CondOpcode = Cond.getOpcode();
16174 if (CondOpcode == X86ISD::SETCC ||
16175 CondOpcode == X86ISD::SETCC_CARRY) {
16176 CC = Cond.getOperand(0);
16178 SDValue Cmp = Cond.getOperand(1);
16179 unsigned Opc = Cmp.getOpcode();
16180 MVT VT = Op.getSimpleValueType();
16182 bool IllegalFPCMov = false;
16183 if (VT.isFloatingPoint() && !VT.isVector() &&
16184 !isScalarFPTypeInSSEReg(VT)) // FPStack?
16185 IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSExtValue());
16187 if ((isX86LogicalCmp(Cmp) && !IllegalFPCMov) ||
16188 Opc == X86ISD::BT) { // FIXME
16192 } else if (CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
16193 CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
16194 ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) &&
16195 Cond.getOperand(0).getValueType() != MVT::i8)) {
16196 SDValue LHS = Cond.getOperand(0);
16197 SDValue RHS = Cond.getOperand(1);
16198 unsigned X86Opcode;
16201 switch (CondOpcode) {
16202 case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break;
16203 case ISD::SADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break;
16204 case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break;
16205 case ISD::SSUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break;
16206 case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break;
16207 case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break;
16208 default: llvm_unreachable("unexpected overflowing operator");
16210 if (CondOpcode == ISD::UMULO)
16211 VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(),
16214 VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
16216 SDValue X86Op = DAG.getNode(X86Opcode, DL, VTs, LHS, RHS);
16218 if (CondOpcode == ISD::UMULO)
16219 Cond = X86Op.getValue(2);
16221 Cond = X86Op.getValue(1);
16223 CC = DAG.getConstant(X86Cond, MVT::i8);
16228 // Look past the truncate if the high bits are known zero.
16229 if (isTruncWithZeroHighBitsInput(Cond, DAG))
16230 Cond = Cond.getOperand(0);
16232 // We know the result of AND is compared against zero. Try to match
16234 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
16235 SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, DL, DAG);
16236 if (NewSetCC.getNode()) {
16237 CC = NewSetCC.getOperand(0);
16238 Cond = NewSetCC.getOperand(1);
16245 CC = DAG.getConstant(X86::COND_NE, MVT::i8);
16246 Cond = EmitTest(Cond, X86::COND_NE, DL, DAG);
16249 // a < b ? -1 : 0 -> RES = ~setcc_carry
16250 // a < b ? 0 : -1 -> RES = setcc_carry
16251 // a >= b ? -1 : 0 -> RES = setcc_carry
16252 // a >= b ? 0 : -1 -> RES = ~setcc_carry
16253 if (Cond.getOpcode() == X86ISD::SUB) {
16254 Cond = ConvertCmpIfNecessary(Cond, DAG);
16255 unsigned CondCode = cast<ConstantSDNode>(CC)->getZExtValue();
16257 if ((CondCode == X86::COND_AE || CondCode == X86::COND_B) &&
16258 (isAllOnes(Op1) || isAllOnes(Op2)) && (isZero(Op1) || isZero(Op2))) {
16259 SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
16260 DAG.getConstant(X86::COND_B, MVT::i8), Cond);
16261 if (isAllOnes(Op1) != (CondCode == X86::COND_B))
16262 return DAG.getNOT(DL, Res, Res.getValueType());
16267 // X86 doesn't have an i8 cmov. If both operands are the result of a truncate,
16268 // widen the cmov and push the truncate through. This avoids introducing a new
16269 // branch during isel and doesn't add any extensions.
16270 if (Op.getValueType() == MVT::i8 &&
16271 Op1.getOpcode() == ISD::TRUNCATE && Op2.getOpcode() == ISD::TRUNCATE) {
16272 SDValue T1 = Op1.getOperand(0), T2 = Op2.getOperand(0);
16273 if (T1.getValueType() == T2.getValueType() &&
16274 // Blacklist CopyFromReg to avoid partial register stalls.
16275 T1.getOpcode() != ISD::CopyFromReg && T2.getOpcode()!=ISD::CopyFromReg){
16276 SDVTList VTs = DAG.getVTList(T1.getValueType(), MVT::Glue);
16277 SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, VTs, T2, T1, CC, Cond);
16278 return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
16282 // X86ISD::CMOV means set the result (which is operand 1) to the RHS if
16283 // condition is true.
16284 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
16285 SDValue Ops[] = { Op2, Op1, CC, Cond };
16286 return DAG.getNode(X86ISD::CMOV, DL, VTs, Ops);
16289 static SDValue LowerSIGN_EXTEND_AVX512(SDValue Op, const X86Subtarget *Subtarget,
16290 SelectionDAG &DAG) {
16291 MVT VT = Op->getSimpleValueType(0);
16292 SDValue In = Op->getOperand(0);
16293 MVT InVT = In.getSimpleValueType();
16294 MVT VTElt = VT.getVectorElementType();
16295 MVT InVTElt = InVT.getVectorElementType();
16299 if ((InVTElt == MVT::i1) &&
16300 (((Subtarget->hasBWI() && Subtarget->hasVLX() &&
16301 VT.getSizeInBits() <= 256 && VTElt.getSizeInBits() <= 16)) ||
16303 ((Subtarget->hasBWI() && VT.is512BitVector() &&
16304 VTElt.getSizeInBits() <= 16)) ||
16306 ((Subtarget->hasDQI() && Subtarget->hasVLX() &&
16307 VT.getSizeInBits() <= 256 && VTElt.getSizeInBits() >= 32)) ||
16309 ((Subtarget->hasDQI() && VT.is512BitVector() &&
16310 VTElt.getSizeInBits() >= 32))))
16311 return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
16313 unsigned int NumElts = VT.getVectorNumElements();
16315 if (NumElts != 8 && NumElts != 16)
16318 if (VT.is512BitVector() && InVT.getVectorElementType() != MVT::i1) {
16319 if (In.getOpcode() == X86ISD::VSEXT || In.getOpcode() == X86ISD::VZEXT)
16320 return DAG.getNode(In.getOpcode(), dl, VT, In.getOperand(0));
16321 return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
16324 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
16325 assert (InVT.getVectorElementType() == MVT::i1 && "Unexpected vector type");
16327 MVT ExtVT = (NumElts == 8) ? MVT::v8i64 : MVT::v16i32;
16328 Constant *C = ConstantInt::get(*DAG.getContext(),
16329 APInt::getAllOnesValue(ExtVT.getScalarType().getSizeInBits()));
16331 SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy());
16332 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
16333 SDValue Ld = DAG.getLoad(ExtVT.getScalarType(), dl, DAG.getEntryNode(), CP,
16334 MachinePointerInfo::getConstantPool(),
16335 false, false, false, Alignment);
16336 SDValue Brcst = DAG.getNode(X86ISD::VBROADCASTM, dl, ExtVT, In, Ld);
16337 if (VT.is512BitVector())
16339 return DAG.getNode(X86ISD::VTRUNC, dl, VT, Brcst);
16342 static SDValue LowerSIGN_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
16343 SelectionDAG &DAG) {
16344 MVT VT = Op->getSimpleValueType(0);
16345 SDValue In = Op->getOperand(0);
16346 MVT InVT = In.getSimpleValueType();
16349 if (VT.is512BitVector() || InVT.getVectorElementType() == MVT::i1)
16350 return LowerSIGN_EXTEND_AVX512(Op, Subtarget, DAG);
16352 if ((VT != MVT::v4i64 || InVT != MVT::v4i32) &&
16353 (VT != MVT::v8i32 || InVT != MVT::v8i16) &&
16354 (VT != MVT::v16i16 || InVT != MVT::v16i8))
16357 if (Subtarget->hasInt256())
16358 return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
16360 // Optimize vectors in AVX mode
16361 // Sign extend v8i16 to v8i32 and
16364 // Divide the input vector into two parts:
16365 // for v4i32 the shuffle masks will be { 0, 1, -1, -1 } and { 2, 3, -1, -1 };
16366 // use a vpmovsx instruction to extend v4i32 -> v2i64 and v8i16 -> v4i32,
16367 // then concat the vectors back to the original VT.
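// For example, v8i16 -> v8i32 without AVX2: shuffle the upper four elements
// down, sign extend each v8i16 half with VPMOVSXWD to v4i32, and concatenate
// the two halves into the v8i32 result.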
16369 unsigned NumElems = InVT.getVectorNumElements();
16370 SDValue Undef = DAG.getUNDEF(InVT);
16372 SmallVector<int,8> ShufMask1(NumElems, -1);
16373 for (unsigned i = 0; i != NumElems/2; ++i)
16376 SDValue OpLo = DAG.getVectorShuffle(InVT, dl, In, Undef, &ShufMask1[0]);
16378 SmallVector<int,8> ShufMask2(NumElems, -1);
16379 for (unsigned i = 0; i != NumElems/2; ++i)
16380 ShufMask2[i] = i + NumElems/2;
16382 SDValue OpHi = DAG.getVectorShuffle(InVT, dl, In, Undef, &ShufMask2[0]);
16384 MVT HalfVT = MVT::getVectorVT(VT.getScalarType(),
16385 VT.getVectorNumElements()/2);
16387 OpLo = DAG.getNode(X86ISD::VSEXT, dl, HalfVT, OpLo);
16388 OpHi = DAG.getNode(X86ISD::VSEXT, dl, HalfVT, OpHi);
16390 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
16393 // Lower vector extended loads using a shuffle. If SSSE3 is not available, we
16394 // may emit an illegal shuffle, but the expansion is still better than scalar
16395 // code. We generate X86ISD::VSEXT for SEXTLOADs if it's available; otherwise
16396 // we'll emit a shuffle and an arithmetic shift.
16397 // FIXME: Is the expansion actually better than scalar code? It doesn't seem so.
16398 // TODO: It is possible to support ZExt by zeroing the undef values during
16399 // the shuffle phase or after the shuffle.
16400 static SDValue LowerExtendedLoad(SDValue Op, const X86Subtarget *Subtarget,
16401 SelectionDAG &DAG) {
16402 MVT RegVT = Op.getSimpleValueType();
16403 assert(RegVT.isVector() && "We only custom lower vector sext loads.");
16404 assert(RegVT.isInteger() &&
16405 "We only custom lower integer vector sext loads.");
16407 // Nothing useful we can do without SSE2 shuffles.
16408 assert(Subtarget->hasSSE2() && "We only custom lower sext loads with SSE2.");
16410 LoadSDNode *Ld = cast<LoadSDNode>(Op.getNode());
16412 EVT MemVT = Ld->getMemoryVT();
16413 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
16414 unsigned RegSz = RegVT.getSizeInBits();
16416 ISD::LoadExtType Ext = Ld->getExtensionType();
16418 assert((Ext == ISD::EXTLOAD || Ext == ISD::SEXTLOAD)
16419 && "Only anyext and sext are currently implemented.");
16420 assert(MemVT != RegVT && "Cannot extend to the same type");
16421 assert(MemVT.isVector() && "Must load a vector from memory");
16423 unsigned NumElems = RegVT.getVectorNumElements();
16424 unsigned MemSz = MemVT.getSizeInBits();
16425 assert(RegSz > MemSz && "Register size must be greater than the mem size");
16427 if (Ext == ISD::SEXTLOAD && RegSz == 256 && !Subtarget->hasInt256()) {
16428 // The only way in which we have a legal 256-bit vector result but not the
16429 // integer 256-bit operations needed to directly lower a sextload is if we
16430 // have AVX1 but not AVX2. In that case, we can always emit a sextload to
16431 // a 128-bit vector and a normal sign_extend to 256-bits that should get
16432 // correctly legalized. We do this late to allow the canonical form of
16433 // sextload to persist throughout the rest of the DAG combiner -- it wants
16434 // to fold together any extensions it can, and so will fuse a sign_extend
16435 // of an sextload into a sextload targeting a wider value.
16437 if (MemSz == 128) {
16438 // Just switch this to a normal load.
16439 assert(TLI.isTypeLegal(MemVT) && "If the memory type is a 128-bit type, "
16440 "it must be a legal 128-bit vector "
16442 Load = DAG.getLoad(MemVT, dl, Ld->getChain(), Ld->getBasePtr(),
16443 Ld->getPointerInfo(), Ld->isVolatile(), Ld->isNonTemporal(),
16444 Ld->isInvariant(), Ld->getAlignment());
16446 assert(MemSz < 128 &&
16447 "Can't extend a type wider than 128 bits to a 256 bit vector!");
16448 // Do an sext load to a 128-bit vector type. We want to use the same
16449 // number of elements, but elements half as wide. This will end up being
16450 // recursively lowered by this routine, but will succeed as we definitely
16451 // have all the necessary features if we're using AVX1.
16453 EVT::getIntegerVT(*DAG.getContext(), RegVT.getScalarSizeInBits() / 2);
16454 EVT HalfVecVT = EVT::getVectorVT(*DAG.getContext(), HalfEltVT, NumElems);
16456 DAG.getExtLoad(Ext, dl, HalfVecVT, Ld->getChain(), Ld->getBasePtr(),
16457 Ld->getPointerInfo(), MemVT, Ld->isVolatile(),
16458 Ld->isNonTemporal(), Ld->isInvariant(),
16459 Ld->getAlignment());
16462 // Replace chain users with the new chain.
16463 assert(Load->getNumValues() == 2 && "Loads must carry a chain!");
16464 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), Load.getValue(1));
16466 // Finally, do a normal sign-extend to the desired register.
16467 return DAG.getSExtOrTrunc(Load, dl, RegVT);
16470 // All sizes must be a power of two.
16471 assert(isPowerOf2_32(RegSz * MemSz * NumElems) &&
16472 "Non-power-of-two elements are not custom lowered!");
16474 // Attempt to load the original value using scalar loads.
16475 // Find the largest scalar type that divides the total loaded size.
16476 MVT SclrLoadTy = MVT::i8;
16477 for (MVT Tp : MVT::integer_valuetypes()) {
16478 if (TLI.isTypeLegal(Tp) && ((MemSz % Tp.getSizeInBits()) == 0)) {
16483 // On 32-bit systems, we can't save 64-bit integers. Try bitcasting to f64.
16484 if (TLI.isTypeLegal(MVT::f64) && SclrLoadTy.getSizeInBits() < 64 &&
16486 SclrLoadTy = MVT::f64;
16488 // Calculate the number of scalar loads that we need to perform
16489 // in order to load our vector from memory.
16490 unsigned NumLoads = MemSz / SclrLoadTy.getSizeInBits();
16492 assert((Ext != ISD::SEXTLOAD || NumLoads == 1) &&
16493 "Can only lower sext loads with a single scalar load!");
16495 unsigned loadRegZize = RegSz;
16496 if (Ext == ISD::SEXTLOAD && RegSz == 256)
16499 // Represent our vector as a sequence of elements which are the
16500 // largest scalar that we can load.
16501 EVT LoadUnitVecVT = EVT::getVectorVT(
16502 *DAG.getContext(), SclrLoadTy, loadRegZize / SclrLoadTy.getSizeInBits());
16504 // Represent the data using the same element type that is stored in
16505 // memory. In practice, we "widen" MemVT.
16507 EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
16508 loadRegZize / MemVT.getScalarType().getSizeInBits());
16510 assert(WideVecVT.getSizeInBits() == LoadUnitVecVT.getSizeInBits() &&
16511 "Invalid vector type");
16513 // We can't shuffle using an illegal type.
16514 assert(TLI.isTypeLegal(WideVecVT) &&
16515 "We only lower types that form legal widened vector types");
16517 SmallVector<SDValue, 8> Chains;
16518 SDValue Ptr = Ld->getBasePtr();
16519 SDValue Increment =
16520 DAG.getConstant(SclrLoadTy.getSizeInBits() / 8, TLI.getPointerTy());
16521 SDValue Res = DAG.getUNDEF(LoadUnitVecVT);
16523 for (unsigned i = 0; i < NumLoads; ++i) {
16524 // Perform a single load.
16525 SDValue ScalarLoad =
16526 DAG.getLoad(SclrLoadTy, dl, Ld->getChain(), Ptr, Ld->getPointerInfo(),
16527 Ld->isVolatile(), Ld->isNonTemporal(), Ld->isInvariant(),
16528 Ld->getAlignment());
16529 Chains.push_back(ScalarLoad.getValue(1));
16530 // Create the first element type using SCALAR_TO_VECTOR in order to avoid
16531 // another round of DAGCombining.
16533 Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LoadUnitVecVT, ScalarLoad);
16535 Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, LoadUnitVecVT, Res,
16536 ScalarLoad, DAG.getIntPtrConstant(i));
16538 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
16541 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
16543 // Bitcast the loaded value to a vector of the original element type, in
16544 // the size of the target vector type.
16545 SDValue SlicedVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Res);
16546 unsigned SizeRatio = RegSz / MemSz;
16548 if (Ext == ISD::SEXTLOAD) {
16549 // If we have SSE4.1, we can directly emit a VSEXT node.
16550 if (Subtarget->hasSSE41()) {
16551 SDValue Sext = DAG.getNode(X86ISD::VSEXT, dl, RegVT, SlicedVec);
16552 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
16556 // Otherwise we'll shuffle the small elements in the high bits of the
16557 // larger type and perform an arithmetic shift. If the shift is not legal
16558 // it's better to scalarize.
16559 assert(TLI.isOperationLegalOrCustom(ISD::SRA, RegVT) &&
16560 "We can't implement a sext load without an arithmetic right shift!");
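// The trick: place each narrow element in the topmost bits of a wide lane and
// then arithmetic-shift right by the width difference so the hardware
// replicates the sign bit, completing the extension.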
16562 // Redistribute the loaded elements into the different locations.
16563 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
16564 for (unsigned i = 0; i != NumElems; ++i)
16565 ShuffleVec[i * SizeRatio + SizeRatio - 1] = i;
16567 SDValue Shuff = DAG.getVectorShuffle(
16568 WideVecVT, dl, SlicedVec, DAG.getUNDEF(WideVecVT), &ShuffleVec[0]);
16570 Shuff = DAG.getNode(ISD::BITCAST, dl, RegVT, Shuff);
16572 // Build the arithmetic shift.
16573 unsigned Amt = RegVT.getVectorElementType().getSizeInBits() -
16574 MemVT.getVectorElementType().getSizeInBits();
16576 DAG.getNode(ISD::SRA, dl, RegVT, Shuff, DAG.getConstant(Amt, RegVT));
16578 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
16582 // Redistribute the loaded elements into the different locations.
16583 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
16584 for (unsigned i = 0; i != NumElems; ++i)
16585 ShuffleVec[i * SizeRatio] = i;
16587 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, SlicedVec,
16588 DAG.getUNDEF(WideVecVT), &ShuffleVec[0]);
16590 // Bitcast to the requested type.
16591 Shuff = DAG.getNode(ISD::BITCAST, dl, RegVT, Shuff);
16592 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
16596 // isAndOrOfSetCCs - Return true if the node is an ISD::AND or
16597 // ISD::OR of two X86ISD::SETCC nodes each of which has no other use apart
16598 // from the AND / OR.
16599 static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) {
16600 Opc = Op.getOpcode();
16601 if (Opc != ISD::OR && Opc != ISD::AND)
16603 return (Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
16604 Op.getOperand(0).hasOneUse() &&
16605 Op.getOperand(1).getOpcode() == X86ISD::SETCC &&
16606 Op.getOperand(1).hasOneUse());
16609 // isXor1OfSetCC - Return true if the node is an ISD::XOR of an X86ISD::SETCC
16610 // and 1, and the SETCC node has a single use.
16611 static bool isXor1OfSetCC(SDValue Op) {
16612 if (Op.getOpcode() != ISD::XOR)
16614 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
16615 if (N1C && N1C->getAPIntValue() == 1) {
16616 return Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
16617 Op.getOperand(0).hasOneUse();
16622 SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
16623 bool addTest = true;
16624 SDValue Chain = Op.getOperand(0);
16625 SDValue Cond = Op.getOperand(1);
16626 SDValue Dest = Op.getOperand(2);
16629 bool Inverted = false;
16631 if (Cond.getOpcode() == ISD::SETCC) {
16632 // Check for setcc([su]{add,sub,mul}o == 0).
16633 if (cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETEQ &&
16634 isa<ConstantSDNode>(Cond.getOperand(1)) &&
16635 cast<ConstantSDNode>(Cond.getOperand(1))->isNullValue() &&
16636 Cond.getOperand(0).getResNo() == 1 &&
16637 (Cond.getOperand(0).getOpcode() == ISD::SADDO ||
16638 Cond.getOperand(0).getOpcode() == ISD::UADDO ||
16639 Cond.getOperand(0).getOpcode() == ISD::SSUBO ||
16640 Cond.getOperand(0).getOpcode() == ISD::USUBO ||
16641 Cond.getOperand(0).getOpcode() == ISD::SMULO ||
16642 Cond.getOperand(0).getOpcode() == ISD::UMULO)) {
16644 Cond = Cond.getOperand(0);
16646 SDValue NewCond = LowerSETCC(Cond, DAG);
16647 if (NewCond.getNode())
16652 // FIXME: LowerXALUO doesn't handle these!!
16653 else if (Cond.getOpcode() == X86ISD::ADD ||
16654 Cond.getOpcode() == X86ISD::SUB ||
16655 Cond.getOpcode() == X86ISD::SMUL ||
16656 Cond.getOpcode() == X86ISD::UMUL)
16657 Cond = LowerXALUO(Cond, DAG);
16660 // Look past (and (setcc_carry (cmp ...)), 1).
16661 if (Cond.getOpcode() == ISD::AND &&
16662 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
16663 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
16664 if (C && C->getAPIntValue() == 1)
16665 Cond = Cond.getOperand(0);
16668 // If the condition flag is set by an X86ISD::CMP, then use it as the condition
16669 // setting operand in place of the X86ISD::SETCC.
16670 unsigned CondOpcode = Cond.getOpcode();
16671 if (CondOpcode == X86ISD::SETCC ||
16672 CondOpcode == X86ISD::SETCC_CARRY) {
16673 CC = Cond.getOperand(0);
16675 SDValue Cmp = Cond.getOperand(1);
16676 unsigned Opc = Cmp.getOpcode();
16677 // FIXME: WHY THE SPECIAL CASING OF LogicalCmp??
16678 if (isX86LogicalCmp(Cmp) || Opc == X86ISD::BT) {
16682 switch (cast<ConstantSDNode>(CC)->getZExtValue()) {
16686 // These can only come from an arithmetic instruction with overflow,
16687 // e.g. SADDO, UADDO.
16688 Cond = Cond.getNode()->getOperand(1);
16694 CondOpcode = Cond.getOpcode();
16695 if (CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
16696 CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
16697 ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) &&
16698 Cond.getOperand(0).getValueType() != MVT::i8)) {
16699 SDValue LHS = Cond.getOperand(0);
16700 SDValue RHS = Cond.getOperand(1);
16701 unsigned X86Opcode;
16704 // Keep this in sync with LowerXALUO, otherwise we might create redundant
16705 // instructions that can't be removed afterwards (i.e. X86ISD::ADD and
16707 switch (CondOpcode) {
16708 case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break;
16710 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
16712 X86Opcode = X86ISD::INC; X86Cond = X86::COND_O;
16715 X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break;
16716 case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break;
16718 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
16720 X86Opcode = X86ISD::DEC; X86Cond = X86::COND_O;
16723 X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break;
16724 case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break;
16725 case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break;
16726 default: llvm_unreachable("unexpected overflowing operator");
16729 X86Cond = X86::GetOppositeBranchCondition((X86::CondCode)X86Cond);
16730 if (CondOpcode == ISD::UMULO)
16731 VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(),
16734 VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
16736 SDValue X86Op = DAG.getNode(X86Opcode, dl, VTs, LHS, RHS);
16738 if (CondOpcode == ISD::UMULO)
16739 Cond = X86Op.getValue(2);
16741 Cond = X86Op.getValue(1);
16743 CC = DAG.getConstant(X86Cond, MVT::i8);
16747 if (Cond.hasOneUse() && isAndOrOfSetCCs(Cond, CondOpc)) {
16748 SDValue Cmp = Cond.getOperand(0).getOperand(1);
16749 if (CondOpc == ISD::OR) {
16750 // Also, recognize the pattern generated by an FCMP_UNE. We can emit
16751 // two branches instead of an explicit OR instruction with a
16753 if (Cmp == Cond.getOperand(1).getOperand(1) &&
16754 isX86LogicalCmp(Cmp)) {
16755 CC = Cond.getOperand(0).getOperand(0);
16756 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16757 Chain, Dest, CC, Cmp);
16758 CC = Cond.getOperand(1).getOperand(0);
16762 } else { // ISD::AND
16763 // Also, recognize the pattern generated by an FCMP_OEQ. We can emit
16764 // two branches instead of an explicit AND instruction with a
16765 // separate test. However, we only do this if this block doesn't
16766 // have a fall-through edge, because this requires an explicit
16767 // jmp when the condition is false.
16768 if (Cmp == Cond.getOperand(1).getOperand(1) &&
16769 isX86LogicalCmp(Cmp) &&
16770 Op.getNode()->hasOneUse()) {
16771 X86::CondCode CCode =
16772 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
16773 CCode = X86::GetOppositeBranchCondition(CCode);
16774 CC = DAG.getConstant(CCode, MVT::i8);
16775 SDNode *User = *Op.getNode()->use_begin();
16776 // Look for an unconditional branch following this conditional branch.
16777 // We need this because we need to reverse the successors in order
16778 // to implement FCMP_OEQ.
16779 if (User->getOpcode() == ISD::BR) {
16780 SDValue FalseBB = User->getOperand(1);
16782 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
16783 assert(NewBR == User);
16787 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16788 Chain, Dest, CC, Cmp);
16789 X86::CondCode CCode =
16790 (X86::CondCode)Cond.getOperand(1).getConstantOperandVal(0);
16791 CCode = X86::GetOppositeBranchCondition(CCode);
16792 CC = DAG.getConstant(CCode, MVT::i8);
16798 } else if (Cond.hasOneUse() && isXor1OfSetCC(Cond)) {
16799 // Recognize 'xorb (setcc), 1' patterns. The xor inverts the condition.
16800 // It should be transformed by the DAG combiner except when the condition
16801 // is set by an arithmetic-with-overflow node.
16802 X86::CondCode CCode =
16803 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
16804 CCode = X86::GetOppositeBranchCondition(CCode);
16805 CC = DAG.getConstant(CCode, MVT::i8);
16806 Cond = Cond.getOperand(0).getOperand(1);
16808 } else if (Cond.getOpcode() == ISD::SETCC &&
16809 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETOEQ) {
16810 // For FCMP_OEQ, we can emit
16811 // two branches instead of an explicit AND instruction with a
16812 // separate test. However, we only do this if this block doesn't
16813 // have a fall-through edge, because this requires an explicit
16814 // jmp when the condition is false.
16815 if (Op.getNode()->hasOneUse()) {
16816 SDNode *User = *Op.getNode()->use_begin();
16817 // Look for an unconditional branch following this conditional branch.
16818 // We need this because we need to reverse the successors in order
16819 // to implement FCMP_OEQ.
16820 if (User->getOpcode() == ISD::BR) {
16821 SDValue FalseBB = User->getOperand(1);
16823 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
16824 assert(NewBR == User);
16828 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
16829 Cond.getOperand(0), Cond.getOperand(1));
16830 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
16831 CC = DAG.getConstant(X86::COND_NE, MVT::i8);
16832 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16833 Chain, Dest, CC, Cmp);
16834 CC = DAG.getConstant(X86::COND_P, MVT::i8);
16839 } else if (Cond.getOpcode() == ISD::SETCC &&
16840 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETUNE) {
16841 // For FCMP_UNE, we can emit
16842 // two branches instead of an explicit AND instruction with a
16843 // separate test. However, we only do this if this block doesn't
16844 // have a fall-through edge, because this requires an explicit
16845 // jmp when the condition is false.
16846 if (Op.getNode()->hasOneUse()) {
16847 SDNode *User = *Op.getNode()->use_begin();
16848 // Look for an unconditional branch following this conditional branch.
16849 // We need this because we need to reverse the successors in order
16850 // to implement FCMP_UNE.
16851 if (User->getOpcode() == ISD::BR) {
16852 SDValue FalseBB = User->getOperand(1);
16854 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
16855 assert(NewBR == User);
16858 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
16859 Cond.getOperand(0), Cond.getOperand(1));
16860 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
16861 CC = DAG.getConstant(X86::COND_NE, MVT::i8);
16862 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16863 Chain, Dest, CC, Cmp);
16864 CC = DAG.getConstant(X86::COND_NP, MVT::i8);
16874 // Look past the truncate if the high bits are known zero.
16875 if (isTruncWithZeroHighBitsInput(Cond, DAG))
16876 Cond = Cond.getOperand(0);
16878 // We know the result of AND is compared against zero. Try to match
16880 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
16881 SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, dl, DAG);
16882 if (NewSetCC.getNode()) {
16883 CC = NewSetCC.getOperand(0);
16884 Cond = NewSetCC.getOperand(1);
16891 X86::CondCode X86Cond = Inverted ? X86::COND_E : X86::COND_NE;
16892 CC = DAG.getConstant(X86Cond, MVT::i8);
16893 Cond = EmitTest(Cond, X86Cond, dl, DAG);
16895 Cond = ConvertCmpIfNecessary(Cond, DAG);
16896 return DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16897 Chain, Dest, CC, Cond);
16900 // Lower dynamic stack allocation to an _alloca call for Cygwin/MinGW targets.
16901 // Calls to _alloca are needed to probe the stack when allocating more than 4K
16902 // bytes in one go. Touching the stack at 4K increments is necessary to ensure
16903 // that the guard pages used by the OS virtual memory manager are allocated in
16904 // the correct sequence.
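// For example, a single 12K allocation must touch each of the three new pages
// in order so that the OS can commit a page and move its guard page down one
// page at a time.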
16906 X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
16907 SelectionDAG &DAG) const {
16908 MachineFunction &MF = DAG.getMachineFunction();
16909 bool SplitStack = MF.shouldSplitStack();
16910 bool Lower = (Subtarget->isOSWindows() && !Subtarget->isTargetMachO()) ||
16915 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
16916 SDNode* Node = Op.getNode();
16918 unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();
16919 assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and"
16920 " not tell us which reg is the stack pointer!");
16921 EVT VT = Node->getValueType(0);
16922 SDValue Tmp1 = SDValue(Node, 0);
16923 SDValue Tmp2 = SDValue(Node, 1);
16924 SDValue Tmp3 = Node->getOperand(2);
16925 SDValue Chain = Tmp1.getOperand(0);
16927 // Chain the dynamic stack allocation so that it doesn't modify the stack
16928 // pointer when other instructions are using the stack.
16929 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(0, true),
16932 SDValue Size = Tmp2.getOperand(1);
16933 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
16934 Chain = SP.getValue(1);
16935 unsigned Align = cast<ConstantSDNode>(Tmp3)->getZExtValue();
16936 const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
16937 unsigned StackAlign = TFI.getStackAlignment();
16938 Tmp1 = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
16939 if (Align > StackAlign)
16940 Tmp1 = DAG.getNode(ISD::AND, dl, VT, Tmp1,
16941 DAG.getConstant(-(uint64_t)Align, VT));
16942 Chain = DAG.getCopyToReg(Chain, dl, SPReg, Tmp1); // Output chain
16944 Tmp2 = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, true),
16945 DAG.getIntPtrConstant(0, true), SDValue(),
16948 SDValue Ops[2] = { Tmp1, Tmp2 };
16949 return DAG.getMergeValues(Ops, dl);
16953 SDValue Chain = Op.getOperand(0);
16954 SDValue Size = Op.getOperand(1);
16955 unsigned Align = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
16956 EVT VT = Op.getNode()->getValueType(0);
16958 bool Is64Bit = Subtarget->is64Bit();
16959 EVT SPTy = getPointerTy();
16962 MachineRegisterInfo &MRI = MF.getRegInfo();
16965 // The 64-bit implementation of segmented stacks needs to clobber both r10
16966 // and r11. This makes it impossible to use it along with nested parameters.
16967 const Function *F = MF.getFunction();
16969 for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
16971 if (I->hasNestAttr())
16972 report_fatal_error("Cannot use segmented stacks with functions that "
16973 "have nested arguments.");
16976 const TargetRegisterClass *AddrRegClass =
16977 getRegClassFor(getPointerTy());
16978 unsigned Vreg = MRI.createVirtualRegister(AddrRegClass);
16979 Chain = DAG.getCopyToReg(Chain, dl, Vreg, Size);
16980 SDValue Value = DAG.getNode(X86ISD::SEG_ALLOCA, dl, SPTy, Chain,
16981 DAG.getRegister(Vreg, SPTy));
16982 SDValue Ops1[2] = { Value, Chain };
16983 return DAG.getMergeValues(Ops1, dl);
16986 const unsigned Reg = (Subtarget->isTarget64BitLP64() ? X86::RAX : X86::EAX);
16988 Chain = DAG.getCopyToReg(Chain, dl, Reg, Size, Flag);
16989 Flag = Chain.getValue(1);
16990 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
16992 Chain = DAG.getNode(X86ISD::WIN_ALLOCA, dl, NodeTys, Chain, Flag);
16994 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
16995 unsigned SPReg = RegInfo->getStackRegister();
16996 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, SPTy);
16997 Chain = SP.getValue(1);
17000 SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0),
17001 DAG.getConstant(-(uint64_t)Align, VT));
17002 Chain = DAG.getCopyToReg(Chain, dl, SPReg, SP);
17005 SDValue Ops1[2] = { SP, Chain };
17006 return DAG.getMergeValues(Ops1, dl);
17010 SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
17011 MachineFunction &MF = DAG.getMachineFunction();
17012 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
17014 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
17017 if (!Subtarget->is64Bit() || Subtarget->isTargetWin64()) {
17018 // vastart just stores the address of the VarArgsFrameIndex slot into the
17019 // memory location argument.
17020 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
17022 return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
17023 MachinePointerInfo(SV), false, false, 0);
17027 // gp_offset (0 - 6 * 8)
17028 // fp_offset (48 - 48 + 8 * 16)
17029 // overflow_arg_area (points to parameters passed in memory).
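// reg_save_area (points to the spilled integer and vector argument registers).
// The stores below write these fields at offsets 0, 4, 8 and 16 of the
// va_list object.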
17031 SmallVector<SDValue, 8> MemOps;
17032 SDValue FIN = Op.getOperand(1);
17034 SDValue Store = DAG.getStore(Op.getOperand(0), DL,
17035 DAG.getConstant(FuncInfo->getVarArgsGPOffset(),
17037 FIN, MachinePointerInfo(SV), false, false, 0);
17038 MemOps.push_back(Store);
17041 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
17042 FIN, DAG.getIntPtrConstant(4));
17043 Store = DAG.getStore(Op.getOperand(0), DL,
17044 DAG.getConstant(FuncInfo->getVarArgsFPOffset(),
17046 FIN, MachinePointerInfo(SV, 4), false, false, 0);
17047 MemOps.push_back(Store);
17049 // Store ptr to overflow_arg_area
17050 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
17051 FIN, DAG.getIntPtrConstant(4));
17052 SDValue OVFIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
17054 Store = DAG.getStore(Op.getOperand(0), DL, OVFIN, FIN,
17055 MachinePointerInfo(SV, 8),
17057 MemOps.push_back(Store);
17059 // Store ptr to reg_save_area.
17060 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
17061 FIN, DAG.getIntPtrConstant(8));
17062 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
17064 Store = DAG.getStore(Op.getOperand(0), DL, RSFIN, FIN,
17065 MachinePointerInfo(SV, 16), false, false, 0);
17066 MemOps.push_back(Store);
17067 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
17070 SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
17071 assert(Subtarget->is64Bit() &&
17072 "LowerVAARG only handles 64-bit va_arg!");
17073 assert((Subtarget->isTargetLinux() ||
17074 Subtarget->isTargetDarwin()) &&
17075 "Unhandled target in LowerVAARG");
17076 assert(Op.getNode()->getNumOperands() == 4);
17077 SDValue Chain = Op.getOperand(0);
17078 SDValue SrcPtr = Op.getOperand(1);
17079 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
17080 unsigned Align = Op.getConstantOperandVal(3);
17083 EVT ArgVT = Op.getNode()->getValueType(0);
17084 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
17085 uint32_t ArgSize = getDataLayout()->getTypeAllocSize(ArgTy);
17088 // Decide which area this value should be read from.
17089 // TODO: Implement the AMD64 ABI in its entirety. This simple
17090 // selection mechanism works only for the basic types.
17091 if (ArgVT == MVT::f80) {
17092 llvm_unreachable("va_arg for f80 not yet implemented");
17093 } else if (ArgVT.isFloatingPoint() && ArgSize <= 16 /*bytes*/) {
17094 ArgMode = 2; // Argument passed in XMM register. Use fp_offset.
17095 } else if (ArgVT.isInteger() && ArgSize <= 32 /*bytes*/) {
17096 ArgMode = 1; // Argument passed in GPR64 register(s). Use gp_offset.
17098 llvm_unreachable("Unhandled argument type in LowerVAARG");
17101 if (ArgMode == 2) {
17102 // Sanity Check: Make sure using fp_offset makes sense.
17103 assert(!DAG.getTarget().Options.UseSoftFloat &&
17104 !(DAG.getMachineFunction().getFunction()->hasFnAttribute(
17105 Attribute::NoImplicitFloat)) &&
17106 Subtarget->hasSSE1());
17109 // Insert VAARG_64 node into the DAG
17110 // VAARG_64 returns two values: Variable Argument Address, Chain
17111 SDValue InstOps[] = {Chain, SrcPtr, DAG.getConstant(ArgSize, MVT::i32),
17112 DAG.getConstant(ArgMode, MVT::i8),
17113 DAG.getConstant(Align, MVT::i32)};
17114 SDVTList VTs = DAG.getVTList(getPointerTy(), MVT::Other);
17115 SDValue VAARG = DAG.getMemIntrinsicNode(X86ISD::VAARG_64, dl,
17116 VTs, InstOps, MVT::i64,
17117 MachinePointerInfo(SV),
17119 /*Volatile=*/false,
17121 /*WriteMem=*/true);
17122 Chain = VAARG.getValue(1);
17124 // Load the next argument and return it
17125 return DAG.getLoad(ArgVT, dl,
17128 MachinePointerInfo(),
17129 false, false, false, 0);
17132 static SDValue LowerVACOPY(SDValue Op, const X86Subtarget *Subtarget,
17133 SelectionDAG &DAG) {
17134 // X86-64 va_list is a struct { i32, i32, i8*, i8* }.
17135 assert(Subtarget->is64Bit() && "This code only handles 64-bit va_copy!");
17136 SDValue Chain = Op.getOperand(0);
17137 SDValue DstPtr = Op.getOperand(1);
17138 SDValue SrcPtr = Op.getOperand(2);
17139 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
17140 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
17143 return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr,
17144 DAG.getIntPtrConstant(24), 8, /*isVolatile*/false,
17146 MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
17149 // getTargetVShiftByConstNode - Handle vector element shifts where the shift
17150 // amount is a constant. Takes immediate version of shift as input.
17151 static SDValue getTargetVShiftByConstNode(unsigned Opc, SDLoc dl, MVT VT,
17152 SDValue SrcOp, uint64_t ShiftAmt,
17153 SelectionDAG &DAG) {
17154 MVT ElementType = VT.getVectorElementType();
17156 // Fold this packed shift into its first operand if ShiftAmt is 0.
17160 // Check for ShiftAmt >= element width
17161 if (ShiftAmt >= ElementType.getSizeInBits()) {
17162 if (Opc == X86ISD::VSRAI)
17163 ShiftAmt = ElementType.getSizeInBits() - 1;
17165 return DAG.getConstant(0, VT);
17168 assert((Opc == X86ISD::VSHLI || Opc == X86ISD::VSRLI || Opc == X86ISD::VSRAI)
17169 && "Unknown target vector shift-by-constant node");
17171 // Fold this packed vector shift into a build vector if SrcOp is a
17172 // vector of Constants or UNDEFs, and SrcOp valuetype is the same as VT.
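// For example, with ShiftAmt == 3:
//   (VSHLI (build_vector 1, 2, undef, 8), 3) -> (build_vector 8, 16, undef, 64)
// Each constant lane is shifted at compile time; undef lanes pass through.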
17173 if (VT == SrcOp.getSimpleValueType() &&
17174 ISD::isBuildVectorOfConstantSDNodes(SrcOp.getNode())) {
17175 SmallVector<SDValue, 8> Elts;
17176 unsigned NumElts = SrcOp->getNumOperands();
17177 ConstantSDNode *ND;
17180 default: llvm_unreachable(nullptr);
17181 case X86ISD::VSHLI:
17182 for (unsigned i=0; i!=NumElts; ++i) {
17183 SDValue CurrentOp = SrcOp->getOperand(i);
17184 if (CurrentOp->getOpcode() == ISD::UNDEF) {
17185 Elts.push_back(CurrentOp);
17188 ND = cast<ConstantSDNode>(CurrentOp);
17189 const APInt &C = ND->getAPIntValue();
17190 Elts.push_back(DAG.getConstant(C.shl(ShiftAmt), ElementType));
17193 case X86ISD::VSRLI:
17194 for (unsigned i=0; i!=NumElts; ++i) {
17195 SDValue CurrentOp = SrcOp->getOperand(i);
17196 if (CurrentOp->getOpcode() == ISD::UNDEF) {
17197 Elts.push_back(CurrentOp);
17200 ND = cast<ConstantSDNode>(CurrentOp);
17201 const APInt &C = ND->getAPIntValue();
17202 Elts.push_back(DAG.getConstant(C.lshr(ShiftAmt), ElementType));
17205 case X86ISD::VSRAI:
17206 for (unsigned i=0; i!=NumElts; ++i) {
17207 SDValue CurrentOp = SrcOp->getOperand(i);
17208 if (CurrentOp->getOpcode() == ISD::UNDEF) {
17209 Elts.push_back(CurrentOp);
17212 ND = cast<ConstantSDNode>(CurrentOp);
17213 const APInt &C = ND->getAPIntValue();
17214 Elts.push_back(DAG.getConstant(C.ashr(ShiftAmt), ElementType));
17219 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Elts);
17222 return DAG.getNode(Opc, dl, VT, SrcOp, DAG.getConstant(ShiftAmt, MVT::i8));
17225 // getTargetVShiftNode - Handle vector element shifts where the shift amount
17226 // may or may not be a constant. Takes immediate version of shift as input.
17227 static SDValue getTargetVShiftNode(unsigned Opc, SDLoc dl, MVT VT,
17228 SDValue SrcOp, SDValue ShAmt,
17229 SelectionDAG &DAG) {
17230 MVT SVT = ShAmt.getSimpleValueType();
17231 assert((SVT == MVT::i32 || SVT == MVT::i64) && "Unexpected value type!");
17233 // Catch shift-by-constant.
17234 if (ConstantSDNode *CShAmt = dyn_cast<ConstantSDNode>(ShAmt))
17235 return getTargetVShiftByConstNode(Opc, dl, VT, SrcOp,
17236 CShAmt->getZExtValue(), DAG);
17238 // Change opcode to non-immediate version
17240 default: llvm_unreachable("Unknown target vector shift node");
17241 case X86ISD::VSHLI: Opc = X86ISD::VSHL; break;
17242 case X86ISD::VSRLI: Opc = X86ISD::VSRL; break;
17243 case X86ISD::VSRAI: Opc = X86ISD::VSRA; break;
17246 const X86Subtarget &Subtarget =
17247 static_cast<const X86Subtarget &>(DAG.getSubtarget());
17248 if (Subtarget.hasSSE41() && ShAmt.getOpcode() == ISD::ZERO_EXTEND &&
17249 ShAmt.getOperand(0).getSimpleValueType() == MVT::i16) {
17250 // Let the shuffle legalizer expand this shift amount node.
17251 SDValue Op0 = ShAmt.getOperand(0);
17252 Op0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(Op0), MVT::v8i16, Op0);
17253 ShAmt = getShuffleVectorZeroOrUndef(Op0, 0, true, &Subtarget, DAG);
// Need to build a vector containing the shift amount.
// SSE/AVX packed shifts only use the lower 64 bits of the shift count.
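// For example, an i32 shift amount Amt becomes the v4i32 vector
// <Amt, 0, undef, undef>, and an i64 amount becomes the v2i64 vector
// <Amt, undef>; in both cases only the low 64 bits are defined, which is
// all the packed shift instructions read.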
17257 SmallVector<SDValue, 4> ShOps;
17258 ShOps.push_back(ShAmt);
17259 if (SVT == MVT::i32) {
17260 ShOps.push_back(DAG.getConstant(0, SVT));
17261 ShOps.push_back(DAG.getUNDEF(SVT));
17263 ShOps.push_back(DAG.getUNDEF(SVT));
17265 MVT BVT = SVT == MVT::i32 ? MVT::v4i32 : MVT::v2i64;
17266 ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, BVT, ShOps);
17269 // The return type has to be a 128-bit type with the same element
17270 // type as the input type.
17271 MVT EltVT = VT.getVectorElementType();
17272 EVT ShVT = MVT::getVectorVT(EltVT, 128/EltVT.getSizeInBits());
17274 ShAmt = DAG.getNode(ISD::BITCAST, dl, ShVT, ShAmt);
17275 return DAG.getNode(Opc, dl, VT, SrcOp, ShAmt);
17278 /// \brief Return (and \p Op, \p Mask) for compare instructions or
17279 /// (vselect \p Mask, \p Op, \p PreservedSrc) for others along with the
17280 /// necessary casting for \p Mask when lowering masking intrinsics.
17281 static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
17282 SDValue PreservedSrc,
17283 const X86Subtarget *Subtarget,
17284 SelectionDAG &DAG) {
17285 EVT VT = Op.getValueType();
17286 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(),
17287 MVT::i1, VT.getVectorNumElements());
17288 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17289 Mask.getValueType().getSizeInBits());
17292 assert(MaskVT.isSimple() && "invalid mask type");
17294 if (isAllOnes(Mask))
// When MaskVT is v2i1 or v4i1, the lower 2 or 4 elements are
// extracted with EXTRACT_SUBVECTOR.
17299 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
17300 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
17301 DAG.getIntPtrConstant(0));
17303 switch (Op.getOpcode()) {
17305 case X86ISD::PCMPEQM:
17306 case X86ISD::PCMPGTM:
17308 case X86ISD::CMPMU:
17309 return DAG.getNode(ISD::AND, dl, VT, Op, VMask);
17311 if (PreservedSrc.getOpcode() == ISD::UNDEF)
17312 PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
17313 return DAG.getNode(ISD::VSELECT, dl, VT, VMask, Op, PreservedSrc);
17316 /// \brief Creates an SDNode for a predicated scalar operation.
17317 /// \returns (X86vselect \p Mask, \p Op, \p PreservedSrc).
/// The mask comes in as MVT::i8 and should be truncated
/// to MVT::i1 while lowering masking intrinsics.
/// The main difference between ScalarMaskingNode and VectorMaskingNode is that
/// the former uses "X86select" instead of "vselect"; we simply cannot create a
/// "vselect" node for a scalar instruction.
17323 static SDValue getScalarMaskingNode(SDValue Op, SDValue Mask,
17324 SDValue PreservedSrc,
17325 const X86Subtarget *Subtarget,
17326 SelectionDAG &DAG) {
17327 if (isAllOnes(Mask))
17330 EVT VT = Op.getValueType();
17332 // The mask should be of type MVT::i1
17333 SDValue IMask = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Mask);
17335 if (PreservedSrc.getOpcode() == ISD::UNDEF)
17336 PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
17337 return DAG.getNode(X86ISD::SELECT, dl, VT, IMask, Op, PreservedSrc);
17340 static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
17341 SelectionDAG &DAG) {
17343 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
17344 EVT VT = Op.getValueType();
17345 const IntrinsicData* IntrData = getIntrinsicWithoutChain(IntNo);
17347 switch(IntrData->Type) {
17348 case INTR_TYPE_1OP:
17349 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1));
17350 case INTR_TYPE_2OP:
17351 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1),
17353 case INTR_TYPE_3OP:
17354 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1),
17355 Op.getOperand(2), Op.getOperand(3));
17356 case INTR_TYPE_1OP_MASK_RM: {
17357 SDValue Src = Op.getOperand(1);
17358 SDValue Src0 = Op.getOperand(2);
17359 SDValue Mask = Op.getOperand(3);
17360 SDValue RoundingMode = Op.getOperand(4);
17361 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src,
17363 Mask, Src0, Subtarget, DAG);
17365 case INTR_TYPE_SCALAR_MASK_RM: {
17366 SDValue Src1 = Op.getOperand(1);
17367 SDValue Src2 = Op.getOperand(2);
17368 SDValue Src0 = Op.getOperand(3);
17369 SDValue Mask = Op.getOperand(4);
17370 SDValue RoundingMode = Op.getOperand(5);
17371 return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2,
17373 Mask, Src0, Subtarget, DAG);
17375 case INTR_TYPE_2OP_MASK: {
17376 SDValue Src1 = Op.getOperand(1);
17377 SDValue Src2 = Op.getOperand(2);
17378 SDValue PassThru = Op.getOperand(3);
17379 SDValue Mask = Op.getOperand(4);
// We specify two possible opcodes for intrinsics with rounding modes.
// First, check whether the intrinsic may have a non-default rounding mode
// (IntrData->Opc1 != 0), then check the rounding mode operand.
17383 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
17384 if (IntrWithRoundingModeOpcode != 0) {
17385 SDValue Rnd = Op.getOperand(5);
17386 unsigned Round = cast<ConstantSDNode>(Rnd)->getZExtValue();
17387 if (Round != X86::STATIC_ROUNDING::CUR_DIRECTION) {
17388 return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
17389 dl, Op.getValueType(),
17391 Mask, PassThru, Subtarget, DAG);
17394 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT,
17396 Mask, PassThru, Subtarget, DAG);
17398 case FMA_OP_MASK: {
17399 SDValue Src1 = Op.getOperand(1);
17400 SDValue Src2 = Op.getOperand(2);
17401 SDValue Src3 = Op.getOperand(3);
17402 SDValue Mask = Op.getOperand(4);
// We specify two possible opcodes for intrinsics with rounding modes.
// First, check whether the intrinsic may have a non-default rounding mode
// (IntrData->Opc1 != 0), then check the rounding mode operand.
17406 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
17407 if (IntrWithRoundingModeOpcode != 0) {
17408 SDValue Rnd = Op.getOperand(5);
17409 if (cast<ConstantSDNode>(Rnd)->getZExtValue() !=
17410 X86::STATIC_ROUNDING::CUR_DIRECTION)
17411 return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
17412 dl, Op.getValueType(),
17413 Src1, Src2, Src3, Rnd),
17414 Mask, Src1, Subtarget, DAG);
17416 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0,
17417 dl, Op.getValueType(),
17419 Mask, Src1, Subtarget, DAG);
17422 case CMP_MASK_CC: {
17423 // Comparison intrinsics with masks.
// Example of transformation:
// (i8 (int_x86_avx512_mask_pcmpeq_q_128
//             (v2i64 %a), (v2i64 %b), (i8 %mask))) ->
// (i8 (bitcast
//   (v8i1 (insert_subvector undef,
//           (v2i1 (and (PCMPEQM %a, %b),
//                      (extract_subvector
//                         (v8i1 (bitcast %mask)), 0))), 0))))
17432 EVT VT = Op.getOperand(1).getValueType();
17433 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17434 VT.getVectorNumElements());
17435 SDValue Mask = Op.getOperand((IntrData->Type == CMP_MASK_CC) ? 4 : 3);
17436 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17437 Mask.getValueType().getSizeInBits());
17439 if (IntrData->Type == CMP_MASK_CC) {
17440 Cmp = DAG.getNode(IntrData->Opc0, dl, MaskVT, Op.getOperand(1),
17441 Op.getOperand(2), Op.getOperand(3));
17443 assert(IntrData->Type == CMP_MASK && "Unexpected intrinsic type!");
17444 Cmp = DAG.getNode(IntrData->Opc0, dl, MaskVT, Op.getOperand(1),
17447 SDValue CmpMask = getVectorMaskingNode(Cmp, Mask,
17448 DAG.getTargetConstant(0, MaskVT),
17450 SDValue Res = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, BitcastVT,
17451 DAG.getUNDEF(BitcastVT), CmpMask,
17452 DAG.getIntPtrConstant(0));
17453 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
17455 case COMI: { // Comparison intrinsics
17456 ISD::CondCode CC = (ISD::CondCode)IntrData->Opc1;
17457 SDValue LHS = Op.getOperand(1);
17458 SDValue RHS = Op.getOperand(2);
17459 unsigned X86CC = TranslateX86CC(CC, true, LHS, RHS, DAG);
17460 assert(X86CC != X86::COND_INVALID && "Unexpected illegal condition!");
17461 SDValue Cond = DAG.getNode(IntrData->Opc0, dl, MVT::i32, LHS, RHS);
17462 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
17463 DAG.getConstant(X86CC, MVT::i8), Cond);
17464 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
17467 return getTargetVShiftNode(IntrData->Opc0, dl, Op.getSimpleValueType(),
17468 Op.getOperand(1), Op.getOperand(2), DAG);
17470 return getVectorMaskingNode(getTargetVShiftNode(IntrData->Opc0, dl,
17471 Op.getSimpleValueType(),
17473 Op.getOperand(2), DAG),
17474 Op.getOperand(4), Op.getOperand(3), Subtarget,
17476 case COMPRESS_EXPAND_IN_REG: {
17477 SDValue Mask = Op.getOperand(3);
17478 SDValue DataToCompress = Op.getOperand(1);
17479 SDValue PassThru = Op.getOperand(2);
17480 if (isAllOnes(Mask)) // return data as is
17481 return Op.getOperand(1);
17482 EVT VT = Op.getValueType();
17483 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17484 VT.getVectorNumElements());
17485 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17486 Mask.getValueType().getSizeInBits());
17488 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
17489 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
17490 DAG.getIntPtrConstant(0));
17492 return DAG.getNode(IntrData->Opc0, dl, VT, VMask, DataToCompress,
17496 SDValue Mask = Op.getOperand(3);
17497 EVT VT = Op.getValueType();
17498 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17499 VT.getVectorNumElements());
17500 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17501 Mask.getValueType().getSizeInBits());
17503 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
17504 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
17505 DAG.getIntPtrConstant(0));
17506 return DAG.getNode(IntrData->Opc0, dl, VT, VMask, Op.getOperand(1),
17515 default: return SDValue(); // Don't custom lower most intrinsics.
17517 case Intrinsic::x86_avx512_mask_valign_q_512:
17518 case Intrinsic::x86_avx512_mask_valign_d_512:
17519 // Vector source operands are swapped.
17520 return getVectorMaskingNode(DAG.getNode(X86ISD::VALIGN, dl,
17521 Op.getValueType(), Op.getOperand(2),
17524 Op.getOperand(5), Op.getOperand(4),
// ptest and testp intrinsics. The intrinsics these come from are designed to
// return an integer value rather than just an instruction, so lower them to
// the ptest or testp pattern and a setcc for the result.
17530 case Intrinsic::x86_sse41_ptestz:
17531 case Intrinsic::x86_sse41_ptestc:
17532 case Intrinsic::x86_sse41_ptestnzc:
17533 case Intrinsic::x86_avx_ptestz_256:
17534 case Intrinsic::x86_avx_ptestc_256:
17535 case Intrinsic::x86_avx_ptestnzc_256:
17536 case Intrinsic::x86_avx_vtestz_ps:
17537 case Intrinsic::x86_avx_vtestc_ps:
17538 case Intrinsic::x86_avx_vtestnzc_ps:
17539 case Intrinsic::x86_avx_vtestz_pd:
17540 case Intrinsic::x86_avx_vtestc_pd:
17541 case Intrinsic::x86_avx_vtestnzc_pd:
17542 case Intrinsic::x86_avx_vtestz_ps_256:
17543 case Intrinsic::x86_avx_vtestc_ps_256:
17544 case Intrinsic::x86_avx_vtestnzc_ps_256:
17545 case Intrinsic::x86_avx_vtestz_pd_256:
17546 case Intrinsic::x86_avx_vtestc_pd_256:
17547 case Intrinsic::x86_avx_vtestnzc_pd_256: {
17548 bool IsTestPacked = false;
17551 default: llvm_unreachable("Bad fallthrough in Intrinsic lowering.");
17552 case Intrinsic::x86_avx_vtestz_ps:
17553 case Intrinsic::x86_avx_vtestz_pd:
17554 case Intrinsic::x86_avx_vtestz_ps_256:
17555 case Intrinsic::x86_avx_vtestz_pd_256:
17556 IsTestPacked = true; // Fallthrough
17557 case Intrinsic::x86_sse41_ptestz:
17558 case Intrinsic::x86_avx_ptestz_256:
17560 X86CC = X86::COND_E;
17562 case Intrinsic::x86_avx_vtestc_ps:
17563 case Intrinsic::x86_avx_vtestc_pd:
17564 case Intrinsic::x86_avx_vtestc_ps_256:
17565 case Intrinsic::x86_avx_vtestc_pd_256:
17566 IsTestPacked = true; // Fallthrough
17567 case Intrinsic::x86_sse41_ptestc:
17568 case Intrinsic::x86_avx_ptestc_256:
17570 X86CC = X86::COND_B;
17572 case Intrinsic::x86_avx_vtestnzc_ps:
17573 case Intrinsic::x86_avx_vtestnzc_pd:
17574 case Intrinsic::x86_avx_vtestnzc_ps_256:
17575 case Intrinsic::x86_avx_vtestnzc_pd_256:
17576 IsTestPacked = true; // Fallthrough
17577 case Intrinsic::x86_sse41_ptestnzc:
17578 case Intrinsic::x86_avx_ptestnzc_256:
17580 X86CC = X86::COND_A;
17584 SDValue LHS = Op.getOperand(1);
17585 SDValue RHS = Op.getOperand(2);
17586 unsigned TestOpc = IsTestPacked ? X86ISD::TESTP : X86ISD::PTEST;
17587 SDValue Test = DAG.getNode(TestOpc, dl, MVT::i32, LHS, RHS);
17588 SDValue CC = DAG.getConstant(X86CC, MVT::i8);
17589 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, CC, Test);
17590 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
17592 case Intrinsic::x86_avx512_kortestz_w:
17593 case Intrinsic::x86_avx512_kortestc_w: {
17594 unsigned X86CC = (IntNo == Intrinsic::x86_avx512_kortestz_w)? X86::COND_E: X86::COND_B;
17595 SDValue LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1, Op.getOperand(1));
17596 SDValue RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1, Op.getOperand(2));
17597 SDValue CC = DAG.getConstant(X86CC, MVT::i8);
17598 SDValue Test = DAG.getNode(X86ISD::KORTEST, dl, MVT::i32, LHS, RHS);
17599 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i1, CC, Test);
17600 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
17603 case Intrinsic::x86_sse42_pcmpistria128:
17604 case Intrinsic::x86_sse42_pcmpestria128:
17605 case Intrinsic::x86_sse42_pcmpistric128:
17606 case Intrinsic::x86_sse42_pcmpestric128:
17607 case Intrinsic::x86_sse42_pcmpistrio128:
17608 case Intrinsic::x86_sse42_pcmpestrio128:
17609 case Intrinsic::x86_sse42_pcmpistris128:
17610 case Intrinsic::x86_sse42_pcmpestris128:
17611 case Intrinsic::x86_sse42_pcmpistriz128:
17612 case Intrinsic::x86_sse42_pcmpestriz128: {
17616 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
17617 case Intrinsic::x86_sse42_pcmpistria128:
17618 Opcode = X86ISD::PCMPISTRI;
17619 X86CC = X86::COND_A;
17621 case Intrinsic::x86_sse42_pcmpestria128:
17622 Opcode = X86ISD::PCMPESTRI;
17623 X86CC = X86::COND_A;
17625 case Intrinsic::x86_sse42_pcmpistric128:
17626 Opcode = X86ISD::PCMPISTRI;
17627 X86CC = X86::COND_B;
17629 case Intrinsic::x86_sse42_pcmpestric128:
17630 Opcode = X86ISD::PCMPESTRI;
17631 X86CC = X86::COND_B;
17633 case Intrinsic::x86_sse42_pcmpistrio128:
17634 Opcode = X86ISD::PCMPISTRI;
17635 X86CC = X86::COND_O;
17637 case Intrinsic::x86_sse42_pcmpestrio128:
17638 Opcode = X86ISD::PCMPESTRI;
17639 X86CC = X86::COND_O;
17641 case Intrinsic::x86_sse42_pcmpistris128:
17642 Opcode = X86ISD::PCMPISTRI;
17643 X86CC = X86::COND_S;
17645 case Intrinsic::x86_sse42_pcmpestris128:
17646 Opcode = X86ISD::PCMPESTRI;
17647 X86CC = X86::COND_S;
17649 case Intrinsic::x86_sse42_pcmpistriz128:
17650 Opcode = X86ISD::PCMPISTRI;
17651 X86CC = X86::COND_E;
17653 case Intrinsic::x86_sse42_pcmpestriz128:
17654 Opcode = X86ISD::PCMPESTRI;
17655 X86CC = X86::COND_E;
17658 SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
17659 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
17660 SDValue PCMP = DAG.getNode(Opcode, dl, VTs, NewOps);
17661 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
17662 DAG.getConstant(X86CC, MVT::i8),
17663 SDValue(PCMP.getNode(), 1));
17664 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
17667 case Intrinsic::x86_sse42_pcmpistri128:
17668 case Intrinsic::x86_sse42_pcmpestri128: {
17670 if (IntNo == Intrinsic::x86_sse42_pcmpistri128)
17671 Opcode = X86ISD::PCMPISTRI;
17673 Opcode = X86ISD::PCMPESTRI;
17675 SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
17676 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
17677 return DAG.getNode(Opcode, dl, VTs, NewOps);
17682 static SDValue getGatherNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
17683 SDValue Src, SDValue Mask, SDValue Base,
17684 SDValue Index, SDValue ScaleOp, SDValue Chain,
17685 const X86Subtarget * Subtarget) {
17687 ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp);
17688 assert(C && "Invalid scale type");
17689 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8);
17690 EVT MaskVT = MVT::getVectorVT(MVT::i1,
17691 Index.getSimpleValueType().getVectorNumElements());
17693 ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
17695 MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), MaskVT);
17697 MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask);
17698 SDVTList VTs = DAG.getVTList(Op.getValueType(), MaskVT, MVT::Other);
17699 SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
17700 SDValue Segment = DAG.getRegister(0, MVT::i32);
17701 if (Src.getOpcode() == ISD::UNDEF)
17702 Src = getZeroVector(Op.getValueType(), Subtarget, DAG, dl);
17703 SDValue Ops[] = {Src, MaskInReg, Base, Scale, Index, Disp, Segment, Chain};
17704 SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
17705 SDValue RetOps[] = { SDValue(Res, 0), SDValue(Res, 2) };
17706 return DAG.getMergeValues(RetOps, dl);
17709 static SDValue getScatterNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
17710 SDValue Src, SDValue Mask, SDValue Base,
17711 SDValue Index, SDValue ScaleOp, SDValue Chain) {
17713 ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp);
17714 assert(C && "Invalid scale type");
17715 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8);
17716 SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
17717 SDValue Segment = DAG.getRegister(0, MVT::i32);
17718 EVT MaskVT = MVT::getVectorVT(MVT::i1,
17719 Index.getSimpleValueType().getVectorNumElements());
17721 ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
17723 MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), MaskVT);
17725 MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask);
17726 SDVTList VTs = DAG.getVTList(MaskVT, MVT::Other);
17727 SDValue Ops[] = {Base, Scale, Index, Disp, Segment, MaskInReg, Src, Chain};
17728 SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
17729 return SDValue(Res, 1);
17732 static SDValue getPrefetchNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
17733 SDValue Mask, SDValue Base, SDValue Index,
17734 SDValue ScaleOp, SDValue Chain) {
17736 ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp);
17737 assert(C && "Invalid scale type");
17738 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8);
17739 SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
17740 SDValue Segment = DAG.getRegister(0, MVT::i32);
17742 MVT::getVectorVT(MVT::i1, Index.getSimpleValueType().getVectorNumElements());
17744 ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
17746 MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), MaskVT);
17748 MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask);
17750 SDValue Ops[] = {MaskInReg, Base, Scale, Index, Disp, Segment, Chain};
17751 SDNode *Res = DAG.getMachineNode(Opc, dl, MVT::Other, Ops);
17752 return SDValue(Res, 0);
17755 // getReadPerformanceCounter - Handles the lowering of builtin intrinsics that
17756 // read performance monitor counters (x86_rdpmc).
17757 static void getReadPerformanceCounter(SDNode *N, SDLoc DL,
17758 SelectionDAG &DAG, const X86Subtarget *Subtarget,
17759 SmallVectorImpl<SDValue> &Results) {
17760 assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
17761 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
// The ECX register is used to select the index of the performance counter
// to read.
17766 SDValue Chain = DAG.getCopyToReg(N->getOperand(0), DL, X86::ECX,
17768 SDValue rd = DAG.getNode(X86ISD::RDPMC_DAG, DL, Tys, Chain);
17770 // Reads the content of a 64-bit performance counter and returns it in the
17771 // registers EDX:EAX.
17772 if (Subtarget->is64Bit()) {
17773 LO = DAG.getCopyFromReg(rd, DL, X86::RAX, MVT::i64, rd.getValue(1));
17774 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
17777 LO = DAG.getCopyFromReg(rd, DL, X86::EAX, MVT::i32, rd.getValue(1));
17778 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
17781 Chain = HI.getValue(1);
17783 if (Subtarget->is64Bit()) {
17784 // The EAX register is loaded with the low-order 32 bits. The EDX register
17785 // is loaded with the supported high-order bits of the counter.
17786 SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
17787 DAG.getConstant(32, MVT::i8));
17788 Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
17789 Results.push_back(Chain);
17793 // Use a buildpair to merge the two 32-bit values into a 64-bit one.
17794 SDValue Ops[] = { LO, HI };
17795 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
17796 Results.push_back(Pair);
17797 Results.push_back(Chain);
17800 // getReadTimeStampCounter - Handles the lowering of builtin intrinsics that
17801 // read the time stamp counter (x86_rdtsc and x86_rdtscp). This function is
17802 // also used to custom lower READCYCLECOUNTER nodes.
17803 static void getReadTimeStampCounter(SDNode *N, SDLoc DL, unsigned Opcode,
17804 SelectionDAG &DAG, const X86Subtarget *Subtarget,
17805 SmallVectorImpl<SDValue> &Results) {
17806 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
17807 SDValue rd = DAG.getNode(Opcode, DL, Tys, N->getOperand(0));
17810 // The processor's time-stamp counter (a 64-bit MSR) is stored into the
17811 // EDX:EAX registers. EDX is loaded with the high-order 32 bits of the MSR
17812 // and the EAX register is loaded with the low-order 32 bits.
17813 if (Subtarget->is64Bit()) {
17814 LO = DAG.getCopyFromReg(rd, DL, X86::RAX, MVT::i64, rd.getValue(1));
17815 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
17818 LO = DAG.getCopyFromReg(rd, DL, X86::EAX, MVT::i32, rd.getValue(1));
17819 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
17822 SDValue Chain = HI.getValue(1);
17824 if (Opcode == X86ISD::RDTSCP_DAG) {
17825 assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
17827 // Instruction RDTSCP loads the IA32:TSC_AUX_MSR (address C000_0103H) into
17828 // the ECX register. Add 'ecx' explicitly to the chain.
17829 SDValue ecx = DAG.getCopyFromReg(Chain, DL, X86::ECX, MVT::i32,
// Explicitly store the content of ECX at the location passed as input
// to the 'rdtscp' intrinsic.
17833 Chain = DAG.getStore(ecx.getValue(1), DL, ecx, N->getOperand(2),
17834 MachinePointerInfo(), false, false, 0);
17837 if (Subtarget->is64Bit()) {
17838 // The EDX register is loaded with the high-order 32 bits of the MSR, and
17839 // the EAX register is loaded with the low-order 32 bits.
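// In other words, the 64-bit counter value is reassembled below as
//   Result = ((uint64_t)HI << 32) | LO
// where HI and LO are the values copied out of RDX and RAX above.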
17840 SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
17841 DAG.getConstant(32, MVT::i8));
17842 Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
17843 Results.push_back(Chain);
17847 // Use a buildpair to merge the two 32-bit values into a 64-bit one.
17848 SDValue Ops[] = { LO, HI };
17849 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
17850 Results.push_back(Pair);
17851 Results.push_back(Chain);
17854 static SDValue LowerREADCYCLECOUNTER(SDValue Op, const X86Subtarget *Subtarget,
17855 SelectionDAG &DAG) {
17856 SmallVector<SDValue, 2> Results;
17858 getReadTimeStampCounter(Op.getNode(), DL, X86ISD::RDTSC_DAG, DAG, Subtarget,
17860 return DAG.getMergeValues(Results, DL);
17864 static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
17865 SelectionDAG &DAG) {
17866 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
17868 const IntrinsicData* IntrData = getIntrinsicWithChain(IntNo);
17873 switch(IntrData->Type) {
17875 llvm_unreachable("Unknown Intrinsic Type");
17879 // Emit the node with the right value type.
17880 SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Glue, MVT::Other);
17881 SDValue Result = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
// If the value returned by RDRAND/RDSEED was valid (CF=1), return 1.
// Otherwise return the value from Rand, which is always 0, cast to i32.
17885 SDValue Ops[] = { DAG.getZExtOrTrunc(Result, dl, Op->getValueType(1)),
17886 DAG.getConstant(1, Op->getValueType(1)),
17887 DAG.getConstant(X86::COND_B, MVT::i32),
17888 SDValue(Result.getNode(), 1) };
17889 SDValue isValid = DAG.getNode(X86ISD::CMOV, dl,
17890 DAG.getVTList(Op->getValueType(1), MVT::Glue),
17893 // Return { result, isValid, chain }.
17894 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result, isValid,
17895 SDValue(Result.getNode(), 2));
17898 //gather(v1, mask, index, base, scale);
17899 SDValue Chain = Op.getOperand(0);
17900 SDValue Src = Op.getOperand(2);
17901 SDValue Base = Op.getOperand(3);
17902 SDValue Index = Op.getOperand(4);
17903 SDValue Mask = Op.getOperand(5);
17904 SDValue Scale = Op.getOperand(6);
17905 return getGatherNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index, Scale, Chain,
17909 //scatter(base, mask, index, v1, scale);
17910 SDValue Chain = Op.getOperand(0);
17911 SDValue Base = Op.getOperand(2);
17912 SDValue Mask = Op.getOperand(3);
17913 SDValue Index = Op.getOperand(4);
17914 SDValue Src = Op.getOperand(5);
17915 SDValue Scale = Op.getOperand(6);
17916 return getScatterNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index, Scale, Chain);
SDValue Hint = Op.getOperand(6);
unsigned HintVal;
if (dyn_cast<ConstantSDNode>(Hint) == nullptr ||
    (HintVal = cast<ConstantSDNode>(Hint)->getZExtValue()) > 1)
  llvm_unreachable("Wrong prefetch hint in intrinsic: should be 0 or 1");
unsigned Opcode = (HintVal ? IntrData->Opc1 : IntrData->Opc0);
17925 SDValue Chain = Op.getOperand(0);
17926 SDValue Mask = Op.getOperand(2);
17927 SDValue Index = Op.getOperand(3);
17928 SDValue Base = Op.getOperand(4);
17929 SDValue Scale = Op.getOperand(5);
17930 return getPrefetchNode(Opcode, Op, DAG, Mask, Base, Index, Scale, Chain);
17932 // Read Time Stamp Counter (RDTSC) and Processor ID (RDTSCP).
17934 SmallVector<SDValue, 2> Results;
17935 getReadTimeStampCounter(Op.getNode(), dl, IntrData->Opc0, DAG, Subtarget, Results);
17936 return DAG.getMergeValues(Results, dl);
17938 // Read Performance Monitoring Counters.
17940 SmallVector<SDValue, 2> Results;
17941 getReadPerformanceCounter(Op.getNode(), dl, DAG, Subtarget, Results);
17942 return DAG.getMergeValues(Results, dl);
17944 // XTEST intrinsics.
17946 SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
17947 SDValue InTrans = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
17948 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
17949 DAG.getConstant(X86::COND_NE, MVT::i8),
17951 SDValue Ret = DAG.getNode(ISD::ZERO_EXTEND, dl, Op->getValueType(0), SetCC);
17952 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(),
17953 Ret, SDValue(InTrans.getNode(), 1));
17957 SmallVector<SDValue, 2> Results;
17958 SDVTList CFVTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
17959 SDVTList VTs = DAG.getVTList(Op.getOperand(3)->getValueType(0), MVT::Other);
17960 SDValue GenCF = DAG.getNode(X86ISD::ADD, dl, CFVTs, Op.getOperand(2),
17961 DAG.getConstant(-1, MVT::i8));
17962 SDValue Res = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(3),
17963 Op.getOperand(4), GenCF.getValue(1));
17964 SDValue Store = DAG.getStore(Op.getOperand(0), dl, Res.getValue(0),
17965 Op.getOperand(5), MachinePointerInfo(),
17967 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
17968 DAG.getConstant(X86::COND_B, MVT::i8),
17970 Results.push_back(SetCC);
17971 Results.push_back(Store);
17972 return DAG.getMergeValues(Results, dl);
17974 case COMPRESS_TO_MEM: {
17976 SDValue Mask = Op.getOperand(4);
17977 SDValue DataToCompress = Op.getOperand(3);
17978 SDValue Addr = Op.getOperand(2);
17979 SDValue Chain = Op.getOperand(0);
17981 if (isAllOnes(Mask)) // return just a store
17982 return DAG.getStore(Chain, dl, DataToCompress, Addr,
17983 MachinePointerInfo(), false, false, 0);
17985 EVT VT = DataToCompress.getValueType();
17986 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17987 VT.getVectorNumElements());
17988 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17989 Mask.getValueType().getSizeInBits());
17990 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
17991 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
17992 DAG.getIntPtrConstant(0));
17994 SDValue Compressed = DAG.getNode(IntrData->Opc0, dl, VT, VMask,
17995 DataToCompress, DAG.getUNDEF(VT));
17996 return DAG.getStore(Chain, dl, Compressed, Addr,
17997 MachinePointerInfo(), false, false, 0);
17999 case EXPAND_FROM_MEM: {
18001 SDValue Mask = Op.getOperand(4);
SDValue PassThru = Op.getOperand(3);
18003 SDValue Addr = Op.getOperand(2);
18004 SDValue Chain = Op.getOperand(0);
18005 EVT VT = Op.getValueType();
18007 if (isAllOnes(Mask)) // return just a load
18008 return DAG.getLoad(VT, dl, Chain, Addr, MachinePointerInfo(), false, false,
18010 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
18011 VT.getVectorNumElements());
18012 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
18013 Mask.getValueType().getSizeInBits());
18014 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
18015 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
18016 DAG.getIntPtrConstant(0));
18018 SDValue DataToExpand = DAG.getLoad(VT, dl, Chain, Addr, MachinePointerInfo(),
18019 false, false, false, 0);
18021 SDValue Results[] = {
DAG.getNode(IntrData->Opc0, dl, VT, VMask, DataToExpand, PassThru),
18024 return DAG.getMergeValues(Results, dl);
18029 SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op,
18030 SelectionDAG &DAG) const {
18031 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
18032 MFI->setReturnAddressIsTaken(true);
18034 if (verifyReturnAddressArgumentIsConstant(Op, DAG))
18037 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
18039 EVT PtrVT = getPointerTy();
18042 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
18043 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
18044 SDValue Offset = DAG.getConstant(RegInfo->getSlotSize(), PtrVT);
18045 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
18046 DAG.getNode(ISD::ADD, dl, PtrVT,
18047 FrameAddr, Offset),
18048 MachinePointerInfo(), false, false, false, 0);
18051 // Just load the return address.
18052 SDValue RetAddrFI = getReturnAddressFrameIndex(DAG);
18053 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
18054 RetAddrFI, MachinePointerInfo(), false, false, false, 0);
18057 SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
18058 MachineFunction &MF = DAG.getMachineFunction();
18059 MachineFrameInfo *MFI = MF.getFrameInfo();
18060 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
18061 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
18062 EVT VT = Op.getValueType();
18064 MFI->setFrameAddressIsTaken(true);
18066 if (MF.getTarget().getMCAsmInfo()->usesWindowsCFI()) {
// Depth > 0 makes no sense on targets which use Windows unwind codes. It
// is not possible to crawl up the stack without looking at the unwind
// codes simultaneously.
18070 int FrameAddrIndex = FuncInfo->getFAIndex();
18071 if (!FrameAddrIndex) {
18072 // Set up a frame object for the return address.
18073 unsigned SlotSize = RegInfo->getSlotSize();
18074 FrameAddrIndex = MF.getFrameInfo()->CreateFixedObject(
18075 SlotSize, /*Offset=*/INT64_MIN, /*IsImmutable=*/false);
18076 FuncInfo->setFAIndex(FrameAddrIndex);
18078 return DAG.getFrameIndex(FrameAddrIndex, VT);
18081 unsigned FrameReg =
18082 RegInfo->getPtrSizedFrameRegister(DAG.getMachineFunction());
18083 SDLoc dl(Op); // FIXME probably not meaningful
18084 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
18085 assert(((FrameReg == X86::RBP && VT == MVT::i64) ||
18086 (FrameReg == X86::EBP && VT == MVT::i32)) &&
18087 "Invalid Frame Register!");
18088 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
18090 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
18091 MachinePointerInfo(),
18092 false, false, false, 0);
18096 // FIXME? Maybe this could be a TableGen attribute on some registers and
18097 // this table could be generated automatically from RegInfo.
18098 unsigned X86TargetLowering::getRegisterByName(const char* RegName,
18100 unsigned Reg = StringSwitch<unsigned>(RegName)
18101 .Case("esp", X86::ESP)
18102 .Case("rsp", X86::RSP)
18106 report_fatal_error("Invalid register name global variable");
18109 SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op,
18110 SelectionDAG &DAG) const {
18111 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
18112 return DAG.getIntPtrConstant(2 * RegInfo->getSlotSize());
18115 SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
18116 SDValue Chain = Op.getOperand(0);
18117 SDValue Offset = Op.getOperand(1);
18118 SDValue Handler = Op.getOperand(2);
18121 EVT PtrVT = getPointerTy();
18122 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
18123 unsigned FrameReg = RegInfo->getFrameRegister(DAG.getMachineFunction());
18124 assert(((FrameReg == X86::RBP && PtrVT == MVT::i64) ||
18125 (FrameReg == X86::EBP && PtrVT == MVT::i32)) &&
18126 "Invalid Frame Register!");
18127 SDValue Frame = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, PtrVT);
18128 unsigned StoreAddrReg = (PtrVT == MVT::i64) ? X86::RCX : X86::ECX;
18130 SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, Frame,
18131 DAG.getIntPtrConstant(RegInfo->getSlotSize()));
18132 StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, StoreAddr, Offset);
18133 Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo(),
18135 Chain = DAG.getCopyToReg(Chain, dl, StoreAddrReg, StoreAddr);
18137 return DAG.getNode(X86ISD::EH_RETURN, dl, MVT::Other, Chain,
18138 DAG.getRegister(StoreAddrReg, PtrVT));
18141 SDValue X86TargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
18142 SelectionDAG &DAG) const {
18144 return DAG.getNode(X86ISD::EH_SJLJ_SETJMP, DL,
18145 DAG.getVTList(MVT::i32, MVT::Other),
18146 Op.getOperand(0), Op.getOperand(1));
18149 SDValue X86TargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
18150 SelectionDAG &DAG) const {
18152 return DAG.getNode(X86ISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
18153 Op.getOperand(0), Op.getOperand(1));
18156 static SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) {
18157 return Op.getOperand(0);
18160 SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
18161 SelectionDAG &DAG) const {
18162 SDValue Root = Op.getOperand(0);
18163 SDValue Trmp = Op.getOperand(1); // trampoline
18164 SDValue FPtr = Op.getOperand(2); // nested function
18165 SDValue Nest = Op.getOperand(3); // 'nest' parameter value
18168 const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
18169 const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
18171 if (Subtarget->is64Bit()) {
18172 SDValue OutChains[6];
18174 // Large code-model.
18175 const unsigned char JMP64r = 0xFF; // 64-bit jmp through register opcode.
18176 const unsigned char MOV64ri = 0xB8; // X86::MOV64ri opcode.
18178 const unsigned char N86R10 = TRI->getEncodingValue(X86::R10) & 0x7;
18179 const unsigned char N86R11 = TRI->getEncodingValue(X86::R11) & 0x7;
18181 const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix
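// The trampoline assembled below is, in effect (register encodings are
// taken from N86R10/N86R11):
//   movabsq $<nested function>, %r11
//   movabsq $<nest parameter>,  %r10
//   jmpq    *%r11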
18183 // Load the pointer to the nested function into R11.
18184 unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11
18185 SDValue Addr = Trmp;
18186 OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
18187 Addr, MachinePointerInfo(TrmpAddr),
18190 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18191 DAG.getConstant(2, MVT::i64));
18192 OutChains[1] = DAG.getStore(Root, dl, FPtr, Addr,
18193 MachinePointerInfo(TrmpAddr, 2),
18196 // Load the 'nest' parameter value into R10.
18197 // R10 is specified in X86CallingConv.td
18198 OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10
18199 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18200 DAG.getConstant(10, MVT::i64));
18201 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
18202 Addr, MachinePointerInfo(TrmpAddr, 10),
18205 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18206 DAG.getConstant(12, MVT::i64));
18207 OutChains[3] = DAG.getStore(Root, dl, Nest, Addr,
18208 MachinePointerInfo(TrmpAddr, 12),
18211 // Jump to the nested function.
18212 OpCode = (JMP64r << 8) | REX_WB; // jmpq *...
18213 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18214 DAG.getConstant(20, MVT::i64));
18215 OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
18216 Addr, MachinePointerInfo(TrmpAddr, 20),
18219 unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11
18220 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18221 DAG.getConstant(22, MVT::i64));
18222 OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, MVT::i8), Addr,
18223 MachinePointerInfo(TrmpAddr, 22),
18226 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
18228 const Function *Func =
18229 cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue());
18230 CallingConv::ID CC = Func->getCallingConv();
18235 llvm_unreachable("Unsupported calling convention");
18236 case CallingConv::C:
18237 case CallingConv::X86_StdCall: {
18238 // Pass 'nest' parameter in ECX.
18239 // Must be kept in sync with X86CallingConv.td
18240 NestReg = X86::ECX;
18242 // Check that ECX wasn't needed by an 'inreg' parameter.
18243 FunctionType *FTy = Func->getFunctionType();
18244 const AttributeSet &Attrs = Func->getAttributes();
18246 if (!Attrs.isEmpty() && !Func->isVarArg()) {
18247 unsigned InRegCount = 0;
18250 for (FunctionType::param_iterator I = FTy->param_begin(),
18251 E = FTy->param_end(); I != E; ++I, ++Idx)
18252 if (Attrs.hasAttribute(Idx, Attribute::InReg))
18253 // FIXME: should only count parameters that are lowered to integers.
18254 InRegCount += (TD->getTypeSizeInBits(*I) + 31) / 32;
18256 if (InRegCount > 2) {
18257 report_fatal_error("Nest register in use - reduce number of inreg"
18263 case CallingConv::X86_FastCall:
18264 case CallingConv::X86_ThisCall:
18265 case CallingConv::Fast:
18266 // Pass 'nest' parameter in EAX.
18267 // Must be kept in sync with X86CallingConv.td
18268 NestReg = X86::EAX;
18272 SDValue OutChains[4];
18273 SDValue Addr, Disp;
18275 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
18276 DAG.getConstant(10, MVT::i32));
18277 Disp = DAG.getNode(ISD::SUB, dl, MVT::i32, FPtr, Addr);
18279 // This is storing the opcode for MOV32ri.
18280 const unsigned char MOV32ri = 0xB8; // X86::MOV32ri's opcode byte.
18281 const unsigned char N86Reg = TRI->getEncodingValue(NestReg) & 0x7;
18282 OutChains[0] = DAG.getStore(Root, dl,
18283 DAG.getConstant(MOV32ri|N86Reg, MVT::i8),
18284 Trmp, MachinePointerInfo(TrmpAddr),
18287 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
18288 DAG.getConstant(1, MVT::i32));
18289 OutChains[1] = DAG.getStore(Root, dl, Nest, Addr,
18290 MachinePointerInfo(TrmpAddr, 1),
18293 const unsigned char JMP = 0xE9; // jmp <32bit dst> opcode.
18294 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
18295 DAG.getConstant(5, MVT::i32));
18296 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(JMP, MVT::i8), Addr,
18297 MachinePointerInfo(TrmpAddr, 5),
18300 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
18301 DAG.getConstant(6, MVT::i32));
18302 OutChains[3] = DAG.getStore(Root, dl, Disp, Addr,
18303 MachinePointerInfo(TrmpAddr, 6),
18306 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
18310 SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
18311 SelectionDAG &DAG) const {
  /*
   The rounding mode is in bits 11:10 of FPSR, and has the following
   settings:
     00 Round to nearest
     01 Round to -inf
     10 Round to +inf
     11 Round to 0

  FLT_ROUNDS, on the other hand, expects the following:
    -1 Undefined
     0 Round to 0
     1 Round to nearest
     2 Round to +inf
     3 Round to -inf

  To perform the conversion, we do:
    (((((FPSR & 0x800) >> 11) | ((FPSR & 0x400) >> 9)) + 1) & 3)
  */
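// A quick check of the formula above, using the standard x87 RC encodings
// (stated here for clarity, not taken from this file):
//   RC = 00 (nearest): ((0 >> 11) | (0 >> 9)) + 1 = 1          -> FLT_ROUNDS 1
//   RC = 01 (-inf):    ((0 >> 11) | (0x400 >> 9)) + 1 = 3      -> FLT_ROUNDS 3
//   RC = 10 (+inf):    ((0x800 >> 11) | (0 >> 9)) + 1 = 2      -> FLT_ROUNDS 2
//   RC = 11 (zero):    (1 | 2) + 1 = 4, & 3 = 0                -> FLT_ROUNDS 0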
18331 MachineFunction &MF = DAG.getMachineFunction();
18332 const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
18333 unsigned StackAlignment = TFI.getStackAlignment();
18334 MVT VT = Op.getSimpleValueType();
18337 // Save FP Control Word to stack slot
18338 int SSFI = MF.getFrameInfo()->CreateStackObject(2, StackAlignment, false);
18339 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
18341 MachineMemOperand *MMO =
18342 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
18343 MachineMemOperand::MOStore, 2, 2);
18345 SDValue Ops[] = { DAG.getEntryNode(), StackSlot };
18346 SDValue Chain = DAG.getMemIntrinsicNode(X86ISD::FNSTCW16m, DL,
18347 DAG.getVTList(MVT::Other),
18348 Ops, MVT::i16, MMO);
18350 // Load FP Control Word from stack slot
18351 SDValue CWD = DAG.getLoad(MVT::i16, DL, Chain, StackSlot,
18352 MachinePointerInfo(), false, false, false, 0);
18354 // Transform as necessary
18356 DAG.getNode(ISD::SRL, DL, MVT::i16,
18357 DAG.getNode(ISD::AND, DL, MVT::i16,
18358 CWD, DAG.getConstant(0x800, MVT::i16)),
18359 DAG.getConstant(11, MVT::i8));
18361 DAG.getNode(ISD::SRL, DL, MVT::i16,
18362 DAG.getNode(ISD::AND, DL, MVT::i16,
18363 CWD, DAG.getConstant(0x400, MVT::i16)),
18364 DAG.getConstant(9, MVT::i8));
18367 DAG.getNode(ISD::AND, DL, MVT::i16,
18368 DAG.getNode(ISD::ADD, DL, MVT::i16,
18369 DAG.getNode(ISD::OR, DL, MVT::i16, CWD1, CWD2),
18370 DAG.getConstant(1, MVT::i16)),
18371 DAG.getConstant(3, MVT::i16));
18373 return DAG.getNode((VT.getSizeInBits() < 16 ?
18374 ISD::TRUNCATE : ISD::ZERO_EXTEND), DL, VT, RetVal);
18377 static SDValue LowerCTLZ(SDValue Op, SelectionDAG &DAG) {
18378 MVT VT = Op.getSimpleValueType();
18380 unsigned NumBits = VT.getSizeInBits();
18383 Op = Op.getOperand(0);
18384 if (VT == MVT::i8) {
// Zero extend to i32 since there is no i8 bsr.
18387 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
18390 // Issue a bsr (scan bits in reverse) which also sets EFLAGS.
18391 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
18392 Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);
18394 // If src is zero (i.e. bsr sets ZF), returns NumBits.
18397 DAG.getConstant(NumBits+NumBits-1, OpVT),
18398 DAG.getConstant(X86::COND_E, MVT::i8),
18401 Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops);
18403 // Finally xor with NumBits-1.
18404 Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT));
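// Worked example for i32: ctlz(0x00010000). BSR returns the index of the
// highest set bit, 16, and 16 ^ 31 == 15, which is the number of leading
// zeros. When the source is zero, the CMOV above substitutes
// 2*NumBits-1 = 63, and 63 ^ 31 == 32 == NumBits, as required here.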
18407 Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
18411 static SDValue LowerCTLZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) {
18412 MVT VT = Op.getSimpleValueType();
18414 unsigned NumBits = VT.getSizeInBits();
18417 Op = Op.getOperand(0);
18418 if (VT == MVT::i8) {
// Zero extend to i32 since there is no i8 bsr.
18421 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
18424 // Issue a bsr (scan bits in reverse).
18425 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
18426 Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);
18428 // And xor with NumBits-1.
18429 Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT));
18432 Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
18436 static SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) {
18437 MVT VT = Op.getSimpleValueType();
18438 unsigned NumBits = VT.getSizeInBits();
18440 Op = Op.getOperand(0);
18442 // Issue a bsf (scan bits forward) which also sets EFLAGS.
18443 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
18444 Op = DAG.getNode(X86ISD::BSF, dl, VTs, Op);
18446 // If src is zero (i.e. bsf sets ZF), returns NumBits.
18449 DAG.getConstant(NumBits, VT),
18450 DAG.getConstant(X86::COND_E, MVT::i8),
18453 return DAG.getNode(X86ISD::CMOV, dl, VT, Ops);
18456 // Lower256IntArith - Break a 256-bit integer operation into two new 128-bit
18457 // ones, and then concatenate the result back.
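// For example, (add v8i32 %a, %b) becomes
//   (concat_vectors (add v4i32 %a_lo, %b_lo), (add v4i32 %a_hi, %b_hi))
// where %a_lo/%a_hi and %b_lo/%b_hi are the 128-bit halves extracted below.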
18458 static SDValue Lower256IntArith(SDValue Op, SelectionDAG &DAG) {
18459 MVT VT = Op.getSimpleValueType();
18461 assert(VT.is256BitVector() && VT.isInteger() &&
18462 "Unsupported value type for operation");
18464 unsigned NumElems = VT.getVectorNumElements();
18467 // Extract the LHS vectors
18468 SDValue LHS = Op.getOperand(0);
18469 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
18470 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);
18472 // Extract the RHS vectors
18473 SDValue RHS = Op.getOperand(1);
18474 SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, dl);
18475 SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, dl);
18477 MVT EltVT = VT.getVectorElementType();
18478 MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
18480 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
18481 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1),
18482 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2));
18485 static SDValue LowerADD(SDValue Op, SelectionDAG &DAG) {
18486 assert(Op.getSimpleValueType().is256BitVector() &&
18487 Op.getSimpleValueType().isInteger() &&
18488 "Only handle AVX 256-bit vector integer operation");
18489 return Lower256IntArith(Op, DAG);
18492 static SDValue LowerSUB(SDValue Op, SelectionDAG &DAG) {
18493 assert(Op.getSimpleValueType().is256BitVector() &&
18494 Op.getSimpleValueType().isInteger() &&
18495 "Only handle AVX 256-bit vector integer operation");
18496 return Lower256IntArith(Op, DAG);
18499 static SDValue LowerMUL(SDValue Op, const X86Subtarget *Subtarget,
18500 SelectionDAG &DAG) {
18502 MVT VT = Op.getSimpleValueType();
18504 // Decompose 256-bit ops into smaller 128-bit ops.
18505 if (VT.is256BitVector() && !Subtarget->hasInt256())
18506 return Lower256IntArith(Op, DAG);
18508 SDValue A = Op.getOperand(0);
18509 SDValue B = Op.getOperand(1);
18511 // Lower v4i32 mul as 2x shuffle, 2x pmuludq, 2x shuffle.
18512 if (VT == MVT::v4i32) {
18513 assert(Subtarget->hasSSE2() && !Subtarget->hasSSE41() &&
18514 "Should not custom lower when pmuldq is available!");
18516 // Extract the odd parts.
18517 static const int UnpackMask[] = { 1, -1, 3, -1 };
18518 SDValue Aodds = DAG.getVectorShuffle(VT, dl, A, A, UnpackMask);
18519 SDValue Bodds = DAG.getVectorShuffle(VT, dl, B, B, UnpackMask);
18521 // Multiply the even parts.
18522 SDValue Evens = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64, A, B);
18523 // Now multiply odd parts.
18524 SDValue Odds = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64, Aodds, Bodds);
18526 Evens = DAG.getNode(ISD::BITCAST, dl, VT, Evens);
18527 Odds = DAG.getNode(ISD::BITCAST, dl, VT, Odds);
// Merge the two vectors back together with a shuffle. This expands into 2
// instructions.
18531 static const int ShufMask[] = { 0, 4, 2, 6 };
18532 return DAG.getVectorShuffle(VT, dl, Evens, Odds, ShufMask);
18535 assert((VT == MVT::v2i64 || VT == MVT::v4i64 || VT == MVT::v8i64) &&
18536 "Only know how to lower V2I64/V4I64/V8I64 multiply");
18538 // Ahi = psrlqi(a, 32);
18539 // Bhi = psrlqi(b, 32);
18541 // AloBlo = pmuludq(a, b);
18542 // AloBhi = pmuludq(a, Bhi);
18543 // AhiBlo = pmuludq(Ahi, b);
18545 // AloBhi = psllqi(AloBhi, 32);
18546 // AhiBlo = psllqi(AhiBlo, 32);
18547 // return AloBlo + AloBhi + AhiBlo;
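// The identity being used: writing a = 2^32*Ahi + Alo and b = 2^32*Bhi + Blo,
//   a*b mod 2^64 = Alo*Blo + ((Alo*Bhi + Ahi*Blo) << 32)
// since the Ahi*Bhi term falls entirely outside the low 64 bits, and
// pmuludq computes exactly the 32x32->64 products needed for each term.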
18549 SDValue Ahi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, A, 32, DAG);
18550 SDValue Bhi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, B, 32, DAG);
18552 // Bit cast to 32-bit vectors for MULUDQ
18553 EVT MulVT = (VT == MVT::v2i64) ? MVT::v4i32 :
18554 (VT == MVT::v4i64) ? MVT::v8i32 : MVT::v16i32;
18555 A = DAG.getNode(ISD::BITCAST, dl, MulVT, A);
18556 B = DAG.getNode(ISD::BITCAST, dl, MulVT, B);
18557 Ahi = DAG.getNode(ISD::BITCAST, dl, MulVT, Ahi);
18558 Bhi = DAG.getNode(ISD::BITCAST, dl, MulVT, Bhi);
18560 SDValue AloBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, B);
18561 SDValue AloBhi = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, Bhi);
18562 SDValue AhiBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, Ahi, B);
18564 AloBhi = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, AloBhi, 32, DAG);
18565 AhiBlo = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, AhiBlo, 32, DAG);
18567 SDValue Res = DAG.getNode(ISD::ADD, dl, VT, AloBlo, AloBhi);
18568 return DAG.getNode(ISD::ADD, dl, VT, Res, AhiBlo);
18571 SDValue X86TargetLowering::LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) const {
18572 assert(Subtarget->isTargetWin64() && "Unexpected target");
18573 EVT VT = Op.getValueType();
18574 assert(VT.isInteger() && VT.getSizeInBits() == 128 &&
18575 "Unexpected return type for lowering");
18579 switch (Op->getOpcode()) {
18580 default: llvm_unreachable("Unexpected request for libcall!");
18581 case ISD::SDIV: isSigned = true; LC = RTLIB::SDIV_I128; break;
18582 case ISD::UDIV: isSigned = false; LC = RTLIB::UDIV_I128; break;
18583 case ISD::SREM: isSigned = true; LC = RTLIB::SREM_I128; break;
18584 case ISD::UREM: isSigned = false; LC = RTLIB::UREM_I128; break;
18585 case ISD::SDIVREM: isSigned = true; LC = RTLIB::SDIVREM_I128; break;
18586 case ISD::UDIVREM: isSigned = false; LC = RTLIB::UDIVREM_I128; break;
18590 SDValue InChain = DAG.getEntryNode();
18592 TargetLowering::ArgListTy Args;
18593 TargetLowering::ArgListEntry Entry;
18594 for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i) {
18595 EVT ArgVT = Op->getOperand(i).getValueType();
18596 assert(ArgVT.isInteger() && ArgVT.getSizeInBits() == 128 &&
18597 "Unexpected argument type for lowering");
18598 SDValue StackPtr = DAG.CreateStackTemporary(ArgVT, 16);
18599 Entry.Node = StackPtr;
18600 InChain = DAG.getStore(InChain, dl, Op->getOperand(i), StackPtr, MachinePointerInfo(),
18602 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
18603 Entry.Ty = PointerType::get(ArgTy,0);
18604 Entry.isSExt = false;
18605 Entry.isZExt = false;
18606 Args.push_back(Entry);
18609 SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
18612 TargetLowering::CallLoweringInfo CLI(DAG);
18613 CLI.setDebugLoc(dl).setChain(InChain)
18614 .setCallee(getLibcallCallingConv(LC),
18615 static_cast<EVT>(MVT::v2i64).getTypeForEVT(*DAG.getContext()),
18616 Callee, std::move(Args), 0)
18617 .setInRegister().setSExtResult(isSigned).setZExtResult(!isSigned);
18619 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
18620 return DAG.getNode(ISD::BITCAST, dl, VT, CallInfo.first);
18623 static SDValue LowerMUL_LOHI(SDValue Op, const X86Subtarget *Subtarget,
18624 SelectionDAG &DAG) {
18625 SDValue Op0 = Op.getOperand(0), Op1 = Op.getOperand(1);
18626 EVT VT = Op0.getValueType();
18629 assert((VT == MVT::v4i32 && Subtarget->hasSSE2()) ||
18630 (VT == MVT::v8i32 && Subtarget->hasInt256()));
// PMULxD operations multiply each even-indexed value (starting at 0) of LHS
// with the corresponding value of RHS and produce a widened result.
18634 // E.g., PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
18635 // => <2 x i64> <ae|cg>
18637 // In other words, to have all the results, we need to perform two PMULxD:
18638 // 1. one with the even values.
18639 // 2. one with the odd values.
18640 // To achieve #2, we need to place the odd values at an even position.
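// For example, <a|b|c|d> * <e|f|g|h> is computed as PMULxD(<a|b|c|d>, <e|f|g|h>)
// = <ae|cg> and PMULxD(<b|?|d|?>, <f|?|h|?>) = <bf|dh>; shuffling the 32-bit
// halves of those two results back together yields the low and high parts of
// all four products.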
18642 // Place the odd value at an even position (basically, shift all values 1
18643 // step to the left):
18644 const int Mask[] = {1, -1, 3, -1, 5, -1, 7, -1};
18645 // <a|b|c|d> => <b|undef|d|undef>
18646 SDValue Odd0 = DAG.getVectorShuffle(VT, dl, Op0, Op0, Mask);
18647 // <e|f|g|h> => <f|undef|h|undef>
18648 SDValue Odd1 = DAG.getVectorShuffle(VT, dl, Op1, Op1, Mask);
18650 // Emit two multiplies, one for the lower 2 ints and one for the higher 2
18652 MVT MulVT = VT == MVT::v4i32 ? MVT::v2i64 : MVT::v4i64;
18653 bool IsSigned = Op->getOpcode() == ISD::SMUL_LOHI;
18655 unsigned Opcode = (!IsSigned || !Subtarget->hasSSE41()) ? X86ISD::PMULUDQ : X86ISD::PMULDQ;
18656 // PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
18657 // => <2 x i64> <ae|cg>
18658 SDValue Mul1 = DAG.getNode(ISD::BITCAST, dl, VT,
18659 DAG.getNode(Opcode, dl, MulVT, Op0, Op1));
18660 // PMULUDQ <4 x i32> <b|undef|d|undef>, <4 x i32> <f|undef|h|undef>
18661 // => <2 x i64> <bf|dh>
18662 SDValue Mul2 = DAG.getNode(ISD::BITCAST, dl, VT,
18663 DAG.getNode(Opcode, dl, MulVT, Odd0, Odd1));
18665 // Shuffle it back into the right order.
18666 SDValue Highs, Lows;
18667 if (VT == MVT::v8i32) {
18668 const int HighMask[] = {1, 9, 3, 11, 5, 13, 7, 15};
18669 Highs = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, HighMask);
18670 const int LowMask[] = {0, 8, 2, 10, 4, 12, 6, 14};
18671 Lows = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, LowMask);
18673 const int HighMask[] = {1, 5, 3, 7};
18674 Highs = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, HighMask);
18675 const int LowMask[] = {0, 4, 2, 6};
18676 Lows = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, LowMask);
18679 // If we have a signed multiply but no PMULDQ, fix up the high parts of an
18680 // unsigned multiply.
18681 if (IsSigned && !Subtarget->hasSSE41()) {
18683 SDValue ShAmt = DAG.getConstant(31, DAG.getTargetLoweringInfo().getShiftAmountTy(VT));
18684 SDValue T1 = DAG.getNode(ISD::AND, dl, VT,
18685 DAG.getNode(ISD::SRA, dl, VT, Op0, ShAmt), Op1);
18686 SDValue T2 = DAG.getNode(ISD::AND, dl, VT,
18687 DAG.getNode(ISD::SRA, dl, VT, Op1, ShAmt), Op0);
18689 SDValue Fixup = DAG.getNode(ISD::ADD, dl, VT, T1, T2);
18690 Highs = DAG.getNode(ISD::SUB, dl, VT, Highs, Fixup);
18693 // The first result of MUL_LOHI is actually the low value, followed by the high value.
18695 SDValue Ops[] = {Lows, Highs};
18696 return DAG.getMergeValues(Ops, dl);
18699 static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
18700 const X86Subtarget *Subtarget) {
18701 MVT VT = Op.getSimpleValueType();
18703 SDValue R = Op.getOperand(0);
18704 SDValue Amt = Op.getOperand(1);
18706 // Optimize shl/srl/sra with constant shift amount.
18707 if (auto *BVAmt = dyn_cast<BuildVectorSDNode>(Amt)) {
18708 if (auto *ShiftConst = BVAmt->getConstantSplatNode()) {
18709 uint64_t ShiftAmt = ShiftConst->getZExtValue();
18711 if (VT == MVT::v2i64 || VT == MVT::v4i32 || VT == MVT::v8i16 ||
18712 (Subtarget->hasInt256() &&
18713 (VT == MVT::v4i64 || VT == MVT::v8i32 || VT == MVT::v16i16)) ||
18714 (Subtarget->hasAVX512() &&
18715 (VT == MVT::v8i64 || VT == MVT::v16i32))) {
18716 if (Op.getOpcode() == ISD::SHL)
18717 return getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, R, ShiftAmt,
18719 if (Op.getOpcode() == ISD::SRL)
18720 return getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, R, ShiftAmt,
18722 if (Op.getOpcode() == ISD::SRA && VT != MVT::v2i64 && VT != MVT::v4i64)
18723 return getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, R, ShiftAmt,
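// SSE has no per-byte shift instructions; the v16i8 (and v32i8) blocks below
// emulate them with a wider 16-bit shift followed by a mask that clears the
// bits pulled in from the neighbouring byte, and a compare/xor-sub trick for
// arithmetic right shifts.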
18727 if (VT == MVT::v16i8) {
18728 if (Op.getOpcode() == ISD::SHL) {
18729 // Make a large shift.
18730 SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl,
18731 MVT::v8i16, R, ShiftAmt,
18733 SHL = DAG.getNode(ISD::BITCAST, dl, VT, SHL);
18734 // Zero out the rightmost bits.
18735 SmallVector<SDValue, 16> V(16,
18736 DAG.getConstant(uint8_t(-1U << ShiftAmt),
18738 return DAG.getNode(ISD::AND, dl, VT, SHL,
18739 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
18741 if (Op.getOpcode() == ISD::SRL) {
18742 // Make a large shift.
18743 SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, dl,
18744 MVT::v8i16, R, ShiftAmt,
18746 SRL = DAG.getNode(ISD::BITCAST, dl, VT, SRL);
18747 // Zero out the leftmost bits.
18748 SmallVector<SDValue, 16> V(16,
18749 DAG.getConstant(uint8_t(-1U) >> ShiftAmt,
18751 return DAG.getNode(ISD::AND, dl, VT, SRL,
18752 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
18754 if (Op.getOpcode() == ISD::SRA) {
18755 if (ShiftAmt == 7) {
18756 // R s>> 7 === R s< 0
18757 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
18758 return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
18761 // R s>> a === ((R u>> a) ^ m) - m
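// Here m is the splatted byte 0x80 >> a (the shifted sign-bit position);
// xor-ing the logical-shift result with m and then subtracting m sign-extends
// the vacated high bits, which is exactly an arithmetic shift.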
18762 SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
18763 SmallVector<SDValue, 16> V(16, DAG.getConstant(128 >> ShiftAmt,
18765 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V);
18766 Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
18767 Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
18770 llvm_unreachable("Unknown shift opcode.");
18773 if (Subtarget->hasInt256() && VT == MVT::v32i8) {
18774 if (Op.getOpcode() == ISD::SHL) {
18775 // Make a large shift.
18776 SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl,
18777 MVT::v16i16, R, ShiftAmt,
18779 SHL = DAG.getNode(ISD::BITCAST, dl, VT, SHL);
18780 // Zero out the rightmost bits.
18781 SmallVector<SDValue, 32> V(32,
18782 DAG.getConstant(uint8_t(-1U << ShiftAmt),
18784 return DAG.getNode(ISD::AND, dl, VT, SHL,
18785 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
18787 if (Op.getOpcode() == ISD::SRL) {
18788 // Make a large shift.
18789 SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, dl,
18790 MVT::v16i16, R, ShiftAmt,
18792 SRL = DAG.getNode(ISD::BITCAST, dl, VT, SRL);
18793 // Zero out the leftmost bits.
18794 SmallVector<SDValue, 32> V(32,
18795 DAG.getConstant(uint8_t(-1U) >> ShiftAmt,
18797 return DAG.getNode(ISD::AND, dl, VT, SRL,
18798 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
18800 if (Op.getOpcode() == ISD::SRA) {
18801 if (ShiftAmt == 7) {
18802 // R s>> 7 === R s< 0
18803 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
18804 return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
18807 // R s>> a === ((R u>> a) ^ m) - m
18808 SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
18809 SmallVector<SDValue, 32> V(32, DAG.getConstant(128 >> ShiftAmt,
18811 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V);
18812 Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
18813 Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
18816 llvm_unreachable("Unknown shift opcode.");
18821 // Special case in 32-bit mode, where i64 is expanded into high and low parts.
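// In this situation the shift amount was built as a wider (e.g. v4i32)
// BUILD_VECTOR and bitcast to v2i64/v4i64; the loop below reassembles each
// 64-bit shift amount from its 32-bit pieces and checks that every element
// uses the same constant, so a single immediate shift can be emitted.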
18822 if (!Subtarget->is64Bit() &&
18823 (VT == MVT::v2i64 || (Subtarget->hasInt256() && VT == MVT::v4i64)) &&
18824 Amt.getOpcode() == ISD::BITCAST &&
18825 Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
18826 Amt = Amt.getOperand(0);
18827 unsigned Ratio = Amt.getSimpleValueType().getVectorNumElements() /
18828 VT.getVectorNumElements();
18829 unsigned RatioInLog2 = Log2_32_Ceil(Ratio);
18830 uint64_t ShiftAmt = 0;
18831 for (unsigned i = 0; i != Ratio; ++i) {
18832 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Amt.getOperand(i));
18836 ShiftAmt |= C->getZExtValue() << (i * (1 << (6 - RatioInLog2)));
18838 // Check remaining shift amounts.
18839 for (unsigned i = Ratio; i != Amt.getNumOperands(); i += Ratio) {
18840 uint64_t ShAmt = 0;
18841 for (unsigned j = 0; j != Ratio; ++j) {
18842 ConstantSDNode *C =
18843 dyn_cast<ConstantSDNode>(Amt.getOperand(i + j));
18847 ShAmt |= C->getZExtValue() << (j * (1 << (6 - RatioInLog2)));
18849 if (ShAmt != ShiftAmt)
18852 switch (Op.getOpcode()) {
18854 llvm_unreachable("Unknown shift opcode!");
18856 return getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, R, ShiftAmt,
18859 return getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, R, ShiftAmt,
18862 return getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, R, ShiftAmt,
18870 static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG,
18871 const X86Subtarget* Subtarget) {
18872 MVT VT = Op.getSimpleValueType();
18874 SDValue R = Op.getOperand(0);
18875 SDValue Amt = Op.getOperand(1);
18877 if ((VT == MVT::v2i64 && Op.getOpcode() != ISD::SRA) ||
18878 VT == MVT::v4i32 || VT == MVT::v8i16 ||
18879 (Subtarget->hasInt256() &&
18880 ((VT == MVT::v4i64 && Op.getOpcode() != ISD::SRA) ||
18881 VT == MVT::v8i32 || VT == MVT::v16i16)) ||
18882 (Subtarget->hasAVX512() && (VT == MVT::v8i64 || VT == MVT::v16i32))) {
18884 EVT EltVT = VT.getVectorElementType();
18886 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Amt)) {
18887 // Check if this build_vector node is doing a splat.
18888 // If so, then set BaseShAmt equal to the splat value.
18889 BaseShAmt = BV->getSplatValue();
18890 if (BaseShAmt && BaseShAmt.getOpcode() == ISD::UNDEF)
18891 BaseShAmt = SDValue();
18893 if (Amt.getOpcode() == ISD::EXTRACT_SUBVECTOR)
18894 Amt = Amt.getOperand(0);
18896 ShuffleVectorSDNode *SVN = dyn_cast<ShuffleVectorSDNode>(Amt);
18897 if (SVN && SVN->isSplat()) {
18898 unsigned SplatIdx = (unsigned)SVN->getSplatIndex();
18899 SDValue InVec = Amt.getOperand(0);
18900 if (InVec.getOpcode() == ISD::BUILD_VECTOR) {
18901 assert((SplatIdx < InVec.getValueType().getVectorNumElements()) &&
18902 "Unexpected shuffle index found!");
18903 BaseShAmt = InVec.getOperand(SplatIdx);
18904 } else if (InVec.getOpcode() == ISD::INSERT_VECTOR_ELT) {
18905 if (ConstantSDNode *C =
18906 dyn_cast<ConstantSDNode>(InVec.getOperand(2))) {
18907 if (C->getZExtValue() == SplatIdx)
18908 BaseShAmt = InVec.getOperand(1);
18913 // Avoid introducing an extract element from a shuffle.
18914 BaseShAmt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, InVec,
18915 DAG.getIntPtrConstant(SplatIdx));
18919 if (BaseShAmt.getNode()) {
18920 assert(EltVT.bitsLE(MVT::i64) && "Unexpected element type!");
18921 if (EltVT != MVT::i64 && EltVT.bitsGT(MVT::i32))
18922 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, BaseShAmt);
18923 else if (EltVT.bitsLT(MVT::i32))
18924 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, BaseShAmt);
18926 switch (Op.getOpcode()) {
18928 llvm_unreachable("Unknown shift opcode!");
18930 switch (VT.SimpleTy) {
18931 default: return SDValue();
18940 return getTargetVShiftNode(X86ISD::VSHLI, dl, VT, R, BaseShAmt, DAG);
18943 switch (VT.SimpleTy) {
18944 default: return SDValue();
18951 return getTargetVShiftNode(X86ISD::VSRAI, dl, VT, R, BaseShAmt, DAG);
18954 switch (VT.SimpleTy) {
18955 default: return SDValue();
18964 return getTargetVShiftNode(X86ISD::VSRLI, dl, VT, R, BaseShAmt, DAG);
18970 // Special case in 32-bit mode, where i64 is expanded into high and low parts.
18971 if (!Subtarget->is64Bit() &&
18972 (VT == MVT::v2i64 || (Subtarget->hasInt256() && VT == MVT::v4i64) ||
18973 (Subtarget->hasAVX512() && VT == MVT::v8i64)) &&
18974 Amt.getOpcode() == ISD::BITCAST &&
18975 Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
18976 Amt = Amt.getOperand(0);
18977 unsigned Ratio = Amt.getSimpleValueType().getVectorNumElements() /
18978 VT.getVectorNumElements();
18979 std::vector<SDValue> Vals(Ratio);
18980 for (unsigned i = 0; i != Ratio; ++i)
18981 Vals[i] = Amt.getOperand(i);
18982 for (unsigned i = Ratio; i != Amt.getNumOperands(); i += Ratio) {
18983 for (unsigned j = 0; j != Ratio; ++j)
18984 if (Vals[j] != Amt.getOperand(i + j))
18987 switch (Op.getOpcode()) {
18989 llvm_unreachable("Unknown shift opcode!");
18991 return DAG.getNode(X86ISD::VSHL, dl, VT, R, Op.getOperand(1));
18993 return DAG.getNode(X86ISD::VSRL, dl, VT, R, Op.getOperand(1));
18995 return DAG.getNode(X86ISD::VSRA, dl, VT, R, Op.getOperand(1));
19002 static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
19003 SelectionDAG &DAG) {
19004 MVT VT = Op.getSimpleValueType();
19006 SDValue R = Op.getOperand(0);
19007 SDValue Amt = Op.getOperand(1);
19010 assert(VT.isVector() && "Custom lowering only for vector shifts!");
19011 assert(Subtarget->hasSSE2() && "Only custom lower when we have SSE2!");
19013 V = LowerScalarImmediateShift(Op, DAG, Subtarget);
19017 V = LowerScalarVariableShift(Op, DAG, Subtarget);
19021 if (Subtarget->hasAVX512() && (VT == MVT::v16i32 || VT == MVT::v8i64))
19023 // AVX2 has VPSLLV/VPSRAV/VPSRLV.
19024 if (Subtarget->hasInt256()) {
19025 if (Op.getOpcode() == ISD::SRL &&
19026 (VT == MVT::v2i64 || VT == MVT::v4i32 ||
19027 VT == MVT::v4i64 || VT == MVT::v8i32))
19029 if (Op.getOpcode() == ISD::SHL &&
19030 (VT == MVT::v2i64 || VT == MVT::v4i32 ||
19031 VT == MVT::v4i64 || VT == MVT::v8i32))
19033 if (Op.getOpcode() == ISD::SRA && (VT == MVT::v4i32 || VT == MVT::v8i32))
19037 // If possible, lower this packed shift into a vector multiply instead of
19038 // expanding it into a sequence of scalar shifts.
19039 // Do this only if the vector shift count is a constant build_vector.
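// For example, (shl <4 x i32> X, <1, 2, 3, 4>) becomes
// (mul X, <2, 4, 8, 16>), which the target can select as a single vector
// multiply instead of four scalar shifts.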
19040 if (Op.getOpcode() == ISD::SHL &&
19041 (VT == MVT::v8i16 || VT == MVT::v4i32 ||
19042 (Subtarget->hasInt256() && VT == MVT::v16i16)) &&
19043 ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
19044 SmallVector<SDValue, 8> Elts;
19045 EVT SVT = VT.getScalarType();
19046 unsigned SVTBits = SVT.getSizeInBits();
19047 const APInt &One = APInt(SVTBits, 1);
19048 unsigned NumElems = VT.getVectorNumElements();
19050 for (unsigned i=0; i !=NumElems; ++i) {
19051 SDValue Op = Amt->getOperand(i);
19052 if (Op->getOpcode() == ISD::UNDEF) {
19053 Elts.push_back(Op);
19057 ConstantSDNode *ND = cast<ConstantSDNode>(Op);
19058 const APInt &C = APInt(SVTBits, ND->getAPIntValue().getZExtValue());
19059 uint64_t ShAmt = C.getZExtValue();
19060 if (ShAmt >= SVTBits) {
19061 Elts.push_back(DAG.getUNDEF(SVT));
19064 Elts.push_back(DAG.getConstant(One.shl(ShAmt), SVT));
19066 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Elts);
19067 return DAG.getNode(ISD::MUL, dl, VT, R, BV);
19070 // Lower SHL with variable shift amount.
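// The sequence below materialises 2^Amt per lane without a variable shift:
// Amt is moved into the f32 exponent field (shift left by 23), the exponent
// bias 0x3f800000 (1.0f) is added, and the resulting float 2^Amt is converted
// back to an integer that the final MUL uses as the power-of-two multiplier.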
19071 if (VT == MVT::v4i32 && Op->getOpcode() == ISD::SHL) {
19072 Op = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(23, VT));
19074 Op = DAG.getNode(ISD::ADD, dl, VT, Op, DAG.getConstant(0x3f800000U, VT));
19075 Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, Op);
19076 Op = DAG.getNode(ISD::FP_TO_SINT, dl, VT, Op);
19077 return DAG.getNode(ISD::MUL, dl, VT, Op, R);
19080 // If possible, lower this shift as a sequence of two shifts by
19081 // constant plus a MOVSS/MOVSD instead of scalarizing it.
19083 // (v4i32 (srl A, (build_vector < X, Y, Y, Y>)))
19085 // Could be rewritten as:
19086 // (v4i32 (MOVSS (srl A, <Y,Y,Y,Y>), (srl A, <X,X,X,X>)))
19088 // The advantage is that the two shifts from the example would be
19089 // lowered as X86ISD::VSRLI nodes. This would be cheaper than scalarizing
19090 // the vector shift into four scalar shifts plus four pairs of vector insert/extract.
19092 if ((VT == MVT::v8i16 || VT == MVT::v4i32) &&
19093 ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
19094 unsigned TargetOpcode = X86ISD::MOVSS;
19095 bool CanBeSimplified;
19096 // The splat value for the first packed shift (the 'X' from the example).
19097 SDValue Amt1 = Amt->getOperand(0);
19098 // The splat value for the second packed shift (the 'Y' from the example).
19099 SDValue Amt2 = (VT == MVT::v4i32) ? Amt->getOperand(1) :
19100 Amt->getOperand(2);
19102 // See if it is possible to replace this node with a sequence of
19103 // two shifts followed by a MOVSS/MOVSD
19104 if (VT == MVT::v4i32) {
19105 // Check if it is legal to use a MOVSS.
19106 CanBeSimplified = Amt2 == Amt->getOperand(2) &&
19107 Amt2 == Amt->getOperand(3);
19108 if (!CanBeSimplified) {
19109 // Otherwise, check if we can still simplify this node using a MOVSD.
19110 CanBeSimplified = Amt1 == Amt->getOperand(1) &&
19111 Amt->getOperand(2) == Amt->getOperand(3);
19112 TargetOpcode = X86ISD::MOVSD;
19113 Amt2 = Amt->getOperand(2);
19116 // Do similar checks for the case where the machine value type is MVT::v8i16.
19118 CanBeSimplified = Amt1 == Amt->getOperand(1);
19119 for (unsigned i=3; i != 8 && CanBeSimplified; ++i)
19120 CanBeSimplified = Amt2 == Amt->getOperand(i);
19122 if (!CanBeSimplified) {
19123 TargetOpcode = X86ISD::MOVSD;
19124 CanBeSimplified = true;
19125 Amt2 = Amt->getOperand(4);
19126 for (unsigned i=0; i != 4 && CanBeSimplified; ++i)
19127 CanBeSimplified = Amt1 == Amt->getOperand(i);
19128 for (unsigned j=4; j != 8 && CanBeSimplified; ++j)
19129 CanBeSimplified = Amt2 == Amt->getOperand(j);
19133 if (CanBeSimplified && isa<ConstantSDNode>(Amt1) &&
19134 isa<ConstantSDNode>(Amt2)) {
19135 // Replace this node with two shifts followed by a MOVSS/MOVSD.
19136 EVT CastVT = MVT::v4i32;
19138 SDValue Splat1 = DAG.getConstant(cast<ConstantSDNode>(Amt1)->getAPIntValue(), VT);
19139 SDValue Shift1 = DAG.getNode(Op->getOpcode(), dl, VT, R, Splat1);
19141 SDValue Splat2 = DAG.getConstant(cast<ConstantSDNode>(Amt2)->getAPIntValue(), VT);
19142 SDValue Shift2 = DAG.getNode(Op->getOpcode(), dl, VT, R, Splat2);
19143 if (TargetOpcode == X86ISD::MOVSD)
19144 CastVT = MVT::v2i64;
19145 SDValue BitCast1 = DAG.getNode(ISD::BITCAST, dl, CastVT, Shift1);
19146 SDValue BitCast2 = DAG.getNode(ISD::BITCAST, dl, CastVT, Shift2);
19147 SDValue Result = getTargetShuffleNode(TargetOpcode, dl, CastVT, BitCast2,
19149 return DAG.getNode(ISD::BITCAST, dl, VT, Result);
19153 if (VT == MVT::v16i8 && Op->getOpcode() == ISD::SHL) {
19154 assert(Subtarget->hasSSE2() && "Need SSE2 for pslli/pcmpeq.");
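// Variable per-byte left shift: the amount is moved into each byte's sign-bit
// position (<< 5) so PCMPEQ against 0x80 can turn each of its three bits into
// a select mask; the value is then conditionally shifted by 4, 2 and finally 1
// (the last step as r+r), composing any shift from 0 to 7.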
19157 Op = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(5, VT));
19158 Op = DAG.getNode(ISD::BITCAST, dl, VT, Op);
19160 // Turn 'a' into a mask suitable for VSELECT
19161 SDValue VSelM = DAG.getConstant(0x80, VT);
19162 SDValue OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
19163 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);
19165 SDValue CM1 = DAG.getConstant(0x0f, VT);
19166 SDValue CM2 = DAG.getConstant(0x3f, VT);
19168 // r = VSELECT(r, psllw(r & (char16)15, 4), a);
19169 SDValue M = DAG.getNode(ISD::AND, dl, VT, R, CM1);
19170 M = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, MVT::v8i16, M, 4, DAG);
19171 M = DAG.getNode(ISD::BITCAST, dl, VT, M);
19172 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R);
19175 Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op);
19176 OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
19177 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);
19179 // r = VSELECT(r, psllw(r & (char16)63, 2), a);
19180 M = DAG.getNode(ISD::AND, dl, VT, R, CM2);
19181 M = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, MVT::v8i16, M, 2, DAG);
19182 M = DAG.getNode(ISD::BITCAST, dl, VT, M);
19183 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R);
19186 Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op);
19187 OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
19188 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);
19190 // return VSELECT(r, r+r, a);
19191 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel,
19192 DAG.getNode(ISD::ADD, dl, VT, R, R), R);
19196 // It's worth extending once and using the v8i32 shifts for 16-bit types, but
19197 // the extra overheads to get from v16i8 to v8i32 make the existing SSE
19198 // solution better.
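// With AVX2 the v8i16 operands are widened to v8i32, shifted there (which the
// target can do with its per-element variable shifts VPSLLVD/VPSRLVD/VPSRAVD),
// and truncated back to v8i16.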
19199 if (Subtarget->hasInt256() && VT == MVT::v8i16) {
19200 MVT NewVT = VT == MVT::v8i16 ? MVT::v8i32 : MVT::v16i16;
19202 unsigned ExtOpc = Op.getOpcode() == ISD::SRA ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
19203 R = DAG.getNode(ExtOpc, dl, NewVT, R);
19204 Amt = DAG.getNode(ISD::ANY_EXTEND, dl, NewVT, Amt);
19205 return DAG.getNode(ISD::TRUNCATE, dl, VT,
19206 DAG.getNode(Op.getOpcode(), dl, NewVT, R, Amt));
19209 // Decompose 256-bit shifts into smaller 128-bit shifts.
19210 if (VT.is256BitVector()) {
19211 unsigned NumElems = VT.getVectorNumElements();
19212 MVT EltVT = VT.getVectorElementType();
19213 EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
19215 // Extract the two vectors
19216 SDValue V1 = Extract128BitVector(R, 0, DAG, dl);
19217 SDValue V2 = Extract128BitVector(R, NumElems/2, DAG, dl);
19219 // Recreate the shift amount vectors
19220 SDValue Amt1, Amt2;
19221 if (Amt.getOpcode() == ISD::BUILD_VECTOR) {
19222 // Constant shift amount
19223 SmallVector<SDValue, 4> Amt1Csts;
19224 SmallVector<SDValue, 4> Amt2Csts;
19225 for (unsigned i = 0; i != NumElems/2; ++i)
19226 Amt1Csts.push_back(Amt->getOperand(i));
19227 for (unsigned i = NumElems/2; i != NumElems; ++i)
19228 Amt2Csts.push_back(Amt->getOperand(i));
19230 Amt1 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Amt1Csts);
19231 Amt2 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Amt2Csts);
19233 // Variable shift amount
19234 Amt1 = Extract128BitVector(Amt, 0, DAG, dl);
19235 Amt2 = Extract128BitVector(Amt, NumElems/2, DAG, dl);
19238 // Issue new vector shifts for the smaller types
19239 V1 = DAG.getNode(Op.getOpcode(), dl, NewVT, V1, Amt1);
19240 V2 = DAG.getNode(Op.getOpcode(), dl, NewVT, V2, Amt2);
19242 // Concatenate the result back
19243 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, V1, V2);
19249 static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
19250 // Lower the "add/sub/mul with overflow" instruction into a regular instruction plus
19251 // a "setcc" instruction that checks the overflow flag. The "brcond" lowering
19252 // looks for this combo and may remove the "setcc" instruction if the "setcc"
19253 // has only one use.
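// For example, (saddo x, y) becomes an X86ISD::ADD producing {sum, EFLAGS}
// followed by an X86ISD::SETCC on COND_O, and the pair is merged back into
// the original two results.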
19254 SDNode *N = Op.getNode();
19255 SDValue LHS = N->getOperand(0);
19256 SDValue RHS = N->getOperand(1);
19257 unsigned BaseOp = 0;
19260 switch (Op.getOpcode()) {
19261 default: llvm_unreachable("Unknown ovf instruction!");
19263 // An add of one will be selected as an INC. Note that INC doesn't
19264 // set CF, so we can't do this for UADDO.
19265 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
19267 BaseOp = X86ISD::INC;
19268 Cond = X86::COND_O;
19271 BaseOp = X86ISD::ADD;
19272 Cond = X86::COND_O;
19275 BaseOp = X86ISD::ADD;
19276 Cond = X86::COND_B;
19279 // A subtract of one will be selected as a DEC. Note that DEC doesn't
19280 // set CF, so we can't do this for USUBO.
19281 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
19283 BaseOp = X86ISD::DEC;
19284 Cond = X86::COND_O;
19287 BaseOp = X86ISD::SUB;
19288 Cond = X86::COND_O;
19291 BaseOp = X86ISD::SUB;
19292 Cond = X86::COND_B;
19295 BaseOp = N->getValueType(0) == MVT::i8 ? X86ISD::SMUL8 : X86ISD::SMUL;
19296 Cond = X86::COND_O;
19298 case ISD::UMULO: { // i64, i8 = umulo lhs, rhs --> i64, i64, i32 umul lhs,rhs
19299 if (N->getValueType(0) == MVT::i8) {
19300 BaseOp = X86ISD::UMUL8;
19301 Cond = X86::COND_O;
19304 SDVTList VTs = DAG.getVTList(N->getValueType(0), N->getValueType(0),
19306 SDValue Sum = DAG.getNode(X86ISD::UMUL, DL, VTs, LHS, RHS);
19309 SDValue SetCC = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
19310 DAG.getConstant(X86::COND_O, MVT::i32),
19311 SDValue(Sum.getNode(), 2));
19313 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
19317 // Also sets EFLAGS.
19318 SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::i32);
19319 SDValue Sum = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);
19322 SDValue SetCC = DAG.getNode(X86ISD::SETCC, DL, N->getValueType(1),
19323 DAG.getConstant(Cond, MVT::i32),
19324 SDValue(Sum.getNode(), 1));
19326 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
19329 // Sign extension of the low part of vector elements. This may be used either
19330 // when sign extend instructions are not available or if the vector element
19331 // sizes already match the sign-extended size. If the vector elements are in
19332 // their pre-extended size and sign extend instructions are available, that will
19333 // be handled by LowerSIGN_EXTEND.
19334 SDValue X86TargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
19335 SelectionDAG &DAG) const {
19337 EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
19338 MVT VT = Op.getSimpleValueType();
19340 if (!Subtarget->hasSSE2() || !VT.isVector())
19343 unsigned BitsDiff = VT.getScalarType().getSizeInBits() -
19344 ExtraVT.getScalarType().getSizeInBits();
19346 switch (VT.SimpleTy) {
19347 default: return SDValue();
19350 if (!Subtarget->hasFp256())
19352 if (!Subtarget->hasInt256()) {
19353 // needs to be split
19354 unsigned NumElems = VT.getVectorNumElements();
19356 // Extract the LHS vectors
19357 SDValue LHS = Op.getOperand(0);
19358 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
19359 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);
19361 MVT EltVT = VT.getVectorElementType();
19362 EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
19364 EVT ExtraEltVT = ExtraVT.getVectorElementType();
19365 unsigned ExtraNumElems = ExtraVT.getVectorNumElements();
19366 ExtraVT = EVT::getVectorVT(*DAG.getContext(), ExtraEltVT,
19368 SDValue Extra = DAG.getValueType(ExtraVT);
19370 LHS1 = DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, Extra);
19371 LHS2 = DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, Extra);
19373 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, LHS1, LHS2);
19378 SDValue Op0 = Op.getOperand(0);
19380 // This is a sign extension of some low part of vector elements without
19381 // changing the size of the vector elements themselves:
19382 // Shift-Left + Shift-Right-Algebraic.
19383 SDValue Shl = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Op0,
19385 return getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, Shl, BitsDiff,
19391 /// Returns true if the operand type is exactly twice the native width, and
19392 /// the corresponding cmpxchg8b or cmpxchg16b instruction is available.
19393 /// Used to know whether to use cmpxchg8/16b when expanding atomic operations
19394 /// (otherwise we leave them alone to become __sync_fetch_and_... calls).
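/// For example, on 32-bit x86 a 64-bit atomic operation needs cmpxchg8b,
/// while on x86-64 a 128-bit one needs cmpxchg16b.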
19395 bool X86TargetLowering::needsCmpXchgNb(const Type *MemType) const {
19396 unsigned OpWidth = MemType->getPrimitiveSizeInBits();
19399 return !Subtarget->is64Bit(); // FIXME this should be Subtarget.hasCmpxchg8b
19400 else if (OpWidth == 128)
19401 return Subtarget->hasCmpxchg16b();
19406 bool X86TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
19407 return needsCmpXchgNb(SI->getValueOperand()->getType());
19410 // Note: this turns large loads into lock cmpxchg8b/16b.
19411 // FIXME: On 32 bits x86, fild/movq might be faster than lock cmpxchg8b.
19412 bool X86TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
19413 auto PTy = cast<PointerType>(LI->getPointerOperand()->getType());
19414 return needsCmpXchgNb(PTy->getElementType());
19417 bool X86TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
19418 unsigned NativeWidth = Subtarget->is64Bit() ? 64 : 32;
19419 const Type *MemType = AI->getType();
19421 // If the operand is too big, we must see if cmpxchg8/16b is available
19422 // and default to library calls otherwise.
19423 if (MemType->getPrimitiveSizeInBits() > NativeWidth)
19424 return needsCmpXchgNb(MemType);
19426 AtomicRMWInst::BinOp Op = AI->getOperation();
19429 llvm_unreachable("Unknown atomic operation");
19430 case AtomicRMWInst::Xchg:
19431 case AtomicRMWInst::Add:
19432 case AtomicRMWInst::Sub:
19433 // It's better to use xadd, xsub or xchg for these in all cases.
19435 case AtomicRMWInst::Or:
19436 case AtomicRMWInst::And:
19437 case AtomicRMWInst::Xor:
19438 // If the atomicrmw's result isn't actually used, we can just add a "lock"
19439 // prefix to a normal instruction for these operations.
19440 return !AI->use_empty();
19441 case AtomicRMWInst::Nand:
19442 case AtomicRMWInst::Max:
19443 case AtomicRMWInst::Min:
19444 case AtomicRMWInst::UMax:
19445 case AtomicRMWInst::UMin:
19446 // These always require a non-trivial set of data operations on x86. We must
19447 // use a cmpxchg loop.
19452 static bool hasMFENCE(const X86Subtarget& Subtarget) {
19453 // Use mfence if we have SSE2 or we're on x86-64 (even if we asked for
19454 // no-sse2). There isn't any reason to disable it if the target processor supports it.
19456 return Subtarget.hasSSE2() || Subtarget.is64Bit();
19460 X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
19461 unsigned NativeWidth = Subtarget->is64Bit() ? 64 : 32;
19462 const Type *MemType = AI->getType();
19463 // Accesses larger than the native width are turned into cmpxchg/libcalls, so
19464 // there is no benefit in turning such RMWs into loads, and it is actually
19465 // harmful as it introduces a mfence.
19466 if (MemType->getPrimitiveSizeInBits() > NativeWidth)
19469 auto Builder = IRBuilder<>(AI);
19470 Module *M = Builder.GetInsertBlock()->getParent()->getParent();
19471 auto SynchScope = AI->getSynchScope();
19472 // We must restrict the ordering to avoid generating loads with Release or
19473 // ReleaseAcquire orderings.
19474 auto Order = AtomicCmpXchgInst::getStrongestFailureOrdering(AI->getOrdering());
19475 auto Ptr = AI->getPointerOperand();
19477 // Before the load we need a fence. Here is an example lifted from
19478 // http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf showing why a fence
19481 // x.store(1, relaxed);
19482 // r1 = y.fetch_add(0, release);
19484 // y.fetch_add(42, acquire);
19485 // r2 = x.load(relaxed);
19486 // r1 = r2 = 0 is impossible, but becomes possible if the idempotent rmw is
19487 // lowered to just a load without a fence. An mfence flushes the store buffer,
19488 // making the optimization clearly correct.
19489 // FIXME: it is required if isAtLeastRelease(Order) but it is not clear
19490 // otherwise, we might be able to be more aggressive on relaxed idempotent
19491 // rmw. In practice, they do not look useful, so we don't try to be
19492 // especially clever.
19493 if (SynchScope == SingleThread) {
19494 // FIXME: we could just insert an X86ISD::MEMBARRIER here, except we are at
19495 // the IR level, so we must wrap it in an intrinsic.
19497 } else if (hasMFENCE(*Subtarget)) {
19498 Function *MFence = llvm::Intrinsic::getDeclaration(M,
19499 Intrinsic::x86_sse2_mfence);
19500 Builder.CreateCall(MFence);
19502 // FIXME: it might make sense to use a locked operation here but on a
19503 // different cache-line to prevent cache-line bouncing. In practice it
19504 // is probably a small win, and x86 processors without mfence are rare
19505 // enough that we do not bother.
19509 // Finally we can emit the atomic load.
19510 LoadInst *Loaded = Builder.CreateAlignedLoad(Ptr,
19511 AI->getType()->getPrimitiveSizeInBits());
19512 Loaded->setAtomic(Order, SynchScope);
19513 AI->replaceAllUsesWith(Loaded);
19514 AI->eraseFromParent();
19518 static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget *Subtarget,
19519 SelectionDAG &DAG) {
19521 AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>(
19522 cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue());
19523 SynchronizationScope FenceScope = static_cast<SynchronizationScope>(
19524 cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());
19526 // The only fence that needs an instruction is a sequentially-consistent
19527 // cross-thread fence.
19528 if (FenceOrdering == SequentiallyConsistent && FenceScope == CrossThread) {
19529 if (hasMFENCE(*Subtarget))
19530 return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0));
19532 SDValue Chain = Op.getOperand(0);
19533 SDValue Zero = DAG.getConstant(0, MVT::i32);
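// Without MFENCE (pre-SSE2 in 32-bit mode), a locked no-op read-modify-write
// of the top of the stack (an OR of zero into (%esp) under a LOCK prefix)
// provides the same full-barrier semantics.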
19535 DAG.getRegister(X86::ESP, MVT::i32), // Base
19536 DAG.getTargetConstant(1, MVT::i8), // Scale
19537 DAG.getRegister(0, MVT::i32), // Index
19538 DAG.getTargetConstant(0, MVT::i32), // Disp
19539 DAG.getRegister(0, MVT::i32), // Segment.
19543 SDNode *Res = DAG.getMachineNode(X86::OR32mrLocked, dl, MVT::Other, Ops);
19544 return SDValue(Res, 0);
19547 // MEMBARRIER is a compiler barrier; it codegens to a no-op.
19548 return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0));
19551 static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget *Subtarget,
19552 SelectionDAG &DAG) {
19553 MVT T = Op.getSimpleValueType();
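// CMPXCHG implicitly uses the accumulator: the expected value is placed in
// AL/AX/EAX/RAX (chosen by size below), the instruction writes the previous
// memory contents back into that register, and ZF indicates success.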
19557 switch(T.SimpleTy) {
19558 default: llvm_unreachable("Invalid value type!");
19559 case MVT::i8: Reg = X86::AL; size = 1; break;
19560 case MVT::i16: Reg = X86::AX; size = 2; break;
19561 case MVT::i32: Reg = X86::EAX; size = 4; break;
19563 assert(Subtarget->is64Bit() && "Node not type legal!");
19564 Reg = X86::RAX; size = 8;
19567 SDValue cpIn = DAG.getCopyToReg(Op.getOperand(0), DL, Reg,
19568 Op.getOperand(2), SDValue());
19569 SDValue Ops[] = { cpIn.getValue(0),
19572 DAG.getTargetConstant(size, MVT::i8),
19573 cpIn.getValue(1) };
19574 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
19575 MachineMemOperand *MMO = cast<AtomicSDNode>(Op)->getMemOperand();
19576 SDValue Result = DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG_DAG, DL, Tys,
19580 SDValue cpOut = DAG.getCopyFromReg(Result.getValue(0), DL, Reg, T, Result.getValue(1));
19581 SDValue EFLAGS = DAG.getCopyFromReg(cpOut.getValue(1), DL, X86::EFLAGS,
19582 MVT::i32, cpOut.getValue(2));
19583 SDValue Success = DAG.getNode(X86ISD::SETCC, DL, Op->getValueType(1),
19584 DAG.getConstant(X86::COND_E, MVT::i8), EFLAGS);
19586 DAG.ReplaceAllUsesOfValueWith(Op.getValue(0), cpOut);
19587 DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), Success);
19588 DAG.ReplaceAllUsesOfValueWith(Op.getValue(2), EFLAGS.getValue(1));
19592 static SDValue LowerBITCAST(SDValue Op, const X86Subtarget *Subtarget,
19593 SelectionDAG &DAG) {
19594 MVT SrcVT = Op.getOperand(0).getSimpleValueType();
19595 MVT DstVT = Op.getSimpleValueType();
19597 if (SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8) {
19598 assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
19599 if (DstVT != MVT::f64)
19600 // This conversion needs to be expanded.
19603 SDValue InVec = Op->getOperand(0);
19605 unsigned NumElts = SrcVT.getVectorNumElements();
19606 EVT SVT = SrcVT.getVectorElementType();
19608 // Widen the input vector in the case of MVT::v2i32.
19609 // Example: from MVT::v2i32 to MVT::v4i32.
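// e.g. <a|b> becomes <a|b|undef|undef>, which can then be bitcast to v2f64
// and have element 0 extracted as the f64 result.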
19610 SmallVector<SDValue, 16> Elts;
19611 for (unsigned i = 0, e = NumElts; i != e; ++i)
19612 Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SVT, InVec,
19613 DAG.getIntPtrConstant(i)));
19615 // Explicitly mark the extra elements as Undef.
19616 Elts.append(NumElts, DAG.getUNDEF(SVT));
19618 EVT NewVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumElts * 2);
19619 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Elts);
19620 SDValue ToV2F64 = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, BV);
19621 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, ToV2F64,
19622 DAG.getIntPtrConstant(0));
19625 assert(Subtarget->is64Bit() && !Subtarget->hasSSE2() &&
19626 Subtarget->hasMMX() && "Unexpected custom BITCAST");
19627 assert((DstVT == MVT::i64 ||
19628 (DstVT.isVector() && DstVT.getSizeInBits()==64)) &&
19629 "Unexpected custom BITCAST");
19630 // i64 <=> MMX conversions are Legal.
19631 if (SrcVT==MVT::i64 && DstVT.isVector())
19633 if (DstVT==MVT::i64 && SrcVT.isVector())
19635 // MMX <=> MMX conversions are Legal.
19636 if (SrcVT.isVector() && DstVT.isVector())
19638 // All other conversions need to be expanded.
19642 static SDValue LowerCTPOP(SDValue Op, const X86Subtarget *Subtarget,
19643 SelectionDAG &DAG) {
19644 SDNode *Node = Op.getNode();
19647 Op = Op.getOperand(0);
19648 EVT VT = Op.getValueType();
19649 assert((VT.is128BitVector() || VT.is256BitVector()) &&
19650 "CTPOP lowering only implemented for 128/256-bit wide vector types");
19652 unsigned NumElts = VT.getVectorNumElements();
19653 EVT EltVT = VT.getVectorElementType();
19654 unsigned Len = EltVT.getSizeInBits();
19656 // This is the vectorized version of the "best" algorithm from
19657 // http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
19658 // with a minor tweak to use a series of adds + shifts instead of vector
19659 // multiplications. Implemented for the v2i64, v4i64, v4i32, v8i32 types:
19661 // v2i64, v4i64, v4i32 => Only profitable w/ popcnt disabled
19662 // v8i32 => Always profitable
19664 // FIXME: There are a couple of possible improvements:
19666 // 1) Support for i8 and i16 vectors (needs measurements if popcnt enabled).
19667 // 2) Use strategies from http://wm.ite.pl/articles/sse-popcount.html
19669 assert(EltVT.isInteger() && (Len == 32 || Len == 64) && Len % 8 == 0 &&
19670 "CTPOP not implemented for this vector element type.");
19672 // X86 canonicalizes ANDs to vXi64, so generate the appropriate bitcasts to
19673 // avoid extra legalization.
19674 bool NeedsBitcast = EltVT == MVT::i32;
19675 MVT BitcastVT = VT.is256BitVector() ? MVT::v4i64 : MVT::v2i64;
19677 SDValue Cst55 = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x55)), EltVT);
19678 SDValue Cst33 = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x33)), EltVT);
19679 SDValue Cst0F = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x0F)), EltVT);
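// The constants above are the byte patterns 0x55 (01010101), 0x33 (00110011)
// and 0x0F (00001111) splatted across each element; they isolate alternating
// 1-, 2- and 4-bit fields for the pairwise accumulation steps below.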
19681 // v = v - ((v >> 1) & 0x55555555...)
19682 SmallVector<SDValue, 8> Ones(NumElts, DAG.getConstant(1, EltVT));
19683 SDValue OnesV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ones);
19684 SDValue Srl = DAG.getNode(ISD::SRL, dl, VT, Op, OnesV);
19686 Srl = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Srl);
19688 SmallVector<SDValue, 8> Mask55(NumElts, Cst55);
19689 SDValue M55 = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Mask55);
19691 M55 = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M55);
19693 SDValue And = DAG.getNode(ISD::AND, dl, Srl.getValueType(), Srl, M55);
19694 if (VT != And.getValueType())
19695 And = DAG.getNode(ISD::BITCAST, dl, VT, And);
19696 SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, Op, And);
19698 // v = (v & 0x33333333...) + ((v >> 2) & 0x33333333...)
19699 SmallVector<SDValue, 8> Mask33(NumElts, Cst33);
19700 SDValue M33 = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Mask33);
19701 SmallVector<SDValue, 8> Twos(NumElts, DAG.getConstant(2, EltVT));
19702 SDValue TwosV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Twos);
19704 Srl = DAG.getNode(ISD::SRL, dl, VT, Sub, TwosV);
19705 if (NeedsBitcast) {
19706 Srl = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Srl);
19707 M33 = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M33);
19708 Sub = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Sub);
19711 SDValue AndRHS = DAG.getNode(ISD::AND, dl, M33.getValueType(), Srl, M33);
19712 SDValue AndLHS = DAG.getNode(ISD::AND, dl, M33.getValueType(), Sub, M33);
19713 if (VT != AndRHS.getValueType()) {
19714 AndRHS = DAG.getNode(ISD::BITCAST, dl, VT, AndRHS);
19715 AndLHS = DAG.getNode(ISD::BITCAST, dl, VT, AndLHS);
19717 SDValue Add = DAG.getNode(ISD::ADD, dl, VT, AndLHS, AndRHS);
19719 // v = (v + (v >> 4)) & 0x0F0F0F0F...
19720 SmallVector<SDValue, 8> Fours(NumElts, DAG.getConstant(4, EltVT));
19721 SDValue FoursV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Fours);
19722 Srl = DAG.getNode(ISD::SRL, dl, VT, Add, FoursV);
19723 Add = DAG.getNode(ISD::ADD, dl, VT, Add, Srl);
19725 SmallVector<SDValue, 8> Mask0F(NumElts, Cst0F);
19726 SDValue M0F = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Mask0F);
19727 if (NeedsBitcast) {
19728 Add = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Add);
19729 M0F = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M0F);
19731 And = DAG.getNode(ISD::AND, dl, M0F.getValueType(), Add, M0F);
19732 if (VT != And.getValueType())
19733 And = DAG.getNode(ISD::BITCAST, dl, VT, And);
19735 // The algorithm mentioned above uses:
19736 // v = (v * 0x01010101...) >> (Len - 8)
19738 // Change it to use vector adds + vector shifts which yield faster results on
19739 // Haswell than using vector integer multiplication.
19741 // For i32 elements:
19742 // v = v + (v >> 8)
19743 // v = v + (v >> 16)
19745 // For i64 elements:
19746 // v = v + (v >> 8)
19747 // v = v + (v >> 16)
19748 // v = v + (v >> 32)
19751 SmallVector<SDValue, 8> Csts;
19752 for (unsigned i = 8; i <= Len/2; i *= 2) {
19753 Csts.assign(NumElts, DAG.getConstant(i, EltVT));
19754 SDValue CstsV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Csts);
19755 Srl = DAG.getNode(ISD::SRL, dl, VT, Add, CstsV);
19756 Add = DAG.getNode(ISD::ADD, dl, VT, Add, Srl);
19760 // The result is in the least significant 6 bits for i32 and 7 bits for i64.
19761 SDValue Cst3F = DAG.getConstant(APInt(Len, Len == 32 ? 0x3F : 0x7F), EltVT);
19762 SmallVector<SDValue, 8> Cst3FV(NumElts, Cst3F);
19763 SDValue M3F = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Cst3FV);
19764 if (NeedsBitcast) {
19765 Add = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Add);
19766 M3F = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M3F);
19768 And = DAG.getNode(ISD::AND, dl, M3F.getValueType(), Add, M3F);
19769 if (VT != And.getValueType())
19770 And = DAG.getNode(ISD::BITCAST, dl, VT, And);
19775 static SDValue LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) {
19776 SDNode *Node = Op.getNode();
19778 EVT T = Node->getValueType(0);
19779 SDValue negOp = DAG.getNode(ISD::SUB, dl, T,
19780 DAG.getConstant(0, T), Node->getOperand(2));
19781 return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, dl,
19782 cast<AtomicSDNode>(Node)->getMemoryVT(),
19783 Node->getOperand(0),
19784 Node->getOperand(1), negOp,
19785 cast<AtomicSDNode>(Node)->getMemOperand(),
19786 cast<AtomicSDNode>(Node)->getOrdering(),
19787 cast<AtomicSDNode>(Node)->getSynchScope());
19790 static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) {
19791 SDNode *Node = Op.getNode();
19793 EVT VT = cast<AtomicSDNode>(Node)->getMemoryVT();
19795 // Convert seq_cst store -> xchg
19796 // Convert wide store -> swap (-> cmpxchg8b/cmpxchg16b)
19797 // FIXME: On 32-bit, store -> fist or movq would be more efficient
19798 // (The only way to get a 16-byte store is cmpxchg16b)
19799 // FIXME: 16-byte ATOMIC_SWAP isn't actually hooked up at the moment.
19800 if (cast<AtomicSDNode>(Node)->getOrdering() == SequentiallyConsistent ||
19801 !DAG.getTargetLoweringInfo().isTypeLegal(VT)) {
19802 SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl,
19803 cast<AtomicSDNode>(Node)->getMemoryVT(),
19804 Node->getOperand(0),
19805 Node->getOperand(1), Node->getOperand(2),
19806 cast<AtomicSDNode>(Node)->getMemOperand(),
19807 cast<AtomicSDNode>(Node)->getOrdering(),
19808 cast<AtomicSDNode>(Node)->getSynchScope());
19809 return Swap.getValue(1);
19811 // Other atomic stores have a simple pattern.
19815 static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {
19816 EVT VT = Op.getNode()->getSimpleValueType(0);
19818 // Let legalize expand this if it isn't a legal type yet.
19819 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
19822 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
19825 bool ExtraOp = false;
19826 switch (Op.getOpcode()) {
19827 default: llvm_unreachable("Invalid code");
19828 case ISD::ADDC: Opc = X86ISD::ADD; break;
19829 case ISD::ADDE: Opc = X86ISD::ADC; ExtraOp = true; break;
19830 case ISD::SUBC: Opc = X86ISD::SUB; break;
19831 case ISD::SUBE: Opc = X86ISD::SBB; ExtraOp = true; break;
19835 return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0),
19837 return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0),
19838 Op.getOperand(1), Op.getOperand(2));
19841 static SDValue LowerFSINCOS(SDValue Op, const X86Subtarget *Subtarget,
19842 SelectionDAG &DAG) {
19843 assert(Subtarget->isTargetDarwin() && Subtarget->is64Bit());
19845 // For MacOSX, we want to call an alternative entry point: __sincos_stret,
19846 // which returns the values as { float, float } (in XMM0) or
19847 // { double, double } (which is returned in XMM0, XMM1).
19849 SDValue Arg = Op.getOperand(0);
19850 EVT ArgVT = Arg.getValueType();
19851 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
19853 TargetLowering::ArgListTy Args;
19854 TargetLowering::ArgListEntry Entry;
19858 Entry.isSExt = false;
19859 Entry.isZExt = false;
19860 Args.push_back(Entry);
19862 bool isF64 = ArgVT == MVT::f64;
19863 // Only optimize x86_64 for now. i386 is a bit messy. For f32,
19864 // the small struct {f32, f32} is returned in (eax, edx). For f64,
19865 // the results are returned via SRet in memory.
19866 const char *LibcallName = isF64 ? "__sincos_stret" : "__sincosf_stret";
19867 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
19868 SDValue Callee = DAG.getExternalSymbol(LibcallName, TLI.getPointerTy());
19870 Type *RetTy = isF64
19871 ? (Type*)StructType::get(ArgTy, ArgTy, nullptr)
19872 : (Type*)VectorType::get(ArgTy, 4);
19874 TargetLowering::CallLoweringInfo CLI(DAG);
19875 CLI.setDebugLoc(dl).setChain(DAG.getEntryNode())
19876 .setCallee(CallingConv::C, RetTy, Callee, std::move(Args), 0);
19878 std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
19881 // Returned in xmm0 and xmm1.
19882 return CallResult.first;
19884 // Returned in bits 0:31 and 32:63 of xmm0.
19885 SDValue SinVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
19886 CallResult.first, DAG.getIntPtrConstant(0));
19887 SDValue CosVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
19888 CallResult.first, DAG.getIntPtrConstant(1));
19889 SDVTList Tys = DAG.getVTList(ArgVT, ArgVT);
19890 return DAG.getNode(ISD::MERGE_VALUES, dl, Tys, SinVal, CosVal);
19893 /// LowerOperation - Provide custom lowering hooks for some operations.
19895 SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
19896 switch (Op.getOpcode()) {
19897 default: llvm_unreachable("Should not custom lower this!");
19898 case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op,DAG);
19899 case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, Subtarget, DAG);
19900 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
19901 return LowerCMP_SWAP(Op, Subtarget, DAG);
19902 case ISD::CTPOP: return LowerCTPOP(Op, Subtarget, DAG);
19903 case ISD::ATOMIC_LOAD_SUB: return LowerLOAD_SUB(Op,DAG);
19904 case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op,DAG);
19905 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
19906 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
19907 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
19908 case ISD::VSELECT: return LowerVSELECT(Op, DAG);
19909 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
19910 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
19911 case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op,Subtarget,DAG);
19912 case ISD::INSERT_SUBVECTOR: return LowerINSERT_SUBVECTOR(Op, Subtarget,DAG);
19913 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG);
19914 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
19915 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
19916 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
19917 case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG);
19918 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
19919 case ISD::SHL_PARTS:
19920 case ISD::SRA_PARTS:
19921 case ISD::SRL_PARTS: return LowerShiftParts(Op, DAG);
19922 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
19923 case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
19924 case ISD::TRUNCATE: return LowerTRUNCATE(Op, DAG);
19925 case ISD::ZERO_EXTEND: return LowerZERO_EXTEND(Op, Subtarget, DAG);
19926 case ISD::SIGN_EXTEND: return LowerSIGN_EXTEND(Op, Subtarget, DAG);
19927 case ISD::ANY_EXTEND: return LowerANY_EXTEND(Op, Subtarget, DAG);
19928 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG);
19929 case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG);
19930 case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
19931 case ISD::LOAD: return LowerExtendedLoad(Op, Subtarget, DAG);
19933 case ISD::FNEG: return LowerFABSorFNEG(Op, DAG);
19934 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG);
19935 case ISD::FGETSIGN: return LowerFGETSIGN(Op, DAG);
19936 case ISD::SETCC: return LowerSETCC(Op, DAG);
19937 case ISD::SELECT: return LowerSELECT(Op, DAG);
19938 case ISD::BRCOND: return LowerBRCOND(Op, DAG);
19939 case ISD::JumpTable: return LowerJumpTable(Op, DAG);
19940 case ISD::VASTART: return LowerVASTART(Op, DAG);
19941 case ISD::VAARG: return LowerVAARG(Op, DAG);
19942 case ISD::VACOPY: return LowerVACOPY(Op, Subtarget, DAG);
19943 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, Subtarget, DAG);
19944 case ISD::INTRINSIC_VOID:
19945 case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, Subtarget, DAG);
19946 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
19947 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
19948 case ISD::FRAME_TO_ARGS_OFFSET:
19949 return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
19950 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
19951 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
19952 case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG);
19953 case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG);
19954 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
19955 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
19956 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
19957 case ISD::CTLZ: return LowerCTLZ(Op, DAG);
19958 case ISD::CTLZ_ZERO_UNDEF: return LowerCTLZ_ZERO_UNDEF(Op, DAG);
19959 case ISD::CTTZ: return LowerCTTZ(Op, DAG);
19960 case ISD::MUL: return LowerMUL(Op, Subtarget, DAG);
19961 case ISD::UMUL_LOHI:
19962 case ISD::SMUL_LOHI: return LowerMUL_LOHI(Op, Subtarget, DAG);
19965 case ISD::SHL: return LowerShift(Op, Subtarget, DAG);
19971 case ISD::UMULO: return LowerXALUO(Op, DAG);
19972 case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, Subtarget,DAG);
19973 case ISD::BITCAST: return LowerBITCAST(Op, Subtarget, DAG);
19977 case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
19978 case ISD::ADD: return LowerADD(Op, DAG);
19979 case ISD::SUB: return LowerSUB(Op, DAG);
19980 case ISD::FSINCOS: return LowerFSINCOS(Op, Subtarget, DAG);
19984 /// ReplaceNodeResults - Replace a node with an illegal result type
19985 /// with a new node built out of custom code.
19986 void X86TargetLowering::ReplaceNodeResults(SDNode *N,
19987 SmallVectorImpl<SDValue>&Results,
19988 SelectionDAG &DAG) const {
19990 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
19991 switch (N->getOpcode()) {
19993 llvm_unreachable("Do not know how to custom type legalize this operation!");
19994 // We might have generated v2f32 FMIN/FMAX operations. Widen them to v4f32.
19995 case X86ISD::FMINC:
19997 case X86ISD::FMAXC:
19998 case X86ISD::FMAX: {
19999 EVT VT = N->getValueType(0);
20000 if (VT != MVT::v2f32)
20001 llvm_unreachable("Unexpected type (!= v2f32) on FMIN/FMAX.");
20002 SDValue UNDEF = DAG.getUNDEF(VT);
20003 SDValue LHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
20004 N->getOperand(0), UNDEF);
20005 SDValue RHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
20006 N->getOperand(1), UNDEF);
20007 Results.push_back(DAG.getNode(N->getOpcode(), dl, MVT::v4f32, LHS, RHS));
20010 case ISD::SIGN_EXTEND_INREG:
20015 // We don't want to expand or promote these.
20022 case ISD::UDIVREM: {
20023 SDValue V = LowerWin64_i128OP(SDValue(N,0), DAG);
20024 Results.push_back(V);
20027 case ISD::FP_TO_SINT:
20028 case ISD::FP_TO_UINT: {
20029 bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT;
20031 if (!IsSigned && !isIntegerTypeFTOL(SDValue(N, 0).getValueType()))
20034 std::pair<SDValue,SDValue> Vals =
20035 FP_TO_INTHelper(SDValue(N, 0), DAG, IsSigned, /*IsReplace=*/ true);
20036 SDValue FIST = Vals.first, StackSlot = Vals.second;
20037 if (FIST.getNode()) {
20038 EVT VT = N->getValueType(0);
20039 // Return a load from the stack slot.
20040 if (StackSlot.getNode())
20041 Results.push_back(DAG.getLoad(VT, dl, FIST, StackSlot,
20042 MachinePointerInfo(),
20043 false, false, false, 0));
20045 Results.push_back(FIST);
20049 case ISD::UINT_TO_FP: {
20050 assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
20051 if (N->getOperand(0).getValueType() != MVT::v2i32 ||
20052 N->getValueType(0) != MVT::v2f32)
20054 SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v2i64,
20056 SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL),
20058 SDValue VBias = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2f64, Bias, Bias);
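// 0x4330000000000000 is the double 2^52; OR-ing a zero-extended 32-bit value
// into its mantissa produces exactly 2^52 + v, so subtracting the bias
// recovers v as a double, which VFPROUND then narrows to f32.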
20059 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64, ZExtIn,
20060 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, VBias));
20061 Or = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Or);
20062 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, Or, VBias);
20063 Results.push_back(DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, Sub));
20066 case ISD::FP_ROUND: {
20067 if (!TLI.isTypeLegal(N->getOperand(0).getValueType()))
20069 SDValue V = DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, N->getOperand(0));
20070 Results.push_back(V);
20073 case ISD::INTRINSIC_W_CHAIN: {
20074 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
20076 default : llvm_unreachable("Do not know how to custom type "
20077 "legalize this intrinsic operation!");
20078 case Intrinsic::x86_rdtsc:
20079 return getReadTimeStampCounter(N, dl, X86ISD::RDTSC_DAG, DAG, Subtarget,
20081 case Intrinsic::x86_rdtscp:
20082 return getReadTimeStampCounter(N, dl, X86ISD::RDTSCP_DAG, DAG, Subtarget,
20084 case Intrinsic::x86_rdpmc:
20085 return getReadPerformanceCounter(N, dl, DAG, Subtarget, Results);
20088 case ISD::READCYCLECOUNTER: {
20089 return getReadTimeStampCounter(N, dl, X86ISD::RDTSC_DAG, DAG, Subtarget,
20092 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: {
20093 EVT T = N->getValueType(0);
20094 assert((T == MVT::i64 || T == MVT::i128) && "can only expand cmpxchg pair");
20095 bool Regs64bit = T == MVT::i128;
20096 EVT HalfT = Regs64bit ? MVT::i64 : MVT::i32;
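// CMPXCHG8B/CMPXCHG16B take the expected value in EDX:EAX (RDX:RAX) and the
// replacement value in ECX:EBX (RCX:RBX); ZF reports whether the exchange
// happened, and the old value comes back in EDX:EAX (RDX:RAX).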
20097 SDValue cpInL, cpInH;
20098 cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
20099 DAG.getConstant(0, HalfT));
20100 cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
20101 DAG.getConstant(1, HalfT));
20102 cpInL = DAG.getCopyToReg(N->getOperand(0), dl,
20103 Regs64bit ? X86::RAX : X86::EAX,
20105 cpInH = DAG.getCopyToReg(cpInL.getValue(0), dl,
20106 Regs64bit ? X86::RDX : X86::EDX,
20107 cpInH, cpInL.getValue(1));
20108 SDValue swapInL, swapInH;
20109 swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
20110 DAG.getConstant(0, HalfT));
20111 swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
20112 DAG.getConstant(1, HalfT));
20113 swapInL = DAG.getCopyToReg(cpInH.getValue(0), dl,
20114 Regs64bit ? X86::RBX : X86::EBX,
20115 swapInL, cpInH.getValue(1));
20116 swapInH = DAG.getCopyToReg(swapInL.getValue(0), dl,
20117 Regs64bit ? X86::RCX : X86::ECX,
20118 swapInH, swapInL.getValue(1));
20119 SDValue Ops[] = { swapInH.getValue(0),
20121 swapInH.getValue(1) };
20122 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
20123 MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
20124 unsigned Opcode = Regs64bit ? X86ISD::LCMPXCHG16_DAG :
20125 X86ISD::LCMPXCHG8_DAG;
20126 SDValue Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, Ops, T, MMO);
20127 SDValue cpOutL = DAG.getCopyFromReg(Result.getValue(0), dl,
20128 Regs64bit ? X86::RAX : X86::EAX,
20129 HalfT, Result.getValue(1));
20130 SDValue cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), dl,
20131 Regs64bit ? X86::RDX : X86::EDX,
20132 HalfT, cpOutL.getValue(2));
20133 SDValue OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0)};
20135 SDValue EFLAGS = DAG.getCopyFromReg(cpOutH.getValue(1), dl, X86::EFLAGS,
20136 MVT::i32, cpOutH.getValue(2));
20138 SDValue Success = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
20139 DAG.getConstant(X86::COND_E, MVT::i8), EFLAGS);
20140 Success = DAG.getZExtOrTrunc(Success, dl, N->getValueType(1));
20142 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, T, OpsF));
20143 Results.push_back(Success);
20144 Results.push_back(EFLAGS.getValue(1));
20147 case ISD::ATOMIC_SWAP:
20148 case ISD::ATOMIC_LOAD_ADD:
20149 case ISD::ATOMIC_LOAD_SUB:
20150 case ISD::ATOMIC_LOAD_AND:
20151 case ISD::ATOMIC_LOAD_OR:
20152 case ISD::ATOMIC_LOAD_XOR:
20153 case ISD::ATOMIC_LOAD_NAND:
20154 case ISD::ATOMIC_LOAD_MIN:
20155 case ISD::ATOMIC_LOAD_MAX:
20156 case ISD::ATOMIC_LOAD_UMIN:
20157 case ISD::ATOMIC_LOAD_UMAX:
20158 case ISD::ATOMIC_LOAD: {
20159 // Delegate to generic TypeLegalization. Situations we can really handle
20160 // should have already been dealt with by AtomicExpandPass.cpp.
20163 case ISD::BITCAST: {
20164 assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
20165 EVT DstVT = N->getValueType(0);
20166 EVT SrcVT = N->getOperand(0)->getValueType(0);
20168 if (SrcVT != MVT::f64 ||
20169 (DstVT != MVT::v2i32 && DstVT != MVT::v4i16 && DstVT != MVT::v8i8))
20172 unsigned NumElts = DstVT.getVectorNumElements();
20173 EVT SVT = DstVT.getVectorElementType();
20174 EVT WiderVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumElts * 2);
20175 SDValue Expanded = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
20176 MVT::v2f64, N->getOperand(0));
20177 SDValue ToVecInt = DAG.getNode(ISD::BITCAST, dl, WiderVT, Expanded);
20179 if (ExperimentalVectorWideningLegalization) {
20180 // If we are legalizing vectors by widening, we already have the desired
20181 // legal vector type, just return it.
20182 Results.push_back(ToVecInt);
20186 SmallVector<SDValue, 8> Elts;
20187 for (unsigned i = 0, e = NumElts; i != e; ++i)
20188 Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SVT,
20189 ToVecInt, DAG.getIntPtrConstant(i)));
20191 Results.push_back(DAG.getNode(ISD::BUILD_VECTOR, dl, DstVT, Elts));
20192 }
20193 }
20194 }
20196 const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
20197 switch (Opcode) {
20198 default: return nullptr;
20199 case X86ISD::BSF: return "X86ISD::BSF";
20200 case X86ISD::BSR: return "X86ISD::BSR";
20201 case X86ISD::SHLD: return "X86ISD::SHLD";
20202 case X86ISD::SHRD: return "X86ISD::SHRD";
20203 case X86ISD::FAND: return "X86ISD::FAND";
20204 case X86ISD::FANDN: return "X86ISD::FANDN";
20205 case X86ISD::FOR: return "X86ISD::FOR";
20206 case X86ISD::FXOR: return "X86ISD::FXOR";
20207 case X86ISD::FSRL: return "X86ISD::FSRL";
20208 case X86ISD::FILD: return "X86ISD::FILD";
20209 case X86ISD::FILD_FLAG: return "X86ISD::FILD_FLAG";
20210 case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM";
20211 case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM";
20212 case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM";
20213 case X86ISD::FLD: return "X86ISD::FLD";
20214 case X86ISD::FST: return "X86ISD::FST";
20215 case X86ISD::CALL: return "X86ISD::CALL";
20216 case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG";
20217 case X86ISD::RDTSCP_DAG: return "X86ISD::RDTSCP_DAG";
20218 case X86ISD::RDPMC_DAG: return "X86ISD::RDPMC_DAG";
20219 case X86ISD::BT: return "X86ISD::BT";
20220 case X86ISD::CMP: return "X86ISD::CMP";
20221 case X86ISD::COMI: return "X86ISD::COMI";
20222 case X86ISD::UCOMI: return "X86ISD::UCOMI";
20223 case X86ISD::CMPM: return "X86ISD::CMPM";
20224 case X86ISD::CMPMU: return "X86ISD::CMPMU";
20225 case X86ISD::SETCC: return "X86ISD::SETCC";
20226 case X86ISD::SETCC_CARRY: return "X86ISD::SETCC_CARRY";
20227 case X86ISD::FSETCC: return "X86ISD::FSETCC";
20228 case X86ISD::CMOV: return "X86ISD::CMOV";
20229 case X86ISD::BRCOND: return "X86ISD::BRCOND";
20230 case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG";
20231 case X86ISD::REP_STOS: return "X86ISD::REP_STOS";
20232 case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS";
20233 case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg";
20234 case X86ISD::Wrapper: return "X86ISD::Wrapper";
20235 case X86ISD::WrapperRIP: return "X86ISD::WrapperRIP";
20236 case X86ISD::PEXTRB: return "X86ISD::PEXTRB";
20237 case X86ISD::PEXTRW: return "X86ISD::PEXTRW";
20238 case X86ISD::INSERTPS: return "X86ISD::INSERTPS";
20239 case X86ISD::PINSRB: return "X86ISD::PINSRB";
20240 case X86ISD::PINSRW: return "X86ISD::PINSRW";
20241 case X86ISD::PSHUFB: return "X86ISD::PSHUFB";
20242 case X86ISD::ANDNP: return "X86ISD::ANDNP";
20243 case X86ISD::PSIGN: return "X86ISD::PSIGN";
20244 case X86ISD::BLENDI: return "X86ISD::BLENDI";
20245 case X86ISD::SHRUNKBLEND: return "X86ISD::SHRUNKBLEND";
20246 case X86ISD::SUBUS: return "X86ISD::SUBUS";
20247 case X86ISD::HADD: return "X86ISD::HADD";
20248 case X86ISD::HSUB: return "X86ISD::HSUB";
20249 case X86ISD::FHADD: return "X86ISD::FHADD";
20250 case X86ISD::FHSUB: return "X86ISD::FHSUB";
20251 case X86ISD::UMAX: return "X86ISD::UMAX";
20252 case X86ISD::UMIN: return "X86ISD::UMIN";
20253 case X86ISD::SMAX: return "X86ISD::SMAX";
20254 case X86ISD::SMIN: return "X86ISD::SMIN";
20255 case X86ISD::FMAX: return "X86ISD::FMAX";
20256 case X86ISD::FMIN: return "X86ISD::FMIN";
20257 case X86ISD::FMAXC: return "X86ISD::FMAXC";
20258 case X86ISD::FMINC: return "X86ISD::FMINC";
20259 case X86ISD::FRSQRT: return "X86ISD::FRSQRT";
20260 case X86ISD::FRCP: return "X86ISD::FRCP";
20261 case X86ISD::TLSADDR: return "X86ISD::TLSADDR";
20262 case X86ISD::TLSBASEADDR: return "X86ISD::TLSBASEADDR";
20263 case X86ISD::TLSCALL: return "X86ISD::TLSCALL";
20264 case X86ISD::EH_SJLJ_SETJMP: return "X86ISD::EH_SJLJ_SETJMP";
20265 case X86ISD::EH_SJLJ_LONGJMP: return "X86ISD::EH_SJLJ_LONGJMP";
20266 case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN";
20267 case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN";
20268 case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m";
20269 case X86ISD::FNSTSW16r: return "X86ISD::FNSTSW16r";
20270 case X86ISD::LCMPXCHG_DAG: return "X86ISD::LCMPXCHG_DAG";
20271 case X86ISD::LCMPXCHG8_DAG: return "X86ISD::LCMPXCHG8_DAG";
20272 case X86ISD::LCMPXCHG16_DAG: return "X86ISD::LCMPXCHG16_DAG";
20273 case X86ISD::VZEXT_MOVL: return "X86ISD::VZEXT_MOVL";
20274 case X86ISD::VZEXT_LOAD: return "X86ISD::VZEXT_LOAD";
20275 case X86ISD::VZEXT: return "X86ISD::VZEXT";
20276 case X86ISD::VSEXT: return "X86ISD::VSEXT";
20277 case X86ISD::VTRUNC: return "X86ISD::VTRUNC";
20278 case X86ISD::VTRUNCM: return "X86ISD::VTRUNCM";
20279 case X86ISD::VINSERT: return "X86ISD::VINSERT";
20280 case X86ISD::VFPEXT: return "X86ISD::VFPEXT";
20281 case X86ISD::VFPROUND: return "X86ISD::VFPROUND";
20282 case X86ISD::VSHLDQ: return "X86ISD::VSHLDQ";
20283 case X86ISD::VSRLDQ: return "X86ISD::VSRLDQ";
20284 case X86ISD::VSHL: return "X86ISD::VSHL";
20285 case X86ISD::VSRL: return "X86ISD::VSRL";
20286 case X86ISD::VSRA: return "X86ISD::VSRA";
20287 case X86ISD::VSHLI: return "X86ISD::VSHLI";
20288 case X86ISD::VSRLI: return "X86ISD::VSRLI";
20289 case X86ISD::VSRAI: return "X86ISD::VSRAI";
20290 case X86ISD::CMPP: return "X86ISD::CMPP";
20291 case X86ISD::PCMPEQ: return "X86ISD::PCMPEQ";
20292 case X86ISD::PCMPGT: return "X86ISD::PCMPGT";
20293 case X86ISD::PCMPEQM: return "X86ISD::PCMPEQM";
20294 case X86ISD::PCMPGTM: return "X86ISD::PCMPGTM";
20295 case X86ISD::ADD: return "X86ISD::ADD";
20296 case X86ISD::SUB: return "X86ISD::SUB";
20297 case X86ISD::ADC: return "X86ISD::ADC";
20298 case X86ISD::SBB: return "X86ISD::SBB";
20299 case X86ISD::SMUL: return "X86ISD::SMUL";
20300 case X86ISD::UMUL: return "X86ISD::UMUL";
20301 case X86ISD::SMUL8: return "X86ISD::SMUL8";
20302 case X86ISD::UMUL8: return "X86ISD::UMUL8";
20303 case X86ISD::SDIVREM8_SEXT_HREG: return "X86ISD::SDIVREM8_SEXT_HREG";
20304 case X86ISD::UDIVREM8_ZEXT_HREG: return "X86ISD::UDIVREM8_ZEXT_HREG";
20305 case X86ISD::INC: return "X86ISD::INC";
20306 case X86ISD::DEC: return "X86ISD::DEC";
20307 case X86ISD::OR: return "X86ISD::OR";
20308 case X86ISD::XOR: return "X86ISD::XOR";
20309 case X86ISD::AND: return "X86ISD::AND";
20310 case X86ISD::BEXTR: return "X86ISD::BEXTR";
20311 case X86ISD::MUL_IMM: return "X86ISD::MUL_IMM";
20312 case X86ISD::PTEST: return "X86ISD::PTEST";
20313 case X86ISD::TESTP: return "X86ISD::TESTP";
20314 case X86ISD::TESTM: return "X86ISD::TESTM";
20315 case X86ISD::TESTNM: return "X86ISD::TESTNM";
20316 case X86ISD::KORTEST: return "X86ISD::KORTEST";
20317 case X86ISD::PACKSS: return "X86ISD::PACKSS";
20318 case X86ISD::PACKUS: return "X86ISD::PACKUS";
20319 case X86ISD::PALIGNR: return "X86ISD::PALIGNR";
20320 case X86ISD::VALIGN: return "X86ISD::VALIGN";
20321 case X86ISD::PSHUFD: return "X86ISD::PSHUFD";
20322 case X86ISD::PSHUFHW: return "X86ISD::PSHUFHW";
20323 case X86ISD::PSHUFLW: return "X86ISD::PSHUFLW";
20324 case X86ISD::SHUFP: return "X86ISD::SHUFP";
20325 case X86ISD::MOVLHPS: return "X86ISD::MOVLHPS";
20326 case X86ISD::MOVLHPD: return "X86ISD::MOVLHPD";
20327 case X86ISD::MOVHLPS: return "X86ISD::MOVHLPS";
20328 case X86ISD::MOVLPS: return "X86ISD::MOVLPS";
20329 case X86ISD::MOVLPD: return "X86ISD::MOVLPD";
20330 case X86ISD::MOVDDUP: return "X86ISD::MOVDDUP";
20331 case X86ISD::MOVSHDUP: return "X86ISD::MOVSHDUP";
20332 case X86ISD::MOVSLDUP: return "X86ISD::MOVSLDUP";
20333 case X86ISD::MOVSD: return "X86ISD::MOVSD";
20334 case X86ISD::MOVSS: return "X86ISD::MOVSS";
20335 case X86ISD::UNPCKL: return "X86ISD::UNPCKL";
20336 case X86ISD::UNPCKH: return "X86ISD::UNPCKH";
20337 case X86ISD::VBROADCAST: return "X86ISD::VBROADCAST";
20338 case X86ISD::VBROADCASTM: return "X86ISD::VBROADCASTM";
20339 case X86ISD::VEXTRACT: return "X86ISD::VEXTRACT";
20340 case X86ISD::VPERMILPI: return "X86ISD::VPERMILPI";
20341 case X86ISD::VPERM2X128: return "X86ISD::VPERM2X128";
20342 case X86ISD::VPERMV: return "X86ISD::VPERMV";
20343 case X86ISD::VPERMV3: return "X86ISD::VPERMV3";
20344 case X86ISD::VPERMIV3: return "X86ISD::VPERMIV3";
20345 case X86ISD::VPERMI: return "X86ISD::VPERMI";
20346 case X86ISD::PMULUDQ: return "X86ISD::PMULUDQ";
20347 case X86ISD::PMULDQ: return "X86ISD::PMULDQ";
20348 case X86ISD::VASTART_SAVE_XMM_REGS: return "X86ISD::VASTART_SAVE_XMM_REGS";
20349 case X86ISD::VAARG_64: return "X86ISD::VAARG_64";
20350 case X86ISD::WIN_ALLOCA: return "X86ISD::WIN_ALLOCA";
20351 case X86ISD::MEMBARRIER: return "X86ISD::MEMBARRIER";
20352 case X86ISD::SEG_ALLOCA: return "X86ISD::SEG_ALLOCA";
20353 case X86ISD::WIN_FTOL: return "X86ISD::WIN_FTOL";
20354 case X86ISD::SAHF: return "X86ISD::SAHF";
20355 case X86ISD::RDRAND: return "X86ISD::RDRAND";
20356 case X86ISD::RDSEED: return "X86ISD::RDSEED";
20357 case X86ISD::FMADD: return "X86ISD::FMADD";
20358 case X86ISD::FMSUB: return "X86ISD::FMSUB";
20359 case X86ISD::FNMADD: return "X86ISD::FNMADD";
20360 case X86ISD::FNMSUB: return "X86ISD::FNMSUB";
20361 case X86ISD::FMADDSUB: return "X86ISD::FMADDSUB";
20362 case X86ISD::FMSUBADD: return "X86ISD::FMSUBADD";
20363 case X86ISD::PCMPESTRI: return "X86ISD::PCMPESTRI";
20364 case X86ISD::PCMPISTRI: return "X86ISD::PCMPISTRI";
20365 case X86ISD::XTEST: return "X86ISD::XTEST";
20366 case X86ISD::COMPRESS: return "X86ISD::COMPRESS";
20367 case X86ISD::EXPAND: return "X86ISD::EXPAND";
20368 case X86ISD::SELECT: return "X86ISD::SELECT";
20369 case X86ISD::ADDSUB: return "X86ISD::ADDSUB";
20370 case X86ISD::RCP28: return "X86ISD::RCP28";
20371 case X86ISD::RSQRT28: return "X86ISD::RSQRT28";
20372 case X86ISD::FADD_RND: return "X86ISD::FADD_RND";
20373 case X86ISD::FSUB_RND: return "X86ISD::FSUB_RND";
20374 case X86ISD::FMUL_RND: return "X86ISD::FMUL_RND";
20375 case X86ISD::FDIV_RND: return "X86ISD::FDIV_RND";
20376 }
20377 }
20379 // isLegalAddressingMode - Return true if the addressing mode represented
20380 // by AM is legal for this target, for a load/store of the specified type.
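// As a reminder (illustrative, not used by the code below), the AddrMode
// being validated corresponds to the general x86 form
//
//   BaseGV + BaseReg + Scale*IndexReg + BaseOffs
//
// e.g. something like "movl 16(%rdi,%rcx,4), %eax"; the checks below only
// reject combinations the encoding or the code model cannot express.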
20381 bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM,
20382 Type *Ty) const {
20383 // X86 supports extremely general addressing modes.
20384 CodeModel::Model M = getTargetMachine().getCodeModel();
20385 Reloc::Model R = getTargetMachine().getRelocationModel();
20387 // X86 allows a sign-extended 32-bit immediate field as a displacement.
20388 if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != nullptr))
20389 return false;
20391 if (AM.BaseGV) {
20392 unsigned GVFlags =
20393 Subtarget->ClassifyGlobalReference(AM.BaseGV, getTargetMachine());
20395 // If a reference to this global requires an extra load, we can't fold it.
20396 if (isGlobalStubReference(GVFlags))
20397 return false;
20399 // If BaseGV requires a register for the PIC base, we cannot also have a
20400 // BaseReg specified.
20401 if (AM.HasBaseReg && isGlobalRelativeToPICBase(GVFlags))
20402 return false;
20404 // If lower 4G is not available, then we must use rip-relative addressing.
20405 if ((M != CodeModel::Small || R != Reloc::Static) &&
20406 Subtarget->is64Bit() && (AM.BaseOffs || AM.Scale > 1))
20407 return false;
20408 }
20410 switch (AM.Scale) {
20411 case 0:
20412 case 1:
20413 case 2:
20414 case 4:
20415 case 8:
20416 // These scales always work.
20417 break;
20418 case 3:
20419 case 5:
20420 case 9:
20421 // These scales are formed with basereg+scalereg. Only accept if there is
20422 // no basereg yet.
20423 if (AM.HasBaseReg)
20424 return false;
20425 break;
20426 default: // Other stuff never works.
20427 return false;
20428 }
20430 return true;
20431 }
20433 bool X86TargetLowering::isVectorShiftByScalarCheap(Type *Ty) const {
20434 unsigned Bits = Ty->getScalarSizeInBits();
20436 // 8-bit shifts are always expensive, but versions with a scalar amount aren't
20437 // particularly cheaper than those without.
20438 if (Bits == 8)
20439 return false;
20441 // On AVX2 there are new vpsllv[dq] instructions (and other shifts) that make
20442 // variable shifts just as cheap as scalar ones.
20443 if (Subtarget->hasInt256() && (Bits == 32 || Bits == 64))
20444 return false;
20446 // Otherwise, it's significantly cheaper to shift by a scalar amount than by a
20447 // fully general vector.
20448 return true;
20449 }
20451 bool X86TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
20452 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
20453 return false;
20454 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
20455 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
20456 return NumBits1 > NumBits2;
20457 }
20459 bool X86TargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
20460 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
20461 return false;
20463 if (!isTypeLegal(EVT::getEVT(Ty1)))
20464 return false;
20466 assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop");
20468 // Assuming the caller doesn't have a zeroext or signext return parameter,
20469 // truncation all the way down to i1 is valid.
20470 return true;
20471 }
20473 bool X86TargetLowering::isLegalICmpImmediate(int64_t Imm) const {
20474 return isInt<32>(Imm);
20475 }
20477 bool X86TargetLowering::isLegalAddImmediate(int64_t Imm) const {
20478 // Can also use sub to handle negated immediates.
20479 return isInt<32>(Imm);
20480 }
20482 bool X86TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
20483 if (!VT1.isInteger() || !VT2.isInteger())
20484 return false;
20485 unsigned NumBits1 = VT1.getSizeInBits();
20486 unsigned NumBits2 = VT2.getSizeInBits();
20487 return NumBits1 > NumBits2;
20488 }
20490 bool X86TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
20491 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
20492 return Ty1->isIntegerTy(32) && Ty2->isIntegerTy(64) && Subtarget->is64Bit();
20493 }
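// For example, "movl %edi, %eax" already clears bits 63:32 of RAX, so a
// 32-bit value produced in a register needs no extra instruction to be
// consumed as an i64; that is what makes this zero extension "free".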
20495 bool X86TargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
20496 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
20497 return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget->is64Bit();
20498 }
20500 bool X86TargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
20501 EVT VT1 = Val.getValueType();
20502 if (isZExtFree(VT1, VT2))
20503 return true;
20505 if (Val.getOpcode() != ISD::LOAD)
20506 return false;
20508 if (!VT1.isSimple() || !VT1.isInteger() ||
20509 !VT2.isSimple() || !VT2.isInteger())
20510 return false;
20512 switch (VT1.getSimpleVT().SimpleTy) {
20513 default: break;
20514 case MVT::i8:
20515 case MVT::i16:
20516 case MVT::i32:
20517 // X86 has 8, 16, and 32-bit zero-extending loads.
20518 return true;
20519 }
20521 return false;
20522 }
20524 bool X86TargetLowering::isVectorLoadExtDesirable(SDValue) const { return true; }
20526 bool
20527 X86TargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
20528 if (!(Subtarget->hasFMA() || Subtarget->hasFMA4()))
20529 return false;
20531 VT = VT.getScalarType();
20533 if (!VT.isSimple())
20534 return false;
20536 switch (VT.getSimpleVT().SimpleTy) {
20537 case MVT::f32:
20538 case MVT::f64:
20539 return true;
20540 default:
20541 break;
20542 }
20544 return false;
20545 }
20547 bool X86TargetLowering::isNarrowingProfitable(EVT VT1, EVT VT2) const {
20548 // i16 instructions are longer (0x66 prefix) and potentially slower.
20549 return !(VT1 == MVT::i32 && VT2 == MVT::i16);
20550 }
20552 /// isShuffleMaskLegal - Targets can use this to indicate that they only
20553 /// support *some* VECTOR_SHUFFLE operations, those with specific masks.
20554 /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
20555 /// are assumed to be legal.
20556 bool
20557 X86TargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M,
20558 EVT VT) const {
20559 if (!VT.isSimple())
20560 return false;
20562 // Very little shuffling can be done for 64-bit vectors right now.
20563 if (VT.getSizeInBits() == 64)
20564 return false;
20566 // We only care that the types being shuffled are legal. The lowering can
20567 // handle any possible shuffle mask that results.
20568 return isTypeLegal(VT.getSimpleVT());
20569 }
20571 bool
20572 X86TargetLowering::isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask,
20573 EVT VT) const {
20574 // Just delegate to the generic legality, clear masks aren't special.
20575 return isShuffleMaskLegal(Mask, VT);
20576 }
20578 //===----------------------------------------------------------------------===//
20579 // X86 Scheduler Hooks
20580 //===----------------------------------------------------------------------===//
20582 /// Utility function to emit xbegin specifying the start of an RTM region.
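// Roughly, the emitted code corresponds to the intrinsic usage below
// (illustrative only; names follow the RTM intrinsics, not this function):
//
//   unsigned status = _xbegin();            // XBEGIN_4
//   if (status == _XBEGIN_STARTED) { ... }  // mainMBB sets EAX = -1
//   else handle_abort(status);              // on abort, hardware places the
//                                           // status in EAX and control
//                                           // reaches the fallback (sinkMBB)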
20583 static MachineBasicBlock *EmitXBegin(MachineInstr *MI, MachineBasicBlock *MBB,
20584 const TargetInstrInfo *TII) {
20585 DebugLoc DL = MI->getDebugLoc();
20587 const BasicBlock *BB = MBB->getBasicBlock();
20588 MachineFunction::iterator I = MBB;
20589 ++I;
20591 // For the v = xbegin(), we generate
20593 // thisMBB:
20594 //  xbegin sinkMBB
20596 // mainMBB:
20597 //  eax = -1
20599 // sinkMBB:
20600 //  v = eax
20602 MachineBasicBlock *thisMBB = MBB;
20603 MachineFunction *MF = MBB->getParent();
20604 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
20605 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
20606 MF->insert(I, mainMBB);
20607 MF->insert(I, sinkMBB);
20609 // Transfer the remainder of BB and its successor edges to sinkMBB.
20610 sinkMBB->splice(sinkMBB->begin(), MBB,
20611 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
20612 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
20614 // thisMBB:
20615 //  xbegin sinkMBB
20616 // # fallthrough to mainMBB
20617 // # abortion to sinkMBB
20618 BuildMI(thisMBB, DL, TII->get(X86::XBEGIN_4)).addMBB(sinkMBB);
20619 thisMBB->addSuccessor(mainMBB);
20620 thisMBB->addSuccessor(sinkMBB);
20622 // mainMBB:
20623 //  EAX = -1
20624 BuildMI(mainMBB, DL, TII->get(X86::MOV32ri), X86::EAX).addImm(-1);
20625 mainMBB->addSuccessor(sinkMBB);
20627 // sinkMBB:
20628 // EAX is live into the sinkMBB
20629 sinkMBB->addLiveIn(X86::EAX);
20630 BuildMI(*sinkMBB, sinkMBB->begin(), DL,
20631 TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
20632 .addReg(X86::EAX);
20634 MI->eraseFromParent();
20636 return sinkMBB;
20637 }
20638 // FIXME: When we get size specific XMM0 registers, i.e. XMM0_V16I8
20639 // or XMM0_V32I8 in AVX all of this code can be replaced with that
20640 // in the .td file.
20641 static MachineBasicBlock *EmitPCMPSTRM(MachineInstr *MI, MachineBasicBlock *BB,
20642 const TargetInstrInfo *TII) {
20643 unsigned Opc;
20644 switch (MI->getOpcode()) {
20645 default: llvm_unreachable("illegal opcode!");
20646 case X86::PCMPISTRM128REG: Opc = X86::PCMPISTRM128rr; break;
20647 case X86::VPCMPISTRM128REG: Opc = X86::VPCMPISTRM128rr; break;
20648 case X86::PCMPISTRM128MEM: Opc = X86::PCMPISTRM128rm; break;
20649 case X86::VPCMPISTRM128MEM: Opc = X86::VPCMPISTRM128rm; break;
20650 case X86::PCMPESTRM128REG: Opc = X86::PCMPESTRM128rr; break;
20651 case X86::VPCMPESTRM128REG: Opc = X86::VPCMPESTRM128rr; break;
20652 case X86::PCMPESTRM128MEM: Opc = X86::PCMPESTRM128rm; break;
20653 case X86::VPCMPESTRM128MEM: Opc = X86::VPCMPESTRM128rm; break;
20654 }
20656 DebugLoc dl = MI->getDebugLoc();
20657 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc));
20659 unsigned NumArgs = MI->getNumOperands();
20660 for (unsigned i = 1; i < NumArgs; ++i) {
20661 MachineOperand &Op = MI->getOperand(i);
20662 if (!(Op.isReg() && Op.isImplicit()))
20663 MIB.addOperand(Op);
20664 }
20665 if (MI->hasOneMemOperand())
20666 MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
20668 BuildMI(*BB, MI, dl,
20669 TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
20670 .addReg(X86::XMM0);
20672 MI->eraseFromParent();
20673 return BB;
20674 }
20676 // FIXME: Custom handling because TableGen doesn't support multiple implicit
20677 // defs in an instruction pattern
20678 static MachineBasicBlock *EmitPCMPSTRI(MachineInstr *MI, MachineBasicBlock *BB,
20679 const TargetInstrInfo *TII) {
20680 unsigned Opc;
20681 switch (MI->getOpcode()) {
20682 default: llvm_unreachable("illegal opcode!");
20683 case X86::PCMPISTRIREG: Opc = X86::PCMPISTRIrr; break;
20684 case X86::VPCMPISTRIREG: Opc = X86::VPCMPISTRIrr; break;
20685 case X86::PCMPISTRIMEM: Opc = X86::PCMPISTRIrm; break;
20686 case X86::VPCMPISTRIMEM: Opc = X86::VPCMPISTRIrm; break;
20687 case X86::PCMPESTRIREG: Opc = X86::PCMPESTRIrr; break;
20688 case X86::VPCMPESTRIREG: Opc = X86::VPCMPESTRIrr; break;
20689 case X86::PCMPESTRIMEM: Opc = X86::PCMPESTRIrm; break;
20690 case X86::VPCMPESTRIMEM: Opc = X86::VPCMPESTRIrm; break;
20691 }
20693 DebugLoc dl = MI->getDebugLoc();
20694 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc));
20696 unsigned NumArgs = MI->getNumOperands(); // remove the results
20697 for (unsigned i = 1; i < NumArgs; ++i) {
20698 MachineOperand &Op = MI->getOperand(i);
20699 if (!(Op.isReg() && Op.isImplicit()))
20700 MIB.addOperand(Op);
20701 }
20702 if (MI->hasOneMemOperand())
20703 MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
20705 BuildMI(*BB, MI, dl,
20706 TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
20707 .addReg(X86::ECX);
20709 MI->eraseFromParent();
20710 return BB;
20711 }
20713 static MachineBasicBlock *EmitMonitor(MachineInstr *MI, MachineBasicBlock *BB,
20714 const X86Subtarget *Subtarget) {
20715 DebugLoc dl = MI->getDebugLoc();
20716 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
20717 // Address into RAX/EAX, other two args into ECX, EDX.
20718 unsigned MemOpc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r;
20719 unsigned MemReg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
20720 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(MemOpc), MemReg);
20721 for (int i = 0; i < X86::AddrNumOperands; ++i)
20722 MIB.addOperand(MI->getOperand(i));
20724 unsigned ValOps = X86::AddrNumOperands;
20725 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::ECX)
20726 .addReg(MI->getOperand(ValOps).getReg());
20727 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::EDX)
20728 .addReg(MI->getOperand(ValOps+1).getReg());
20730 // The instruction doesn't actually take any operands though.
20731 BuildMI(*BB, MI, dl, TII->get(X86::MONITORrrr));
20733 MI->eraseFromParent(); // The pseudo is gone now.
20734 return BB;
20735 }
20737 MachineBasicBlock *
20738 X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr *MI,
20739 MachineBasicBlock *MBB) const {
20740 // Emit va_arg instruction on X86-64.
20742 // Operands to this pseudo-instruction:
20743 // 0 ) Output : destination address (reg)
20744 // 1-5) Input : va_list address (addr, i64mem)
20745 // 6 ) ArgSize : Size (in bytes) of vararg type
20746 // 7 ) ArgMode : 0=overflow only, 1=use gp_offset, 2=use fp_offset
20747 // 8 ) Align : Alignment of type
20748 // 9 ) EFLAGS (implicit-def)
20750 assert(MI->getNumOperands() == 10 && "VAARG_64 should have 10 operands!");
20751 assert(X86::AddrNumOperands == 5 && "VAARG_64 assumes 5 address operands");
20753 unsigned DestReg = MI->getOperand(0).getReg();
20754 MachineOperand &Base = MI->getOperand(1);
20755 MachineOperand &Scale = MI->getOperand(2);
20756 MachineOperand &Index = MI->getOperand(3);
20757 MachineOperand &Disp = MI->getOperand(4);
20758 MachineOperand &Segment = MI->getOperand(5);
20759 unsigned ArgSize = MI->getOperand(6).getImm();
20760 unsigned ArgMode = MI->getOperand(7).getImm();
20761 unsigned Align = MI->getOperand(8).getImm();
20763 // Memory Reference
20764 assert(MI->hasOneMemOperand() && "Expected VAARG_64 to have one memoperand");
20765 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
20766 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
20768 // Machine Information
20769 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
20770 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
20771 const TargetRegisterClass *AddrRegClass = getRegClassFor(MVT::i64);
20772 const TargetRegisterClass *OffsetRegClass = getRegClassFor(MVT::i32);
20773 DebugLoc DL = MI->getDebugLoc();
20775 // struct va_list {
20776 //   i32   gp_offset
20777 //   i32   fp_offset
20778 // i64 overflow_area (address)
20779 // i64 reg_save_area (address)
20780 // }
20781 // sizeof(va_list) = 24
20782 // alignment(va_list) = 8
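// Illustrative example: for an i32 argument (ArgMode == 1) the code below
// loads gp_offset, compares it against 6*8 = 48, and while it is still
// below that bound reads the value at reg_save_area + gp_offset and bumps
// gp_offset by 8; otherwise it falls back to overflow_area.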
20784 unsigned TotalNumIntRegs = 6;
20785 unsigned TotalNumXMMRegs = 8;
20786 bool UseGPOffset = (ArgMode == 1);
20787 bool UseFPOffset = (ArgMode == 2);
20788 unsigned MaxOffset = TotalNumIntRegs * 8 +
20789 (UseFPOffset ? TotalNumXMMRegs * 16 : 0);
20791 /* Align ArgSize to a multiple of 8 */
20792 unsigned ArgSizeA8 = (ArgSize + 7) & ~7;
20793 bool NeedsAlign = (Align > 8);
20795 MachineBasicBlock *thisMBB = MBB;
20796 MachineBasicBlock *overflowMBB;
20797 MachineBasicBlock *offsetMBB;
20798 MachineBasicBlock *endMBB;
20800 unsigned OffsetDestReg = 0; // Argument address computed by offsetMBB
20801 unsigned OverflowDestReg = 0; // Argument address computed by overflowMBB
20802 unsigned OffsetReg = 0;
20804 if (!UseGPOffset && !UseFPOffset) {
20805 // If we only pull from the overflow region, we don't create a branch.
20806 // We don't need to alter control flow.
20807 OffsetDestReg = 0; // unused
20808 OverflowDestReg = DestReg;
20810 offsetMBB = nullptr;
20811 overflowMBB = thisMBB;
20812 endMBB = thisMBB;
20813 } else {
20814 // First emit code to check if gp_offset (or fp_offset) is below the bound.
20815 // If so, pull the argument from reg_save_area. (branch to offsetMBB)
20816 // If not, pull from overflow_area. (branch to overflowMBB)
20821 // offsetMBB overflowMBB
20826 // Registers for the PHI in endMBB
20827 OffsetDestReg = MRI.createVirtualRegister(AddrRegClass);
20828 OverflowDestReg = MRI.createVirtualRegister(AddrRegClass);
20830 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
20831 MachineFunction *MF = MBB->getParent();
20832 overflowMBB = MF->CreateMachineBasicBlock(LLVM_BB);
20833 offsetMBB = MF->CreateMachineBasicBlock(LLVM_BB);
20834 endMBB = MF->CreateMachineBasicBlock(LLVM_BB);
20836 MachineFunction::iterator MBBIter = MBB;
20837 ++MBBIter;
20839 // Insert the new basic blocks
20840 MF->insert(MBBIter, offsetMBB);
20841 MF->insert(MBBIter, overflowMBB);
20842 MF->insert(MBBIter, endMBB);
20844 // Transfer the remainder of MBB and its successor edges to endMBB.
20845 endMBB->splice(endMBB->begin(), thisMBB,
20846 std::next(MachineBasicBlock::iterator(MI)), thisMBB->end());
20847 endMBB->transferSuccessorsAndUpdatePHIs(thisMBB);
20849 // Make offsetMBB and overflowMBB successors of thisMBB
20850 thisMBB->addSuccessor(offsetMBB);
20851 thisMBB->addSuccessor(overflowMBB);
20853 // endMBB is a successor of both offsetMBB and overflowMBB
20854 offsetMBB->addSuccessor(endMBB);
20855 overflowMBB->addSuccessor(endMBB);
20857 // Load the offset value into a register
20858 OffsetReg = MRI.createVirtualRegister(OffsetRegClass);
20859 BuildMI(thisMBB, DL, TII->get(X86::MOV32rm), OffsetReg)
20863 .addDisp(Disp, UseFPOffset ? 4 : 0)
20864 .addOperand(Segment)
20865 .setMemRefs(MMOBegin, MMOEnd);
20867 // Check if there is enough room left to pull this argument.
20868 BuildMI(thisMBB, DL, TII->get(X86::CMP32ri))
20870 .addImm(MaxOffset + 8 - ArgSizeA8);
20872 // Branch to "overflowMBB" if offset >= max
20873 // Fall through to "offsetMBB" otherwise
20874 BuildMI(thisMBB, DL, TII->get(X86::GetCondBranchFromCond(X86::COND_AE)))
20875 .addMBB(overflowMBB);
20877 }
20878 // In offsetMBB, emit code to use the reg_save_area.
20879 if (offsetMBB) {
20880 assert(OffsetReg != 0);
20882 // Read the reg_save_area address.
20883 unsigned RegSaveReg = MRI.createVirtualRegister(AddrRegClass);
20884 BuildMI(offsetMBB, DL, TII->get(X86::MOV64rm), RegSaveReg)
20889 .addOperand(Segment)
20890 .setMemRefs(MMOBegin, MMOEnd);
20892 // Zero-extend the offset
20893 unsigned OffsetReg64 = MRI.createVirtualRegister(AddrRegClass);
20894 BuildMI(offsetMBB, DL, TII->get(X86::SUBREG_TO_REG), OffsetReg64)
20897 .addImm(X86::sub_32bit);
20899 // Add the offset to the reg_save_area to get the final address.
20900 BuildMI(offsetMBB, DL, TII->get(X86::ADD64rr), OffsetDestReg)
20901 .addReg(OffsetReg64)
20902 .addReg(RegSaveReg);
20904 // Compute the offset for the next argument
20905 unsigned NextOffsetReg = MRI.createVirtualRegister(OffsetRegClass);
20906 BuildMI(offsetMBB, DL, TII->get(X86::ADD32ri), NextOffsetReg)
20908 .addImm(UseFPOffset ? 16 : 8);
20910 // Store it back into the va_list.
20911 BuildMI(offsetMBB, DL, TII->get(X86::MOV32mr))
20915 .addDisp(Disp, UseFPOffset ? 4 : 0)
20916 .addOperand(Segment)
20917 .addReg(NextOffsetReg)
20918 .setMemRefs(MMOBegin, MMOEnd);
20920 // Jump to endMBB
20921 BuildMI(offsetMBB, DL, TII->get(X86::JMP_1))
20922 .addMBB(endMBB);
20923 }
20926 // Emit code to use overflow area
20929 // Load the overflow_area address into a register.
20930 unsigned OverflowAddrReg = MRI.createVirtualRegister(AddrRegClass);
20931 BuildMI(overflowMBB, DL, TII->get(X86::MOV64rm), OverflowAddrReg)
20936 .addOperand(Segment)
20937 .setMemRefs(MMOBegin, MMOEnd);
20939 // If we need to align it, do so. Otherwise, just copy the address
20940 // to OverflowDestReg.
20941 if (NeedsAlign) {
20942 // Align the overflow address
20943 assert((Align & (Align-1)) == 0 && "Alignment must be a power of 2");
20944 unsigned TmpReg = MRI.createVirtualRegister(AddrRegClass);
20946 // aligned_addr = (addr + (align-1)) & ~(align-1)
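// For example, with addr = 0x1009 and align = 16:
//   (0x1009 + 15) & ~15  ==  0x1018 & ~0xF  ==  0x1010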
20947 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), TmpReg)
20948 .addReg(OverflowAddrReg)
20951 BuildMI(overflowMBB, DL, TII->get(X86::AND64ri32), OverflowDestReg)
20953 .addImm(~(uint64_t)(Align-1));
20954 } else {
20955 BuildMI(overflowMBB, DL, TII->get(TargetOpcode::COPY), OverflowDestReg)
20956 .addReg(OverflowAddrReg);
20957 }
20959 // Compute the next overflow address after this argument.
20960 // (the overflow address should be kept 8-byte aligned)
20961 unsigned NextAddrReg = MRI.createVirtualRegister(AddrRegClass);
20962 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), NextAddrReg)
20963 .addReg(OverflowDestReg)
20964 .addImm(ArgSizeA8);
20966 // Store the new overflow address.
20967 BuildMI(overflowMBB, DL, TII->get(X86::MOV64mr))
20972 .addOperand(Segment)
20973 .addReg(NextAddrReg)
20974 .setMemRefs(MMOBegin, MMOEnd);
20976 // If we branched, emit the PHI to the front of endMBB.
20977 if (offsetMBB) {
20978 BuildMI(*endMBB, endMBB->begin(), DL,
20979 TII->get(X86::PHI), DestReg)
20980 .addReg(OffsetDestReg).addMBB(offsetMBB)
20981 .addReg(OverflowDestReg).addMBB(overflowMBB);
20982 }
20984 // Erase the pseudo instruction
20985 MI->eraseFromParent();
20987 return endMBB;
20988 }
20990 MachineBasicBlock *
20991 X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter(
20992 MachineInstr *MI,
20993 MachineBasicBlock *MBB) const {
20994 // Emit code to save XMM registers to the stack. The ABI says that the
20995 // number of registers to save is given in %al, so it's theoretically
20996 // possible to do an indirect jump trick to avoid saving all of them,
20997 // however this code takes a simpler approach and just executes all
20998 // of the stores if %al is non-zero. It's less code, and it's probably
20999 // easier on the hardware branch predictor, and stores aren't all that
21000 // expensive anyway.
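// For reference, a varargs caller materializes that count itself; e.g. for
// printf("%f\n", x) the caller emits something like "movl $1, %eax" before
// the call, so %al == 1 here and only XMM0 actually carries an argument.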
21002 // Create the new basic blocks. One block contains all the XMM stores,
21003 // and one block is the final destination regardless of whether any
21004 // stores were performed.
21005 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
21006 MachineFunction *F = MBB->getParent();
21007 MachineFunction::iterator MBBIter = MBB;
21008 ++MBBIter;
21009 MachineBasicBlock *XMMSaveMBB = F->CreateMachineBasicBlock(LLVM_BB);
21010 MachineBasicBlock *EndMBB = F->CreateMachineBasicBlock(LLVM_BB);
21011 F->insert(MBBIter, XMMSaveMBB);
21012 F->insert(MBBIter, EndMBB);
21014 // Transfer the remainder of MBB and its successor edges to EndMBB.
21015 EndMBB->splice(EndMBB->begin(), MBB,
21016 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
21017 EndMBB->transferSuccessorsAndUpdatePHIs(MBB);
21019 // The original block will now fall through to the XMM save block.
21020 MBB->addSuccessor(XMMSaveMBB);
21021 // The XMMSaveMBB will fall through to the end block.
21022 XMMSaveMBB->addSuccessor(EndMBB);
21024 // Now add the instructions.
21025 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21026 DebugLoc DL = MI->getDebugLoc();
21028 unsigned CountReg = MI->getOperand(0).getReg();
21029 int64_t RegSaveFrameIndex = MI->getOperand(1).getImm();
21030 int64_t VarArgsFPOffset = MI->getOperand(2).getImm();
21032 if (!Subtarget->isTargetWin64()) {
21033 // If %al is 0, branch around the XMM save block.
21034 BuildMI(MBB, DL, TII->get(X86::TEST8rr)).addReg(CountReg).addReg(CountReg);
21035 BuildMI(MBB, DL, TII->get(X86::JE_1)).addMBB(EndMBB);
21036 MBB->addSuccessor(EndMBB);
21037 }
21039 // Make sure the last operand is EFLAGS, which gets clobbered by the branch
21040 // that was just emitted, but clearly shouldn't be "saved".
21041 assert((MI->getNumOperands() <= 3 ||
21042 !MI->getOperand(MI->getNumOperands() - 1).isReg() ||
21043 MI->getOperand(MI->getNumOperands() - 1).getReg() == X86::EFLAGS)
21044 && "Expected last argument to be EFLAGS");
21045 unsigned MOVOpc = Subtarget->hasFp256() ? X86::VMOVAPSmr : X86::MOVAPSmr;
21046 // In the XMM save block, save all the XMM argument registers.
21047 for (int i = 3, e = MI->getNumOperands() - 1; i != e; ++i) {
21048 int64_t Offset = (i - 3) * 16 + VarArgsFPOffset;
21049 MachineMemOperand *MMO =
21050 F->getMachineMemOperand(
21051 MachinePointerInfo::getFixedStack(RegSaveFrameIndex, Offset),
21052 MachineMemOperand::MOStore,
21053 /*Size=*/16, /*Align=*/16);
21054 BuildMI(XMMSaveMBB, DL, TII->get(MOVOpc))
21055 .addFrameIndex(RegSaveFrameIndex)
21056 .addImm(/*Scale=*/1)
21057 .addReg(/*IndexReg=*/0)
21058 .addImm(/*Disp=*/Offset)
21059 .addReg(/*Segment=*/0)
21060 .addReg(MI->getOperand(i).getReg())
21061 .addMemOperand(MMO);
21062 }
21064 MI->eraseFromParent(); // The pseudo instruction is gone now.
21066 return EndMBB;
21067 }
21069 // The EFLAGS operand of SelectItr might be missing a kill marker
21070 // because there were multiple uses of EFLAGS, and ISel didn't know
21071 // which to mark. Figure out whether SelectItr should have had a
21072 // kill marker, and set it if it should. Returns the correct kill
21073 // marker value.
21074 static bool checkAndUpdateEFLAGSKill(MachineBasicBlock::iterator SelectItr,
21075 MachineBasicBlock* BB,
21076 const TargetRegisterInfo* TRI) {
21077 // Scan forward through BB for a use/def of EFLAGS.
21078 MachineBasicBlock::iterator miI(std::next(SelectItr));
21079 for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) {
21080 const MachineInstr& mi = *miI;
21081 if (mi.readsRegister(X86::EFLAGS))
21082 return false;
21083 if (mi.definesRegister(X86::EFLAGS))
21084 break; // Should have kill-flag - update below.
21085 }
21087 // If we hit the end of the block, check whether EFLAGS is live into a
21088 // successor.
21089 if (miI == BB->end()) {
21090 for (MachineBasicBlock::succ_iterator sItr = BB->succ_begin(),
21091 sEnd = BB->succ_end();
21092 sItr != sEnd; ++sItr) {
21093 MachineBasicBlock* succ = *sItr;
21094 if (succ->isLiveIn(X86::EFLAGS))
21095 return false;
21096 }
21097 }
21099 // We found a def, or hit the end of the basic block and EFLAGS wasn't live
21100 // out. SelectMI should have a kill flag on EFLAGS.
21101 SelectItr->addRegisterKilled(X86::EFLAGS, TRI);
21102 return true;
21103 }
21105 MachineBasicBlock *
21106 X86TargetLowering::EmitLoweredSelect(MachineInstr *MI,
21107 MachineBasicBlock *BB) const {
21108 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21109 DebugLoc DL = MI->getDebugLoc();
21111 // To "insert" a SELECT_CC instruction, we actually have to insert the
21112 // diamond control-flow pattern. The incoming instruction knows the
21113 // destination vreg to set, the condition code register to branch on, the
21114 // true/false values to select between, and a branch opcode to use.
21115 const BasicBlock *LLVM_BB = BB->getBasicBlock();
21116 MachineFunction::iterator It = BB;
21117 ++It;
21122 // cmpTY ccX, r1, r2
21124 // fallthrough --> copy0MBB
21125 MachineBasicBlock *thisMBB = BB;
21126 MachineFunction *F = BB->getParent();
21127 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
21128 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
21129 F->insert(It, copy0MBB);
21130 F->insert(It, sinkMBB);
21132 // If the EFLAGS register isn't dead in the terminator, then claim that it's
21133 // live into the sink and copy blocks.
21134 const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
21135 if (!MI->killsRegister(X86::EFLAGS) &&
21136 !checkAndUpdateEFLAGSKill(MI, BB, TRI)) {
21137 copy0MBB->addLiveIn(X86::EFLAGS);
21138 sinkMBB->addLiveIn(X86::EFLAGS);
21141 // Transfer the remainder of BB and its successor edges to sinkMBB.
21142 sinkMBB->splice(sinkMBB->begin(), BB,
21143 std::next(MachineBasicBlock::iterator(MI)), BB->end());
21144 sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
21146 // Add the true and fallthrough blocks as its successors.
21147 BB->addSuccessor(copy0MBB);
21148 BB->addSuccessor(sinkMBB);
21150 // Create the conditional branch instruction.
21151 unsigned Opc =
21152 X86::GetCondBranchFromCond((X86::CondCode)MI->getOperand(3).getImm());
21153 BuildMI(BB, DL, TII->get(Opc)).addMBB(sinkMBB);
21156 // %FalseValue = ...
21157 // # fallthrough to sinkMBB
21158 copy0MBB->addSuccessor(sinkMBB);
21161 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
21163 BuildMI(*sinkMBB, sinkMBB->begin(), DL,
21164 TII->get(X86::PHI), MI->getOperand(0).getReg())
21165 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB)
21166 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
21168 MI->eraseFromParent(); // The pseudo instruction is gone now.
21169 return sinkMBB;
21170 }
21172 MachineBasicBlock *
21173 X86TargetLowering::EmitLoweredSegAlloca(MachineInstr *MI,
21174 MachineBasicBlock *BB) const {
21175 MachineFunction *MF = BB->getParent();
21176 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21177 DebugLoc DL = MI->getDebugLoc();
21178 const BasicBlock *LLVM_BB = BB->getBasicBlock();
21180 assert(MF->shouldSplitStack());
21182 const bool Is64Bit = Subtarget->is64Bit();
21183 const bool IsLP64 = Subtarget->isTarget64BitLP64();
21185 const unsigned TlsReg = Is64Bit ? X86::FS : X86::GS;
21186 const unsigned TlsOffset = IsLP64 ? 0x70 : Is64Bit ? 0x40 : 0x30;
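// Illustrative: with the split-stack scheme the per-thread stack limit
// lives in the TCB at these offsets, so the comparison emitted below ends
// up looking like "cmpq %rXX, %fs:0x70" on x86-64 (or "%gs:0x30" on i386).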
21189 // ... [Till the alloca]
21190 // If stacklet is not large enough, jump to mallocMBB
21192 // bumpMBB:
21193 // Allocate by subtracting from RSP
21194 // Jump to continueMBB
21196 // mallocMBB:
21197 // Allocate by call to runtime
21199 // continueMBB:
21201 // [rest of original BB]
21204 MachineBasicBlock *mallocMBB = MF->CreateMachineBasicBlock(LLVM_BB);
21205 MachineBasicBlock *bumpMBB = MF->CreateMachineBasicBlock(LLVM_BB);
21206 MachineBasicBlock *continueMBB = MF->CreateMachineBasicBlock(LLVM_BB);
21208 MachineRegisterInfo &MRI = MF->getRegInfo();
21209 const TargetRegisterClass *AddrRegClass =
21210 getRegClassFor(getPointerTy());
21212 unsigned mallocPtrVReg = MRI.createVirtualRegister(AddrRegClass),
21213 bumpSPPtrVReg = MRI.createVirtualRegister(AddrRegClass),
21214 tmpSPVReg = MRI.createVirtualRegister(AddrRegClass),
21215 SPLimitVReg = MRI.createVirtualRegister(AddrRegClass),
21216 sizeVReg = MI->getOperand(1).getReg(),
21217 physSPReg = IsLP64 || Subtarget->isTargetNaCl64() ? X86::RSP : X86::ESP;
21219 MachineFunction::iterator MBBIter = BB;
21220 ++MBBIter;
21222 MF->insert(MBBIter, bumpMBB);
21223 MF->insert(MBBIter, mallocMBB);
21224 MF->insert(MBBIter, continueMBB);
21226 continueMBB->splice(continueMBB->begin(), BB,
21227 std::next(MachineBasicBlock::iterator(MI)), BB->end());
21228 continueMBB->transferSuccessorsAndUpdatePHIs(BB);
21230 // Add code to the main basic block to check if the stack limit has been hit,
21231 // and if so, jump to mallocMBB otherwise to bumpMBB.
21232 BuildMI(BB, DL, TII->get(TargetOpcode::COPY), tmpSPVReg).addReg(physSPReg);
21233 BuildMI(BB, DL, TII->get(IsLP64 ? X86::SUB64rr:X86::SUB32rr), SPLimitVReg)
21234 .addReg(tmpSPVReg).addReg(sizeVReg);
21235 BuildMI(BB, DL, TII->get(IsLP64 ? X86::CMP64mr:X86::CMP32mr))
21236 .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg)
21237 .addReg(SPLimitVReg);
21238 BuildMI(BB, DL, TII->get(X86::JG_1)).addMBB(mallocMBB);
21240 // bumpMBB simply decreases the stack pointer, since we know the current
21241 // stacklet has enough space.
21242 BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), physSPReg)
21243 .addReg(SPLimitVReg);
21244 BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), bumpSPPtrVReg)
21245 .addReg(SPLimitVReg);
21246 BuildMI(bumpMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);
21248 // Calls into a routine in libgcc to allocate more space from the heap.
21249 const uint32_t *RegMask =
21250 Subtarget->getRegisterInfo()->getCallPreservedMask(CallingConv::C);
21251 if (IsLP64) {
21252 BuildMI(mallocMBB, DL, TII->get(X86::MOV64rr), X86::RDI)
21253 .addReg(sizeVReg);
21254 BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
21255 .addExternalSymbol("__morestack_allocate_stack_space")
21256 .addRegMask(RegMask)
21257 .addReg(X86::RDI, RegState::Implicit)
21258 .addReg(X86::RAX, RegState::ImplicitDefine);
21259 } else if (Is64Bit) {
21260 BuildMI(mallocMBB, DL, TII->get(X86::MOV32rr), X86::EDI)
21261 .addReg(sizeVReg);
21262 BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
21263 .addExternalSymbol("__morestack_allocate_stack_space")
21264 .addRegMask(RegMask)
21265 .addReg(X86::EDI, RegState::Implicit)
21266 .addReg(X86::EAX, RegState::ImplicitDefine);
21267 } else {
21268 BuildMI(mallocMBB, DL, TII->get(X86::SUB32ri), physSPReg).addReg(physSPReg)
21269 .addImm(16);
21270 BuildMI(mallocMBB, DL, TII->get(X86::PUSH32r)).addReg(sizeVReg);
21271 BuildMI(mallocMBB, DL, TII->get(X86::CALLpcrel32))
21272 .addExternalSymbol("__morestack_allocate_stack_space")
21273 .addRegMask(RegMask)
21274 .addReg(X86::EAX, RegState::ImplicitDefine);
21275 }
21277 if (!Is64Bit)
21278 BuildMI(mallocMBB, DL, TII->get(X86::ADD32ri), physSPReg).addReg(physSPReg)
21279 .addImm(16);
21281 BuildMI(mallocMBB, DL, TII->get(TargetOpcode::COPY), mallocPtrVReg)
21282 .addReg(IsLP64 ? X86::RAX : X86::EAX);
21283 BuildMI(mallocMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);
21285 // Set up the CFG correctly.
21286 BB->addSuccessor(bumpMBB);
21287 BB->addSuccessor(mallocMBB);
21288 mallocMBB->addSuccessor(continueMBB);
21289 bumpMBB->addSuccessor(continueMBB);
21291 // Take care of the PHI nodes.
21292 BuildMI(*continueMBB, continueMBB->begin(), DL, TII->get(X86::PHI),
21293 MI->getOperand(0).getReg())
21294 .addReg(mallocPtrVReg).addMBB(mallocMBB)
21295 .addReg(bumpSPPtrVReg).addMBB(bumpMBB);
21297 // Delete the original pseudo instruction.
21298 MI->eraseFromParent();
21301 return continueMBB;
21302 }
21304 MachineBasicBlock *
21305 X86TargetLowering::EmitLoweredWinAlloca(MachineInstr *MI,
21306 MachineBasicBlock *BB) const {
21307 DebugLoc DL = MI->getDebugLoc();
21309 assert(!Subtarget->isTargetMachO());
21311 X86FrameLowering::emitStackProbeCall(*BB->getParent(), *BB, MI, DL);
21313 MI->eraseFromParent(); // The pseudo instruction is gone now.
21314 return BB;
21315 }
21317 MachineBasicBlock *
21318 X86TargetLowering::EmitLoweredTLSCall(MachineInstr *MI,
21319 MachineBasicBlock *BB) const {
21320 // This is pretty easy. We're taking the value that we received from
21321 // our load from the relocation, sticking it in either RDI (x86-64)
21322 // or EAX and doing an indirect call. The return value will then
21323 // be in the normal return register.
21324 MachineFunction *F = BB->getParent();
21325 const X86InstrInfo *TII = Subtarget->getInstrInfo();
21326 DebugLoc DL = MI->getDebugLoc();
21328 assert(Subtarget->isTargetDarwin() && "Darwin only instr emitted?");
21329 assert(MI->getOperand(3).isGlobal() && "This should be a global");
21331 // Get a register mask for the lowered call.
21332 // FIXME: The 32-bit calls have non-standard calling conventions. Use a
21333 // proper register mask.
21334 const uint32_t *RegMask =
21335 Subtarget->getRegisterInfo()->getCallPreservedMask(CallingConv::C);
21336 if (Subtarget->is64Bit()) {
21337 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
21338 TII->get(X86::MOV64rm), X86::RDI)
21340 .addImm(0).addReg(0)
21341 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
21342 MI->getOperand(3).getTargetFlags())
21344 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL64m));
21345 addDirectMem(MIB, X86::RDI);
21346 MIB.addReg(X86::RAX, RegState::ImplicitDefine).addRegMask(RegMask);
21347 } else if (F->getTarget().getRelocationModel() != Reloc::PIC_) {
21348 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
21349 TII->get(X86::MOV32rm), X86::EAX)
21351 .addImm(0).addReg(0)
21352 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
21353 MI->getOperand(3).getTargetFlags())
21355 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
21356 addDirectMem(MIB, X86::EAX);
21357 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
21358 } else {
21359 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
21360 TII->get(X86::MOV32rm), X86::EAX)
21361 .addReg(TII->getGlobalBaseReg(F))
21362 .addImm(0).addReg(0)
21363 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
21364 MI->getOperand(3).getTargetFlags())
21366 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
21367 addDirectMem(MIB, X86::EAX);
21368 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
21369 }
21371 MI->eraseFromParent(); // The pseudo instruction is gone now.
21372 return BB;
21373 }
21375 MachineBasicBlock *
21376 X86TargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
21377 MachineBasicBlock *MBB) const {
21378 DebugLoc DL = MI->getDebugLoc();
21379 MachineFunction *MF = MBB->getParent();
21380 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21381 MachineRegisterInfo &MRI = MF->getRegInfo();
21383 const BasicBlock *BB = MBB->getBasicBlock();
21384 MachineFunction::iterator I = MBB;
21385 ++I;
21387 // Memory Reference
21388 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
21389 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
21391 unsigned DstReg;
21392 unsigned MemOpndSlot = 0;
21394 unsigned CurOp = 0;
21396 DstReg = MI->getOperand(CurOp++).getReg();
21397 const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
21398 assert(RC->hasType(MVT::i32) && "Invalid destination!");
21399 unsigned mainDstReg = MRI.createVirtualRegister(RC);
21400 unsigned restoreDstReg = MRI.createVirtualRegister(RC);
21402 MemOpndSlot = CurOp;
21404 MVT PVT = getPointerTy();
21405 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
21406 "Invalid Pointer Size!");
21408 // For v = setjmp(buf), we generate
21410 // thisMBB:
21411 // buf[LabelOffset] = restoreMBB
21412 // SjLjSetup restoreMBB
21414 // mainMBB:
21415 //  v_main = 0
21417 // sinkMBB:
21418 // v = phi(main, restore)
21420 // restoreMBB:
21421 // if base pointer being used, load it from frame
21422 //  v_restore = 1
21424 MachineBasicBlock *thisMBB = MBB;
21425 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
21426 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
21427 MachineBasicBlock *restoreMBB = MF->CreateMachineBasicBlock(BB);
21428 MF->insert(I, mainMBB);
21429 MF->insert(I, sinkMBB);
21430 MF->push_back(restoreMBB);
21432 MachineInstrBuilder MIB;
21434 // Transfer the remainder of BB and its successor edges to sinkMBB.
21435 sinkMBB->splice(sinkMBB->begin(), MBB,
21436 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
21437 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
21440 unsigned PtrStoreOpc = 0;
21441 unsigned LabelReg = 0;
21442 const int64_t LabelOffset = 1 * PVT.getStoreSize();
21443 Reloc::Model RM = MF->getTarget().getRelocationModel();
21444 bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
21445 (RM == Reloc::Static || RM == Reloc::DynamicNoPIC);
21447 // Prepare IP either in reg or imm.
21448 if (!UseImmLabel) {
21449 PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
21450 const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
21451 LabelReg = MRI.createVirtualRegister(PtrRC);
21452 if (Subtarget->is64Bit()) {
21453 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA64r), LabelReg)
21454 .addReg(X86::RIP)
21455 .addImm(0)
21456 .addReg(0)
21457 .addMBB(restoreMBB)
21458 .addReg(0);
21459 } else {
21460 const X86InstrInfo *XII = static_cast<const X86InstrInfo*>(TII);
21461 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA32r), LabelReg)
21462 .addReg(XII->getGlobalBaseReg(MF))
21463 .addImm(0)
21464 .addReg(0)
21465 .addMBB(restoreMBB, Subtarget->ClassifyBlockAddressReference())
21466 .addReg(0);
21467 }
21468 } else
21469 PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
21471 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrStoreOpc));
21472 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
21473 if (i == X86::AddrDisp)
21474 MIB.addDisp(MI->getOperand(MemOpndSlot + i), LabelOffset);
21475 else
21476 MIB.addOperand(MI->getOperand(MemOpndSlot + i));
21477 }
21478 if (!UseImmLabel)
21479 MIB.addReg(LabelReg);
21480 else
21481 MIB.addMBB(restoreMBB);
21482 MIB.setMemRefs(MMOBegin, MMOEnd);
21484 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::EH_SjLj_Setup))
21485 .addMBB(restoreMBB);
21487 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
21488 MIB.addRegMask(RegInfo->getNoPreservedMask());
21489 thisMBB->addSuccessor(mainMBB);
21490 thisMBB->addSuccessor(restoreMBB);
21494 BuildMI(mainMBB, DL, TII->get(X86::MOV32r0), mainDstReg);
21495 mainMBB->addSuccessor(sinkMBB);
21498 BuildMI(*sinkMBB, sinkMBB->begin(), DL,
21499 TII->get(X86::PHI), DstReg)
21500 .addReg(mainDstReg).addMBB(mainMBB)
21501 .addReg(restoreDstReg).addMBB(restoreMBB);
21504 if (RegInfo->hasBasePointer(*MF)) {
21505 const bool Uses64BitFramePtr =
21506 Subtarget->isTarget64BitLP64() || Subtarget->isTargetNaCl64();
21507 X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
21508 X86FI->setRestoreBasePointer(MF);
21509 unsigned FramePtr = RegInfo->getFrameRegister(*MF);
21510 unsigned BasePtr = RegInfo->getBaseRegister();
21511 unsigned Opm = Uses64BitFramePtr ? X86::MOV64rm : X86::MOV32rm;
21512 addRegOffset(BuildMI(restoreMBB, DL, TII->get(Opm), BasePtr),
21513 FramePtr, true, X86FI->getRestoreBasePointerOffset())
21514 .setMIFlag(MachineInstr::FrameSetup);
21515 }
21516 BuildMI(restoreMBB, DL, TII->get(X86::MOV32ri), restoreDstReg).addImm(1);
21517 BuildMI(restoreMBB, DL, TII->get(X86::JMP_1)).addMBB(sinkMBB);
21518 restoreMBB->addSuccessor(sinkMBB);
21520 MI->eraseFromParent();
21521 return sinkMBB;
21522 }
21524 MachineBasicBlock *
21525 X86TargetLowering::emitEHSjLjLongJmp(MachineInstr *MI,
21526 MachineBasicBlock *MBB) const {
21527 DebugLoc DL = MI->getDebugLoc();
21528 MachineFunction *MF = MBB->getParent();
21529 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21530 MachineRegisterInfo &MRI = MF->getRegInfo();
21532 // Memory Reference
21533 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
21534 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
21536 MVT PVT = getPointerTy();
21537 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
21538 "Invalid Pointer Size!");
21540 const TargetRegisterClass *RC =
21541 (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
21542 unsigned Tmp = MRI.createVirtualRegister(RC);
21543 // Since FP is only updated here but NOT referenced, it's treated as GPR.
21544 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
21545 unsigned FP = (PVT == MVT::i64) ? X86::RBP : X86::EBP;
21546 unsigned SP = RegInfo->getStackRegister();
21548 MachineInstrBuilder MIB;
21550 const int64_t LabelOffset = 1 * PVT.getStoreSize();
21551 const int64_t SPOffset = 2 * PVT.getStoreSize();
21553 unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
21554 unsigned IJmpOpc = (PVT == MVT::i64) ? X86::JMP64r : X86::JMP32r;
21556 // Reload FP
21557 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), FP);
21558 for (unsigned i = 0; i < X86::AddrNumOperands; ++i)
21559 MIB.addOperand(MI->getOperand(i));
21560 MIB.setMemRefs(MMOBegin, MMOEnd);
21561 // Reload IP
21562 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), Tmp);
21563 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
21564 if (i == X86::AddrDisp)
21565 MIB.addDisp(MI->getOperand(i), LabelOffset);
21566 else
21567 MIB.addOperand(MI->getOperand(i));
21568 }
21569 MIB.setMemRefs(MMOBegin, MMOEnd);
21570 // Reload SP
21571 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), SP);
21572 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
21573 if (i == X86::AddrDisp)
21574 MIB.addDisp(MI->getOperand(i), SPOffset);
21575 else
21576 MIB.addOperand(MI->getOperand(i));
21577 }
21578 MIB.setMemRefs(MMOBegin, MMOEnd);
21580 BuildMI(*MBB, MI, DL, TII->get(IJmpOpc)).addReg(Tmp);
21582 MI->eraseFromParent();
21583 return MBB;
21584 }
21586 // Replace 213-type (isel default) FMA3 instructions with 231-type for
21587 // accumulator loops. Writing back to the accumulator allows the coalescer
21588 // to remove extra copies in the loop.
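// For reference (illustrative, packed-single AT&T syntax), the digits say
// which operands are multiplied and which is added:
//   vfmadd213ps %xmm2, %xmm1, %xmm0   ; xmm0 = (xmm1 * xmm0) + xmm2
//   vfmadd231ps %xmm2, %xmm1, %xmm0   ; xmm0 = (xmm1 * xmm2) + xmm0
// The 231 form adds into the destination register, which is what lets the
// coalescer tie the PHI'd accumulator to the result in these loops.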
21589 MachineBasicBlock *
21590 X86TargetLowering::emitFMA3Instr(MachineInstr *MI,
21591 MachineBasicBlock *MBB) const {
21592 MachineOperand &AddendOp = MI->getOperand(3);
21594 // Bail out early if the addend isn't a register - we can't switch these.
21595 if (!AddendOp.isReg())
21596 return MBB;
21598 MachineFunction &MF = *MBB->getParent();
21599 MachineRegisterInfo &MRI = MF.getRegInfo();
21601 // Check whether the addend is defined by a PHI:
21602 assert(MRI.hasOneDef(AddendOp.getReg()) && "Multiple defs in SSA?");
21603 MachineInstr &AddendDef = *MRI.def_instr_begin(AddendOp.getReg());
21604 if (!AddendDef.isPHI())
21605 return MBB;
21607 // Look for the following pattern:
21609 // %addend = phi [%entry, 0], [%loop, %result]
21611 // %result<tied1> = FMA213 %m2<tied0>, %m1, %addend
21613 // which we rewrite to the 231 form, with the accumulator in the tied operand:
21615 // %addend = phi [%entry, 0], [%loop, %result]
21617 // %result<tied1> = FMA231 %addend<tied0>, %m1, %m2
21619 for (unsigned i = 1, e = AddendDef.getNumOperands(); i < e; i += 2) {
21620 assert(AddendDef.getOperand(i).isReg());
21621 MachineOperand PHISrcOp = AddendDef.getOperand(i);
21622 MachineInstr &PHISrcInst = *MRI.def_instr_begin(PHISrcOp.getReg());
21623 if (&PHISrcInst == MI) {
21624 // Found a matching instruction.
21625 unsigned NewFMAOpc = 0;
21626 switch (MI->getOpcode()) {
21627 case X86::VFMADDPDr213r: NewFMAOpc = X86::VFMADDPDr231r; break;
21628 case X86::VFMADDPSr213r: NewFMAOpc = X86::VFMADDPSr231r; break;
21629 case X86::VFMADDSDr213r: NewFMAOpc = X86::VFMADDSDr231r; break;
21630 case X86::VFMADDSSr213r: NewFMAOpc = X86::VFMADDSSr231r; break;
21631 case X86::VFMSUBPDr213r: NewFMAOpc = X86::VFMSUBPDr231r; break;
21632 case X86::VFMSUBPSr213r: NewFMAOpc = X86::VFMSUBPSr231r; break;
21633 case X86::VFMSUBSDr213r: NewFMAOpc = X86::VFMSUBSDr231r; break;
21634 case X86::VFMSUBSSr213r: NewFMAOpc = X86::VFMSUBSSr231r; break;
21635 case X86::VFNMADDPDr213r: NewFMAOpc = X86::VFNMADDPDr231r; break;
21636 case X86::VFNMADDPSr213r: NewFMAOpc = X86::VFNMADDPSr231r; break;
21637 case X86::VFNMADDSDr213r: NewFMAOpc = X86::VFNMADDSDr231r; break;
21638 case X86::VFNMADDSSr213r: NewFMAOpc = X86::VFNMADDSSr231r; break;
21639 case X86::VFNMSUBPDr213r: NewFMAOpc = X86::VFNMSUBPDr231r; break;
21640 case X86::VFNMSUBPSr213r: NewFMAOpc = X86::VFNMSUBPSr231r; break;
21641 case X86::VFNMSUBSDr213r: NewFMAOpc = X86::VFNMSUBSDr231r; break;
21642 case X86::VFNMSUBSSr213r: NewFMAOpc = X86::VFNMSUBSSr231r; break;
21643 case X86::VFMADDSUBPDr213r: NewFMAOpc = X86::VFMADDSUBPDr231r; break;
21644 case X86::VFMADDSUBPSr213r: NewFMAOpc = X86::VFMADDSUBPSr231r; break;
21645 case X86::VFMSUBADDPDr213r: NewFMAOpc = X86::VFMSUBADDPDr231r; break;
21646 case X86::VFMSUBADDPSr213r: NewFMAOpc = X86::VFMSUBADDPSr231r; break;
21648 case X86::VFMADDPDr213rY: NewFMAOpc = X86::VFMADDPDr231rY; break;
21649 case X86::VFMADDPSr213rY: NewFMAOpc = X86::VFMADDPSr231rY; break;
21650 case X86::VFMSUBPDr213rY: NewFMAOpc = X86::VFMSUBPDr231rY; break;
21651 case X86::VFMSUBPSr213rY: NewFMAOpc = X86::VFMSUBPSr231rY; break;
21652 case X86::VFNMADDPDr213rY: NewFMAOpc = X86::VFNMADDPDr231rY; break;
21653 case X86::VFNMADDPSr213rY: NewFMAOpc = X86::VFNMADDPSr231rY; break;
21654 case X86::VFNMSUBPDr213rY: NewFMAOpc = X86::VFNMSUBPDr231rY; break;
21655 case X86::VFNMSUBPSr213rY: NewFMAOpc = X86::VFNMSUBPSr231rY; break;
21656 case X86::VFMADDSUBPDr213rY: NewFMAOpc = X86::VFMADDSUBPDr231rY; break;
21657 case X86::VFMADDSUBPSr213rY: NewFMAOpc = X86::VFMADDSUBPSr231rY; break;
21658 case X86::VFMSUBADDPDr213rY: NewFMAOpc = X86::VFMSUBADDPDr231rY; break;
21659 case X86::VFMSUBADDPSr213rY: NewFMAOpc = X86::VFMSUBADDPSr231rY; break;
21660 default: llvm_unreachable("Unrecognized FMA variant.");
21661 }
21663 const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
21664 MachineInstrBuilder MIB =
21665 BuildMI(MF, MI->getDebugLoc(), TII.get(NewFMAOpc))
21666 .addOperand(MI->getOperand(0))
21667 .addOperand(MI->getOperand(3))
21668 .addOperand(MI->getOperand(2))
21669 .addOperand(MI->getOperand(1));
21670 MBB->insert(MachineBasicBlock::iterator(MI), MIB);
21671 MI->eraseFromParent();
21672 }
21673 }
21675 return MBB;
21676 }
21678 MachineBasicBlock *
21679 X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
21680 MachineBasicBlock *BB) const {
21681 switch (MI->getOpcode()) {
21682 default: llvm_unreachable("Unexpected instr type to insert");
21683 case X86::TAILJMPd64:
21684 case X86::TAILJMPr64:
21685 case X86::TAILJMPm64:
21686 case X86::TAILJMPd64_REX:
21687 case X86::TAILJMPr64_REX:
21688 case X86::TAILJMPm64_REX:
21689 llvm_unreachable("TAILJMP64 would not be touched here.");
21690 case X86::TCRETURNdi64:
21691 case X86::TCRETURNri64:
21692 case X86::TCRETURNmi64:
21693 return BB;
21694 case X86::WIN_ALLOCA:
21695 return EmitLoweredWinAlloca(MI, BB);
21696 case X86::SEG_ALLOCA_32:
21697 case X86::SEG_ALLOCA_64:
21698 return EmitLoweredSegAlloca(MI, BB);
21699 case X86::TLSCall_32:
21700 case X86::TLSCall_64:
21701 return EmitLoweredTLSCall(MI, BB);
21702 case X86::CMOV_GR8:
21703 case X86::CMOV_FR32:
21704 case X86::CMOV_FR64:
21705 case X86::CMOV_V4F32:
21706 case X86::CMOV_V2F64:
21707 case X86::CMOV_V2I64:
21708 case X86::CMOV_V8F32:
21709 case X86::CMOV_V4F64:
21710 case X86::CMOV_V4I64:
21711 case X86::CMOV_V16F32:
21712 case X86::CMOV_V8F64:
21713 case X86::CMOV_V8I64:
21714 case X86::CMOV_GR16:
21715 case X86::CMOV_GR32:
21716 case X86::CMOV_RFP32:
21717 case X86::CMOV_RFP64:
21718 case X86::CMOV_RFP80:
21719 return EmitLoweredSelect(MI, BB);
21721 case X86::FP32_TO_INT16_IN_MEM:
21722 case X86::FP32_TO_INT32_IN_MEM:
21723 case X86::FP32_TO_INT64_IN_MEM:
21724 case X86::FP64_TO_INT16_IN_MEM:
21725 case X86::FP64_TO_INT32_IN_MEM:
21726 case X86::FP64_TO_INT64_IN_MEM:
21727 case X86::FP80_TO_INT16_IN_MEM:
21728 case X86::FP80_TO_INT32_IN_MEM:
21729 case X86::FP80_TO_INT64_IN_MEM: {
21730 MachineFunction *F = BB->getParent();
21731 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21732 DebugLoc DL = MI->getDebugLoc();
21734 // Change the floating point control register to use "round towards zero"
21735 // mode when truncating to an integer value.
21736 int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2, false);
21737 addFrameReference(BuildMI(*BB, MI, DL,
21738 TII->get(X86::FNSTCW16m)), CWFrameIdx);
21740 // Load the old value of the high byte of the control word...
21741 unsigned OldCW =
21742 F->getRegInfo().createVirtualRegister(&X86::GR16RegClass);
21743 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16rm), OldCW),
21744 CWFrameIdx);
21746 // Set the high part to be round to zero...
21747 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mi)), CWFrameIdx)
21748 .addImm(0xC7F);
21750 // Reload the modified control word now...
21751 addFrameReference(BuildMI(*BB, MI, DL,
21752 TII->get(X86::FLDCW16m)), CWFrameIdx);
    // Restore the memory image of control word to original value
    addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mr)), CWFrameIdx)
      .addReg(OldCW);
    // Get the X86 opcode to use.
    unsigned Opc;
    switch (MI->getOpcode()) {
21761 default: llvm_unreachable("illegal opcode!");
21762 case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break;
21763 case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break;
21764 case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break;
21765 case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break;
21766 case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break;
21767 case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break;
21768 case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break;
21769 case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break;
    case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break;
    }

    X86AddressMode AM;
    MachineOperand &Op = MI->getOperand(0);
    if (Op.isReg()) {
      AM.BaseType = X86AddressMode::RegBase;
      AM.Base.Reg = Op.getReg();
    } else {
      AM.BaseType = X86AddressMode::FrameIndexBase;
      AM.Base.FrameIndex = Op.getIndex();
    }
    Op = MI->getOperand(1);
    if (Op.isImm())
      AM.Scale = Op.getImm();
    Op = MI->getOperand(2);
    if (Op.isImm())
      AM.IndexReg = Op.getImm();
    Op = MI->getOperand(3);
    if (Op.isGlobal()) {
      AM.GV = Op.getGlobal();
    } else {
      AM.Disp = Op.getImm();
    }
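    // With the address mode rebuilt from the pseudo's memory operands, emit the
    // truncating x87 integer store (IST_Fp*) selected above; the value to store
    // is the FP register in the operand slot just past the address operands.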
21794 addFullAddress(BuildMI(*BB, MI, DL, TII->get(Opc)), AM)
21795 .addReg(MI->getOperand(X86::AddrNumOperands).getReg());
21797 // Reload the original control word now.
21798 addFrameReference(BuildMI(*BB, MI, DL,
21799 TII->get(X86::FLDCW16m)), CWFrameIdx);
    MI->eraseFromParent(); // The pseudo instruction is gone now.
    return BB;
  }
21804 // String/text processing lowering.
21805 case X86::PCMPISTRM128REG:
21806 case X86::VPCMPISTRM128REG:
21807 case X86::PCMPISTRM128MEM:
21808 case X86::VPCMPISTRM128MEM:
21809 case X86::PCMPESTRM128REG:
21810 case X86::VPCMPESTRM128REG:
21811 case X86::PCMPESTRM128MEM:
21812 case X86::VPCMPESTRM128MEM:
21813 assert(Subtarget->hasSSE42() &&
21814 "Target must have SSE4.2 or AVX features enabled");
21815 return EmitPCMPSTRM(MI, BB, Subtarget->getInstrInfo());
21817 // String/text processing lowering.
21818 case X86::PCMPISTRIREG:
21819 case X86::VPCMPISTRIREG:
21820 case X86::PCMPISTRIMEM:
21821 case X86::VPCMPISTRIMEM:
21822 case X86::PCMPESTRIREG:
21823 case X86::VPCMPESTRIREG:
21824 case X86::PCMPESTRIMEM:
21825 case X86::VPCMPESTRIMEM:
21826 assert(Subtarget->hasSSE42() &&
21827 "Target must have SSE4.2 or AVX features enabled");
21828 return EmitPCMPSTRI(MI, BB, Subtarget->getInstrInfo());
    // Thread synchronization.
  case X86::MONITOR:
    return EmitMonitor(MI, BB, Subtarget);

    // xbegin
  case X86::XBEGIN:
    return EmitXBegin(MI, BB, Subtarget->getInstrInfo());
21838 case X86::VASTART_SAVE_XMM_REGS:
21839 return EmitVAStartSaveXMMRegsWithCustomInserter(MI, BB);
21841 case X86::VAARG_64:
21842 return EmitVAARG64WithCustomInserter(MI, BB);
21844 case X86::EH_SjLj_SetJmp32:
21845 case X86::EH_SjLj_SetJmp64:
21846 return emitEHSjLjSetJmp(MI, BB);
21848 case X86::EH_SjLj_LongJmp32:
21849 case X86::EH_SjLj_LongJmp64:
21850 return emitEHSjLjLongJmp(MI, BB);
21852 case TargetOpcode::STATEPOINT:
21853 // As an implementation detail, STATEPOINT shares the STACKMAP format at
21854 // this point in the process. We diverge later.
21855 return emitPatchPoint(MI, BB);
21857 case TargetOpcode::STACKMAP:
21858 case TargetOpcode::PATCHPOINT:
21859 return emitPatchPoint(MI, BB);
21861 case X86::VFMADDPDr213r:
21862 case X86::VFMADDPSr213r:
21863 case X86::VFMADDSDr213r:
21864 case X86::VFMADDSSr213r:
21865 case X86::VFMSUBPDr213r:
21866 case X86::VFMSUBPSr213r:
21867 case X86::VFMSUBSDr213r:
21868 case X86::VFMSUBSSr213r:
21869 case X86::VFNMADDPDr213r:
21870 case X86::VFNMADDPSr213r:
21871 case X86::VFNMADDSDr213r:
21872 case X86::VFNMADDSSr213r:
21873 case X86::VFNMSUBPDr213r:
21874 case X86::VFNMSUBPSr213r:
21875 case X86::VFNMSUBSDr213r:
21876 case X86::VFNMSUBSSr213r:
21877 case X86::VFMADDSUBPDr213r:
21878 case X86::VFMADDSUBPSr213r:
21879 case X86::VFMSUBADDPDr213r:
21880 case X86::VFMSUBADDPSr213r:
21881 case X86::VFMADDPDr213rY:
21882 case X86::VFMADDPSr213rY:
21883 case X86::VFMSUBPDr213rY:
21884 case X86::VFMSUBPSr213rY:
21885 case X86::VFNMADDPDr213rY:
21886 case X86::VFNMADDPSr213rY:
21887 case X86::VFNMSUBPDr213rY:
21888 case X86::VFNMSUBPSr213rY:
21889 case X86::VFMADDSUBPDr213rY:
21890 case X86::VFMADDSUBPSr213rY:
21891 case X86::VFMSUBADDPDr213rY:
21892 case X86::VFMSUBADDPSr213rY:
    return emitFMA3Instr(MI, BB);
  }
}
21897 //===----------------------------------------------------------------------===//
21898 // X86 Optimization Hooks
21899 //===----------------------------------------------------------------------===//
void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
                                                      APInt &KnownZero,
                                                      APInt &KnownOne,
                                                      const SelectionDAG &DAG,
                                                      unsigned Depth) const {
21906 unsigned BitWidth = KnownZero.getBitWidth();
21907 unsigned Opc = Op.getOpcode();
21908 assert((Opc >= ISD::BUILTIN_OP_END ||
21909 Opc == ISD::INTRINSIC_WO_CHAIN ||
21910 Opc == ISD::INTRINSIC_W_CHAIN ||
21911 Opc == ISD::INTRINSIC_VOID) &&
21912 "Should use MaskedValueIsZero if you don't know whether Op"
21913 " is a target node!");
  KnownZero = KnownOne = APInt(BitWidth, 0);   // Don't know anything.
  switch (Opc) {
  default: break;
  case X86ISD::ADD: case X86ISD::SUB: case X86ISD::ADC: case X86ISD::SBB:
  case X86ISD::SMUL: case X86ISD::UMUL: case X86ISD::INC: case X86ISD::DEC:
  case X86ISD::OR: case X86ISD::XOR: case X86ISD::AND:
    // These nodes' second result is a boolean.
    if (Op.getResNo() == 0)
      break;
    // Fallthrough
  case X86ISD::SETCC:
    KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
    break;
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IntId = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
    unsigned NumLoBits = 0;
    switch (IntId) {
    default: break;
21941 case Intrinsic::x86_sse_movmsk_ps:
21942 case Intrinsic::x86_avx_movmsk_ps_256:
21943 case Intrinsic::x86_sse2_movmsk_pd:
21944 case Intrinsic::x86_avx_movmsk_pd_256:
21945 case Intrinsic::x86_mmx_pmovmskb:
21946 case Intrinsic::x86_sse2_pmovmskb_128:
21947 case Intrinsic::x86_avx2_pmovmskb: {
      // High bits of movmskp{s|d}, pmovmskb are known zero.
      switch (IntId) {
        default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
21951 case Intrinsic::x86_sse_movmsk_ps: NumLoBits = 4; break;
21952 case Intrinsic::x86_avx_movmsk_ps_256: NumLoBits = 8; break;
21953 case Intrinsic::x86_sse2_movmsk_pd: NumLoBits = 2; break;
21954 case Intrinsic::x86_avx_movmsk_pd_256: NumLoBits = 4; break;
21955 case Intrinsic::x86_mmx_pmovmskb: NumLoBits = 8; break;
21956 case Intrinsic::x86_sse2_pmovmskb_128: NumLoBits = 16; break;
21957 case Intrinsic::x86_avx2_pmovmskb: NumLoBits = 32; break;
      }
      KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - NumLoBits);
      break;
    }
    }
    break;
  }
  }
}
unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(
  SDValue Op,
  const SelectionDAG &,
  unsigned Depth) const {
21972 // SETCC_CARRY sets the dest to ~0 for true or 0 for false.
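  // Such a node is typically materialized as SBB reg,reg, so the result is
  // either all-zeros or all-ones; every bit is therefore a copy of the sign
  // bit and we can report the full scalar width below.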
21973 if (Op.getOpcode() == X86ISD::SETCC_CARRY)
    return Op.getValueType().getScalarType().getSizeInBits();

  // Fallback case.
  return 1;
}
21980 /// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
21981 /// node is a GlobalAddress + offset.
21982 bool X86TargetLowering::isGAPlusOffset(SDNode *N,
21983 const GlobalValue* &GA,
21984 int64_t &Offset) const {
21985 if (N->getOpcode() == X86ISD::Wrapper) {
21986 if (isa<GlobalAddressSDNode>(N->getOperand(0))) {
21987 GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal();
      Offset = cast<GlobalAddressSDNode>(N->getOperand(0))->getOffset();
      return true;
    }
  }
  return TargetLowering::isGAPlusOffset(N, GA, Offset);
}
21995 /// isShuffleHigh128VectorInsertLow - Checks whether the shuffle node is the
21996 /// same as extracting the high 128-bit part of 256-bit vector and then
21997 /// inserting the result into the low part of a new 256-bit vector
21998 static bool isShuffleHigh128VectorInsertLow(ShuffleVectorSDNode *SVOp) {
21999 EVT VT = SVOp->getValueType(0);
22000 unsigned NumElems = VT.getVectorNumElements();
22002 // vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u>
22003 for (unsigned i = 0, j = NumElems/2; i != NumElems/2; ++i, ++j)
22004 if (!isUndefOrEqual(SVOp->getMaskElt(i), j) ||
        SVOp->getMaskElt(j) >= 0)
      return false;

  return true;
}
22011 /// isShuffleLow128VectorInsertHigh - Checks whether the shuffle node is the
22012 /// same as extracting the low 128-bit part of 256-bit vector and then
22013 /// inserting the result into the high part of a new 256-bit vector
22014 static bool isShuffleLow128VectorInsertHigh(ShuffleVectorSDNode *SVOp) {
22015 EVT VT = SVOp->getValueType(0);
22016 unsigned NumElems = VT.getVectorNumElements();
22018 // vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1>
22019 for (unsigned i = NumElems/2, j = 0; i != NumElems; ++i, ++j)
22020 if (!isUndefOrEqual(SVOp->getMaskElt(i), j) ||
        SVOp->getMaskElt(j) >= 0)
      return false;

  return true;
}
22027 /// PerformShuffleCombine256 - Performs shuffle combines for 256-bit vectors.
22028 static SDValue PerformShuffleCombine256(SDNode *N, SelectionDAG &DAG,
22029 TargetLowering::DAGCombinerInfo &DCI,
                                        const X86Subtarget* Subtarget) {
  SDLoc dl(N);
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
22033 SDValue V1 = SVOp->getOperand(0);
22034 SDValue V2 = SVOp->getOperand(1);
22035 EVT VT = SVOp->getValueType(0);
22036 unsigned NumElems = VT.getVectorNumElements();
22038 if (V1.getOpcode() == ISD::CONCAT_VECTORS &&
22039 V2.getOpcode() == ISD::CONCAT_VECTORS) {
    //
    //    V      UNDEF      BUILD_VECTOR(0,...)   UNDEF
    //     \      /               \               /
    //   CONCAT_VECTOR            CONCAT_VECTOR
    //             \                 /
    //              RESULT: V + zero extended
    //
22050 if (V2.getOperand(0).getOpcode() != ISD::BUILD_VECTOR ||
22051 V2.getOperand(1).getOpcode() != ISD::UNDEF ||
        V1.getOperand(1).getOpcode() != ISD::UNDEF)
      return SDValue();

    if (!ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode()))
      return SDValue();
22058 // To match the shuffle mask, the first half of the mask should
22059 // be exactly the first vector, and all the rest a splat with the
22060 // first element of the second one.
22061 for (unsigned i = 0; i != NumElems/2; ++i)
22062 if (!isUndefOrEqual(SVOp->getMaskElt(i), i) ||
          !isUndefOrEqual(SVOp->getMaskElt(i+NumElems/2), NumElems))
        return SDValue();
22066 // If V1 is coming from a vector load then just fold to a VZEXT_LOAD.
22067 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(V1.getOperand(0))) {
22068 if (Ld->hasNUsesOfValue(1, 0)) {
22069 SDVTList Tys = DAG.getVTList(MVT::v4i64, MVT::Other);
        SDValue Ops[] = { Ld->getChain(), Ld->getBasePtr() };
        SDValue ResNode =
          DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops,
                                  Ld->getMemoryVT(),
                                  Ld->getPointerInfo(),
22075 Ld->getAlignment(),
22076 false/*isVolatile*/, true/*ReadMem*/,
22077 false/*WriteMem*/);
22079 // Make sure the newly-created LOAD is in the same position as Ld in
22080 // terms of dependency. We create a TokenFactor for Ld and ResNode,
22081 // and update uses of Ld's output chain to use the TokenFactor.
22082 if (Ld->hasAnyUseOfValue(1)) {
22083 SDValue NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
22084 SDValue(Ld, 1), SDValue(ResNode.getNode(), 1));
22085 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), NewChain);
22086 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(Ld, 1),
                                 SDValue(ResNode.getNode(), 1));
        }

        return DAG.getNode(ISD::BITCAST, dl, VT, ResNode);
      }
    }

    // Emit a zeroed vector and insert the desired subvector on its
    // original position.
    SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
22097 SDValue InsV = Insert128BitVector(Zeros, V1.getOperand(0), 0, DAG, dl);
    return DCI.CombineTo(N, InsV);
  }

  //===--------------------------------------------------------------------===//
  // Combine some shuffles into subvector extracts and inserts:
  //
22105 // vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u>
22106 if (isShuffleHigh128VectorInsertLow(SVOp)) {
22107 SDValue V = Extract128BitVector(V1, NumElems/2, DAG, dl);
22108 SDValue InsV = Insert128BitVector(DAG.getUNDEF(VT), V, 0, DAG, dl);
22109 return DCI.CombineTo(N, InsV);
22112 // vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1>
22113 if (isShuffleLow128VectorInsertHigh(SVOp)) {
22114 SDValue V = Extract128BitVector(V1, 0, DAG, dl);
22115 SDValue InsV = Insert128BitVector(DAG.getUNDEF(VT), V, NumElems/2, DAG, dl);
    return DCI.CombineTo(N, InsV);
  }

  return SDValue();
}

/// \brief Combine an arbitrary chain of shuffles into a single instruction if
/// possible.
///
/// This is the leaf of the recursive combine below. When we have found some
22126 /// chain of single-use x86 shuffle instructions and accumulated the combined
22127 /// shuffle mask represented by them, this will try to pattern match that mask
22128 /// into either a single instruction if there is a special purpose instruction
22129 /// for this operation, or into a PSHUFB instruction which is a fully general
22130 /// instruction but should only be used to replace chains over a certain depth.
22131 static bool combineX86ShuffleChain(SDValue Op, SDValue Root, ArrayRef<int> Mask,
22132 int Depth, bool HasPSHUFB, SelectionDAG &DAG,
22133 TargetLowering::DAGCombinerInfo &DCI,
22134 const X86Subtarget *Subtarget) {
22135 assert(!Mask.empty() && "Cannot combine an empty shuffle mask!");
22137 // Find the operand that enters the chain. Note that multiple uses are OK
22138 // here, we're not going to remove the operand we find.
22139 SDValue Input = Op.getOperand(0);
22140 while (Input.getOpcode() == ISD::BITCAST)
22141 Input = Input.getOperand(0);
22143 MVT VT = Input.getSimpleValueType();
  MVT RootVT = Root.getSimpleValueType();
  SDLoc DL(Root);

  // Just remove no-op shuffle masks.
  if (Mask.size() == 1) {
    DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Input),
                  /*AddTo*/ true);
    return true;
  }
22154 // Use the float domain if the operand type is a floating point type.
22155 bool FloatDomain = VT.isFloatingPoint();
22157 // For floating point shuffles, we don't have free copies in the shuffle
22158 // instructions or the ability to load as part of the instruction, so
22159 // canonicalize their shuffles to UNPCK or MOV variants.
22161 // Note that even with AVX we prefer the PSHUFD form of shuffle for integer
22162 // vectors because it can have a load folded into it that UNPCK cannot. This
22163 // doesn't preclude something switching to the shorter encoding post-RA.
22165 if (Mask.equals(0, 0) || Mask.equals(1, 1)) {
22166 bool Lo = Mask.equals(0, 0);
22169 // Check if we have SSE3 which will let us use MOVDDUP. That instruction
22170 // is no slower than UNPCKLPD but has the option to fold the input operand
22171 // into even an unaligned memory load.
22172 if (Lo && Subtarget->hasSSE3()) {
22173 Shuffle = X86ISD::MOVDDUP;
22174 ShuffleVT = MVT::v2f64;
22176 // We have MOVLHPS and MOVHLPS throughout SSE and they encode smaller
22177 // than the UNPCK variants.
22178 Shuffle = Lo ? X86ISD::MOVLHPS : X86ISD::MOVHLPS;
22179 ShuffleVT = MVT::v4f32;
22181 if (Depth == 1 && Root->getOpcode() == Shuffle)
22182 return false; // Nothing to do!
22183 Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
22184 DCI.AddToWorklist(Op.getNode());
22185 if (Shuffle == X86ISD::MOVDDUP)
22186 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op);
22188 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op, Op);
22189 DCI.AddToWorklist(Op.getNode());
22190 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
22194 if (Subtarget->hasSSE3() &&
22195 (Mask.equals(0, 0, 2, 2) || Mask.equals(1, 1, 3, 3))) {
22196 bool Lo = Mask.equals(0, 0, 2, 2);
22197 unsigned Shuffle = Lo ? X86ISD::MOVSLDUP : X86ISD::MOVSHDUP;
22198 MVT ShuffleVT = MVT::v4f32;
22199 if (Depth == 1 && Root->getOpcode() == Shuffle)
22200 return false; // Nothing to do!
22201 Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
22202 DCI.AddToWorklist(Op.getNode());
22203 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op);
22204 DCI.AddToWorklist(Op.getNode());
22205 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
22209 if (Mask.equals(0, 0, 1, 1) || Mask.equals(2, 2, 3, 3)) {
22210 bool Lo = Mask.equals(0, 0, 1, 1);
22211 unsigned Shuffle = Lo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
22212 MVT ShuffleVT = MVT::v4f32;
22213 if (Depth == 1 && Root->getOpcode() == Shuffle)
22214 return false; // Nothing to do!
22215 Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
22216 DCI.AddToWorklist(Op.getNode());
22217 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op, Op);
22218 DCI.AddToWorklist(Op.getNode());
22219 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
22225 // We always canonicalize the 8 x i16 and 16 x i8 shuffles into their UNPCK
22226 // variants as none of these have single-instruction variants that are
22227 // superior to the UNPCK formulation.
22228 if (!FloatDomain &&
22229 (Mask.equals(0, 0, 1, 1, 2, 2, 3, 3) ||
22230 Mask.equals(4, 4, 5, 5, 6, 6, 7, 7) ||
22231 Mask.equals(0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7) ||
22232 Mask.equals(8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15,
22234 bool Lo = Mask[0] == 0;
22235 unsigned Shuffle = Lo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
22236 if (Depth == 1 && Root->getOpcode() == Shuffle)
22237 return false; // Nothing to do!
22239 switch (Mask.size()) {
22241 ShuffleVT = MVT::v8i16;
22244 ShuffleVT = MVT::v16i8;
22247 llvm_unreachable("Impossible mask size!");
22249 Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
22250 DCI.AddToWorklist(Op.getNode());
22251 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op, Op);
22252 DCI.AddToWorklist(Op.getNode());
22253 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
22258 // Don't try to re-form single instruction chains under any circumstances now
22259 // that we've done encoding canonicalization for them.
22263 // If we have 3 or more shuffle instructions or a chain involving PSHUFB, we
22264 // can replace them with a single PSHUFB instruction profitably. Intel's
  // manuals suggest only using PSHUFB if doing so replaces 5 instructions, but
22266 // in practice PSHUFB tends to be *very* fast so we're more aggressive.
22267 if ((Depth >= 3 || HasPSHUFB) && Subtarget->hasSSSE3()) {
22268 SmallVector<SDValue, 16> PSHUFBMask;
22269 assert(Mask.size() <= 16 && "Can't shuffle elements smaller than bytes!");
22270 int Ratio = 16 / Mask.size();
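    // The accumulated mask may have fewer (wider) elements than the 16 bytes
    // PSHUFB shuffles, so each mask entry is expanded to Ratio consecutive
    // byte indices. For example, a 4-element mask {2, 0, 1, 3} (Ratio == 4)
    // becomes the byte mask {8,9,10,11, 0,1,2,3, 4,5,6,7, 12,13,14,15}.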
22271 for (unsigned i = 0; i < 16; ++i) {
22272 if (Mask[i / Ratio] == SM_SentinelUndef) {
22273 PSHUFBMask.push_back(DAG.getUNDEF(MVT::i8));
22276 int M = Mask[i / Ratio] != SM_SentinelZero
22277 ? Ratio * Mask[i / Ratio] + i % Ratio
22279 PSHUFBMask.push_back(DAG.getConstant(M, MVT::i8));
22281 Op = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Input);
22282 DCI.AddToWorklist(Op.getNode());
22283 SDValue PSHUFBMaskOp =
22284 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, PSHUFBMask);
22285 DCI.AddToWorklist(PSHUFBMaskOp.getNode());
22286 Op = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, Op, PSHUFBMaskOp);
22287 DCI.AddToWorklist(Op.getNode());
22288 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
22293 // Failed to find any combines.
22297 /// \brief Fully generic combining of x86 shuffle instructions.
22299 /// This should be the last combine run over the x86 shuffle instructions. Once
22300 /// they have been fully optimized, this will recursively consider all chains
22301 /// of single-use shuffle instructions, build a generic model of the cumulative
22302 /// shuffle operation, and check for simpler instructions which implement this
22303 /// operation. We use this primarily for two purposes:
22305 /// 1) Collapse generic shuffles to specialized single instructions when
22306 /// equivalent. In most cases, this is just an encoding size win, but
22307 /// sometimes we will collapse multiple generic shuffles into a single
22308 /// special-purpose shuffle.
22309 /// 2) Look for sequences of shuffle instructions with 3 or more total
22310 /// instructions, and replace them with the slightly more expensive SSSE3
22311 /// PSHUFB instruction if available. We do this as the last combining step
22312 /// to ensure we avoid using PSHUFB if we can implement the shuffle with
/// a suitable short sequence of other instructions. The PSHUFB will either
22314 /// use a register or have to read from memory and so is slightly (but only
22315 /// slightly) more expensive than the other shuffle instructions.
22317 /// Because this is inherently a quadratic operation (for each shuffle in
22318 /// a chain, we recurse up the chain), the depth is limited to 8 instructions.
22319 /// This should never be an issue in practice as the shuffle lowering doesn't
22320 /// produce sequences of more than 8 instructions.
22322 /// FIXME: We will currently miss some cases where the redundant shuffling
22323 /// would simplify under the threshold for PSHUFB formation because of
22324 /// combine-ordering. To fix this, we should do the redundant instruction
22325 /// combining in this recursive walk.
22326 static bool combineX86ShufflesRecursively(SDValue Op, SDValue Root,
22327 ArrayRef<int> RootMask,
22328 int Depth, bool HasPSHUFB,
22330 TargetLowering::DAGCombinerInfo &DCI,
22331 const X86Subtarget *Subtarget) {
22332 // Bound the depth of our recursive combine because this is ultimately
22333 // quadratic in nature.
22337 // Directly rip through bitcasts to find the underlying operand.
22338 while (Op.getOpcode() == ISD::BITCAST && Op.getOperand(0).hasOneUse())
22339 Op = Op.getOperand(0);
22341 MVT VT = Op.getSimpleValueType();
22342 if (!VT.isVector())
22343 return false; // Bail if we hit a non-vector.
22344 // FIXME: This routine should be taught about 256-bit shuffles, or a 256-bit
22345 // version should be added.
22346 if (VT.getSizeInBits() != 128)
22349 assert(Root.getSimpleValueType().isVector() &&
22350 "Shuffles operate on vector types!");
22351 assert(VT.getSizeInBits() == Root.getSimpleValueType().getSizeInBits() &&
22352 "Can only combine shuffles of the same vector register size.");
22354 if (!isTargetShuffle(Op.getOpcode()))
22356 SmallVector<int, 16> OpMask;
22358 bool HaveMask = getTargetShuffleMask(Op.getNode(), VT, OpMask, IsUnary);
22359 // We only can combine unary shuffles which we can decode the mask for.
22360 if (!HaveMask || !IsUnary)
22363 assert(VT.getVectorNumElements() == OpMask.size() &&
22364 "Different mask size from vector size!");
22365 assert(((RootMask.size() > OpMask.size() &&
22366 RootMask.size() % OpMask.size() == 0) ||
22367 (OpMask.size() > RootMask.size() &&
22368 OpMask.size() % RootMask.size() == 0) ||
22369 OpMask.size() == RootMask.size()) &&
22370 "The smaller number of elements must divide the larger.");
22371 int RootRatio = std::max<int>(1, OpMask.size() / RootMask.size());
22372 int OpRatio = std::max<int>(1, RootMask.size() / OpMask.size());
22373 assert(((RootRatio == 1 && OpRatio == 1) ||
22374 (RootRatio == 1) != (OpRatio == 1)) &&
22375 "Must not have a ratio for both incoming and op masks!");
22377 SmallVector<int, 16> Mask;
22378 Mask.reserve(std::max(OpMask.size(), RootMask.size()));
22380 // Merge this shuffle operation's mask into our accumulated mask. Note that
22381 // this shuffle's mask will be the first applied to the input, followed by the
22382 // root mask to get us all the way to the root value arrangement. The reason
22383 // for this order is that we are recursing up the operation chain.
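  // For example, with both masks over 4 elements (so both ratios are 1), an
  // accumulated RootMask of {0, 0, 1, 1} applied on top of this op's OpMask of
  // {2, 3, 0, 1} merges to {OpMask[0], OpMask[0], OpMask[1], OpMask[1]}, i.e.
  // {2, 2, 3, 3}.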
22384 for (int i = 0, e = std::max(OpMask.size(), RootMask.size()); i < e; ++i) {
22385 int RootIdx = i / RootRatio;
22386 if (RootMask[RootIdx] < 0) {
22387 // This is a zero or undef lane, we're done.
22388 Mask.push_back(RootMask[RootIdx]);
22392 int RootMaskedIdx = RootMask[RootIdx] * RootRatio + i % RootRatio;
22393 int OpIdx = RootMaskedIdx / OpRatio;
22394 if (OpMask[OpIdx] < 0) {
22395 // The incoming lanes are zero or undef, it doesn't matter which ones we
22397 Mask.push_back(OpMask[OpIdx]);
22401 // Ok, we have non-zero lanes, map them through.
22402 Mask.push_back(OpMask[OpIdx] * OpRatio +
22403 RootMaskedIdx % OpRatio);
22406 // See if we can recurse into the operand to combine more things.
22407 switch (Op.getOpcode()) {
22408 case X86ISD::PSHUFB:
22410 case X86ISD::PSHUFD:
22411 case X86ISD::PSHUFHW:
22412 case X86ISD::PSHUFLW:
22413 if (Op.getOperand(0).hasOneUse() &&
22414 combineX86ShufflesRecursively(Op.getOperand(0), Root, Mask, Depth + 1,
22415 HasPSHUFB, DAG, DCI, Subtarget))
22419 case X86ISD::UNPCKL:
22420 case X86ISD::UNPCKH:
22421 assert(Op.getOperand(0) == Op.getOperand(1) && "We only combine unary shuffles!");
22422 // We can't check for single use, we have to check that this shuffle is the only user.
22423 if (Op->isOnlyUserOf(Op.getOperand(0).getNode()) &&
22424 combineX86ShufflesRecursively(Op.getOperand(0), Root, Mask, Depth + 1,
22425 HasPSHUFB, DAG, DCI, Subtarget))
22430 // Minor canonicalization of the accumulated shuffle mask to make it easier
  // to match below. All this does is detect masks with sequential pairs of
22432 // elements, and shrink them to the half-width mask. It does this in a loop
22433 // so it will reduce the size of the mask to the minimal width mask which
22434 // performs an equivalent shuffle.
22435 SmallVector<int, 16> WidenedMask;
22436 while (Mask.size() > 1 && canWidenShuffleElements(Mask, WidenedMask)) {
22437 Mask = std::move(WidenedMask);
22438 WidenedMask.clear();
22441 return combineX86ShuffleChain(Op, Root, Mask, Depth, HasPSHUFB, DAG, DCI,
22445 /// \brief Get the PSHUF-style mask from PSHUF node.
/// This is a very minor wrapper around getTargetShuffleMask to ease forming v4
22448 /// PSHUF-style masks that can be reused with such instructions.
22449 static SmallVector<int, 4> getPSHUFShuffleMask(SDValue N) {
  SmallVector<int, 4> Mask;
  bool IsUnary;
  bool HaveMask = getTargetShuffleMask(N.getNode(), N.getSimpleValueType(), Mask, IsUnary);
  (void)HaveMask;
  assert(HaveMask);
  switch (N.getOpcode()) {
  case X86ISD::PSHUFD:
    return Mask;
  case X86ISD::PSHUFLW:
    Mask.resize(4);
    return Mask;
  case X86ISD::PSHUFHW:
    Mask.erase(Mask.begin(), Mask.begin() + 4);
    for (int &M : Mask)
      M -= 4;
    return Mask;
  default:
    llvm_unreachable("No valid shuffle instruction found!");
  }
}
22472 /// \brief Search for a combinable shuffle across a chain ending in pshufd.
22474 /// We walk up the chain and look for a combinable shuffle, skipping over
22475 /// shuffles that we could hoist this shuffle's transformation past without
22476 /// altering anything.
static SDValue
combineRedundantDWordShuffle(SDValue N, MutableArrayRef<int> Mask,
                             SelectionDAG &DAG,
                             TargetLowering::DAGCombinerInfo &DCI) {
22481 assert(N.getOpcode() == X86ISD::PSHUFD &&
22482 "Called with something other than an x86 128-bit half shuffle!");
22485 // Walk up a single-use chain looking for a combinable shuffle. Keep a stack
22486 // of the shuffles in the chain so that we can form a fresh chain to replace
22488 SmallVector<SDValue, 8> Chain;
22489 SDValue V = N.getOperand(0);
22490 for (; V.hasOneUse(); V = V.getOperand(0)) {
22491 switch (V.getOpcode()) {
22493 return SDValue(); // Nothing combined!
22496 // Skip bitcasts as we always know the type for the target specific
22500 case X86ISD::PSHUFD:
22501 // Found another dword shuffle.
22504 case X86ISD::PSHUFLW:
22505 // Check that the low words (being shuffled) are the identity in the
22506 // dword shuffle, and the high words are self-contained.
22507 if (Mask[0] != 0 || Mask[1] != 1 ||
22508 !(Mask[2] >= 2 && Mask[2] < 4 && Mask[3] >= 2 && Mask[3] < 4))
22511 Chain.push_back(V);
22514 case X86ISD::PSHUFHW:
22515 // Check that the high words (being shuffled) are the identity in the
22516 // dword shuffle, and the low words are self-contained.
22517 if (Mask[2] != 2 || Mask[3] != 3 ||
22518 !(Mask[0] >= 0 && Mask[0] < 2 && Mask[1] >= 0 && Mask[1] < 2))
22521 Chain.push_back(V);
22524 case X86ISD::UNPCKL:
22525 case X86ISD::UNPCKH:
22526 // For either i8 -> i16 or i16 -> i32 unpacks, we can combine a dword
22527 // shuffle into a preceding word shuffle.
22528 if (V.getValueType() != MVT::v16i8 && V.getValueType() != MVT::v8i16)
22531 // Search for a half-shuffle which we can combine with.
22532 unsigned CombineOp =
22533 V.getOpcode() == X86ISD::UNPCKL ? X86ISD::PSHUFLW : X86ISD::PSHUFHW;
22534 if (V.getOperand(0) != V.getOperand(1) ||
22535 !V->isOnlyUserOf(V.getOperand(0).getNode()))
22537 Chain.push_back(V);
22538 V = V.getOperand(0);
22540 switch (V.getOpcode()) {
22542 return SDValue(); // Nothing to combine.
22544 case X86ISD::PSHUFLW:
22545 case X86ISD::PSHUFHW:
22546 if (V.getOpcode() == CombineOp)
22549 Chain.push_back(V);
22553 V = V.getOperand(0);
22557 } while (V.hasOneUse());
22560 // Break out of the loop if we break out of the switch.
22564 if (!V.hasOneUse())
22565 // We fell out of the loop without finding a viable combining instruction.
22568 // Merge this node's mask and our incoming mask.
22569 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
22570 for (int &M : Mask)
22572 V = DAG.getNode(V.getOpcode(), DL, V.getValueType(), V.getOperand(0),
22573 getV4X86ShuffleImm8ForMask(Mask, DAG));
22575 // Rebuild the chain around this new shuffle.
22576 while (!Chain.empty()) {
22577 SDValue W = Chain.pop_back_val();
22579 if (V.getValueType() != W.getOperand(0).getValueType())
22580 V = DAG.getNode(ISD::BITCAST, DL, W.getOperand(0).getValueType(), V);
22582 switch (W.getOpcode()) {
22584 llvm_unreachable("Only PSHUF and UNPCK instructions get here!");
22586 case X86ISD::UNPCKL:
22587 case X86ISD::UNPCKH:
22588 V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, V);
22591 case X86ISD::PSHUFD:
22592 case X86ISD::PSHUFLW:
22593 case X86ISD::PSHUFHW:
22594 V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, W.getOperand(1));
22598 if (V.getValueType() != N.getValueType())
22599 V = DAG.getNode(ISD::BITCAST, DL, N.getValueType(), V);
22601 // Return the new chain to replace N.
22605 /// \brief Search for a combinable shuffle across a chain ending in pshuflw or pshufhw.
22607 /// We walk up the chain, skipping shuffles of the other half and looking
22608 /// through shuffles which switch halves trying to find a shuffle of the same
22609 /// pair of dwords.
22610 static bool combineRedundantHalfShuffle(SDValue N, MutableArrayRef<int> Mask,
                                         SelectionDAG &DAG,
                                         TargetLowering::DAGCombinerInfo &DCI) {
  assert(
      (N.getOpcode() == X86ISD::PSHUFLW || N.getOpcode() == X86ISD::PSHUFHW) &&
22615 "Called with something other than an x86 128-bit half shuffle!");
22617 unsigned CombineOpcode = N.getOpcode();
22619 // Walk up a single-use chain looking for a combinable shuffle.
22620 SDValue V = N.getOperand(0);
22621 for (; V.hasOneUse(); V = V.getOperand(0)) {
22622 switch (V.getOpcode()) {
22624 return false; // Nothing combined!
22627 // Skip bitcasts as we always know the type for the target specific
22631 case X86ISD::PSHUFLW:
22632 case X86ISD::PSHUFHW:
22633 if (V.getOpcode() == CombineOpcode)
22636 // Other-half shuffles are no-ops.
22639 // Break out of the loop if we break out of the switch.
22643 if (!V.hasOneUse())
22644 // We fell out of the loop without finding a viable combining instruction.
22647 // Combine away the bottom node as its shuffle will be accumulated into
22648 // a preceding shuffle.
22649 DCI.CombineTo(N.getNode(), N.getOperand(0), /*AddTo*/ true);
22651 // Record the old value.
22654 // Merge this node's mask and our incoming mask (adjusted to account for all
22655 // the pshufd instructions encountered).
22656 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
22657 for (int &M : Mask)
22659 V = DAG.getNode(V.getOpcode(), DL, MVT::v8i16, V.getOperand(0),
22660 getV4X86ShuffleImm8ForMask(Mask, DAG));
22662 // Check that the shuffles didn't cancel each other out. If not, we need to
22663 // combine to the new one.
22665 // Replace the combinable shuffle with the combined one, updating all users
22666 // so that we re-evaluate the chain here.
22667 DCI.CombineTo(Old.getNode(), V, /*AddTo*/ true);
22672 /// \brief Try to combine x86 target specific shuffles.
22673 static SDValue PerformTargetShuffleCombine(SDValue N, SelectionDAG &DAG,
22674 TargetLowering::DAGCombinerInfo &DCI,
                                           const X86Subtarget *Subtarget) {
  SDLoc DL(N);
  MVT VT = N.getSimpleValueType();
22678 SmallVector<int, 4> Mask;
22680 switch (N.getOpcode()) {
22681 case X86ISD::PSHUFD:
22682 case X86ISD::PSHUFLW:
22683 case X86ISD::PSHUFHW:
22684 Mask = getPSHUFShuffleMask(N);
    assert(Mask.size() == 4);
    break;
  default:
    return SDValue();
  }

22691 // Nuke no-op shuffles that show up after combining.
22692 if (isNoopShuffleMask(Mask))
22693 return DCI.CombineTo(N.getNode(), N.getOperand(0), /*AddTo*/ true);
22695 // Look for simplifications involving one or two shuffle instructions.
22696 SDValue V = N.getOperand(0);
22697 switch (N.getOpcode()) {
22700 case X86ISD::PSHUFLW:
22701 case X86ISD::PSHUFHW:
22702 assert(VT == MVT::v8i16);
22705 if (combineRedundantHalfShuffle(N, Mask, DAG, DCI))
22706 return SDValue(); // We combined away this shuffle, so we're done.
22708 // See if this reduces to a PSHUFD which is no more expensive and can
22709 // combine with more operations. Note that it has to at least flip the
22710 // dwords as otherwise it would have been removed as a no-op.
22711 if (Mask[0] == 2 && Mask[1] == 3 && Mask[2] == 0 && Mask[3] == 1) {
22712 int DMask[] = {0, 1, 2, 3};
22713 int DOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 2;
22714 DMask[DOffset + 0] = DOffset + 1;
22715 DMask[DOffset + 1] = DOffset + 0;
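      // For example, a PSHUFLW with word mask <2,3,0,1> just swaps the two low
      // dwords, which is the same as a PSHUFD with dword mask <1,0,2,3> on the
      // v4i32 view of the vector (and analogously for PSHUFHW on the high half).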
22716 V = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V);
22717 DCI.AddToWorklist(V.getNode());
22718 V = DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V,
22719 getV4X86ShuffleImm8ForMask(DMask, DAG));
22720 DCI.AddToWorklist(V.getNode());
22721 return DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V);
22724 // Look for shuffle patterns which can be implemented as a single unpack.
22725 // FIXME: This doesn't handle the location of the PSHUFD generically, and
22726 // only works when we have a PSHUFD followed by two half-shuffles.
22727 if (Mask[0] == Mask[1] && Mask[2] == Mask[3] &&
22728 (V.getOpcode() == X86ISD::PSHUFLW ||
22729 V.getOpcode() == X86ISD::PSHUFHW) &&
        V.getOpcode() != N.getOpcode() &&
        V.hasOneUse()) {
22732 SDValue D = V.getOperand(0);
22733 while (D.getOpcode() == ISD::BITCAST && D.hasOneUse())
22734 D = D.getOperand(0);
22735 if (D.getOpcode() == X86ISD::PSHUFD && D.hasOneUse()) {
22736 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
22737 SmallVector<int, 4> DMask = getPSHUFShuffleMask(D);
22738 int NOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
      int VOffset = V.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
      int WordMask[8];
22741 for (int i = 0; i < 4; ++i) {
22742 WordMask[i + NOffset] = Mask[i] + NOffset;
        WordMask[i + VOffset] = VMask[i] + VOffset;
      }

      // Map the word mask through the DWord mask.
      int MappedMask[8];
22747 for (int i = 0; i < 8; ++i)
22748 MappedMask[i] = 2 * DMask[WordMask[i] / 2] + WordMask[i] % 2;
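      // WordMask describes the combined half-shuffles in terms of D's output:
      // each word index w there lives in dword w/2, which D took from dword
      // DMask[w/2] of its input, so relative to D's input the word is
      // 2 * DMask[w/2] + (w % 2), as computed above.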
22749 const int UnpackLoMask[] = {0, 0, 1, 1, 2, 2, 3, 3};
22750 const int UnpackHiMask[] = {4, 4, 5, 5, 6, 6, 7, 7};
22751 if (std::equal(std::begin(MappedMask), std::end(MappedMask),
22752 std::begin(UnpackLoMask)) ||
22753 std::equal(std::begin(MappedMask), std::end(MappedMask),
22754 std::begin(UnpackHiMask))) {
22755 // We can replace all three shuffles with an unpack.
22756 V = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, D.getOperand(0));
22757 DCI.AddToWorklist(V.getNode());
        return DAG.getNode(MappedMask[0] == 0 ? X86ISD::UNPCKL
                                              : X86ISD::UNPCKH,
                           DL, MVT::v8i16, V, V);
      }
    }
  }
  break;

22767 case X86ISD::PSHUFD:
    if (SDValue NewN = combineRedundantDWordShuffle(N, Mask, DAG, DCI))
      return NewN;

    break;
  }

  return SDValue();
}

22777 /// \brief Try to combine a shuffle into a target-specific add-sub node.
22779 /// We combine this directly on the abstract vector shuffle nodes so it is
22780 /// easier to generically match. We also insert dummy vector shuffle nodes for
22781 /// the operands which explicitly discard the lanes which are unused by this
22782 /// operation to try to flow through the rest of the combiner the fact that
22783 /// they're unused.
static SDValue combineShuffleToAddSub(SDNode *N, SelectionDAG &DAG) {
  SDLoc DL(N);
22786 EVT VT = N->getValueType(0);
22788 // We only handle target-independent shuffles.
22789 // FIXME: It would be easy and harmless to use the target shuffle mask
22790 // extraction tool to support more.
  if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
    return SDValue();

22794 auto *SVN = cast<ShuffleVectorSDNode>(N);
22795 ArrayRef<int> Mask = SVN->getMask();
22796 SDValue V1 = N->getOperand(0);
22797 SDValue V2 = N->getOperand(1);
22799 // We require the first shuffle operand to be the SUB node, and the second to
22800 // be the ADD node.
22801 // FIXME: We should support the commuted patterns.
  if (V1->getOpcode() != ISD::FSUB || V2->getOpcode() != ISD::FADD)
    return SDValue();

22805 // If there are other uses of these operations we can't fold them.
  if (!V1->hasOneUse() || !V2->hasOneUse())
    return SDValue();

22809 // Ensure that both operations have the same operands. Note that we can
22810 // commute the FADD operands.
22811 SDValue LHS = V1->getOperand(0), RHS = V1->getOperand(1);
22812 if ((V2->getOperand(0) != LHS || V2->getOperand(1) != RHS) &&
      (V2->getOperand(0) != RHS || V2->getOperand(1) != LHS))
    return SDValue();

22816 // We're looking for blends between FADD and FSUB nodes. We insist on these
22817 // nodes being lined up in a specific expected pattern.
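  // Concretely, the accepted masks take every even lane from the FSUB result
  // and every odd lane from the FADD result (e.g. <0, 5, 2, 7> for v4f32),
  // which is exactly the lane pattern ADDSUBPS/ADDSUBPD produce.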
22818 if (!(isShuffleEquivalent(V1, V2, Mask, 0, 3) ||
22819 isShuffleEquivalent(V1, V2, Mask, 0, 5, 2, 7) ||
        isShuffleEquivalent(V1, V2, Mask, 0, 9, 2, 11, 4, 13, 6, 15)))
    return SDValue();

22823 // Only specific types are legal at this point, assert so we notice if and
22824 // when these change.
22825 assert((VT == MVT::v4f32 || VT == MVT::v2f64 || VT == MVT::v8f32 ||
22826 VT == MVT::v4f64) &&
22827 "Unknown vector type encountered!");
22829 return DAG.getNode(X86ISD::ADDSUB, DL, VT, LHS, RHS);
22832 /// PerformShuffleCombine - Performs several different shuffle combines.
22833 static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
22834 TargetLowering::DAGCombinerInfo &DCI,
                                     const X86Subtarget *Subtarget) {
  SDLoc dl(N);
22837 SDValue N0 = N->getOperand(0);
22838 SDValue N1 = N->getOperand(1);
22839 EVT VT = N->getValueType(0);
22841 // Don't create instructions with illegal types after legalize types has run.
22842 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (!DCI.isBeforeLegalize() && !TLI.isTypeLegal(VT.getVectorElementType()))
    return SDValue();

22846 // If we have legalized the vector types, look for blends of FADD and FSUB
22847 // nodes that we can fuse into an ADDSUB node.
22848 if (TLI.isTypeLegal(VT) && Subtarget->hasSSE3())
    if (SDValue AddSub = combineShuffleToAddSub(N, DAG))
      return AddSub;

22852 // Combine 256-bit vector shuffles. This is only profitable when in AVX mode
22853 if (Subtarget->hasFp256() && VT.is256BitVector() &&
22854 N->getOpcode() == ISD::VECTOR_SHUFFLE)
22855 return PerformShuffleCombine256(N, DAG, DCI, Subtarget);
22857 // During Type Legalization, when promoting illegal vector types,
22858 // the backend might introduce new shuffle dag nodes and bitcasts.
22860 // This code performs the following transformation:
22861 // fold: (shuffle (bitcast (BINOP A, B)), Undef, <Mask>) ->
22862 // (shuffle (BINOP (bitcast A), (bitcast B)), Undef, <Mask>)
22864 // We do this only if both the bitcast and the BINOP dag nodes have
22865 // one use. Also, perform this transformation only if the new binary
22866 // operation is legal. This is to avoid introducing dag nodes that
22867 // potentially need to be further expanded (or custom lowered) into a
22868 // less optimal sequence of dag nodes.
22869 if (!DCI.isBeforeLegalize() && DCI.isBeforeLegalizeOps() &&
22870 N1.getOpcode() == ISD::UNDEF && N0.hasOneUse() &&
22871 N0.getOpcode() == ISD::BITCAST) {
22872 SDValue BC0 = N0.getOperand(0);
22873 EVT SVT = BC0.getValueType();
22874 unsigned Opcode = BC0.getOpcode();
22875 unsigned NumElts = VT.getVectorNumElements();
22877 if (BC0.hasOneUse() && SVT.isVector() &&
22878 SVT.getVectorNumElements() * 2 == NumElts &&
22879 TLI.isOperationLegal(Opcode, VT)) {
22880 bool CanFold = false;
22892 unsigned SVTNumElts = SVT.getVectorNumElements();
22893 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
22894 for (unsigned i = 0, e = SVTNumElts; i != e && CanFold; ++i)
22895 CanFold = SVOp->getMaskElt(i) == (int)(i * 2);
22896 for (unsigned i = SVTNumElts, e = NumElts; i != e && CanFold; ++i)
        CanFold = SVOp->getMaskElt(i) < 0;

      if (CanFold) {
22900 SDValue BC00 = DAG.getNode(ISD::BITCAST, dl, VT, BC0.getOperand(0));
22901 SDValue BC01 = DAG.getNode(ISD::BITCAST, dl, VT, BC0.getOperand(1));
22902 SDValue NewBinOp = DAG.getNode(BC0.getOpcode(), dl, VT, BC00, BC01);
        return DAG.getVectorShuffle(VT, dl, NewBinOp, N1, &SVOp->getMask()[0]);
      }
    }
  }

22908 // Only handle 128 wide vector from here on.
  if (!VT.is128BitVector())
    return SDValue();

22912 // Combine a vector_shuffle that is equal to build_vector load1, load2, load3,
22913 // load4, <0, 1, 2, 3> into a 128-bit load if the load addresses are
22914 // consecutive, non-overlapping, and in the right order.
22915 SmallVector<SDValue, 16> Elts;
22916 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
22917 Elts.push_back(getShuffleScalarElt(N, i, DAG, 0));
  SDValue LD = EltsFromConsecutiveLoads(VT, Elts, dl, DAG, true);
  if (LD.getNode())
    return LD;

22923 if (isTargetShuffle(N->getOpcode())) {
    SDValue Shuffle =
        PerformTargetShuffleCombine(SDValue(N, 0), DAG, DCI, Subtarget);
    if (Shuffle.getNode())
      return Shuffle;

22929 // Try recursively combining arbitrary sequences of x86 shuffle
22930 // instructions into higher-order shuffles. We do this after combining
22931 // specific PSHUF instruction sequences into their minimal form so that we
22932 // can evaluate how many specialized shuffle instructions are involved in
22933 // a particular chain.
22934 SmallVector<int, 1> NonceMask; // Just a placeholder.
22935 NonceMask.push_back(0);
22936 if (combineX86ShufflesRecursively(SDValue(N, 0), SDValue(N, 0), NonceMask,
                                      /*Depth*/ 1, /*HasPSHUFB*/ false, DAG,
                                      DCI, Subtarget))
      return SDValue(); // This routine will use CombineTo to replace N.
  }

  return SDValue();
}

22945 /// PerformTruncateCombine - Converts truncate operation to
22946 /// a sequence of vector shuffle operations.
22947 /// It is possible when we truncate 256-bit vector to 128-bit vector
22948 static SDValue PerformTruncateCombine(SDNode *N, SelectionDAG &DAG,
22949 TargetLowering::DAGCombinerInfo &DCI,
                                      const X86Subtarget *Subtarget) {
  return SDValue();
}

22954 /// XFormVExtractWithShuffleIntoLoad - Check if a vector extract from a target
22955 /// specific shuffle of a load can be folded into a single element load.
22956 /// Similar handling for VECTOR_SHUFFLE is performed by DAGCombiner, but
22957 /// shuffles have been custom lowered so we need to handle those here.
22958 static SDValue XFormVExtractWithShuffleIntoLoad(SDNode *N, SelectionDAG &DAG,
22959 TargetLowering::DAGCombinerInfo &DCI) {
22960 if (DCI.isBeforeLegalizeOps())
22963 SDValue InVec = N->getOperand(0);
22964 SDValue EltNo = N->getOperand(1);
22966 if (!isa<ConstantSDNode>(EltNo))
22969 EVT OriginalVT = InVec.getValueType();
22971 if (InVec.getOpcode() == ISD::BITCAST) {
22972 // Don't duplicate a load with other uses.
22973 if (!InVec.hasOneUse())
22975 EVT BCVT = InVec.getOperand(0).getValueType();
22976 if (BCVT.getVectorNumElements() != OriginalVT.getVectorNumElements())
22978 InVec = InVec.getOperand(0);
22981 EVT CurrentVT = InVec.getValueType();
22983 if (!isTargetShuffle(InVec.getOpcode()))
22986 // Don't duplicate a load with other uses.
22987 if (!InVec.hasOneUse())
  SmallVector<int, 16> ShuffleMask;
  bool UnaryShuffle;
22992 if (!getTargetShuffleMask(InVec.getNode(), CurrentVT.getSimpleVT(),
                            ShuffleMask, UnaryShuffle))
    return SDValue();

22996 // Select the input vector, guarding against out of range extract vector.
22997 unsigned NumElems = CurrentVT.getVectorNumElements();
22998 int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
22999 int Idx = (Elt > (int)NumElems) ? -1 : ShuffleMask[Elt];
23000 SDValue LdNode = (Idx < (int)NumElems) ? InVec.getOperand(0)
23001 : InVec.getOperand(1);
23003 // If inputs to shuffle are the same for both ops, then allow 2 uses
23004 unsigned AllowedUses = InVec.getNumOperands() > 1 &&
23005 InVec.getOperand(0) == InVec.getOperand(1) ? 2 : 1;
23007 if (LdNode.getOpcode() == ISD::BITCAST) {
23008 // Don't duplicate a load with other uses.
23009 if (!LdNode.getNode()->hasNUsesOfValue(AllowedUses, 0))
23012 AllowedUses = 1; // only allow 1 load use if we have a bitcast
23013 LdNode = LdNode.getOperand(0);
23016 if (!ISD::isNormalLoad(LdNode.getNode()))
23019 LoadSDNode *LN0 = cast<LoadSDNode>(LdNode);
23021 if (!LN0 ||!LN0->hasNUsesOfValue(AllowedUses, 0) || LN0->isVolatile())
23024 EVT EltVT = N->getValueType(0);
23025 // If there's a bitcast before the shuffle, check if the load type and
23026 // alignment is valid.
23027 unsigned Align = LN0->getAlignment();
23028 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23029 unsigned NewAlign = TLI.getDataLayout()->getABITypeAlignment(
23030 EltVT.getTypeForEVT(*DAG.getContext()));
23032 if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, EltVT))
23035 // All checks match so transform back to vector_shuffle so that DAG combiner
  // can finish the job
  SDLoc dl(N);

  // Create shuffle node taking into account the case that it's a unary shuffle
23040 SDValue Shuffle = (UnaryShuffle) ? DAG.getUNDEF(CurrentVT)
23041 : InVec.getOperand(1);
23042 Shuffle = DAG.getVectorShuffle(CurrentVT, dl,
                                 InVec.getOperand(0), Shuffle,
                                 &ShuffleMask[0]);
23045 Shuffle = DAG.getNode(ISD::BITCAST, dl, OriginalVT, Shuffle);
  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, N->getValueType(0), Shuffle,
                     EltNo);
}
23050 /// \brief Detect bitcasts between i32 to x86mmx low word. Since MMX types are
23051 /// special and don't usually play with other vector types, it's better to
23052 /// handle them early to be sure we emit efficient code by avoiding
23053 /// store-load conversions.
23054 static SDValue PerformBITCASTCombine(SDNode *N, SelectionDAG &DAG) {
23055 if (N->getValueType(0) != MVT::x86mmx ||
23056 N->getOperand(0)->getOpcode() != ISD::BUILD_VECTOR ||
      N->getOperand(0)->getValueType(0) != MVT::v2i32)
    return SDValue();

23060 SDValue V = N->getOperand(0);
23061 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V.getOperand(1));
23062 if (C && C->getZExtValue() == 0 && V.getOperand(0).getValueType() == MVT::i32)
23063 return DAG.getNode(X86ISD::MMX_MOVW2D, SDLoc(V.getOperand(0)),
                       N->getValueType(0), V.getOperand(0));

  return SDValue();
}
23069 /// PerformEXTRACT_VECTOR_ELTCombine - Detect vector gather/scatter index
23070 /// generation and convert it from being a bunch of shuffles and extracts
23071 /// into a somewhat faster sequence. For i686, the best sequence is apparently
23072 /// storing the value and loading scalars back, while for x64 we should
23073 /// use 64-bit extracts and shifts.
23074 static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG,
23075 TargetLowering::DAGCombinerInfo &DCI) {
23076 SDValue NewOp = XFormVExtractWithShuffleIntoLoad(N, DAG, DCI);
  if (NewOp.getNode())
    return NewOp;

23080 SDValue InputVector = N->getOperand(0);
23082 // Detect mmx to i32 conversion through a v2i32 elt extract.
23083 if (InputVector.getOpcode() == ISD::BITCAST && InputVector.hasOneUse() &&
23084 N->getValueType(0) == MVT::i32 &&
23085 InputVector.getValueType() == MVT::v2i32) {
23087 // The bitcast source is a direct mmx result.
23088 SDValue MMXSrc = InputVector.getNode()->getOperand(0);
23089 if (MMXSrc.getValueType() == MVT::x86mmx)
23090 return DAG.getNode(X86ISD::MMX_MOVD2W, SDLoc(InputVector),
23091 N->getValueType(0),
23092 InputVector.getNode()->getOperand(0));
23094 // The mmx is indirect: (i64 extract_elt (v1i64 bitcast (x86mmx ...))).
23095 SDValue MMXSrcOp = MMXSrc.getOperand(0);
23096 if (MMXSrc.getOpcode() == ISD::EXTRACT_VECTOR_ELT && MMXSrc.hasOneUse() &&
23097 MMXSrc.getValueType() == MVT::i64 && MMXSrcOp.hasOneUse() &&
23098 MMXSrcOp.getOpcode() == ISD::BITCAST &&
23099 MMXSrcOp.getValueType() == MVT::v1i64 &&
23100 MMXSrcOp.getOperand(0).getValueType() == MVT::x86mmx)
23101 return DAG.getNode(X86ISD::MMX_MOVD2W, SDLoc(InputVector),
23102 N->getValueType(0),
23103 MMXSrcOp.getOperand(0));
23106 // Only operate on vectors of 4 elements, where the alternative shuffling
23107 // gets to be more expensive.
23108 if (InputVector.getValueType() != MVT::v4i32)
23111 // Check whether every use of InputVector is an EXTRACT_VECTOR_ELT with a
23112 // single use which is a sign-extend or zero-extend, and all elements are
23114 SmallVector<SDNode *, 4> Uses;
23115 unsigned ExtractedElements = 0;
23116 for (SDNode::use_iterator UI = InputVector.getNode()->use_begin(),
23117 UE = InputVector.getNode()->use_end(); UI != UE; ++UI) {
23118 if (UI.getUse().getResNo() != InputVector.getResNo())
23121 SDNode *Extract = *UI;
23122 if (Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
23125 if (Extract->getValueType(0) != MVT::i32)
23127 if (!Extract->hasOneUse())
23129 if (Extract->use_begin()->getOpcode() != ISD::SIGN_EXTEND &&
23130 Extract->use_begin()->getOpcode() != ISD::ZERO_EXTEND)
23132 if (!isa<ConstantSDNode>(Extract->getOperand(1)))
23135 // Record which element was extracted.
23136 ExtractedElements |=
23137 1 << cast<ConstantSDNode>(Extract->getOperand(1))->getZExtValue();
23139 Uses.push_back(Extract);
23142 // If not all the elements were used, this may not be worthwhile.
23143 if (ExtractedElements != 15)
23146 // Ok, we've now decided to do the transformation.
23147 // If 64-bit shifts are legal, use the extract-shift sequence,
23148 // otherwise bounce the vector off the cache.
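  // On targets with 64-bit shifts (x86-64) the branch below bitcasts the v4i32
  // to two i64 halves; lanes 0 and 2 are the truncated low halves, and lanes 1
  // and 3 are recovered with an arithmetic shift right by 32 followed by a
  // truncate, avoiding the store/reload round trip.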
23149 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Vals[4];
  SDLoc dl(InputVector);

23153 if (TLI.isOperationLegal(ISD::SRA, MVT::i64)) {
23154 SDValue Cst = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, InputVector);
23155 EVT VecIdxTy = DAG.getTargetLoweringInfo().getVectorIdxTy();
23156 SDValue BottomHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Cst,
23157 DAG.getConstant(0, VecIdxTy));
23158 SDValue TopHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Cst,
23159 DAG.getConstant(1, VecIdxTy));
23161 SDValue ShAmt = DAG.getConstant(32,
23162 DAG.getTargetLoweringInfo().getShiftAmountTy(MVT::i64));
23163 Vals[0] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BottomHalf);
23164 Vals[1] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
23165 DAG.getNode(ISD::SRA, dl, MVT::i64, BottomHalf, ShAmt));
23166 Vals[2] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, TopHalf);
23167 Vals[3] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
                          DAG.getNode(ISD::SRA, dl, MVT::i64, TopHalf, ShAmt));
  } else {
23170 // Store the value to a temporary stack slot.
23171 SDValue StackPtr = DAG.CreateStackTemporary(InputVector.getValueType());
23172 SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, InputVector, StackPtr,
23173 MachinePointerInfo(), false, false, 0);
23175 EVT ElementType = InputVector.getValueType().getVectorElementType();
23176 unsigned EltSize = ElementType.getSizeInBits() / 8;
23178 // Replace each use (extract) with a load of the appropriate element.
23179 for (unsigned i = 0; i < 4; ++i) {
23180 uint64_t Offset = EltSize * i;
23181 SDValue OffsetVal = DAG.getConstant(Offset, TLI.getPointerTy());
23183 SDValue ScalarAddr = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(),
23184 StackPtr, OffsetVal);
23186 // Load the scalar.
23187 Vals[i] = DAG.getLoad(ElementType, dl, Ch,
23188 ScalarAddr, MachinePointerInfo(),
                            false, false, false, 0);
    }
  }

23194 // Replace the extracts
23195 for (SmallVectorImpl<SDNode *>::iterator UI = Uses.begin(),
23196 UE = Uses.end(); UI != UE; ++UI) {
23197 SDNode *Extract = *UI;
23199 SDValue Idx = Extract->getOperand(1);
23200 uint64_t IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
    DAG.ReplaceAllUsesOfValueWith(SDValue(Extract, 0), Vals[IdxVal]);
  }

  // The replacement was made in place; don't return anything.
  return SDValue();
}
/// \brief Matches a VSELECT onto min/max, or returns 0 if the node doesn't match.
23209 static std::pair<unsigned, bool>
23210 matchIntegerMINMAX(SDValue Cond, EVT VT, SDValue LHS, SDValue RHS,
23211 SelectionDAG &DAG, const X86Subtarget *Subtarget) {
23212 if (!VT.isVector())
23213 return std::make_pair(0, false);
23215 bool NeedSplit = false;
23216 switch (VT.getSimpleVT().SimpleTy) {
23217 default: return std::make_pair(0, false);
23220 if (!Subtarget->hasVLX())
23221 return std::make_pair(0, false);
23225 if (!Subtarget->hasBWI())
23226 return std::make_pair(0, false);
23230 if (!Subtarget->hasAVX512())
23231 return std::make_pair(0, false);
23236 if (!Subtarget->hasAVX2())
23238 if (!Subtarget->hasAVX())
23239 return std::make_pair(0, false);
23244 if (!Subtarget->hasSSE2())
23245 return std::make_pair(0, false);
23248 // SSE2 has only a small subset of the operations.
23249 bool hasUnsigned = Subtarget->hasSSE41() ||
23250 (Subtarget->hasSSE2() && VT == MVT::v16i8);
23251 bool hasSigned = Subtarget->hasSSE41() ||
23252 (Subtarget->hasSSE2() && VT == MVT::v8i16);
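  // For reference: SSE2 only provides PMINUB/PMAXUB (unsigned v16i8) and
  // PMINSW/PMAXSW (signed v8i16); the remaining signed/unsigned combinations
  // for 8-, 16- and 32-bit elements only arrive with SSE4.1, which is what the
  // hasUnsigned/hasSigned checks above encode.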
23254 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
23257 // Check for x CC y ? x : y.
23258 if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
23259 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
23264 Opc = hasUnsigned ? X86ISD::UMIN : 0; break;
23267 Opc = hasUnsigned ? X86ISD::UMAX : 0; break;
23270 Opc = hasSigned ? X86ISD::SMIN : 0; break;
23273 Opc = hasSigned ? X86ISD::SMAX : 0; break;
23275 // Check for x CC y ? y : x -- a min/max with reversed arms.
23276 } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) &&
23277 DAG.isEqualTo(RHS, Cond.getOperand(0))) {
23282 Opc = hasUnsigned ? X86ISD::UMAX : 0; break;
23285 Opc = hasUnsigned ? X86ISD::UMIN : 0; break;
23288 Opc = hasSigned ? X86ISD::SMAX : 0; break;
23291 Opc = hasSigned ? X86ISD::SMIN : 0; break;
23295 return std::make_pair(Opc, NeedSplit);
23299 transformVSELECTtoBlendVECTOR_SHUFFLE(SDNode *N, SelectionDAG &DAG,
23300 const X86Subtarget *Subtarget) {
23302 SDValue Cond = N->getOperand(0);
23303 SDValue LHS = N->getOperand(1);
23304 SDValue RHS = N->getOperand(2);
23306 if (Cond.getOpcode() == ISD::SIGN_EXTEND) {
23307 SDValue CondSrc = Cond->getOperand(0);
23308 if (CondSrc->getOpcode() == ISD::SIGN_EXTEND_INREG)
23309 Cond = CondSrc->getOperand(0);
23312 if (!ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
23315 // A vselect where all conditions and data are constants can be optimized into
23316 // a single vector load by SelectionDAGLegalize::ExpandBUILD_VECTOR().
23317 if (ISD::isBuildVectorOfConstantSDNodes(LHS.getNode()) &&
23318 ISD::isBuildVectorOfConstantSDNodes(RHS.getNode()))
23321 unsigned MaskValue = 0;
23322 if (!BUILD_VECTORtoBlendMask(cast<BuildVectorSDNode>(Cond), MaskValue))
23325 MVT VT = N->getSimpleValueType(0);
23326 unsigned NumElems = VT.getVectorNumElements();
23327 SmallVector<int, 8> ShuffleMask(NumElems, -1);
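  // Build the equivalent shuffle mask: lane i stays as element i of LHS when
  // bit i of MaskValue is clear and becomes element NumElems + i of RHS when it
  // is set. For example (illustrative), with v4i32 and MaskValue 0b0110 the
  // resulting mask is <0, 5, 6, 3>.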
23328 for (unsigned i = 0; i < NumElems; ++i) {
23329 // Be sure we emit undef where we can.
23330 if (Cond.getOperand(i)->getOpcode() == ISD::UNDEF)
23331 ShuffleMask[i] = -1;
23333 ShuffleMask[i] = i + NumElems * ((MaskValue >> i) & 1);
23336 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23337 if (!TLI.isShuffleMaskLegal(ShuffleMask, VT))
23339 return DAG.getVectorShuffle(VT, dl, LHS, RHS, &ShuffleMask[0]);
23342 /// PerformSELECTCombine - Do target-specific dag combines on SELECT and VSELECT
23344 static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
23345 TargetLowering::DAGCombinerInfo &DCI,
                                    const X86Subtarget *Subtarget) {
  SDLoc DL(N);
23348 SDValue Cond = N->getOperand(0);
23349 // Get the LHS/RHS of the select.
23350 SDValue LHS = N->getOperand(1);
23351 SDValue RHS = N->getOperand(2);
23352 EVT VT = LHS.getValueType();
23353 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23355 // If we have SSE[12] support, try to form min/max nodes. SSE min/max
23356 // instructions match the semantics of the common C idiom x<y?x:y but not
23357 // x<=y?x:y, because of how they handle negative zero (which can be
23358 // ignored in unsafe-math mode).
23359 // We also try to create v2f32 min/max nodes, which we later widen to v4f32.
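  // For example, MINSS/MINPS implement exactly "a < b ? a : b": when either
  // input is NaN, or when comparing +0.0 with -0.0, they return the second
  // operand. That is why the code below is careful about operand order and only
  // accepts predicates whose NaN/signed-zero behaviour matches.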
23360 if (Cond.getOpcode() == ISD::SETCC && VT.isFloatingPoint() &&
23361 VT != MVT::f80 && (TLI.isTypeLegal(VT) || VT == MVT::v2f32) &&
23362 (Subtarget->hasSSE2() ||
23363 (Subtarget->hasSSE1() && VT.getScalarType() == MVT::f32))) {
23364 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
23366 unsigned Opcode = 0;
23367 // Check for x CC y ? x : y.
23368 if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
23369 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
23373 // Converting this to a min would handle NaNs incorrectly, and swapping
23374 // the operands would cause it to handle comparisons between positive
23375 // and negative zero incorrectly.
23376 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
23377 if (!DAG.getTarget().Options.UnsafeFPMath &&
23378 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
23380 std::swap(LHS, RHS);
23382 Opcode = X86ISD::FMIN;
23385 // Converting this to a min would handle comparisons between positive
23386 // and negative zero incorrectly.
23387 if (!DAG.getTarget().Options.UnsafeFPMath &&
23388 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS))
23390 Opcode = X86ISD::FMIN;
23393 // Converting this to a min would handle both negative zeros and NaNs
23394 // incorrectly, but we can swap the operands to fix both.
23395 std::swap(LHS, RHS);
23399 Opcode = X86ISD::FMIN;
23403 // Converting this to a max would handle comparisons between positive
23404 // and negative zero incorrectly.
23405 if (!DAG.getTarget().Options.UnsafeFPMath &&
23406 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS))
23408 Opcode = X86ISD::FMAX;
23411 // Converting this to a max would handle NaNs incorrectly, and swapping
23412 // the operands would cause it to handle comparisons between positive
23413 // and negative zero incorrectly.
23414 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
23415 if (!DAG.getTarget().Options.UnsafeFPMath &&
23416 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
23418 std::swap(LHS, RHS);
23420 Opcode = X86ISD::FMAX;
23423 // Converting this to a max would handle both negative zeros and NaNs
23424 // incorrectly, but we can swap the operands to fix both.
23425 std::swap(LHS, RHS);
23429 Opcode = X86ISD::FMAX;
23432 // Check for x CC y ? y : x -- a min/max with reversed arms.
23433 } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) &&
23434 DAG.isEqualTo(RHS, Cond.getOperand(0))) {
23438 // Converting this to a min would handle comparisons between positive
23439 // and negative zero incorrectly, and swapping the operands would
23440 // cause it to handle NaNs incorrectly.
23441 if (!DAG.getTarget().Options.UnsafeFPMath &&
23442 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) {
23443 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
23445 std::swap(LHS, RHS);
23447 Opcode = X86ISD::FMIN;
23450 // Converting this to a min would handle NaNs incorrectly.
23451 if (!DAG.getTarget().Options.UnsafeFPMath &&
23452 (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)))
23454 Opcode = X86ISD::FMIN;
23457 // Converting this to a min would handle both negative zeros and NaNs
23458 // incorrectly, but we can swap the operands to fix both.
23459 std::swap(LHS, RHS);
23463 Opcode = X86ISD::FMIN;
23467 // Converting this to a max would handle NaNs incorrectly.
23468 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
23470 Opcode = X86ISD::FMAX;
23473 // Converting this to a max would handle comparisons between positive
23474 // and negative zero incorrectly, and swapping the operands would
23475 // cause it to handle NaNs incorrectly.
23476 if (!DAG.getTarget().Options.UnsafeFPMath &&
23477 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS)) {
23478 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
23480 std::swap(LHS, RHS);
23482 Opcode = X86ISD::FMAX;
23485 // Converting this to a max would handle both negative zeros and NaNs
23486 // incorrectly, but we can swap the operands to fix both.
23487 std::swap(LHS, RHS);
23491 Opcode = X86ISD::FMAX;
23497 return DAG.getNode(Opcode, DL, N->getValueType(0), LHS, RHS);
23500 EVT CondVT = Cond.getValueType();
23501 if (Subtarget->hasAVX512() && VT.isVector() && CondVT.isVector() &&
23502 CondVT.getVectorElementType() == MVT::i1) {
23503 // v16i8 (select v16i1, v16i8, v16i8) does not have a proper
23504 // lowering on KNL. In this case we convert it to
23505 // v16i8 (select v16i8, v16i8, v16i8) and use AVX instruction.
23506 // The same situation occurs for all 128- and 256-bit vectors of i8 and i16.
23507 // Since SKX these selects have a proper lowering.
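    // Illustrative example (added note, assumed subtarget without BWI/VLX):
    // (select v16i1 M, v16i8 A, v16i8 B) is rebuilt as
    // (vselect (sign_extend M to v16i8), A, B), which can use VPBLENDVB.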
23508 EVT OpVT = LHS.getValueType();
23509 if ((OpVT.is128BitVector() || OpVT.is256BitVector()) &&
23510 (OpVT.getVectorElementType() == MVT::i8 ||
23511 OpVT.getVectorElementType() == MVT::i16) &&
23512 !(Subtarget->hasBWI() && Subtarget->hasVLX())) {
23513 Cond = DAG.getNode(ISD::SIGN_EXTEND, DL, OpVT, Cond);
23514 DCI.AddToWorklist(Cond.getNode());
23515 return DAG.getNode(N->getOpcode(), DL, OpVT, Cond, LHS, RHS);
23518 // If this is a select between two integer constants, try to do some optimizations.
23520 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(LHS)) {
23521 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(RHS))
23522 // Don't do this for crazy integer types.
23523 if (DAG.getTargetLoweringInfo().isTypeLegal(LHS.getValueType())) {
23524 // If this is efficiently invertible, canonicalize the LHSC/RHSC values
23525 // so that TrueC (the true value) is larger than FalseC.
23526 bool NeedsCondInvert = false;
23528 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue()) &&
23529 // Efficiently invertible.
23530 (Cond.getOpcode() == ISD::SETCC || // setcc -> invertible.
23531 (Cond.getOpcode() == ISD::XOR && // xor(X, C) -> invertible.
23532 isa<ConstantSDNode>(Cond.getOperand(1))))) {
23533 NeedsCondInvert = true;
23534 std::swap(TrueC, FalseC);
23537 // Optimize C ? 8 : 0 -> zext(C) << 3. Likewise for any pow2/0.
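        // Worked example (added, illustrative): Cond ? 16 : 0 becomes
        // (zext(Cond) << 4), so no conditional move or branch is needed.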
23538 if (FalseC->getAPIntValue() == 0 &&
23539 TrueC->getAPIntValue().isPowerOf2()) {
23540 if (NeedsCondInvert) // Invert the condition if needed.
23541 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
23542 DAG.getConstant(1, Cond.getValueType()));
23544 // Zero extend the condition if needed.
23545 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, LHS.getValueType(), Cond);
23547 unsigned ShAmt = TrueC->getAPIntValue().logBase2();
23548 return DAG.getNode(ISD::SHL, DL, LHS.getValueType(), Cond,
23549 DAG.getConstant(ShAmt, MVT::i8));
23552 // Optimize Cond ? cst+1 : cst -> zext(setcc(C)) + cst.
23553 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
23554 if (NeedsCondInvert) // Invert the condition if needed.
23555 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
23556 DAG.getConstant(1, Cond.getValueType()));
23558 // Zero extend the condition if needed.
23559 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
23560 FalseC->getValueType(0), Cond);
23561 return DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
23562 SDValue(FalseC, 0));
23565 // Optimize cases that will turn into an LEA instruction. This requires
23566 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
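        // Worked example (added, illustrative): Cond ? 14 : 5 has Diff == 9, so it
        // becomes zext(Cond)*9 + 5, where the multiply by 9 is a single
        // lea (cond, cond*8) per the switch below.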
23567 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
23568 uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue();
23569 if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff;
23571 bool isFastMultiplier = false;
23573 switch ((unsigned char)Diff) {
23575 case 1: // result = add base, cond
23576 case 2: // result = lea base( , cond*2)
23577 case 3: // result = lea base(cond, cond*2)
23578 case 4: // result = lea base( , cond*4)
23579 case 5: // result = lea base(cond, cond*4)
23580 case 8: // result = lea base( , cond*8)
23581 case 9: // result = lea base(cond, cond*8)
23582 isFastMultiplier = true;
23587 if (isFastMultiplier) {
23588 APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue();
23589 if (NeedsCondInvert) // Invert the condition if needed.
23590 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
23591 DAG.getConstant(1, Cond.getValueType()));
23593 // Zero extend the condition if needed.
23594 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
23596 // Scale the condition by the difference.
23598 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
23599 DAG.getConstant(Diff, Cond.getValueType()));
23601 // Add the base if non-zero.
23602 if (FalseC->getAPIntValue() != 0)
23603 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
23604 SDValue(FalseC, 0));
23611 // Canonicalize max and min:
23612 // (x > y) ? x : y -> (x >= y) ? x : y
23613 // (x < y) ? x : y -> (x <= y) ? x : y
23614 // This allows use of COND_S / COND_NS (see TranslateX86CC) which eliminates
23615 // the need for an extra compare
23616 // against zero. e.g.
23617 // (x - y) > 0 ? (x - y) : 0 -> (x - y) >= 0 ? (x - y) : 0
23619 // testl %edi, %edi
23621 // cmovgl %edi, %eax
23625 // cmovsl %eax, %edi
23626 if (N->getOpcode() == ISD::SELECT && Cond.getOpcode() == ISD::SETCC &&
23627 DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
23628 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
23629 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
23634 ISD::CondCode NewCC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGE;
23635 Cond = DAG.getSetCC(SDLoc(Cond), Cond.getValueType(),
23636 Cond.getOperand(0), Cond.getOperand(1), NewCC);
23637 return DAG.getNode(ISD::SELECT, DL, VT, Cond, LHS, RHS);
23642 // Early exit check
23643 if (!TLI.isTypeLegal(VT))
23646 // Match VSELECTs into subs with unsigned saturation.
23647 if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC &&
23648 // psubus is available in SSE2 and AVX2 for i8 and i16 vectors.
23649 ((Subtarget->hasSSE2() && (VT == MVT::v16i8 || VT == MVT::v8i16)) ||
23650 (Subtarget->hasAVX2() && (VT == MVT::v32i8 || VT == MVT::v16i16)))) {
23651 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
23653 // Check if one of the arms of the VSELECT is a zero vector. If it's on the
23654 // left side, invert the predicate to simplify the logic below.
23656 if (ISD::isBuildVectorAllZeros(LHS.getNode())) {
23658 CC = ISD::getSetCCInverse(CC, true);
23659 } else if (ISD::isBuildVectorAllZeros(RHS.getNode())) {
23663 if (Other.getNode() && Other->getNumOperands() == 2 &&
23664 DAG.isEqualTo(Other->getOperand(0), Cond.getOperand(0))) {
23665 SDValue OpLHS = Other->getOperand(0), OpRHS = Other->getOperand(1);
23666 SDValue CondRHS = Cond->getOperand(1);
23668 // Look for a general sub with unsigned saturation first.
23669 // x >= y ? x-y : 0 --> subus x, y
23670 // x > y ? x-y : 0 --> subus x, y
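      // Illustrative example (added note, assumed v16i8 operands):
      // (vselect (setugt x, y), (sub x, y), zeroinitializer) becomes
      // (X86ISD::SUBUS x, y), which lowers to PSUBUSB.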
23671 if ((CC == ISD::SETUGE || CC == ISD::SETUGT) &&
23672 Other->getOpcode() == ISD::SUB && DAG.isEqualTo(OpRHS, CondRHS))
23673 return DAG.getNode(X86ISD::SUBUS, DL, VT, OpLHS, OpRHS);
23675 if (auto *OpRHSBV = dyn_cast<BuildVectorSDNode>(OpRHS))
23676 if (auto *OpRHSConst = OpRHSBV->getConstantSplatNode()) {
23677 if (auto *CondRHSBV = dyn_cast<BuildVectorSDNode>(CondRHS))
23678 if (auto *CondRHSConst = CondRHSBV->getConstantSplatNode())
23679 // If the RHS is a constant we have to reverse the const
23680 // canonicalization.
23681 // x > C-1 ? x+-C : 0 --> subus x, C
23682 if (CC == ISD::SETUGT && Other->getOpcode() == ISD::ADD &&
23683 CondRHSConst->getAPIntValue() ==
23684 (-OpRHSConst->getAPIntValue() - 1))
23685 return DAG.getNode(
23686 X86ISD::SUBUS, DL, VT, OpLHS,
23687 DAG.getConstant(-OpRHSConst->getAPIntValue(), VT));
23689 // Another special case: If C was a sign bit, the sub has been
23690 // canonicalized into a xor.
23691 // FIXME: Would it be better to use computeKnownBits to determine
23692 // whether it's safe to decanonicalize the xor?
23693 // x s< 0 ? x^C : 0 --> subus x, C
23694 if (CC == ISD::SETLT && Other->getOpcode() == ISD::XOR &&
23695 ISD::isBuildVectorAllZeros(CondRHS.getNode()) &&
23696 OpRHSConst->getAPIntValue().isSignBit())
23697 // Note that we have to rebuild the RHS constant here to ensure we
23698 // don't rely on particular values of undef lanes.
23699 return DAG.getNode(
23700 X86ISD::SUBUS, DL, VT, OpLHS,
23701 DAG.getConstant(OpRHSConst->getAPIntValue(), VT));
23706 // Try to match a min/max vector operation.
23707 if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC) {
23708 std::pair<unsigned, bool> ret = matchIntegerMINMAX(Cond, VT, LHS, RHS, DAG, Subtarget);
23709 unsigned Opc = ret.first;
23710 bool NeedSplit = ret.second;
23712 if (Opc && NeedSplit) {
23713 unsigned NumElems = VT.getVectorNumElements();
23714 // Extract the LHS vectors
23715 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, DL);
23716 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, DL);
23718 // Extract the RHS vectors
23719 SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, DL);
23720 SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, DL);
23722 // Create min/max for each subvector
23723 LHS = DAG.getNode(Opc, DL, LHS1.getValueType(), LHS1, RHS1);
23724 RHS = DAG.getNode(Opc, DL, LHS2.getValueType(), LHS2, RHS2);
23726 // Merge the result
23727 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LHS, RHS);
23729 return DAG.getNode(Opc, DL, VT, LHS, RHS);
23732 // Simplify vector selection if the condition value type matches the vselect operand type.
23734 if (N->getOpcode() == ISD::VSELECT && CondVT == VT) {
23735 assert(Cond.getValueType().isVector() &&
23736 "vector select expects a vector selector!");
23738 bool TValIsAllOnes = ISD::isBuildVectorAllOnes(LHS.getNode());
23739 bool FValIsAllZeros = ISD::isBuildVectorAllZeros(RHS.getNode());
23741 // Try to invert the condition if the true value is not all 1s and the false value is not all 0s.
23743 if (!TValIsAllOnes && !FValIsAllZeros &&
23744 // Check if the selector will be produced by CMPP*/PCMP*
23745 Cond.getOpcode() == ISD::SETCC &&
23746 // Check if SETCC has already been promoted
23747 TLI.getSetCCResultType(*DAG.getContext(), VT) == CondVT) {
23748 bool TValIsAllZeros = ISD::isBuildVectorAllZeros(LHS.getNode());
23749 bool FValIsAllOnes = ISD::isBuildVectorAllOnes(RHS.getNode());
23751 if (TValIsAllZeros || FValIsAllOnes) {
23752 SDValue CC = Cond.getOperand(2);
23753 ISD::CondCode NewCC =
23754 ISD::getSetCCInverse(cast<CondCodeSDNode>(CC)->get(),
23755 Cond.getOperand(0).getValueType().isInteger());
23756 Cond = DAG.getSetCC(DL, CondVT, Cond.getOperand(0), Cond.getOperand(1), NewCC);
23757 std::swap(LHS, RHS);
23758 TValIsAllOnes = FValIsAllOnes;
23759 FValIsAllZeros = TValIsAllZeros;
23763 if (TValIsAllOnes || FValIsAllZeros) {
23766 if (TValIsAllOnes && FValIsAllZeros)
23768 else if (TValIsAllOnes)
23769 Ret = DAG.getNode(ISD::OR, DL, CondVT, Cond,
23770 DAG.getNode(ISD::BITCAST, DL, CondVT, RHS));
23771 else if (FValIsAllZeros)
23772 Ret = DAG.getNode(ISD::AND, DL, CondVT, Cond,
23773 DAG.getNode(ISD::BITCAST, DL, CondVT, LHS));
23775 return DAG.getNode(ISD::BITCAST, DL, VT, Ret);
23779 // If we know that this node is legal then we know that it is going to be
23780 // matched by one of the SSE/AVX BLEND instructions. These instructions only
23781 // depend on the highest bit in each word. Try to use SimplifyDemandedBits
23782 // to simplify previous instructions.
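  // Illustrative example (added note): if Cond is (sra X, 31) for a v4i32
  // blend, only the sign bit of each lane is demanded, and the sign bit of
  // (sra X, 31) equals the sign bit of X, so the shift can usually be removed.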
23783 if (N->getOpcode() == ISD::VSELECT && DCI.isBeforeLegalizeOps() &&
23784 !DCI.isBeforeLegalize() &&
23785 // We explicitly check against SSE4.1, v8i16 and v16i16 because, although
23786 // vselect nodes may be marked as Custom, they might only be legal when
23787 // Cond is a build_vector of constants. This will be taken care of in
23788 // a later condition.
23789 (TLI.isOperationLegalOrCustom(ISD::VSELECT, VT) &&
23790 Subtarget->hasSSE41() && VT != MVT::v16i16 && VT != MVT::v8i16) &&
23791 // Don't optimize vector of constants. Those are handled by
23792 // the generic code and all the bits must be properly set for
23793 // the generic optimizer.
23794 !ISD::isBuildVectorOfConstantSDNodes(Cond.getNode())) {
23795 unsigned BitWidth = Cond.getValueType().getScalarType().getSizeInBits();
23797 // Don't optimize vector selects that map to mask-registers.
23801 assert(BitWidth >= 8 && BitWidth <= 64 && "Invalid mask size");
23802 APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 1);
23804 APInt KnownZero, KnownOne;
23805 TargetLowering::TargetLoweringOpt TLO(DAG, DCI.isBeforeLegalize(),
23806 DCI.isBeforeLegalizeOps());
23807 if (TLO.ShrinkDemandedConstant(Cond, DemandedMask) ||
23808 TLI.SimplifyDemandedBits(Cond, DemandedMask, KnownZero, KnownOne,
23810 // If we changed the computation somewhere in the DAG, this change
23811 // will affect all users of Cond.
23812 // Make sure it is fine and update all the nodes so that we do not
23813 // use the generic VSELECT anymore. Otherwise, we may perform
23814 // wrong optimizations as we messed up with the actual expectation
23815 // for the vector boolean values.
23816 if (Cond != TLO.Old) {
23817 // Check all uses of that condition operand to check whether it will be
23818 // consumed by non-BLEND instructions, which may depend on all bits being set properly.
23820 for (SDNode::use_iterator I = Cond->use_begin(), E = Cond->use_end();
23822 if (I->getOpcode() != ISD::VSELECT)
23823 // TODO: Add other opcodes eventually lowered into BLEND.
23826 // Update all the users of the condition, before committing the change,
23827 // so that the VSELECT optimizations that expect the correct vector
23828 // boolean value will not be triggered.
23829 for (SDNode::use_iterator I = Cond->use_begin(), E = Cond->use_end();
23831 DAG.ReplaceAllUsesOfValueWith(
23833 DAG.getNode(X86ISD::SHRUNKBLEND, SDLoc(*I), I->getValueType(0),
23834 Cond, I->getOperand(1), I->getOperand(2)));
23835 DCI.CommitTargetLoweringOpt(TLO);
23838 // At this point, only Cond is changed. Change the condition
23839 // just for N to keep the opportunity to optimize all other
23840 // users their own way.
23841 DAG.ReplaceAllUsesOfValueWith(
23843 DAG.getNode(X86ISD::SHRUNKBLEND, SDLoc(N), N->getValueType(0),
23844 TLO.New, N->getOperand(1), N->getOperand(2)));
23849 // We should generate an X86ISD::BLENDI from a vselect if its argument
23850 // is a sign_extend_inreg of an any_extend of a BUILD_VECTOR of
23851 // constants. This specific pattern gets generated when we split a
23852 // selector for a 512-bit vector on a machine without AVX512 (but with
23853 // 256-bit vectors), during legalization:
23855 // (vselect (sign_extend (any_extend (BUILD_VECTOR)) i1) LHS RHS)
23857 // Iff we find this pattern and the build_vectors are built from
23858 // constants, we translate the vselect into a shuffle_vector that we
23859 // know will be matched by LowerVECTOR_SHUFFLEtoBlend.
23860 if ((N->getOpcode() == ISD::VSELECT ||
23861 N->getOpcode() == X86ISD::SHRUNKBLEND) &&
23862 !DCI.isBeforeLegalize()) {
23863 SDValue Shuffle = transformVSELECTtoBlendVECTOR_SHUFFLE(N, DAG, Subtarget);
23864 if (Shuffle.getNode())
23871 // Check whether a boolean test is testing a boolean value generated by
23872 // X86ISD::SETCC. If so, return the operand of that SETCC and the proper condition flag.
23875 // Simplify the following patterns:
23876 // (Op (CMP (SETCC Cond EFLAGS) 1) EQ) or
23877 // (Op (CMP (SETCC Cond EFLAGS) 0) NEQ)
23878 // to (Op EFLAGS Cond)
23880 // (Op (CMP (SETCC Cond EFLAGS) 0) EQ) or
23881 // (Op (CMP (SETCC Cond EFLAGS) 1) NEQ)
23882 // to (Op EFLAGS !Cond)
23884 // where Op could be BRCOND or CMOV.
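// Illustrative example (added note):
// (BRCOND (CMP (SETCC COND_B EFLAGS) 0) NEQ) becomes (BRCOND EFLAGS COND_B),
// i.e. the branch tests the carry flag directly without the extra CMP.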
23886 static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) {
23887 // Quit if not CMP and SUB with its value result used.
23888 if (Cmp.getOpcode() != X86ISD::CMP &&
23889 (Cmp.getOpcode() != X86ISD::SUB || Cmp.getNode()->hasAnyUseOfValue(0)))
23892 // Quit if not used as a boolean value.
23893 if (CC != X86::COND_E && CC != X86::COND_NE)
23896 // Check CMP operands. One of them should be 0 or 1 and the other should be
23897 // a SetCC or extended from it.
23898 SDValue Op1 = Cmp.getOperand(0);
23899 SDValue Op2 = Cmp.getOperand(1);
23902 const ConstantSDNode* C = nullptr;
23903 bool needOppositeCond = (CC == X86::COND_E);
23904 bool checkAgainstTrue = false; // Is it a comparison against 1?
23906 if ((C = dyn_cast<ConstantSDNode>(Op1)))
23908 else if ((C = dyn_cast<ConstantSDNode>(Op2)))
23910 else // Quit if all operands are not constants.
23913 if (C->getZExtValue() == 1) {
23914 needOppositeCond = !needOppositeCond;
23915 checkAgainstTrue = true;
23916 } else if (C->getZExtValue() != 0)
23917 // Quit if the constant is neither 0 or 1.
23920 bool truncatedToBoolWithAnd = false;
23921 // Skip (zext $x), (trunc $x), or (and $x, 1) node.
23922 while (SetCC.getOpcode() == ISD::ZERO_EXTEND ||
23923 SetCC.getOpcode() == ISD::TRUNCATE ||
23924 SetCC.getOpcode() == ISD::AND) {
23925 if (SetCC.getOpcode() == ISD::AND) {
23927 ConstantSDNode *CS;
23928 if ((CS = dyn_cast<ConstantSDNode>(SetCC.getOperand(0))) &&
23929 CS->getZExtValue() == 1)
23931 if ((CS = dyn_cast<ConstantSDNode>(SetCC.getOperand(1))) &&
23932 CS->getZExtValue() == 1)
23936 SetCC = SetCC.getOperand(OpIdx);
23937 truncatedToBoolWithAnd = true;
23939 SetCC = SetCC.getOperand(0);
23942 switch (SetCC.getOpcode()) {
23943 case X86ISD::SETCC_CARRY:
23944 // Since SETCC_CARRY gives output based on R = CF ? ~0 : 0, it's unsafe to
23945 // simplify it if the result of SETCC_CARRY is not canonicalized to 0 or 1,
23946 // i.e. it's a comparison against true but the result of SETCC_CARRY is not
23947 // truncated to i1 using 'and'.
23948 if (checkAgainstTrue && !truncatedToBoolWithAnd)
23950 assert(X86::CondCode(SetCC.getConstantOperandVal(0)) == X86::COND_B &&
23951 "Invalid use of SETCC_CARRY!");
23953 case X86ISD::SETCC:
23954 // Set the condition code or opposite one if necessary.
23955 CC = X86::CondCode(SetCC.getConstantOperandVal(0));
23956 if (needOppositeCond)
23957 CC = X86::GetOppositeBranchCondition(CC);
23958 return SetCC.getOperand(1);
23959 case X86ISD::CMOV: {
23960 // Check whether the false/true value is a canonical one, i.e. 0 or 1.
23961 ConstantSDNode *FVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(0));
23962 ConstantSDNode *TVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(1));
23963 // Quit if true value is not a constant.
23966 // Quit if false value is not a constant.
23968 SDValue Op = SetCC.getOperand(0);
23969 // Skip 'zext' or 'trunc' node.
23970 if (Op.getOpcode() == ISD::ZERO_EXTEND ||
23971 Op.getOpcode() == ISD::TRUNCATE)
23972 Op = Op.getOperand(0);
23973 // A special case for rdrand/rdseed, where 0 is set if the false cond is found.
23975 if ((Op.getOpcode() != X86ISD::RDRAND &&
23976 Op.getOpcode() != X86ISD::RDSEED) || Op.getResNo() != 0)
23979 // Quit if false value is not the constant 0 or 1.
23980 bool FValIsFalse = true;
23981 if (FVal && FVal->getZExtValue() != 0) {
23982 if (FVal->getZExtValue() != 1)
23984 // If FVal is 1, opposite cond is needed.
23985 needOppositeCond = !needOppositeCond;
23986 FValIsFalse = false;
23988 // Quit if TVal is not the constant opposite of FVal.
23989 if (FValIsFalse && TVal->getZExtValue() != 1)
23991 if (!FValIsFalse && TVal->getZExtValue() != 0)
23993 CC = X86::CondCode(SetCC.getConstantOperandVal(2));
23994 if (needOppositeCond)
23995 CC = X86::GetOppositeBranchCondition(CC);
23996 return SetCC.getOperand(3);
24003 /// Optimize X86ISD::CMOV [LHS, RHS, CONDCODE (e.g. X86::COND_NE), CONDVAL]
24004 static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG,
24005 TargetLowering::DAGCombinerInfo &DCI,
24006 const X86Subtarget *Subtarget) {
24009 // If the flag operand isn't dead, don't touch this CMOV.
24010 if (N->getNumValues() == 2 && !SDValue(N, 1).use_empty())
24013 SDValue FalseOp = N->getOperand(0);
24014 SDValue TrueOp = N->getOperand(1);
24015 X86::CondCode CC = (X86::CondCode)N->getConstantOperandVal(2);
24016 SDValue Cond = N->getOperand(3);
24018 if (CC == X86::COND_E || CC == X86::COND_NE) {
24019 switch (Cond.getOpcode()) {
24023 // If operand of BSR / BSF are proven never zero, then ZF cannot be set.
24024 if (DAG.isKnownNeverZero(Cond.getOperand(0)))
24025 return (CC == X86::COND_E) ? FalseOp : TrueOp;
24031 Flags = checkBoolTestSetCCCombine(Cond, CC);
24032 if (Flags.getNode() &&
24033 // Extra check as FCMOV only supports a subset of X86 cond.
24034 (FalseOp.getValueType() != MVT::f80 || hasFPCMov(CC))) {
24035 SDValue Ops[] = { FalseOp, TrueOp,
24036 DAG.getConstant(CC, MVT::i8), Flags };
24037 return DAG.getNode(X86ISD::CMOV, DL, N->getVTList(), Ops);
24040 // If this is a select between two integer constants, try to do some
24041 // optimizations. Note that the operands are ordered the opposite of SELECT operands.
24043 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(TrueOp)) {
24044 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(FalseOp)) {
24045 // Canonicalize the TrueC/FalseC values so that TrueC (the true value) is
24046 // larger than FalseC (the false value).
24047 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue())) {
24048 CC = X86::GetOppositeBranchCondition(CC);
24049 std::swap(TrueC, FalseC);
24050 std::swap(TrueOp, FalseOp);
24053 // Optimize C ? 8 : 0 -> zext(setcc(C)) << 3. Likewise for any pow2/0.
24054 // This is efficient for any integer data type (including i8/i16) and shift amount.
24056 if (FalseC->getAPIntValue() == 0 && TrueC->getAPIntValue().isPowerOf2()) {
24057 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
24058 DAG.getConstant(CC, MVT::i8), Cond);
24060 // Zero extend the condition if needed.
24061 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, TrueC->getValueType(0), Cond);
24063 unsigned ShAmt = TrueC->getAPIntValue().logBase2();
24064 Cond = DAG.getNode(ISD::SHL, DL, Cond.getValueType(), Cond,
24065 DAG.getConstant(ShAmt, MVT::i8));
24066 if (N->getNumValues() == 2) // Dead flag value?
24067 return DCI.CombineTo(N, Cond, SDValue());
24071 // Optimize Cond ? cst+1 : cst -> zext(setcc(C)) + cst. This is efficient
24072 // for any integer data type, including i8/i16.
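      // Worked example (added, illustrative): Cond ? 6 : 5 becomes
      // (zext(setcc(Cond)) + 5).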
24073 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
24074 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
24075 DAG.getConstant(CC, MVT::i8), Cond);
24077 // Zero extend the condition if needed.
24078 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
24079 FalseC->getValueType(0), Cond);
24080 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
24081 SDValue(FalseC, 0));
24083 if (N->getNumValues() == 2) // Dead flag value?
24084 return DCI.CombineTo(N, Cond, SDValue());
24088 // Optimize cases that will turn into an LEA instruction. This requires
24089 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
24090 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
24091 uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue();
24092 if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff;
24094 bool isFastMultiplier = false;
24096 switch ((unsigned char)Diff) {
24098 case 1: // result = add base, cond
24099 case 2: // result = lea base( , cond*2)
24100 case 3: // result = lea base(cond, cond*2)
24101 case 4: // result = lea base( , cond*4)
24102 case 5: // result = lea base(cond, cond*4)
24103 case 8: // result = lea base( , cond*8)
24104 case 9: // result = lea base(cond, cond*8)
24105 isFastMultiplier = true;
24110 if (isFastMultiplier) {
24111 APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue();
24112 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
24113 DAG.getConstant(CC, MVT::i8), Cond);
24114 // Zero extend the condition if needed.
24115 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
24117 // Scale the condition by the difference.
24119 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
24120 DAG.getConstant(Diff, Cond.getValueType()));
24122 // Add the base if non-zero.
24123 if (FalseC->getAPIntValue() != 0)
24124 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
24125 SDValue(FalseC, 0));
24126 if (N->getNumValues() == 2) // Dead flag value?
24127 return DCI.CombineTo(N, Cond, SDValue());
24134 // Handle these cases:
24135 // (select (x != c), e, c) -> (select (x != c), e, x),
24136 // (select (x == c), c, e) -> (select (x == c), x, e)
24137 // where the c is an integer constant, and the "select" is the combination
24138 // of CMOV and CMP.
24140 // The rationale for this change is that the conditional-move from a constant
24141 // needs two instructions; however, conditional-move from a register needs
24142 // only one instruction.
24144 // CAVEAT: By replacing a constant with a symbolic value, it may obscure
24145 // some instruction-combining opportunities. This opt needs to be
24146 // postponed as late as possible.
24148 if (!DCI.isBeforeLegalize() && !DCI.isBeforeLegalizeOps()) {
24149 // the DCI.xxxx conditions are provided to postpone the optimization as
24150 // late as possible.
24152 ConstantSDNode *CmpAgainst = nullptr;
24153 if ((Cond.getOpcode() == X86ISD::CMP || Cond.getOpcode() == X86ISD::SUB) &&
24154 (CmpAgainst = dyn_cast<ConstantSDNode>(Cond.getOperand(1))) &&
24155 !isa<ConstantSDNode>(Cond.getOperand(0))) {
24157 if (CC == X86::COND_NE &&
24158 CmpAgainst == dyn_cast<ConstantSDNode>(FalseOp)) {
24159 CC = X86::GetOppositeBranchCondition(CC);
24160 std::swap(TrueOp, FalseOp);
24163 if (CC == X86::COND_E &&
24164 CmpAgainst == dyn_cast<ConstantSDNode>(TrueOp)) {
24165 SDValue Ops[] = { FalseOp, Cond.getOperand(0),
24166 DAG.getConstant(CC, MVT::i8), Cond };
24167 return DAG.getNode(X86ISD::CMOV, DL, N->getVTList (), Ops);
24175 static SDValue PerformINTRINSIC_WO_CHAINCombine(SDNode *N, SelectionDAG &DAG,
24176 const X86Subtarget *Subtarget) {
24177 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
24179 default: return SDValue();
24180 // SSE/AVX/AVX2 blend intrinsics.
24181 case Intrinsic::x86_avx2_pblendvb:
24182 case Intrinsic::x86_avx2_pblendw:
24183 case Intrinsic::x86_avx2_pblendd_128:
24184 case Intrinsic::x86_avx2_pblendd_256:
24185 // Don't try to simplify this intrinsic if we don't have AVX2.
24186 if (!Subtarget->hasAVX2())
24189 case Intrinsic::x86_avx_blend_pd_256:
24190 case Intrinsic::x86_avx_blend_ps_256:
24191 case Intrinsic::x86_avx_blendv_pd_256:
24192 case Intrinsic::x86_avx_blendv_ps_256:
24193 // Don't try to simplify this intrinsic if we don't have AVX.
24194 if (!Subtarget->hasAVX())
24197 case Intrinsic::x86_sse41_pblendw:
24198 case Intrinsic::x86_sse41_blendpd:
24199 case Intrinsic::x86_sse41_blendps:
24200 case Intrinsic::x86_sse41_blendvps:
24201 case Intrinsic::x86_sse41_blendvpd:
24202 case Intrinsic::x86_sse41_pblendvb: {
24203 SDValue Op0 = N->getOperand(1);
24204 SDValue Op1 = N->getOperand(2);
24205 SDValue Mask = N->getOperand(3);
24207 // Don't try to simplify this intrinsic if we don't have SSE4.1.
24208 if (!Subtarget->hasSSE41())
24211 // fold (blend A, A, Mask) -> A
24214 // fold (blend A, B, allZeros) -> A
24215 if (ISD::isBuildVectorAllZeros(Mask.getNode()))
24217 // fold (blend A, B, allOnes) -> B
24218 if (ISD::isBuildVectorAllOnes(Mask.getNode()))
24221 // Simplify the case where the mask is a constant i32 value.
24222 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Mask)) {
24223 if (C->isNullValue())
24225 if (C->isAllOnesValue())
24232 // Packed SSE2/AVX2 arithmetic shift immediate intrinsics.
24233 case Intrinsic::x86_sse2_psrai_w:
24234 case Intrinsic::x86_sse2_psrai_d:
24235 case Intrinsic::x86_avx2_psrai_w:
24236 case Intrinsic::x86_avx2_psrai_d:
24237 case Intrinsic::x86_sse2_psra_w:
24238 case Intrinsic::x86_sse2_psra_d:
24239 case Intrinsic::x86_avx2_psra_w:
24240 case Intrinsic::x86_avx2_psra_d: {
24241 SDValue Op0 = N->getOperand(1);
24242 SDValue Op1 = N->getOperand(2);
24243 EVT VT = Op0.getValueType();
24244 assert(VT.isVector() && "Expected a vector type!");
24246 if (isa<BuildVectorSDNode>(Op1))
24247 Op1 = Op1.getOperand(0);
24249 if (!isa<ConstantSDNode>(Op1))
24252 EVT SVT = VT.getVectorElementType();
24253 unsigned SVTBits = SVT.getSizeInBits();
24255 ConstantSDNode *CND = cast<ConstantSDNode>(Op1);
24256 const APInt &C = APInt(SVTBits, CND->getAPIntValue().getZExtValue());
24257 uint64_t ShAmt = C.getZExtValue();
24259 // Don't try to convert this shift into an ISD::SRA if the shift
24260 // count is bigger than or equal to the element size.
24261 if (ShAmt >= SVTBits)
24264 // Trivial case: if the shift count is zero, then fold this
24265 // into the first operand.
24269 // Replace this packed shift intrinsic with a target independent shift node.
24271 SDValue Splat = DAG.getConstant(C, VT);
24272 return DAG.getNode(ISD::SRA, SDLoc(N), VT, Op0, Splat);
24277 /// PerformMulCombine - Optimize a single multiply with constant into two
24278 /// in order to implement it with two cheaper instructions, e.g.
24279 /// LEA + SHL, LEA + LEA.
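/// Worked example (added, illustrative): x*45 splits into (x*9)*5; the *9 can
/// be lowered as lea (x, x*8) and the *5 as lea (t, t*4).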
24280 static SDValue PerformMulCombine(SDNode *N, SelectionDAG &DAG,
24281 TargetLowering::DAGCombinerInfo &DCI) {
24282 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
24285 EVT VT = N->getValueType(0);
24286 if (VT != MVT::i64 && VT != MVT::i32)
24289 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
24292 uint64_t MulAmt = C->getZExtValue();
24293 if (isPowerOf2_64(MulAmt) || MulAmt == 3 || MulAmt == 5 || MulAmt == 9)
24296 uint64_t MulAmt1 = 0;
24297 uint64_t MulAmt2 = 0;
24298 if ((MulAmt % 9) == 0) {
24300 MulAmt2 = MulAmt / 9;
24301 } else if ((MulAmt % 5) == 0) {
24303 MulAmt2 = MulAmt / 5;
24304 } else if ((MulAmt % 3) == 0) {
24306 MulAmt2 = MulAmt / 3;
24309 (isPowerOf2_64(MulAmt2) || MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9)){
24312 if (isPowerOf2_64(MulAmt2) &&
24313 !(N->hasOneUse() && N->use_begin()->getOpcode() == ISD::ADD))
24314 // If the second multiplier is pow2, issue it first. We want the multiply by
24315 // 3, 5, or 9 to be folded into the addressing mode unless the lone use is an add.
24317 std::swap(MulAmt1, MulAmt2);
24320 if (isPowerOf2_64(MulAmt1))
24321 NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
24322 DAG.getConstant(Log2_64(MulAmt1), MVT::i8));
24324 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
24325 DAG.getConstant(MulAmt1, VT));
24327 if (isPowerOf2_64(MulAmt2))
24328 NewMul = DAG.getNode(ISD::SHL, DL, VT, NewMul,
24329 DAG.getConstant(Log2_64(MulAmt2), MVT::i8));
24331 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, NewMul,
24332 DAG.getConstant(MulAmt2, VT));
24334 // Do not add new nodes to DAG combiner worklist.
24335 DCI.CombineTo(N, NewMul, false);
24340 static SDValue PerformSHLCombine(SDNode *N, SelectionDAG &DAG) {
24341 SDValue N0 = N->getOperand(0);
24342 SDValue N1 = N->getOperand(1);
24343 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
24344 EVT VT = N0.getValueType();
24346 // fold (shl (and (setcc_c), c1), c2) -> (and setcc_c, (c1 << c2))
24347 // since the result of setcc_c is all zero's or all ones.
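  // Worked example (added, illustrative): with c1 == 1 and c2 == 3,
  // (shl (and setcc_c, 1), 3) folds to (and setcc_c, 8).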
24348 if (VT.isInteger() && !VT.isVector() &&
24349 N1C && N0.getOpcode() == ISD::AND &&
24350 N0.getOperand(1).getOpcode() == ISD::Constant) {
24351 SDValue N00 = N0.getOperand(0);
24352 if (N00.getOpcode() == X86ISD::SETCC_CARRY ||
24353 ((N00.getOpcode() == ISD::ANY_EXTEND ||
24354 N00.getOpcode() == ISD::ZERO_EXTEND) &&
24355 N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY)) {
24356 APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
24357 APInt ShAmt = N1C->getAPIntValue();
24358 Mask = Mask.shl(ShAmt);
24360 return DAG.getNode(ISD::AND, SDLoc(N), VT,
24361 N00, DAG.getConstant(Mask, VT));
24365 // Hardware support for vector shifts is sparse, which makes us scalarize the
24366 // vector operations in many cases. Also, on Sandy Bridge ADD is faster than SHL.
24368 // (shl V, 1) -> add V,V
24369 if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
24370 if (auto *N1SplatC = N1BV->getConstantSplatNode()) {
24371 assert(N0.getValueType().isVector() && "Invalid vector shift type");
24372 // We shift all of the values by one. In many cases we do not have
24373 // hardware support for this operation. This is better expressed as an ADD of two values.
24375 if (N1SplatC->getZExtValue() == 1)
24376 return DAG.getNode(ISD::ADD, SDLoc(N), VT, N0, N0);
24382 /// \brief Returns a vector of 0s if the node in input is a vector logical
24383 /// shift by a constant amount which is known to be bigger than or equal
24384 /// to the vector element size in bits.
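/// Illustrative example (added): (srl v4i32 X, <32,32,32,32>) folds to the
/// v4i32 zero vector, since SSE2/AVX2 logical shifts by amounts greater than
/// or equal to the element size always produce zero.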
24385 static SDValue performShiftToAllZeros(SDNode *N, SelectionDAG &DAG,
24386 const X86Subtarget *Subtarget) {
24387 EVT VT = N->getValueType(0);
24389 if (VT != MVT::v2i64 && VT != MVT::v4i32 && VT != MVT::v8i16 &&
24390 (!Subtarget->hasInt256() ||
24391 (VT != MVT::v4i64 && VT != MVT::v8i32 && VT != MVT::v16i16)))
24394 SDValue Amt = N->getOperand(1);
24396 if (auto *AmtBV = dyn_cast<BuildVectorSDNode>(Amt))
24397 if (auto *AmtSplat = AmtBV->getConstantSplatNode()) {
24398 APInt ShiftAmt = AmtSplat->getAPIntValue();
24399 unsigned MaxAmount = VT.getVectorElementType().getSizeInBits();
24401 // SSE2/AVX2 logical shifts always return a vector of 0s
24402 // if the shift amount is bigger than or equal to
24403 // the element size. The constant shift amount will be
24404 // encoded as an 8-bit immediate.
24405 if (ShiftAmt.trunc(8).uge(MaxAmount))
24406 return getZeroVector(VT, Subtarget, DAG, DL);
24412 /// PerformShiftCombine - Combine shifts.
24413 static SDValue PerformShiftCombine(SDNode* N, SelectionDAG &DAG,
24414 TargetLowering::DAGCombinerInfo &DCI,
24415 const X86Subtarget *Subtarget) {
24416 if (N->getOpcode() == ISD::SHL) {
24417 SDValue V = PerformSHLCombine(N, DAG);
24418 if (V.getNode()) return V;
24421 if (N->getOpcode() != ISD::SRA) {
24422 // Try to fold this logical shift into a zero vector.
24423 SDValue V = performShiftToAllZeros(N, DAG, Subtarget);
24424 if (V.getNode()) return V;
24430 // CMPEQCombine - Recognize the distinctive (AND (setcc ...) (setcc ..))
24431 // where both setccs reference the same FP CMP, and rewrite for CMPEQSS
24432 // and friends. Likewise for OR -> CMPNEQSS.
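// Illustrative note (added): the scalar 'x == y' float idiom (SETE and SETNP
// over a single UCOMISS) can instead be lowered as one CMPEQSS whose
// all-ones/all-zeros result is masked down to a single bit.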
24433 static SDValue CMPEQCombine(SDNode *N, SelectionDAG &DAG,
24434 TargetLowering::DAGCombinerInfo &DCI,
24435 const X86Subtarget *Subtarget) {
24438 // SSE1 supports CMP{eq|ne}SS, and SSE2 added CMP{eq|ne}SD, but
24439 // we're requiring SSE2 for both.
24440 if (Subtarget->hasSSE2() && isAndOrOfSetCCs(SDValue(N, 0U), opcode)) {
24441 SDValue N0 = N->getOperand(0);
24442 SDValue N1 = N->getOperand(1);
24443 SDValue CMP0 = N0->getOperand(1);
24444 SDValue CMP1 = N1->getOperand(1);
24447 // The SETCCs should both refer to the same CMP.
24448 if (CMP0.getOpcode() != X86ISD::CMP || CMP0 != CMP1)
24451 SDValue CMP00 = CMP0->getOperand(0);
24452 SDValue CMP01 = CMP0->getOperand(1);
24453 EVT VT = CMP00.getValueType();
24455 if (VT == MVT::f32 || VT == MVT::f64) {
24456 bool ExpectingFlags = false;
24457 // Check for any users that want flags:
24458 for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
24459 !ExpectingFlags && UI != UE; ++UI)
24460 switch (UI->getOpcode()) {
24465 ExpectingFlags = true;
24467 case ISD::CopyToReg:
24468 case ISD::SIGN_EXTEND:
24469 case ISD::ZERO_EXTEND:
24470 case ISD::ANY_EXTEND:
24474 if (!ExpectingFlags) {
24475 enum X86::CondCode cc0 = (enum X86::CondCode)N0.getConstantOperandVal(0);
24476 enum X86::CondCode cc1 = (enum X86::CondCode)N1.getConstantOperandVal(0);
24478 if (cc1 == X86::COND_E || cc1 == X86::COND_NE) {
24479 X86::CondCode tmp = cc0;
24484 if ((cc0 == X86::COND_E && cc1 == X86::COND_NP) ||
24485 (cc0 == X86::COND_NE && cc1 == X86::COND_P)) {
24486 // FIXME: need symbolic constants for these magic numbers.
24487 // See X86ATTInstPrinter.cpp:printSSECC().
24488 unsigned x86cc = (cc0 == X86::COND_E) ? 0 : 4;
24489 if (Subtarget->hasAVX512()) {
24490 SDValue FSetCC = DAG.getNode(X86ISD::FSETCC, DL, MVT::i1, CMP00,
24491 CMP01, DAG.getConstant(x86cc, MVT::i8));
24492 if (N->getValueType(0) != MVT::i1)
24493 return DAG.getNode(ISD::ZERO_EXTEND, DL, N->getValueType(0),
24497 SDValue OnesOrZeroesF = DAG.getNode(X86ISD::FSETCC, DL,
24498 CMP00.getValueType(), CMP00, CMP01,
24499 DAG.getConstant(x86cc, MVT::i8));
24501 bool is64BitFP = (CMP00.getValueType() == MVT::f64);
24502 MVT IntVT = is64BitFP ? MVT::i64 : MVT::i32;
24504 if (is64BitFP && !Subtarget->is64Bit()) {
24505 // On a 32-bit target, we cannot bitcast the 64-bit float to a
24506 // 64-bit integer, since that's not a legal type. Since
24507 // OnesOrZeroesF is all ones or all zeroes, we don't need all the
24508 // bits, but can do this little dance to extract the lowest 32 bits
24509 // and work with those going forward.
24510 SDValue Vector64 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64,
24512 SDValue Vector32 = DAG.getNode(ISD::BITCAST, DL, MVT::v4f32,
24514 OnesOrZeroesF = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32,
24515 Vector32, DAG.getIntPtrConstant(0));
24519 SDValue OnesOrZeroesI = DAG.getNode(ISD::BITCAST, DL, IntVT, OnesOrZeroesF);
24520 SDValue ANDed = DAG.getNode(ISD::AND, DL, IntVT, OnesOrZeroesI,
24521 DAG.getConstant(1, IntVT));
24522 SDValue OneBitOfTruth = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, ANDed);
24523 return OneBitOfTruth;
24531 /// CanFoldXORWithAllOnes - Test whether the XOR operand is an AllOnes vector
24532 /// so it can be folded inside ANDNP.
24533 static bool CanFoldXORWithAllOnes(const SDNode *N) {
24534 EVT VT = N->getValueType(0);
24536 // Match direct AllOnes for 128 and 256-bit vectors
24537 if (ISD::isBuildVectorAllOnes(N))
24540 // Look through a bit convert.
24541 if (N->getOpcode() == ISD::BITCAST)
24542 N = N->getOperand(0).getNode();
24544 // Sometimes the operand may come from an insert_subvector building a 256-bit
24546 if (VT.is256BitVector() &&
24547 N->getOpcode() == ISD::INSERT_SUBVECTOR) {
24548 SDValue V1 = N->getOperand(0);
24549 SDValue V2 = N->getOperand(1);
24551 if (V1.getOpcode() == ISD::INSERT_SUBVECTOR &&
24552 V1.getOperand(0).getOpcode() == ISD::UNDEF &&
24553 ISD::isBuildVectorAllOnes(V1.getOperand(1).getNode()) &&
24554 ISD::isBuildVectorAllOnes(V2.getNode()))
24561 // On AVX/AVX2 the type v8i1 is legalized to v8i16, which is an XMM sized
24562 // register. In most cases we actually compare or select YMM-sized registers
24563 // and mixing the two types creates horrible code. This method optimizes
24564 // some of the transition sequences.
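// Illustrative example (added note, assumed types): with A and B of type
// v8i32, (zext (and (trunc A to v8i16), (trunc B to v8i16)) to v8i32) is
// rewritten as (and (and A, B), splat 0xFFFF), avoiding the XMM<->YMM switch.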
24565 static SDValue WidenMaskArithmetic(SDNode *N, SelectionDAG &DAG,
24566 TargetLowering::DAGCombinerInfo &DCI,
24567 const X86Subtarget *Subtarget) {
24568 EVT VT = N->getValueType(0);
24569 if (!VT.is256BitVector())
24572 assert((N->getOpcode() == ISD::ANY_EXTEND ||
24573 N->getOpcode() == ISD::ZERO_EXTEND ||
24574 N->getOpcode() == ISD::SIGN_EXTEND) && "Invalid Node");
24576 SDValue Narrow = N->getOperand(0);
24577 EVT NarrowVT = Narrow->getValueType(0);
24578 if (!NarrowVT.is128BitVector())
24581 if (Narrow->getOpcode() != ISD::XOR &&
24582 Narrow->getOpcode() != ISD::AND &&
24583 Narrow->getOpcode() != ISD::OR)
24586 SDValue N0 = Narrow->getOperand(0);
24587 SDValue N1 = Narrow->getOperand(1);
24590 // The left side has to be a trunc.
24591 if (N0.getOpcode() != ISD::TRUNCATE)
24594 // The type of the truncated inputs.
24595 EVT WideVT = N0->getOperand(0)->getValueType(0);
24599 // The right side has to be a 'trunc' or a constant vector.
24600 bool RHSTrunc = N1.getOpcode() == ISD::TRUNCATE;
24601 ConstantSDNode *RHSConstSplat = nullptr;
24602 if (auto *RHSBV = dyn_cast<BuildVectorSDNode>(N1))
24603 RHSConstSplat = RHSBV->getConstantSplatNode();
24604 if (!RHSTrunc && !RHSConstSplat)
24607 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
24609 if (!TLI.isOperationLegalOrPromote(Narrow->getOpcode(), WideVT))
24612 // Set N0 and N1 to hold the inputs to the new wide operation.
24613 N0 = N0->getOperand(0);
24614 if (RHSConstSplat) {
24615 N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT.getScalarType(),
24616 SDValue(RHSConstSplat, 0));
24617 SmallVector<SDValue, 8> C(WideVT.getVectorNumElements(), N1);
24618 N1 = DAG.getNode(ISD::BUILD_VECTOR, DL, WideVT, C);
24619 } else if (RHSTrunc) {
24620 N1 = N1->getOperand(0);
24623 // Generate the wide operation.
24624 SDValue Op = DAG.getNode(Narrow->getOpcode(), DL, WideVT, N0, N1);
24625 unsigned Opcode = N->getOpcode();
24627 case ISD::ANY_EXTEND:
24629 case ISD::ZERO_EXTEND: {
24630 unsigned InBits = NarrowVT.getScalarType().getSizeInBits();
24631 APInt Mask = APInt::getAllOnesValue(InBits);
24632 Mask = Mask.zext(VT.getScalarType().getSizeInBits());
24633 return DAG.getNode(ISD::AND, DL, VT,
24634 Op, DAG.getConstant(Mask, VT));
24636 case ISD::SIGN_EXTEND:
24637 return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT,
24638 Op, DAG.getValueType(NarrowVT));
24640 llvm_unreachable("Unexpected opcode");
24644 static SDValue VectorZextCombine(SDNode *N, SelectionDAG &DAG,
24645 TargetLowering::DAGCombinerInfo &DCI,
24646 const X86Subtarget *Subtarget) {
24647 SDValue N0 = N->getOperand(0);
24648 SDValue N1 = N->getOperand(1);
24651 // A vector zext_in_reg may be represented as a shuffle,
24652 // feeding into a bitcast (this represents anyext) feeding into
24653 // an and with a mask.
24654 // We'd like to try to combine that into a shuffle with zero
24655 // plus a bitcast, removing the and.
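  // Illustrative example (added note, assumed types): a v8i16 -> 4 x i32
  // zext_in_reg may appear as (and (bitcast (shuffle X, undef, <0,u,1,u,2,u,3,u>)),
  // splat 0xFFFF); it is rewritten so the odd lanes come from a zero vector
  // (<0,8,1,8,2,8,3,8>) and the 'and' is dropped.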
24656 if (N0.getOpcode() != ISD::BITCAST ||
24657 N0.getOperand(0).getOpcode() != ISD::VECTOR_SHUFFLE)
24660 // The other side of the AND should be a splat of 2^C - 1, where C
24661 // is the number of bits in the source element type.
24662 if (N1.getOpcode() == ISD::BITCAST)
24663 N1 = N1.getOperand(0);
24664 if (N1.getOpcode() != ISD::BUILD_VECTOR)
24666 BuildVectorSDNode *Vector = cast<BuildVectorSDNode>(N1);
24668 ShuffleVectorSDNode *Shuffle = cast<ShuffleVectorSDNode>(N0.getOperand(0));
24669 EVT SrcType = Shuffle->getValueType(0);
24671 // We expect a single-source shuffle
24672 if (Shuffle->getOperand(1)->getOpcode() != ISD::UNDEF)
24675 unsigned SrcSize = SrcType.getScalarSizeInBits();
24677 APInt SplatValue, SplatUndef;
24678 unsigned SplatBitSize;
24680 if (!Vector->isConstantSplat(SplatValue, SplatUndef,
24681 SplatBitSize, HasAnyUndefs))
24684 unsigned ResSize = N1.getValueType().getScalarSizeInBits();
24685 // Make sure the splat matches the mask we expect
24686 if (SplatBitSize > ResSize ||
24687 (SplatValue + 1).exactLogBase2() != (int)SrcSize)
24690 // Make sure the input and output size make sense
24691 if (SrcSize >= ResSize || ResSize % SrcSize)
24694 // We expect a shuffle of the form <0, u, u, u, 1, u, u, u...>
24695 // The number of u's between each two values depends on the ratio between
24696 // the source and dest type.
24697 unsigned ZextRatio = ResSize / SrcSize;
24698 bool IsZext = true;
24699 for (unsigned i = 0; i < SrcType.getVectorNumElements(); ++i) {
24700 if (i % ZextRatio) {
24701 if (Shuffle->getMaskElt(i) > 0) {
24707 if (Shuffle->getMaskElt(i) != (int)(i / ZextRatio)) {
24708 // Expected element number
24718 // Ok, perform the transformation - replace the shuffle with
24719 // a shuffle of the form <0, k, k, k, 1, k, k, k> with zero
24720 // (instead of undef) where the k elements come from the zero vector.
24721 SmallVector<int, 8> Mask;
24722 unsigned NumElems = SrcType.getVectorNumElements();
24723 for (unsigned i = 0; i < NumElems; ++i)
24725 Mask.push_back(NumElems);
24727 Mask.push_back(i / ZextRatio);
24729 SDValue NewShuffle = DAG.getVectorShuffle(Shuffle->getValueType(0), DL,
24730 Shuffle->getOperand(0), DAG.getConstant(0, SrcType), Mask);
24731 return DAG.getNode(ISD::BITCAST, DL, N0.getValueType(), NewShuffle);
24734 static SDValue PerformAndCombine(SDNode *N, SelectionDAG &DAG,
24735 TargetLowering::DAGCombinerInfo &DCI,
24736 const X86Subtarget *Subtarget) {
24737 if (DCI.isBeforeLegalizeOps())
24740 SDValue Zext = VectorZextCombine(N, DAG, DCI, Subtarget);
24741 if (Zext.getNode())
24744 SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget);
24748 EVT VT = N->getValueType(0);
24749 SDValue N0 = N->getOperand(0);
24750 SDValue N1 = N->getOperand(1);
24753 // Create BEXTR instructions
24754 // BEXTR is ((X >> imm) & (2**size-1))
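  // Worked example (added, illustrative): ((X >> 4) & 0xFF) becomes
  // (X86ISD::BEXTR X, 0x804), i.e. start bit 4 and length 8 packed as
  // Shift | (MaskSize << 8).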
24755 if (VT == MVT::i32 || VT == MVT::i64) {
24756 // Check for BEXTR.
24757 if ((Subtarget->hasBMI() || Subtarget->hasTBM()) &&
24758 (N0.getOpcode() == ISD::SRA || N0.getOpcode() == ISD::SRL)) {
24759 ConstantSDNode *MaskNode = dyn_cast<ConstantSDNode>(N1);
24760 ConstantSDNode *ShiftNode = dyn_cast<ConstantSDNode>(N0.getOperand(1));
24761 if (MaskNode && ShiftNode) {
24762 uint64_t Mask = MaskNode->getZExtValue();
24763 uint64_t Shift = ShiftNode->getZExtValue();
24764 if (isMask_64(Mask)) {
24765 uint64_t MaskSize = countPopulation(Mask);
24766 if (Shift + MaskSize <= VT.getSizeInBits())
24767 return DAG.getNode(X86ISD::BEXTR, DL, VT, N0.getOperand(0),
24768 DAG.getConstant(Shift | (MaskSize << 8), VT));
24776 // Want to form ANDNP nodes:
24777 // 1) In the hopes of then easily combining them with OR and AND nodes
24778 // to form PBLEND/PSIGN.
24779 // 2) To match ANDN packed intrinsics
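  // Illustrative example (added note): (and (xor X, <all ones>), Y) becomes
  // (X86ISD::ANDNP X, Y), i.e. a single PANDN instruction.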
24780 if (VT != MVT::v2i64 && VT != MVT::v4i64)
24783 // Check LHS for vnot
24784 if (N0.getOpcode() == ISD::XOR &&
24785 //ISD::isBuildVectorAllOnes(N0.getOperand(1).getNode()))
24786 CanFoldXORWithAllOnes(N0.getOperand(1).getNode()))
24787 return DAG.getNode(X86ISD::ANDNP, DL, VT, N0.getOperand(0), N1);
24789 // Check RHS for vnot
24790 if (N1.getOpcode() == ISD::XOR &&
24791 //ISD::isBuildVectorAllOnes(N1.getOperand(1).getNode()))
24792 CanFoldXORWithAllOnes(N1.getOperand(1).getNode()))
24793 return DAG.getNode(X86ISD::ANDNP, DL, VT, N1.getOperand(0), N0);
24798 static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG,
24799 TargetLowering::DAGCombinerInfo &DCI,
24800 const X86Subtarget *Subtarget) {
24801 if (DCI.isBeforeLegalizeOps())
24804 SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget);
24808 SDValue N0 = N->getOperand(0);
24809 SDValue N1 = N->getOperand(1);
24810 EVT VT = N->getValueType(0);
24812 // look for psign/blend
24813 if (VT == MVT::v2i64 || VT == MVT::v4i64) {
24814 if (!Subtarget->hasSSSE3() ||
24815 (VT == MVT::v4i64 && !Subtarget->hasInt256()))
24818 // Canonicalize pandn to RHS
24819 if (N0.getOpcode() == X86ISD::ANDNP)
24821 // or (and (m, y), (pandn m, x))
24822 if (N0.getOpcode() == ISD::AND && N1.getOpcode() == X86ISD::ANDNP) {
24823 SDValue Mask = N1.getOperand(0);
24824 SDValue X = N1.getOperand(1);
24826 if (N0.getOperand(0) == Mask)
24827 Y = N0.getOperand(1);
24828 if (N0.getOperand(1) == Mask)
24829 Y = N0.getOperand(0);
24831 // Check to see if the mask appeared in both the AND and ANDNP and
24835 // Validate that X, Y, and Mask are BIT_CONVERTS, and see through them.
24836 // Look through mask bitcast.
24837 if (Mask.getOpcode() == ISD::BITCAST)
24838 Mask = Mask.getOperand(0);
24839 if (X.getOpcode() == ISD::BITCAST)
24840 X = X.getOperand(0);
24841 if (Y.getOpcode() == ISD::BITCAST)
24842 Y = Y.getOperand(0);
24844 EVT MaskVT = Mask.getValueType();
24846 // Validate that the Mask operand is a vector sra node.
24847 // FIXME: what to do for bytes, since there is a psignb/pblendvb, but
24848 // there is no psrai.b
24849 unsigned EltBits = MaskVT.getVectorElementType().getSizeInBits();
24850 unsigned SraAmt = ~0;
24851 if (Mask.getOpcode() == ISD::SRA) {
24852 if (auto *AmtBV = dyn_cast<BuildVectorSDNode>(Mask.getOperand(1)))
24853 if (auto *AmtConst = AmtBV->getConstantSplatNode())
24854 SraAmt = AmtConst->getZExtValue();
24855 } else if (Mask.getOpcode() == X86ISD::VSRAI) {
24856 SDValue SraC = Mask.getOperand(1);
24857 SraAmt = cast<ConstantSDNode>(SraC)->getZExtValue();
24859 if ((SraAmt + 1) != EltBits)
24864 // Now we know we at least have a pblendvb with the mask value. See if
24865 // we can form a psignb/w/d.
24866 // psign = x.type == y.type == mask.type && y = sub(0, x);
24867 if (Y.getOpcode() == ISD::SUB && Y.getOperand(1) == X &&
24868 ISD::isBuildVectorAllZeros(Y.getOperand(0).getNode()) &&
24869 X.getValueType() == MaskVT && Y.getValueType() == MaskVT) {
24870 assert((EltBits == 8 || EltBits == 16 || EltBits == 32) &&
24871 "Unsupported VT for PSIGN");
24872 Mask = DAG.getNode(X86ISD::PSIGN, DL, MaskVT, X, Mask.getOperand(0));
24873 return DAG.getNode(ISD::BITCAST, DL, VT, Mask);
24875 // PBLENDVB is only available on SSE 4.1.
24876 if (!Subtarget->hasSSE41())
24879 EVT BlendVT = (VT == MVT::v4i64) ? MVT::v32i8 : MVT::v16i8;
24881 X = DAG.getNode(ISD::BITCAST, DL, BlendVT, X);
24882 Y = DAG.getNode(ISD::BITCAST, DL, BlendVT, Y);
24883 Mask = DAG.getNode(ISD::BITCAST, DL, BlendVT, Mask);
24884 Mask = DAG.getNode(ISD::VSELECT, DL, BlendVT, Mask, Y, X);
24885 return DAG.getNode(ISD::BITCAST, DL, VT, Mask);
24889 if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64)
24892 // fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
24893 MachineFunction &MF = DAG.getMachineFunction();
24895 MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize);
24897 // SHLD/SHRD instructions have lower register pressure, but on some
24898 // platforms they have higher latency than the equivalent
24899 // series of shifts/or that would otherwise be generated.
24900 // Don't fold (or (x << c) | (y >> (64 - c))) if SHLD/SHRD instructions
24901 // have higher latencies and we are not optimizing for size.
24902 if (!OptForSize && Subtarget->isSHLDSlow())
24905 if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL)
24907 if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL)
24909 if (!N0.hasOneUse() || !N1.hasOneUse())
24912 SDValue ShAmt0 = N0.getOperand(1);
24913 if (ShAmt0.getValueType() != MVT::i8)
24915 SDValue ShAmt1 = N1.getOperand(1);
24916 if (ShAmt1.getValueType() != MVT::i8)
24918 if (ShAmt0.getOpcode() == ISD::TRUNCATE)
24919 ShAmt0 = ShAmt0.getOperand(0);
24920 if (ShAmt1.getOpcode() == ISD::TRUNCATE)
24921 ShAmt1 = ShAmt1.getOperand(0);
24924 unsigned Opc = X86ISD::SHLD;
24925 SDValue Op0 = N0.getOperand(0);
24926 SDValue Op1 = N1.getOperand(0);
24927 if (ShAmt0.getOpcode() == ISD::SUB) {
24928 Opc = X86ISD::SHRD;
24929 std::swap(Op0, Op1);
24930 std::swap(ShAmt0, ShAmt1);
24933 unsigned Bits = VT.getSizeInBits();
24934 if (ShAmt1.getOpcode() == ISD::SUB) {
24935 SDValue Sum = ShAmt1.getOperand(0);
24936 if (ConstantSDNode *SumC = dyn_cast<ConstantSDNode>(Sum)) {
24937 SDValue ShAmt1Op1 = ShAmt1.getOperand(1);
24938 if (ShAmt1Op1.getNode()->getOpcode() == ISD::TRUNCATE)
24939 ShAmt1Op1 = ShAmt1Op1.getOperand(0);
24940 if (SumC->getSExtValue() == Bits && ShAmt1Op1 == ShAmt0)
24941 return DAG.getNode(Opc, DL, VT,
24943 DAG.getNode(ISD::TRUNCATE, DL,
24946 } else if (ConstantSDNode *ShAmt1C = dyn_cast<ConstantSDNode>(ShAmt1)) {
24947 ConstantSDNode *ShAmt0C = dyn_cast<ConstantSDNode>(ShAmt0);
24949 ShAmt0C->getSExtValue() + ShAmt1C->getSExtValue() == Bits)
24950 return DAG.getNode(Opc, DL, VT,
24951 N0.getOperand(0), N1.getOperand(0),
24952 DAG.getNode(ISD::TRUNCATE, DL,
24959 // Generate NEG and CMOV for integer abs.
24960 static SDValue performIntegerAbsCombine(SDNode *N, SelectionDAG &DAG) {
24961 EVT VT = N->getValueType(0);
24963 // Since X86 does not have CMOV for 8-bit integer, we don't convert
24964 // 8-bit integer abs to NEG and CMOV.
24965 if (VT.isInteger() && VT.getSizeInBits() == 8)
24968 SDValue N0 = N->getOperand(0);
24969 SDValue N1 = N->getOperand(1);
24972 // Check pattern of XOR(ADD(X,Y), Y) where Y is SRA(X, size(X)-1)
24973 // and change it to SUB and CMOV.
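  // Illustrative note (added): for i32, abs(x) is commonly emitted as
  // (x + (x >>s 31)) ^ (x >>s 31); this combine turns that back into a
  // flag-setting (sub 0, x) plus a CMOV selecting between x and -x.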
24974 if (VT.isInteger() && N->getOpcode() == ISD::XOR &&
24975 N0.getOpcode() == ISD::ADD &&
24976 N0.getOperand(1) == N1 &&
24977 N1.getOpcode() == ISD::SRA &&
24978 N1.getOperand(0) == N0.getOperand(0))
24979 if (ConstantSDNode *Y1C = dyn_cast<ConstantSDNode>(N1.getOperand(1)))
24980 if (Y1C->getAPIntValue() == VT.getSizeInBits()-1) {
24981 // Generate SUB & CMOV.
24982 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, DAG.getVTList(VT, MVT::i32),
24983 DAG.getConstant(0, VT), N0.getOperand(0));
24985 SDValue Ops[] = { N0.getOperand(0), Neg,
24986 DAG.getConstant(X86::COND_GE, MVT::i8),
24987 SDValue(Neg.getNode(), 1) };
24988 return DAG.getNode(X86ISD::CMOV, DL, DAG.getVTList(VT, MVT::Glue), Ops);
24993 // PerformXorCombine - Attempts to turn XOR nodes into BLSMSK nodes
24994 static SDValue PerformXorCombine(SDNode *N, SelectionDAG &DAG,
24995 TargetLowering::DAGCombinerInfo &DCI,
24996 const X86Subtarget *Subtarget) {
24997 if (DCI.isBeforeLegalizeOps())
25000 if (Subtarget->hasCMov()) {
25001 SDValue RV = performIntegerAbsCombine(N, DAG);
25009 /// PerformLOADCombine - Do target-specific dag combines on LOAD nodes.
25010 static SDValue PerformLOADCombine(SDNode *N, SelectionDAG &DAG,
25011 TargetLowering::DAGCombinerInfo &DCI,
25012 const X86Subtarget *Subtarget) {
25013 LoadSDNode *Ld = cast<LoadSDNode>(N);
25014 EVT RegVT = Ld->getValueType(0);
25015 EVT MemVT = Ld->getMemoryVT();
25017 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25019 // For chips with slow 32-byte unaligned loads, break the 32-byte operation
25020 // into two 16-byte operations.
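  // Illustrative example (added note, assumed type): an unaligned v8f32 load on
  // such a chip becomes two 16-byte loads inserted into a single 256-bit value,
  // with a TokenFactor joining the two load chains.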
25021 ISD::LoadExtType Ext = Ld->getExtensionType();
25022 unsigned Alignment = Ld->getAlignment();
25023 bool IsAligned = Alignment == 0 || Alignment >= MemVT.getSizeInBits()/8;
25024 if (RegVT.is256BitVector() && Subtarget->isUnalignedMem32Slow() &&
25025 !DCI.isBeforeLegalizeOps() && !IsAligned && Ext == ISD::NON_EXTLOAD) {
25026 unsigned NumElems = RegVT.getVectorNumElements();
25030 SDValue Ptr = Ld->getBasePtr();
25031 SDValue Increment = DAG.getConstant(16, TLI.getPointerTy());
25033 EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
25035 SDValue Load1 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr,
25036 Ld->getPointerInfo(), Ld->isVolatile(),
25037 Ld->isNonTemporal(), Ld->isInvariant(),
25039 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
25040 SDValue Load2 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr,
25041 Ld->getPointerInfo(), Ld->isVolatile(),
25042 Ld->isNonTemporal(), Ld->isInvariant(),
25043 std::min(16U, Alignment));
25044 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
25046 Load2.getValue(1));
25048 SDValue NewVec = DAG.getUNDEF(RegVT);
25049 NewVec = Insert128BitVector(NewVec, Load1, 0, DAG, dl);
25050 NewVec = Insert128BitVector(NewVec, Load2, NumElems/2, DAG, dl);
25051 return DCI.CombineTo(N, NewVec, TF, true);
25057 /// PerformMLOADCombine - Resolve extending loads
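/// Illustrative example (added, assumed types): a masked SEXTLOAD from v8i16
/// memory into v8i32 is rewritten below as a wider v16i16 masked load (with a
/// widened mask and pass-through value) followed by an X86ISD::VSEXT.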
25058 static SDValue PerformMLOADCombine(SDNode *N, SelectionDAG &DAG,
25059 TargetLowering::DAGCombinerInfo &DCI,
25060 const X86Subtarget *Subtarget) {
25061 MaskedLoadSDNode *Mld = cast<MaskedLoadSDNode>(N);
25062 if (Mld->getExtensionType() != ISD::SEXTLOAD)
25065 EVT VT = Mld->getValueType(0);
25066 unsigned NumElems = VT.getVectorNumElements();
25067 EVT LdVT = Mld->getMemoryVT();
25070 assert(LdVT != VT && "Cannot extend to the same type");
25071 unsigned ToSz = VT.getVectorElementType().getSizeInBits();
25072 unsigned FromSz = LdVT.getVectorElementType().getSizeInBits();
25073 // From, To sizes and ElemCount must be pow of two
25074 assert (isPowerOf2_32(NumElems * FromSz * ToSz) &&
25075 "Unexpected size for extending masked load");
25077 unsigned SizeRatio = ToSz / FromSz;
25078 assert(SizeRatio * NumElems * FromSz == VT.getSizeInBits());
25080 // Create a type on which we perform the shuffle
25081 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
25082 LdVT.getScalarType(), NumElems*SizeRatio);
25083 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
25085 // Convert Src0 value
25086 SDValue WideSrc0 = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mld->getSrc0());
25087 if (Mld->getSrc0().getOpcode() != ISD::UNDEF) {
25088 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
25089 for (unsigned i = 0; i != NumElems; ++i)
25090 ShuffleVec[i] = i * SizeRatio;
25092 // Can't shuffle using an illegal type.
25093 assert (DAG.getTargetLoweringInfo().isTypeLegal(WideVecVT)
25094 && "WideVecVT should be legal");
25095 WideSrc0 = DAG.getVectorShuffle(WideVecVT, dl, WideSrc0,
25096 DAG.getUNDEF(WideVecVT), &ShuffleVec[0]);
25098 // Prepare the new mask
25100 SDValue Mask = Mld->getMask();
25101 if (Mask.getValueType() == VT) {
25102 // Mask and original value have the same type
25103 NewMask = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mask);
25104 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
25105 for (unsigned i = 0; i != NumElems; ++i)
25106 ShuffleVec[i] = i * SizeRatio;
25107 for (unsigned i = NumElems; i != NumElems*SizeRatio; ++i)
25108 ShuffleVec[i] = NumElems*SizeRatio;
25109 NewMask = DAG.getVectorShuffle(WideVecVT, dl, NewMask,
25110 DAG.getConstant(0, WideVecVT),
25111 &ShuffleVec[0]);
25112 }
25113 else {
25114 assert(Mask.getValueType().getVectorElementType() == MVT::i1);
25115 unsigned WidenNumElts = NumElems*SizeRatio;
25116 unsigned MaskNumElts = VT.getVectorNumElements();
25117 EVT NewMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
25118 WidenNumElts);
25120 unsigned NumConcat = WidenNumElts / MaskNumElts;
25121 SmallVector<SDValue, 16> Ops(NumConcat);
25122 SDValue ZeroVal = DAG.getConstant(0, Mask.getValueType());
25123 Ops[0] = Mask;
25124 for (unsigned i = 1; i != NumConcat; ++i)
25125 Ops[i] = ZeroVal;
25127 NewMask = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewMaskVT, Ops);
25128 }
25130 SDValue WideLd = DAG.getMaskedLoad(WideVecVT, dl, Mld->getChain(),
25131 Mld->getBasePtr(), NewMask, WideSrc0,
25132 Mld->getMemoryVT(), Mld->getMemOperand(),
25133 ISD::NON_EXTLOAD);
25134 SDValue NewVec = DAG.getNode(X86ISD::VSEXT, dl, VT, WideLd);
25135 return DCI.CombineTo(N, NewVec, WideLd.getValue(1), true);
25136 }
25138 /// PerformMSTORECombine - Resolve truncating stores
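/// The value being stored is bitcast to a vector of the narrow memory element
/// type and shuffled so the truncated elements land in the low lanes; the mask
/// is widened to match, so the masked store needs no separate truncate.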
25139 static SDValue PerformMSTORECombine(SDNode *N, SelectionDAG &DAG,
25140 const X86Subtarget *Subtarget) {
25141 MaskedStoreSDNode *Mst = cast<MaskedStoreSDNode>(N);
25142 if (!Mst->isTruncatingStore())
25143 return SDValue();
25145 EVT VT = Mst->getValue().getValueType();
25146 unsigned NumElems = VT.getVectorNumElements();
25147 EVT StVT = Mst->getMemoryVT();
25148 SDLoc dl(Mst);
25150 assert(StVT != VT && "Cannot truncate to the same type");
25151 unsigned FromSz = VT.getVectorElementType().getSizeInBits();
25152 unsigned ToSz = StVT.getVectorElementType().getSizeInBits();
25154 // From, To sizes and ElemCount must be pow of two
25155 assert (isPowerOf2_32(NumElems * FromSz * ToSz) &&
25156 "Unexpected size for truncating masked store");
25157 // We are going to use the original vector elt for storing.
25158 // Accumulated smaller vector elements must be a multiple of the store size.
25159 assert (((NumElems * FromSz) % ToSz) == 0 &&
25160 "Unexpected ratio for truncating masked store");
25162 unsigned SizeRatio = FromSz / ToSz;
25163 assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits());
25165 // Create a type on which we perform the shuffle
25166 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
25167 StVT.getScalarType(), NumElems*SizeRatio);
25169 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
25171 SDValue WideVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mst->getValue());
25172 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
25173 for (unsigned i = 0; i != NumElems; ++i)
25174 ShuffleVec[i] = i * SizeRatio;
25176 // Can't shuffle using an illegal type.
25177 assert (DAG.getTargetLoweringInfo().isTypeLegal(WideVecVT)
25178 && "WideVecVT should be legal");
25180 SDValue TruncatedVal = DAG.getVectorShuffle(WideVecVT, dl, WideVec,
25181 DAG.getUNDEF(WideVecVT),
25182 &ShuffleVec[0]);
25184 SDValue NewMask;
25185 SDValue Mask = Mst->getMask();
25186 if (Mask.getValueType() == VT) {
25187 // Mask and original value have the same type
25188 NewMask = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mask);
25189 for (unsigned i = 0; i != NumElems; ++i)
25190 ShuffleVec[i] = i * SizeRatio;
25191 for (unsigned i = NumElems; i != NumElems*SizeRatio; ++i)
25192 ShuffleVec[i] = NumElems*SizeRatio;
25193 NewMask = DAG.getVectorShuffle(WideVecVT, dl, NewMask,
25194 DAG.getConstant(0, WideVecVT),
25195 &ShuffleVec[0]);
25196 }
25197 else {
25198 assert(Mask.getValueType().getVectorElementType() == MVT::i1);
25199 unsigned WidenNumElts = NumElems*SizeRatio;
25200 unsigned MaskNumElts = VT.getVectorNumElements();
25201 EVT NewMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
25202 WidenNumElts);
25204 unsigned NumConcat = WidenNumElts / MaskNumElts;
25205 SmallVector<SDValue, 16> Ops(NumConcat);
25206 SDValue ZeroVal = DAG.getConstant(0, Mask.getValueType());
25207 Ops[0] = Mask;
25208 for (unsigned i = 1; i != NumConcat; ++i)
25209 Ops[i] = ZeroVal;
25211 NewMask = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewMaskVT, Ops);
25212 }
25214 return DAG.getMaskedStore(Mst->getChain(), dl, TruncatedVal, Mst->getBasePtr(),
25215 NewMask, StVT, Mst->getMemOperand(), false);
25216 }
25217 /// PerformSTORECombine - Do target-specific dag combines on STORE nodes.
25218 static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
25219 const X86Subtarget *Subtarget) {
25220 StoreSDNode *St = cast<StoreSDNode>(N);
25221 EVT VT = St->getValue().getValueType();
25222 EVT StVT = St->getMemoryVT();
25223 SDLoc dl(St);
25224 SDValue StoredVal = St->getOperand(1);
25225 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25227 // If we are saving a concatenation of two XMM registers and 32-byte stores
25228 // are slow, such as on Sandy Bridge, perform two 16-byte stores.
25229 unsigned Alignment = St->getAlignment();
25230 bool IsAligned = Alignment == 0 || Alignment >= VT.getSizeInBits()/8;
25231 if (VT.is256BitVector() && Subtarget->isUnalignedMem32Slow() &&
25232 StVT == VT && !IsAligned) {
25233 unsigned NumElems = VT.getVectorNumElements();
25237 SDValue Value0 = Extract128BitVector(StoredVal, 0, DAG, dl);
25238 SDValue Value1 = Extract128BitVector(StoredVal, NumElems/2, DAG, dl);
25240 SDValue Stride = DAG.getConstant(16, TLI.getPointerTy());
25241 SDValue Ptr0 = St->getBasePtr();
25242 SDValue Ptr1 = DAG.getNode(ISD::ADD, dl, Ptr0.getValueType(), Ptr0, Stride);
25244 SDValue Ch0 = DAG.getStore(St->getChain(), dl, Value0, Ptr0,
25245 St->getPointerInfo(), St->isVolatile(),
25246 St->isNonTemporal(), Alignment);
25247 SDValue Ch1 = DAG.getStore(St->getChain(), dl, Value1, Ptr1,
25248 St->getPointerInfo(), St->isVolatile(),
25249 St->isNonTemporal(),
25250 std::min(16U, Alignment));
25251 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ch0, Ch1);
25252 }
25254 // Optimize trunc store (of multiple scalars) to shuffle and store.
25255 // First, pack all of the elements in one place. Next, store to memory
25256 // in fewer chunks.
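// For example, a <4 x i32> -> <4 x i16> truncating store becomes a shuffle of
// the value bitcast to <8 x i16> that packs the four truncated elements into
// the low 64 bits, typically followed by a single 64-bit (i64 or f64) store.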
25257 if (St->isTruncatingStore() && VT.isVector()) {
25258 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25259 unsigned NumElems = VT.getVectorNumElements();
25260 assert(StVT != VT && "Cannot truncate to the same type");
25261 unsigned FromSz = VT.getVectorElementType().getSizeInBits();
25262 unsigned ToSz = StVT.getVectorElementType().getSizeInBits();
25264 // From, To sizes and ElemCount must be pow of two
25265 if (!isPowerOf2_32(NumElems * FromSz * ToSz)) return SDValue();
25266 // We are going to use the original vector elt for storing.
25267 // Accumulated smaller vector elements must be a multiple of the store size.
25268 if (0 != (NumElems * FromSz) % ToSz) return SDValue();
25270 unsigned SizeRatio = FromSz / ToSz;
25272 assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits());
25274 // Create a type on which we perform the shuffle
25275 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
25276 StVT.getScalarType(), NumElems*SizeRatio);
25278 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
25280 SDValue WideVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, St->getValue());
25281 SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1);
25282 for (unsigned i = 0; i != NumElems; ++i)
25283 ShuffleVec[i] = i * SizeRatio;
25285 // Can't shuffle using an illegal type.
25286 if (!TLI.isTypeLegal(WideVecVT))
25287 return SDValue();
25289 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, WideVec,
25290 DAG.getUNDEF(WideVecVT),
25291 &ShuffleVec[0]);
25292 // At this point all of the data is stored at the bottom of the
25293 // register. We now need to save it to mem.
25295 // Find the largest store unit
25296 MVT StoreType = MVT::i8;
25297 for (MVT Tp : MVT::integer_valuetypes()) {
25298 if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToSz)
25299 StoreType = Tp;
25300 }
25302 // On 32bit systems, we can't save 64bit integers. Try bitcasting to F64.
25303 if (TLI.isTypeLegal(MVT::f64) && StoreType.getSizeInBits() < 64 &&
25304 (64 <= NumElems * ToSz))
25305 StoreType = MVT::f64;
25307 // Bitcast the original vector into a vector of store-size units
25308 EVT StoreVecVT = EVT::getVectorVT(*DAG.getContext(),
25309 StoreType, VT.getSizeInBits()/StoreType.getSizeInBits());
25310 assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits());
25311 SDValue ShuffWide = DAG.getNode(ISD::BITCAST, dl, StoreVecVT, Shuff);
25312 SmallVector<SDValue, 8> Chains;
25313 SDValue Increment = DAG.getConstant(StoreType.getSizeInBits()/8,
25314 TLI.getPointerTy());
25315 SDValue Ptr = St->getBasePtr();
25317 // Perform one or more big stores into memory.
25318 for (unsigned i=0, e=(ToSz*NumElems)/StoreType.getSizeInBits(); i!=e; ++i) {
25319 SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
25320 StoreType, ShuffWide,
25321 DAG.getIntPtrConstant(i));
25322 SDValue Ch = DAG.getStore(St->getChain(), dl, SubVec, Ptr,
25323 St->getPointerInfo(), St->isVolatile(),
25324 St->isNonTemporal(), St->getAlignment());
25325 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
25326 Chains.push_back(Ch);
25327 }
25329 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
25330 }
25332 // Turn load->store of MMX types into GPR load/stores. This avoids clobbering
25333 // the FP state in cases where an emms may be missing.
25334 // A preferable solution to the general problem is to figure out the right
25335 // places to insert EMMS. This qualifies as a quick hack.
25337 // Similarly, turn load->store of i64 into double load/stores in 32-bit mode.
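// Two lowerings are used below: on 64-bit targets (or when SSE2 f64 is
// usable) the value is moved with a single 64-bit load/store pair; otherwise
// it is split into two 32-bit load/store pairs.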
25338 if (VT.getSizeInBits() != 64)
25339 return SDValue();
25341 const Function *F = DAG.getMachineFunction().getFunction();
25342 bool NoImplicitFloatOps = F->hasFnAttribute(Attribute::NoImplicitFloat);
25343 bool F64IsLegal = !DAG.getTarget().Options.UseSoftFloat && !NoImplicitFloatOps
25344 && Subtarget->hasSSE2();
25345 if ((VT.isVector() ||
25346 (VT == MVT::i64 && F64IsLegal && !Subtarget->is64Bit())) &&
25347 isa<LoadSDNode>(St->getValue()) &&
25348 !cast<LoadSDNode>(St->getValue())->isVolatile() &&
25349 St->getChain().hasOneUse() && !St->isVolatile()) {
25350 SDNode* LdVal = St->getValue().getNode();
25351 LoadSDNode *Ld = nullptr;
25352 int TokenFactorIndex = -1;
25353 SmallVector<SDValue, 8> Ops;
25354 SDNode* ChainVal = St->getChain().getNode();
25355 // Must be a store of a load. We currently handle two cases: the load
25356 // is a direct child, and it's under an intervening TokenFactor. It is
25357 // possible to dig deeper under nested TokenFactors.
25358 if (ChainVal == LdVal)
25359 Ld = cast<LoadSDNode>(St->getChain());
25360 else if (St->getValue().hasOneUse() &&
25361 ChainVal->getOpcode() == ISD::TokenFactor) {
25362 for (unsigned i = 0, e = ChainVal->getNumOperands(); i != e; ++i) {
25363 if (ChainVal->getOperand(i).getNode() == LdVal) {
25364 TokenFactorIndex = i;
25365 Ld = cast<LoadSDNode>(St->getValue());
25366 } else
25367 Ops.push_back(ChainVal->getOperand(i));
25368 }
25369 }
25371 if (!Ld || !ISD::isNormalLoad(Ld))
25372 return SDValue();
25374 // If this is not the MMX case, i.e. we are just turning i64 load/store
25375 // into f64 load/store, avoid the transformation if there are multiple
25376 // uses of the loaded value.
25377 if (!VT.isVector() && !Ld->hasNUsesOfValue(1, 0))
25378 return SDValue();
25380 SDLoc LdDL(Ld);
25381 SDLoc StDL(N);
25382 // If we are a 64-bit capable x86, lower to a single movq load/store pair.
25383 // Otherwise, if it's legal to use f64 SSE instructions, use f64 load/store
25385 if (Subtarget->is64Bit() || F64IsLegal) {
25386 EVT LdVT = Subtarget->is64Bit() ? MVT::i64 : MVT::f64;
25387 SDValue NewLd = DAG.getLoad(LdVT, LdDL, Ld->getChain(), Ld->getBasePtr(),
25388 Ld->getPointerInfo(), Ld->isVolatile(),
25389 Ld->isNonTemporal(), Ld->isInvariant(),
25390 Ld->getAlignment());
25391 SDValue NewChain = NewLd.getValue(1);
25392 if (TokenFactorIndex != -1) {
25393 Ops.push_back(NewChain);
25394 NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, Ops);
25395 }
25396 return DAG.getStore(NewChain, StDL, NewLd, St->getBasePtr(),
25397 St->getPointerInfo(),
25398 St->isVolatile(), St->isNonTemporal(),
25399 St->getAlignment());
25400 }
25402 // Otherwise, lower to two pairs of 32-bit loads / stores.
25403 SDValue LoAddr = Ld->getBasePtr();
25404 SDValue HiAddr = DAG.getNode(ISD::ADD, LdDL, MVT::i32, LoAddr,
25405 DAG.getConstant(4, MVT::i32));
25407 SDValue LoLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), LoAddr,
25408 Ld->getPointerInfo(),
25409 Ld->isVolatile(), Ld->isNonTemporal(),
25410 Ld->isInvariant(), Ld->getAlignment());
25411 SDValue HiLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), HiAddr,
25412 Ld->getPointerInfo().getWithOffset(4),
25413 Ld->isVolatile(), Ld->isNonTemporal(),
25414 Ld->isInvariant(),
25415 MinAlign(Ld->getAlignment(), 4));
25417 SDValue NewChain = LoLd.getValue(1);
25418 if (TokenFactorIndex != -1) {
25419 Ops.push_back(LoLd);
25420 Ops.push_back(HiLd);
25421 NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, Ops);
25422 }
25424 LoAddr = St->getBasePtr();
25425 HiAddr = DAG.getNode(ISD::ADD, StDL, MVT::i32, LoAddr,
25426 DAG.getConstant(4, MVT::i32));
25428 SDValue LoSt = DAG.getStore(NewChain, StDL, LoLd, LoAddr,
25429 St->getPointerInfo(),
25430 St->isVolatile(), St->isNonTemporal(),
25431 St->getAlignment());
25432 SDValue HiSt = DAG.getStore(NewChain, StDL, HiLd, HiAddr,
25433 St->getPointerInfo().getWithOffset(4),
25434 St->isVolatile(),
25435 St->isNonTemporal(),
25436 MinAlign(St->getAlignment(), 4));
25437 return DAG.getNode(ISD::TokenFactor, StDL, MVT::Other, LoSt, HiSt);
25438 }
25440 return SDValue();
25441 }
25442 /// Return 'true' if this vector operation is "horizontal"
25443 /// and return the operands for the horizontal operation in LHS and RHS. A
25444 /// horizontal operation performs the binary operation on successive elements
25445 /// of its first operand, then on successive elements of its second operand,
25446 /// returning the resulting values in a vector. For example, if
25447 /// A = < float a0, float a1, float a2, float a3 >
25449 /// B = < float b0, float b1, float b2, float b3 >
25450 /// then the result of doing a horizontal operation on A and B is
25451 /// A horizontal-op B = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >.
25452 /// In short, LHS and RHS are inspected to see if LHS op RHS is of the form
25453 /// A horizontal-op B, for some already available A and B, and if so then LHS is
25454 /// set to A, RHS to B, and the routine returns 'true'.
25455 /// Note that the binary operation should have the property that if one of the
25456 /// operands is UNDEF then the result is UNDEF.
25457 static bool isHorizontalBinOp(SDValue &LHS, SDValue &RHS, bool IsCommutative) {
25458 // Look for the following pattern: if
25459 // A = < float a0, float a1, float a2, float a3 >
25460 // B = < float b0, float b1, float b2, float b3 >
25462 // LHS = VECTOR_SHUFFLE A, B, <0, 2, 4, 6>
25463 // RHS = VECTOR_SHUFFLE A, B, <1, 3, 5, 7>
25464 // then LHS op RHS = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >
25465 // which is A horizontal-op B.
25467 // At least one of the operands should be a vector shuffle.
25468 if (LHS.getOpcode() != ISD::VECTOR_SHUFFLE &&
25469 RHS.getOpcode() != ISD::VECTOR_SHUFFLE)
25470 return false;
25472 MVT VT = LHS.getSimpleValueType();
25474 assert((VT.is128BitVector() || VT.is256BitVector()) &&
25475 "Unsupported vector type for horizontal add/sub");
25477 // Handle 128 and 256-bit vector lengths. AVX defines horizontal add/sub to
25478 // operate independently on 128-bit lanes.
25479 unsigned NumElts = VT.getVectorNumElements();
25480 unsigned NumLanes = VT.getSizeInBits()/128;
25481 unsigned NumLaneElts = NumElts / NumLanes;
25482 assert((NumLaneElts % 2 == 0) &&
25483 "Vector type should have an even number of elements in each lane");
25484 unsigned HalfLaneElts = NumLaneElts/2;
25486 // View LHS in the form
25487 // LHS = VECTOR_SHUFFLE A, B, LMask
25488 // If LHS is not a shuffle then pretend it is the shuffle
25489 // LHS = VECTOR_SHUFFLE LHS, undef, <0, 1, ..., N-1>
25490 // NOTE: in what follows a default initialized SDValue represents an UNDEF of
25491 // type VT.
25492 SDValue A, B;
25493 SmallVector<int, 16> LMask(NumElts);
25494 if (LHS.getOpcode() == ISD::VECTOR_SHUFFLE) {
25495 if (LHS.getOperand(0).getOpcode() != ISD::UNDEF)
25496 A = LHS.getOperand(0);
25497 if (LHS.getOperand(1).getOpcode() != ISD::UNDEF)
25498 B = LHS.getOperand(1);
25499 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(LHS.getNode())->getMask();
25500 std::copy(Mask.begin(), Mask.end(), LMask.begin());
25501 } else {
25502 if (LHS.getOpcode() != ISD::UNDEF)
25503 A = LHS;
25504 for (unsigned i = 0; i != NumElts; ++i)
25505 LMask[i] = i;
25506 }
25508 // Likewise, view RHS in the form
25509 // RHS = VECTOR_SHUFFLE C, D, RMask
25510 SDValue C, D;
25511 SmallVector<int, 16> RMask(NumElts);
25512 if (RHS.getOpcode() == ISD::VECTOR_SHUFFLE) {
25513 if (RHS.getOperand(0).getOpcode() != ISD::UNDEF)
25514 C = RHS.getOperand(0);
25515 if (RHS.getOperand(1).getOpcode() != ISD::UNDEF)
25516 D = RHS.getOperand(1);
25517 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(RHS.getNode())->getMask();
25518 std::copy(Mask.begin(), Mask.end(), RMask.begin());
25519 } else {
25520 if (RHS.getOpcode() != ISD::UNDEF)
25521 C = RHS;
25522 for (unsigned i = 0; i != NumElts; ++i)
25523 RMask[i] = i;
25524 }
25526 // Check that the shuffles are both shuffling the same vectors.
25527 if (!(A == C && B == D) && !(A == D && B == C))
25528 return false;
25530 // If everything is UNDEF then bail out: it would be better to fold to UNDEF.
25531 if (!A.getNode() && !B.getNode())
25532 return false;
25534 // If A and B occur in reverse order in RHS, then "swap" them (which means
25535 // rewriting the mask).
25536 if (A != C)
25537 CommuteVectorShuffleMask(RMask, NumElts);
25539 // At this point LHS and RHS are equivalent to
25540 // LHS = VECTOR_SHUFFLE A, B, LMask
25541 // RHS = VECTOR_SHUFFLE A, B, RMask
25542 // Check that the masks correspond to performing a horizontal operation.
25543 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
25544 for (unsigned i = 0; i != NumLaneElts; ++i) {
25545 int LIdx = LMask[i+l], RIdx = RMask[i+l];
25547 // Ignore any UNDEF components.
25548 if (LIdx < 0 || RIdx < 0 ||
25549 (!A.getNode() && (LIdx < (int)NumElts || RIdx < (int)NumElts)) ||
25550 (!B.getNode() && (LIdx >= (int)NumElts || RIdx >= (int)NumElts)))
25551 continue;
25553 // Check that successive elements are being operated on. If not, this is
25554 // not a horizontal operation.
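// Within each 128-bit lane the first HalfLaneElts results are drawn from the
// first source (A) and the rest from the second (B), so result element i must
// read the pair of input elements at Index and Index+1 computed below (or
// swapped, when the operation is commutative).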
25555 unsigned Src = (i/HalfLaneElts); // each lane is split between srcs
25556 int Index = 2*(i%HalfLaneElts) + NumElts*Src + l;
25557 if (!(LIdx == Index && RIdx == Index + 1) &&
25558 !(IsCommutative && LIdx == Index + 1 && RIdx == Index))
25559 return false;
25560 }
25561 }
25563 LHS = A.getNode() ? A : B; // If A is 'UNDEF', use B for it.
25564 RHS = B.getNode() ? B : A; // If B is 'UNDEF', use A for it.
25566 return true;
25567 }
25568 /// Do target-specific dag combines on floating point adds.
25569 static SDValue PerformFADDCombine(SDNode *N, SelectionDAG &DAG,
25570 const X86Subtarget *Subtarget) {
25571 EVT VT = N->getValueType(0);
25572 SDValue LHS = N->getOperand(0);
25573 SDValue RHS = N->getOperand(1);
25575 // Try to synthesize horizontal adds from adds of shuffles.
25576 if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
25577 (Subtarget->hasFp256() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
25578 isHorizontalBinOp(LHS, RHS, true))
25579 return DAG.getNode(X86ISD::FHADD, SDLoc(N), VT, LHS, RHS);
25581 return SDValue();
25582 }
25583 /// Do target-specific dag combines on floating point subs.
25584 static SDValue PerformFSUBCombine(SDNode *N, SelectionDAG &DAG,
25585 const X86Subtarget *Subtarget) {
25586 EVT VT = N->getValueType(0);
25587 SDValue LHS = N->getOperand(0);
25588 SDValue RHS = N->getOperand(1);
25590 // Try to synthesize horizontal subs from subs of shuffles.
25591 if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
25592 (Subtarget->hasFp256() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
25593 isHorizontalBinOp(LHS, RHS, false))
25594 return DAG.getNode(X86ISD::FHSUB, SDLoc(N), VT, LHS, RHS);
25596 return SDValue();
25597 }
25598 /// Do target-specific dag combines on X86ISD::FOR and X86ISD::FXOR nodes.
25599 static SDValue PerformFORCombine(SDNode *N, SelectionDAG &DAG) {
25600 assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR);
25602 // F[X]OR(0.0, x) -> x
25603 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
25604 if (C->getValueAPF().isPosZero())
25605 return N->getOperand(1);
25607 // F[X]OR(x, 0.0) -> x
25608 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
25609 if (C->getValueAPF().isPosZero())
25610 return N->getOperand(0);
25611 return SDValue();
25612 }
25614 /// Do target-specific dag combines on X86ISD::FMIN and X86ISD::FMAX nodes.
25615 static SDValue PerformFMinFMaxCombine(SDNode *N, SelectionDAG &DAG) {
25616 assert(N->getOpcode() == X86ISD::FMIN || N->getOpcode() == X86ISD::FMAX);
25618 // Only perform optimizations if UnsafeMath is used.
25619 if (!DAG.getTarget().Options.UnsafeFPMath)
25620 return SDValue();
25622 // If we run in unsafe-math mode, then convert the FMAX and FMIN nodes
25623 // into FMINC and FMAXC, which are Commutative operations.
25624 unsigned NewOp = 0;
25625 switch (N->getOpcode()) {
25626 default: llvm_unreachable("unknown opcode");
25627 case X86ISD::FMIN: NewOp = X86ISD::FMINC; break;
25628 case X86ISD::FMAX: NewOp = X86ISD::FMAXC; break;
25629 }
25631 return DAG.getNode(NewOp, SDLoc(N), N->getValueType(0),
25632 N->getOperand(0), N->getOperand(1));
25633 }
25635 /// Do target-specific dag combines on X86ISD::FAND nodes.
25636 static SDValue PerformFANDCombine(SDNode *N, SelectionDAG &DAG) {
25637 // FAND(0.0, x) -> 0.0
25638 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
25639 if (C->getValueAPF().isPosZero())
25640 return N->getOperand(0);
25642 // FAND(x, 0.0) -> 0.0
25643 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
25644 if (C->getValueAPF().isPosZero())
25645 return N->getOperand(1);
25647 return SDValue();
25648 }
25650 /// Do target-specific dag combines on X86ISD::FANDN nodes
25651 static SDValue PerformFANDNCombine(SDNode *N, SelectionDAG &DAG) {
25652 // FANDN(0.0, x) -> x
25653 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
25654 if (C->getValueAPF().isPosZero())
25655 return N->getOperand(1);
25657 // FANDN(x, 0.0) -> 0.0
25658 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
25659 if (C->getValueAPF().isPosZero())
25660 return N->getOperand(1);
25662 return SDValue();
25663 }
25665 static SDValue PerformBTCombine(SDNode *N,
25666 SelectionDAG &DAG,
25667 TargetLowering::DAGCombinerInfo &DCI) {
25668 // BT ignores high bits in the bit index operand.
25669 SDValue Op1 = N->getOperand(1);
25670 if (Op1.hasOneUse()) {
25671 unsigned BitWidth = Op1.getValueSizeInBits();
25672 APInt DemandedMask = APInt::getLowBitsSet(BitWidth, Log2_32(BitWidth));
25673 APInt KnownZero, KnownOne;
25674 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
25675 !DCI.isBeforeLegalizeOps());
25676 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25677 if (TLO.ShrinkDemandedConstant(Op1, DemandedMask) ||
25678 TLI.SimplifyDemandedBits(Op1, DemandedMask, KnownZero, KnownOne, TLO))
25679 DCI.CommitTargetLoweringOpt(TLO);
25680 }
25682 return SDValue();
25683 }
25684 static SDValue PerformVZEXT_MOVLCombine(SDNode *N, SelectionDAG &DAG) {
25685 SDValue Op = N->getOperand(0);
25686 if (Op.getOpcode() == ISD::BITCAST)
25687 Op = Op.getOperand(0);
25688 EVT VT = N->getValueType(0), OpVT = Op.getValueType();
25689 if (Op.getOpcode() == X86ISD::VZEXT_LOAD &&
25690 VT.getVectorElementType().getSizeInBits() ==
25691 OpVT.getVectorElementType().getSizeInBits()) {
25692 return DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op);
25693 }
25695 return SDValue();
25696 }
25697 static SDValue PerformSIGN_EXTEND_INREGCombine(SDNode *N, SelectionDAG &DAG,
25698 const X86Subtarget *Subtarget) {
25699 EVT VT = N->getValueType(0);
25700 if (!VT.isVector())
25701 return SDValue();
25703 SDValue N0 = N->getOperand(0);
25704 SDValue N1 = N->getOperand(1);
25705 EVT ExtraVT = cast<VTSDNode>(N1)->getVT();
25707 SDLoc dl(N);
25708 // The SIGN_EXTEND_INREG to v4i64 is an expensive operation on both
25709 // SSE and AVX2 since there is no sign-extended shift right
25710 // operation on a vector with 64-bit elements.
25711 //(sext_in_reg (v4i64 anyext (v4i32 x )), ExtraVT) ->
25712 // (v4i64 sext (v4i32 sext_in_reg (v4i32 x , ExtraVT)))
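// i.e. the in-register extend is done with 32-bit shifts and the result is
// then widened, instead of emulating a 64-bit arithmetic shift right.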
25713 if (VT == MVT::v4i64 && (N0.getOpcode() == ISD::ANY_EXTEND ||
25714 N0.getOpcode() == ISD::SIGN_EXTEND)) {
25715 SDValue N00 = N0.getOperand(0);
25717 // EXTLOAD has a better solution on AVX2,
25718 // it may be replaced with X86ISD::VSEXT node.
25719 if (N00.getOpcode() == ISD::LOAD && Subtarget->hasInt256())
25720 if (!ISD::isNormalLoad(N00.getNode()))
25721 return SDValue();
25723 if (N00.getValueType() == MVT::v4i32 && ExtraVT.getSizeInBits() < 128) {
25724 SDValue Tmp = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32,
25725 N00, N1);
25726 return DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i64, Tmp);
25727 }
25728 }
25730 return SDValue();
25731 }
25732 static SDValue PerformSExtCombine(SDNode *N, SelectionDAG &DAG,
25733 TargetLowering::DAGCombinerInfo &DCI,
25734 const X86Subtarget *Subtarget) {
25735 SDValue N0 = N->getOperand(0);
25736 EVT VT = N->getValueType(0);
25738 // (i8,i32 sext (sdivrem (i8 x, i8 y)) ->
25739 // (i8,i32 (sdivrem_sext_hreg (i8 x, i8 y)
25740 // This exposes the sext to the sdivrem lowering, so that it directly extends
25741 // from AH (which we otherwise need to do contortions to access).
25742 if (N0.getOpcode() == ISD::SDIVREM && N0.getResNo() == 1 &&
25743 N0.getValueType() == MVT::i8 && VT == MVT::i32) {
25744 SDLoc dl(N);
25745 SDVTList NodeTys = DAG.getVTList(MVT::i8, VT);
25746 SDValue R = DAG.getNode(X86ISD::SDIVREM8_SEXT_HREG, dl, NodeTys,
25747 N0.getOperand(0), N0.getOperand(1));
25748 DAG.ReplaceAllUsesOfValueWith(N0.getValue(0), R.getValue(0));
25749 return R.getValue(1);
25750 }
25752 if (!DCI.isBeforeLegalizeOps())
25753 return SDValue();
25755 if (!Subtarget->hasFp256())
25756 return SDValue();
25758 if (VT.isVector() && VT.getSizeInBits() == 256) {
25759 SDValue R = WidenMaskArithmetic(N, DAG, DCI, Subtarget);
25760 if (R.getNode())
25761 return R;
25762 }
25764 return SDValue();
25765 }
25767 static SDValue PerformFMACombine(SDNode *N, SelectionDAG &DAG,
25768 const X86Subtarget* Subtarget) {
25769 SDLoc dl(N);
25770 EVT VT = N->getValueType(0);
25772 // Let legalize expand this if it isn't a legal type yet.
25773 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
25774 return SDValue();
25776 EVT ScalarVT = VT.getScalarType();
25777 if ((ScalarVT != MVT::f32 && ScalarVT != MVT::f64) ||
25778 (!Subtarget->hasFMA() && !Subtarget->hasFMA4()))
25779 return SDValue();
25781 SDValue A = N->getOperand(0);
25782 SDValue B = N->getOperand(1);
25783 SDValue C = N->getOperand(2);
25785 bool NegA = (A.getOpcode() == ISD::FNEG);
25786 bool NegB = (B.getOpcode() == ISD::FNEG);
25787 bool NegC = (C.getOpcode() == ISD::FNEG);
25789 // Negative multiplication when NegA xor NegB
25790 bool NegMul = (NegA != NegB);
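// The fneg wrappers are peeled off below; NegMul selects the FNMADD/FNMSUB
// forms, and a negated addend selects the FMSUB/FNMSUB forms.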
25791 if (NegA)
25792 A = A.getOperand(0);
25793 if (NegB)
25794 B = B.getOperand(0);
25795 if (NegC)
25796 C = C.getOperand(0);
25798 unsigned Opcode;
25799 if (!NegMul)
25800 Opcode = (!NegC) ? X86ISD::FMADD : X86ISD::FMSUB;
25801 else
25802 Opcode = (!NegC) ? X86ISD::FNMADD : X86ISD::FNMSUB;
25804 return DAG.getNode(Opcode, dl, VT, A, B, C);
25805 }
25807 static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG,
25808 TargetLowering::DAGCombinerInfo &DCI,
25809 const X86Subtarget *Subtarget) {
25810 // (i32 zext (and (i8 x86isd::setcc_carry), 1)) ->
25811 // (and (i32 x86isd::setcc_carry), 1)
25812 // This eliminates the zext. This transformation is necessary because
25813 // ISD::SETCC is always legalized to i8.
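// SETCC_CARRY materializes the carry as all-ones or zero in whatever type it
// is given, so it can simply be re-emitted at the wider type and masked with
// 1, making the explicit zero extension unnecessary.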
25814 SDLoc dl(N);
25815 SDValue N0 = N->getOperand(0);
25816 EVT VT = N->getValueType(0);
25818 if (N0.getOpcode() == ISD::AND &&
25819 N0.hasOneUse() &&
25820 N0.getOperand(0).hasOneUse()) {
25821 SDValue N00 = N0.getOperand(0);
25822 if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
25823 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
25824 if (!C || C->getZExtValue() != 1)
25825 return SDValue();
25826 return DAG.getNode(ISD::AND, dl, VT,
25827 DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
25828 N00.getOperand(0), N00.getOperand(1)),
25829 DAG.getConstant(1, VT));
25830 }
25831 }
25833 if (N0.getOpcode() == ISD::TRUNCATE &&
25834 N0.hasOneUse() &&
25835 N0.getOperand(0).hasOneUse()) {
25836 SDValue N00 = N0.getOperand(0);
25837 if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
25838 return DAG.getNode(ISD::AND, dl, VT,
25839 DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
25840 N00.getOperand(0), N00.getOperand(1)),
25841 DAG.getConstant(1, VT));
25842 }
25843 }
25844 if (VT.is256BitVector()) {
25845 SDValue R = WidenMaskArithmetic(N, DAG, DCI, Subtarget);
25846 if (R.getNode())
25847 return R;
25848 }
25850 // (i8,i32 zext (udivrem (i8 x, i8 y)) ->
25851 // (i8,i32 (udivrem_zext_hreg (i8 x, i8 y)
25852 // This exposes the zext to the udivrem lowering, so that it directly extends
25853 // from AH (which we otherwise need to do contortions to access).
25854 if (N0.getOpcode() == ISD::UDIVREM &&
25855 N0.getResNo() == 1 && N0.getValueType() == MVT::i8 &&
25856 (VT == MVT::i32 || VT == MVT::i64)) {
25857 SDVTList NodeTys = DAG.getVTList(MVT::i8, VT);
25858 SDValue R = DAG.getNode(X86ISD::UDIVREM8_ZEXT_HREG, dl, NodeTys,
25859 N0.getOperand(0), N0.getOperand(1));
25860 DAG.ReplaceAllUsesOfValueWith(N0.getValue(0), R.getValue(0));
25861 return R.getValue(1);
25862 }
25864 return SDValue();
25865 }
25867 // Optimize x == -y --> x+y == 0
25868 // x != -y --> x+y != 0
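// The SUB producing -y must have no other users, so replacing it with an ADD
// feeding a compare against zero never duplicates work.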
25869 static SDValue PerformISDSETCCCombine(SDNode *N, SelectionDAG &DAG,
25870 const X86Subtarget* Subtarget) {
25871 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
25872 SDValue LHS = N->getOperand(0);
25873 SDValue RHS = N->getOperand(1);
25874 EVT VT = N->getValueType(0);
25876 SDLoc DL(N);
25877 if ((CC == ISD::SETNE || CC == ISD::SETEQ) && LHS.getOpcode() == ISD::SUB)
25878 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(LHS.getOperand(0)))
25879 if (C->getAPIntValue() == 0 && LHS.hasOneUse()) {
25880 SDValue addV = DAG.getNode(ISD::ADD, SDLoc(N),
25881 LHS.getValueType(), RHS, LHS.getOperand(1));
25882 return DAG.getSetCC(SDLoc(N), N->getValueType(0),
25883 addV, DAG.getConstant(0, addV.getValueType()), CC);
25884 }
25885 if ((CC == ISD::SETNE || CC == ISD::SETEQ) && RHS.getOpcode() == ISD::SUB)
25886 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS.getOperand(0)))
25887 if (C->getAPIntValue() == 0 && RHS.hasOneUse()) {
25888 SDValue addV = DAG.getNode(ISD::ADD, SDLoc(N),
25889 RHS.getValueType(), LHS, RHS.getOperand(1));
25890 return DAG.getSetCC(SDLoc(N), N->getValueType(0),
25891 addV, DAG.getConstant(0, addV.getValueType()), CC);
25892 }
25894 if (VT.getScalarType() == MVT::i1) {
25895 bool IsSEXT0 = (LHS.getOpcode() == ISD::SIGN_EXTEND) &&
25896 (LHS.getOperand(0).getValueType().getScalarType() == MVT::i1);
25897 bool IsVZero0 = ISD::isBuildVectorAllZeros(LHS.getNode());
25898 if (!IsSEXT0 && !IsVZero0)
25899 return SDValue();
25900 bool IsSEXT1 = (RHS.getOpcode() == ISD::SIGN_EXTEND) &&
25901 (RHS.getOperand(0).getValueType().getScalarType() == MVT::i1);
25902 bool IsVZero1 = ISD::isBuildVectorAllZeros(RHS.getNode());
25904 if (!IsSEXT1 && !IsVZero1)
25905 return SDValue();
25907 if (IsSEXT0 && IsVZero1) {
25908 assert(VT == LHS.getOperand(0).getValueType() && "Unexpected operand type");
25909 if (CC == ISD::SETEQ)
25910 return DAG.getNOT(DL, LHS.getOperand(0), VT);
25911 return LHS.getOperand(0);
25912 }
25913 if (IsSEXT1 && IsVZero0) {
25914 assert(VT == RHS.getOperand(0).getValueType() && "Unexpected operand type");
25915 if (CC == ISD::SETEQ)
25916 return DAG.getNOT(DL, RHS.getOperand(0), VT);
25917 return RHS.getOperand(0);
25918 }
25919 }
25921 return SDValue();
25922 }
25924 static SDValue PerformINSERTPSCombine(SDNode *N, SelectionDAG &DAG,
25925 const X86Subtarget *Subtarget) {
25926 SDLoc dl(N);
25927 MVT VT = N->getOperand(1)->getSimpleValueType(0);
25928 assert((VT == MVT::v4f32 || VT == MVT::v4i32) &&
25929 "X86insertps is only defined for v4x32");
25931 SDValue Ld = N->getOperand(1);
25932 if (MayFoldLoad(Ld)) {
25933 // Extract the countS bits from the immediate so we can get the proper
25934 // address when narrowing the vector load to a specific element.
25935 // When the second source op is a memory address, insertps doesn't use
25936 // countS and just gets an f32 from that address.
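// The 128-bit load feeding insertps is therefore narrowed below to a scalar
// load of just the element selected by countS, which the instruction can then
// fold as its memory operand.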
25937 unsigned DestIndex =
25938 cast<ConstantSDNode>(N->getOperand(2))->getZExtValue() >> 6;
25939 Ld = NarrowVectorLoadToElement(cast<LoadSDNode>(Ld), DestIndex, DAG);
25940 } else
25941 return SDValue();
25943 // Create this as a scalar to vector to match the instruction pattern.
25944 SDValue LoadScalarToVector = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Ld);
25945 // countS bits are ignored when loading from memory on insertps, which
25946 // means we don't need to explicitly set them to 0.
25947 return DAG.getNode(X86ISD::INSERTPS, dl, VT, N->getOperand(0),
25948 LoadScalarToVector, N->getOperand(2));
25949 }
25951 // Helper function of PerformSETCCCombine. It is to materialize "setb reg"
25952 // as "sbb reg,reg", since it can be extended without zext and produces
25953 // an all-ones bit which is more useful than 0/1 in some cases.
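// "sbb reg,reg" computes reg - reg - CF, i.e. 0 or -1 depending only on the
// carry flag, so the setb result can be produced as an all-ones mask and
// narrowed with an AND where a 0/1 value is required.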
25954 static SDValue MaterializeSETB(SDLoc DL, SDValue EFLAGS, SelectionDAG &DAG,
25955 MVT VT) {
25956 if (VT == MVT::i8)
25957 return DAG.getNode(ISD::AND, DL, VT,
25958 DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8,
25959 DAG.getConstant(X86::COND_B, MVT::i8), EFLAGS),
25960 DAG.getConstant(1, VT));
25961 assert (VT == MVT::i1 && "Unexpected type for SETCC node");
25962 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1,
25963 DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8,
25964 DAG.getConstant(X86::COND_B, MVT::i8), EFLAGS));
25965 }
25967 // Optimize RES = X86ISD::SETCC CONDCODE, EFLAG_INPUT
25968 static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG &DAG,
25969 TargetLowering::DAGCombinerInfo &DCI,
25970 const X86Subtarget *Subtarget) {
25971 SDLoc DL(N);
25972 X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(0));
25973 SDValue EFLAGS = N->getOperand(1);
25975 if (CC == X86::COND_A) {
25976 // Try to convert COND_A into COND_B in an attempt to facilitate
25977 // materializing "setb reg".
25979 // Do not flip "e > c", where "c" is a constant, because Cmp instruction
25980 // cannot take an immediate as its first operand.
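// Swapping the SUB operands below inverts the comparison, turning the "above"
// test on the original operands into a "below" test on the swapped ones,
// which MaterializeSETB can then lower to an SBB.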
25982 if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.hasOneUse() &&
25983 EFLAGS.getValueType().isInteger() &&
25984 !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
25985 SDValue NewSub = DAG.getNode(X86ISD::SUB, SDLoc(EFLAGS),
25986 EFLAGS.getNode()->getVTList(),
25987 EFLAGS.getOperand(1), EFLAGS.getOperand(0));
25988 SDValue NewEFLAGS = SDValue(NewSub.getNode(), EFLAGS.getResNo());
25989 return MaterializeSETB(DL, NewEFLAGS, DAG, N->getSimpleValueType(0));
25990 }
25991 }
25993 // Materialize "setb reg" as "sbb reg,reg", since it can be extended without
25994 // a zext and produces an all-ones bit which is more useful than 0/1 in some
25996 if (CC == X86::COND_B)
25997 return MaterializeSETB(DL, EFLAGS, DAG, N->getSimpleValueType(0));
25999 SDValue Flags;
26001 Flags = checkBoolTestSetCCCombine(EFLAGS, CC);
26002 if (Flags.getNode()) {
26003 SDValue Cond = DAG.getConstant(CC, MVT::i8);
26004 return DAG.getNode(X86ISD::SETCC, DL, N->getVTList(), Cond, Flags);
26005 }
26007 return SDValue();
26008 }
26010 // Optimize branch condition evaluation.
26012 static SDValue PerformBrCondCombine(SDNode *N, SelectionDAG &DAG,
26013 TargetLowering::DAGCombinerInfo &DCI,
26014 const X86Subtarget *Subtarget) {
26015 SDLoc DL(N);
26016 SDValue Chain = N->getOperand(0);
26017 SDValue Dest = N->getOperand(1);
26018 SDValue EFLAGS = N->getOperand(3);
26019 X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(2));
26021 SDValue Flags;
26023 Flags = checkBoolTestSetCCCombine(EFLAGS, CC);
26024 if (Flags.getNode()) {
26025 SDValue Cond = DAG.getConstant(CC, MVT::i8);
26026 return DAG.getNode(X86ISD::BRCOND, DL, N->getVTList(), Chain, Dest, Cond,
26027 Flags);
26028 }
26030 return SDValue();
26031 }
26033 static SDValue performVectorCompareAndMaskUnaryOpCombine(SDNode *N,
26034 SelectionDAG &DAG) {
26035 // Take advantage of vector comparisons producing 0 or -1 in each lane to
26036 // optimize away operation when it's from a constant.
26038 // The general transformation is:
26039 // UNARYOP(AND(VECTOR_CMP(x,y), constant)) -->
26040 // AND(VECTOR_CMP(x,y), constant2)
26041 // constant2 = UNARYOP(constant)
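// Each lane of the VECTOR_CMP is all-ones or all-zeros, so the AND result is
// either the constant lane or zero; as long as UNARYOP maps zero to a value
// whose bits are all zero (true for the conversions handled here), applying
// UNARYOP to the constant up front and re-AND'ing with the compare mask gives
// the same result.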
26043 // Early exit if this isn't a vector operation, the operand of the
26044 // unary operation isn't a bitwise AND, or if the sizes of the operations
26045 // aren't the same.
26046 EVT VT = N->getValueType(0);
26047 if (!VT.isVector() || N->getOperand(0)->getOpcode() != ISD::AND ||
26048 N->getOperand(0)->getOperand(0)->getOpcode() != ISD::SETCC ||
26049 VT.getSizeInBits() != N->getOperand(0)->getValueType(0).getSizeInBits())
26050 return SDValue();
26052 // Now check that the other operand of the AND is a constant. We could
26053 // make the transformation for non-constant splats as well, but it's unclear
26054 // that would be a benefit as it would not eliminate any operations, just
26055 // perform one more step in scalar code before moving to the vector unit.
26056 if (BuildVectorSDNode *BV =
26057 dyn_cast<BuildVectorSDNode>(N->getOperand(0)->getOperand(1))) {
26058 // Bail out if the vector isn't a constant.
26059 if (!BV->isConstant())
26060 return SDValue();
26062 // Everything checks out. Build up the new and improved node.
26063 SDLoc DL(N);
26064 EVT IntVT = BV->getValueType(0);
26065 // Create a new constant of the appropriate type for the transformed
26067 SDValue SourceConst = DAG.getNode(N->getOpcode(), DL, VT, SDValue(BV, 0));
26068 // The AND node needs bitcasts to/from an integer vector type around it.
26069 SDValue MaskConst = DAG.getNode(ISD::BITCAST, DL, IntVT, SourceConst);
26070 SDValue NewAnd = DAG.getNode(ISD::AND, DL, IntVT,
26071 N->getOperand(0)->getOperand(0), MaskConst);
26072 SDValue Res = DAG.getNode(ISD::BITCAST, DL, VT, NewAnd);
26074 return Res;
26075 }
26077 return SDValue();
26078 }
26079 static SDValue PerformSINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG,
26080 const X86Subtarget *Subtarget) {
26081 // First try to optimize away the conversion entirely when it's
26082 // conditionally from a constant. Vectors only.
26083 SDValue Res = performVectorCompareAndMaskUnaryOpCombine(N, DAG);
26084 if (Res != SDValue())
26085 return Res;
26087 // Now move on to more general possibilities.
26088 SDValue Op0 = N->getOperand(0);
26089 EVT InVT = Op0->getValueType(0);
26091 // SINT_TO_FP(v4i8) -> SINT_TO_FP(SEXT(v4i8 to v4i32))
26092 if (InVT == MVT::v8i8 || InVT == MVT::v4i8) {
26093 SDLoc dl(N);
26094 MVT DstVT = InVT == MVT::v4i8 ? MVT::v4i32 : MVT::v8i32;
26095 SDValue P = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Op0);
26096 return DAG.getNode(ISD::SINT_TO_FP, dl, N->getValueType(0), P);
26097 }
26099 // Transform (SINT_TO_FP (i64 ...)) into an x87 operation if we have
26100 // a 32-bit target where SSE doesn't support i64->FP operations.
26101 if (Op0.getOpcode() == ISD::LOAD) {
26102 LoadSDNode *Ld = cast<LoadSDNode>(Op0.getNode());
26103 EVT VT = Ld->getValueType(0);
26104 if (!Ld->isVolatile() && !N->getValueType(0).isVector() &&
26105 ISD::isNON_EXTLoad(Op0.getNode()) && Op0.hasOneUse() &&
26106 !Subtarget->is64Bit() && VT == MVT::i64) {
26107 SDValue FILDChain = Subtarget->getTargetLowering()->BuildFILD(
26108 SDValue(N, 0), Ld->getValueType(0), Ld->getChain(), Op0, DAG);
26109 DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), FILDChain.getValue(1));
26110 return FILDChain;
26111 }
26112 }
26114 return SDValue();
26115 }
26116 // Optimize RES, EFLAGS = X86ISD::ADC LHS, RHS, EFLAGS
26117 static SDValue PerformADCCombine(SDNode *N, SelectionDAG &DAG,
26118 X86TargetLowering::DAGCombinerInfo &DCI) {
26119 // If the LHS and RHS of the ADC node are zero, then it can't overflow and
26120 // the result is either zero or one (depending on the input carry bit).
26121 // Strength reduce this down to a "set on carry" aka SETCC_CARRY&1.
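// In other words (adc 0, 0, carry) is just the incoming carry bit, so the
// value result becomes SETCC_CARRY masked with 1; this is only done when the
// ADC's own flag output is unused, since that output cannot be recreated.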
26122 if (X86::isZeroNode(N->getOperand(0)) &&
26123 X86::isZeroNode(N->getOperand(1)) &&
26124 // We don't have a good way to replace an EFLAGS use, so only do this when
26125 // the carry result is dead.
26126 SDValue(N, 1).use_empty()) {
26127 SDLoc DL(N);
26128 EVT VT = N->getValueType(0);
26129 SDValue CarryOut = DAG.getConstant(0, N->getValueType(1));
26130 SDValue Res1 = DAG.getNode(ISD::AND, DL, VT,
26131 DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
26132 DAG.getConstant(X86::COND_B,MVT::i8),
26133 N->getOperand(2)),
26134 DAG.getConstant(1, VT));
26135 return DCI.CombineTo(N, Res1, CarryOut);
26136 }
26138 return SDValue();
26139 }
26141 // fold (add Y, (sete X, 0)) -> adc 0, Y
26142 // (add Y, (setne X, 0)) -> sbb -1, Y
26143 // (sub (sete X, 0), Y) -> sbb 0, Y
26144 // (sub (setne X, 0), Y) -> adc -1, Y
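// The setcc against zero is turned into "cmp X, 1", whose carry flag is set
// exactly when X == 0, so the add/sub of the 0/1 value collapses into a
// single ADC/SBB with a 0 or -1 immediate.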
26145 static SDValue OptimizeConditionalInDecrement(SDNode *N, SelectionDAG &DAG) {
26146 SDLoc DL(N);
26148 // Look through ZExts.
26149 SDValue Ext = N->getOperand(N->getOpcode() == ISD::SUB ? 1 : 0);
26150 if (Ext.getOpcode() != ISD::ZERO_EXTEND || !Ext.hasOneUse())
26151 return SDValue();
26153 SDValue SetCC = Ext.getOperand(0);
26154 if (SetCC.getOpcode() != X86ISD::SETCC || !SetCC.hasOneUse())
26155 return SDValue();
26157 X86::CondCode CC = (X86::CondCode)SetCC.getConstantOperandVal(0);
26158 if (CC != X86::COND_E && CC != X86::COND_NE)
26159 return SDValue();
26161 SDValue Cmp = SetCC.getOperand(1);
26162 if (Cmp.getOpcode() != X86ISD::CMP || !Cmp.hasOneUse() ||
26163 !X86::isZeroNode(Cmp.getOperand(1)) ||
26164 !Cmp.getOperand(0).getValueType().isInteger())
26165 return SDValue();
26167 SDValue CmpOp0 = Cmp.getOperand(0);
26168 SDValue NewCmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32, CmpOp0,
26169 DAG.getConstant(1, CmpOp0.getValueType()));
26171 SDValue OtherVal = N->getOperand(N->getOpcode() == ISD::SUB ? 0 : 1);
26172 if (CC == X86::COND_NE)
26173 return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::ADC : X86ISD::SBB,
26174 DL, OtherVal.getValueType(), OtherVal,
26175 DAG.getConstant(-1ULL, OtherVal.getValueType()), NewCmp);
26176 return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::SBB : X86ISD::ADC,
26177 DL, OtherVal.getValueType(), OtherVal,
26178 DAG.getConstant(0, OtherVal.getValueType()), NewCmp);
26179 }
26181 /// PerformADDCombine - Do target-specific dag combines on integer adds.
26182 static SDValue PerformAddCombine(SDNode *N, SelectionDAG &DAG,
26183 const X86Subtarget *Subtarget) {
26184 EVT VT = N->getValueType(0);
26185 SDValue Op0 = N->getOperand(0);
26186 SDValue Op1 = N->getOperand(1);
26188 // Try to synthesize horizontal adds from adds of shuffles.
26189 if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) ||
26190 (Subtarget->hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) &&
26191 isHorizontalBinOp(Op0, Op1, true))
26192 return DAG.getNode(X86ISD::HADD, SDLoc(N), VT, Op0, Op1);
26194 return OptimizeConditionalInDecrement(N, DAG);
26195 }
26197 static SDValue PerformSubCombine(SDNode *N, SelectionDAG &DAG,
26198 const X86Subtarget *Subtarget) {
26199 SDValue Op0 = N->getOperand(0);
26200 SDValue Op1 = N->getOperand(1);
26202 // X86 can't encode an immediate LHS of a sub. See if we can push the
26203 // negation into a preceding instruction.
26204 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op0)) {
26205 // If the RHS of the sub is a XOR with one use and a constant, invert the
26206 // immediate. Then add one to the LHS of the sub so we can turn
26207 // X-Y -> X+~Y+1, saving one register.
26208 if (Op1->hasOneUse() && Op1.getOpcode() == ISD::XOR &&
26209 isa<ConstantSDNode>(Op1.getOperand(1))) {
26210 APInt XorC = cast<ConstantSDNode>(Op1.getOperand(1))->getAPIntValue();
26211 EVT VT = Op0.getValueType();
26212 SDValue NewXor = DAG.getNode(ISD::XOR, SDLoc(Op1), VT,
26213 Op1.getOperand(0),
26214 DAG.getConstant(~XorC, VT));
26215 return DAG.getNode(ISD::ADD, SDLoc(N), VT, NewXor,
26216 DAG.getConstant(C->getAPIntValue()+1, VT));
26217 }
26218 }
26220 // Try to synthesize horizontal subs from subs of shuffles.
26221 EVT VT = N->getValueType(0);
26222 if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) ||
26223 (Subtarget->hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) &&
26224 isHorizontalBinOp(Op0, Op1, true))
26225 return DAG.getNode(X86ISD::HSUB, SDLoc(N), VT, Op0, Op1);
26227 return OptimizeConditionalInDecrement(N, DAG);
26228 }
26230 /// performVZEXTCombine - Do target-specific dag combines on X86ISD::VZEXT nodes.
26231 static SDValue performVZEXTCombine(SDNode *N, SelectionDAG &DAG,
26232 TargetLowering::DAGCombinerInfo &DCI,
26233 const X86Subtarget *Subtarget) {
26234 SDLoc DL(N);
26235 MVT VT = N->getSimpleValueType(0);
26236 SDValue Op = N->getOperand(0);
26237 MVT OpVT = Op.getSimpleValueType();
26238 MVT OpEltVT = OpVT.getVectorElementType();
26239 unsigned InputBits = OpEltVT.getSizeInBits() * VT.getVectorNumElements();
26241 // (vzext (bitcast (vzext (x)) -> (vzext x)
26242 SDValue V = Op;
26243 while (V.getOpcode() == ISD::BITCAST)
26244 V = V.getOperand(0);
26246 if (V != Op && V.getOpcode() == X86ISD::VZEXT) {
26247 MVT InnerVT = V.getSimpleValueType();
26248 MVT InnerEltVT = InnerVT.getVectorElementType();
26250 // If the element sizes match exactly, we can just do one larger vzext. This
26251 // is always an exact type match as vzext operates on integer types.
26252 if (OpEltVT == InnerEltVT) {
26253 assert(OpVT == InnerVT && "Types must match for vzext!");
26254 return DAG.getNode(X86ISD::VZEXT, DL, VT, V.getOperand(0));
26255 }
26257 // The only other way we can combine them is if only a single element of the
26258 // inner vzext is used in the input to the outer vzext.
26259 if (InnerEltVT.getSizeInBits() < InputBits)
26260 return SDValue();
26262 // In this case, the inner vzext is completely dead because we're going to
26263 // only look at bits inside of the low element. Just do the outer vzext on
26264 // a bitcast of the input to the inner.
26265 return DAG.getNode(X86ISD::VZEXT, DL, VT,
26266 DAG.getNode(ISD::BITCAST, DL, OpVT, V));
26267 }
26269 // Check if we can bypass extracting and re-inserting an element of an input
26270 // vector. Essentially:
26271 // (bitcast (sclr2vec (ext_vec_elt x))) -> (bitcast x)
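// When the re-inserted scalar is element 0 of some wider vector and covers
// exactly the bits the vzext consumes, the extract/scalar_to_vector pair can
// be dropped and the vzext fed with a bitcast of (a subvector of) the
// original vector instead.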
26272 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR &&
26273 V.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
26274 V.getOperand(0).getSimpleValueType().getSizeInBits() == InputBits) {
26275 SDValue ExtractedV = V.getOperand(0);
26276 SDValue OrigV = ExtractedV.getOperand(0);
26277 if (auto *ExtractIdx = dyn_cast<ConstantSDNode>(ExtractedV.getOperand(1)))
26278 if (ExtractIdx->getZExtValue() == 0) {
26279 MVT OrigVT = OrigV.getSimpleValueType();
26280 // Extract a subvector if necessary...
26281 if (OrigVT.getSizeInBits() > OpVT.getSizeInBits()) {
26282 int Ratio = OrigVT.getSizeInBits() / OpVT.getSizeInBits();
26283 OrigVT = MVT::getVectorVT(OrigVT.getVectorElementType(),
26284 OrigVT.getVectorNumElements() / Ratio);
26285 OrigV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigVT, OrigV,
26286 DAG.getIntPtrConstant(0));
26287 }
26288 Op = DAG.getNode(ISD::BITCAST, DL, OpVT, OrigV);
26289 return DAG.getNode(X86ISD::VZEXT, DL, VT, Op);
26290 }
26291 }
26293 return SDValue();
26294 }
26296 SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
26297 DAGCombinerInfo &DCI) const {
26298 SelectionDAG &DAG = DCI.DAG;
26299 switch (N->getOpcode()) {
26300 default: break;
26301 case ISD::EXTRACT_VECTOR_ELT:
26302 return PerformEXTRACT_VECTOR_ELTCombine(N, DAG, DCI);
26303 case ISD::VSELECT:
26304 case ISD::SELECT:
26305 case X86ISD::SHRUNKBLEND:
26306 return PerformSELECTCombine(N, DAG, DCI, Subtarget);
26307 case ISD::BITCAST: return PerformBITCASTCombine(N, DAG);
26308 case X86ISD::CMOV: return PerformCMOVCombine(N, DAG, DCI, Subtarget);
26309 case ISD::ADD: return PerformAddCombine(N, DAG, Subtarget);
26310 case ISD::SUB: return PerformSubCombine(N, DAG, Subtarget);
26311 case X86ISD::ADC: return PerformADCCombine(N, DAG, DCI);
26312 case ISD::MUL: return PerformMulCombine(N, DAG, DCI);
26313 case ISD::SHL:
26314 case ISD::SRA:
26315 case ISD::SRL: return PerformShiftCombine(N, DAG, DCI, Subtarget);
26316 case ISD::AND: return PerformAndCombine(N, DAG, DCI, Subtarget);
26317 case ISD::OR: return PerformOrCombine(N, DAG, DCI, Subtarget);
26318 case ISD::XOR: return PerformXorCombine(N, DAG, DCI, Subtarget);
26319 case ISD::LOAD: return PerformLOADCombine(N, DAG, DCI, Subtarget);
26320 case ISD::MLOAD: return PerformMLOADCombine(N, DAG, DCI, Subtarget);
26321 case ISD::STORE: return PerformSTORECombine(N, DAG, Subtarget);
26322 case ISD::MSTORE: return PerformMSTORECombine(N, DAG, Subtarget);
26323 case ISD::SINT_TO_FP: return PerformSINT_TO_FPCombine(N, DAG, Subtarget);
26324 case ISD::FADD: return PerformFADDCombine(N, DAG, Subtarget);
26325 case ISD::FSUB: return PerformFSUBCombine(N, DAG, Subtarget);
26326 case X86ISD::FXOR:
26327 case X86ISD::FOR: return PerformFORCombine(N, DAG);
26328 case X86ISD::FMIN:
26329 case X86ISD::FMAX: return PerformFMinFMaxCombine(N, DAG);
26330 case X86ISD::FAND: return PerformFANDCombine(N, DAG);
26331 case X86ISD::FANDN: return PerformFANDNCombine(N, DAG);
26332 case X86ISD::BT: return PerformBTCombine(N, DAG, DCI);
26333 case X86ISD::VZEXT_MOVL: return PerformVZEXT_MOVLCombine(N, DAG);
26334 case ISD::ANY_EXTEND:
26335 case ISD::ZERO_EXTEND: return PerformZExtCombine(N, DAG, DCI, Subtarget);
26336 case ISD::SIGN_EXTEND: return PerformSExtCombine(N, DAG, DCI, Subtarget);
26337 case ISD::SIGN_EXTEND_INREG:
26338 return PerformSIGN_EXTEND_INREGCombine(N, DAG, Subtarget);
26339 case ISD::TRUNCATE: return PerformTruncateCombine(N, DAG,DCI,Subtarget);
26340 case ISD::SETCC: return PerformISDSETCCCombine(N, DAG, Subtarget);
26341 case X86ISD::SETCC: return PerformSETCCCombine(N, DAG, DCI, Subtarget);
26342 case X86ISD::BRCOND: return PerformBrCondCombine(N, DAG, DCI, Subtarget);
26343 case X86ISD::VZEXT: return performVZEXTCombine(N, DAG, DCI, Subtarget);
26344 case X86ISD::SHUFP: // Handle all target specific shuffles
26345 case X86ISD::PALIGNR:
26346 case X86ISD::UNPCKH:
26347 case X86ISD::UNPCKL:
26348 case X86ISD::MOVHLPS:
26349 case X86ISD::MOVLHPS:
26350 case X86ISD::PSHUFB:
26351 case X86ISD::PSHUFD:
26352 case X86ISD::PSHUFHW:
26353 case X86ISD::PSHUFLW:
26354 case X86ISD::MOVSS:
26355 case X86ISD::MOVSD:
26356 case X86ISD::VPERMILPI:
26357 case X86ISD::VPERM2X128:
26358 case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, DCI,Subtarget);
26359 case ISD::FMA: return PerformFMACombine(N, DAG, Subtarget);
26360 case ISD::INTRINSIC_WO_CHAIN:
26361 return PerformINTRINSIC_WO_CHAINCombine(N, DAG, Subtarget);
26362 case X86ISD::INSERTPS: {
26363 if (getTargetMachine().getOptLevel() > CodeGenOpt::None)
26364 return PerformINSERTPSCombine(N, DAG, Subtarget);
26365 break;
26366 }
26367 case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DAG, Subtarget);
26368 }
26370 return SDValue();
26371 }
26373 /// isTypeDesirableForOp - Return true if the target has native support for
26374 /// the specified value type and it is 'desirable' to use the type for the
26375 /// given node type. e.g. On x86 i16 is legal, but undesirable since i16
26376 /// instruction encodings are longer and some i16 instructions are slow.
26377 bool X86TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const {
26378 if (!isTypeLegal(VT))
26379 return false;
26380 if (VT != MVT::i16)
26381 return true;
26383 switch (Opc) {
26384 default:
26385 return true;
26386 case ISD::LOAD:
26387 case ISD::SIGN_EXTEND:
26388 case ISD::ZERO_EXTEND:
26389 case ISD::ANY_EXTEND:
26390 case ISD::SHL:
26391 case ISD::SRL:
26392 case ISD::SUB:
26393 case ISD::ADD:
26394 case ISD::MUL:
26395 case ISD::AND:
26396 case ISD::OR:
26397 case ISD::XOR:
26398 return false;
26399 }
26400 }
26402 /// IsDesirableToPromoteOp - This method query the target whether it is
26403 /// beneficial for dag combiner to promote the specified node. If true, it
26404 /// should return the desired promotion type by reference.
26405 bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
26406 EVT VT = Op.getValueType();
26407 if (VT != MVT::i16)
26408 return false;
26410 bool Promote = false;
26411 bool Commute = false;
26412 switch (Op.getOpcode()) {
26413 default: break;
26414 case ISD::LOAD: {
26415 LoadSDNode *LD = cast<LoadSDNode>(Op);
26416 // If the non-extending load has a single use and it's not live out, then it
26417 // might be folded.
26418 if (LD->getExtensionType() == ISD::NON_EXTLOAD /*&&
26419 Op.hasOneUse()*/) {
26420 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
26421 UE = Op.getNode()->use_end(); UI != UE; ++UI) {
26422 // The only case where we'd want to promote LOAD (rather than it being
26423 // promoted as an operand) is when its only use is liveout.
26424 if (UI->getOpcode() != ISD::CopyToReg)
26425 return false;
26426 }
26427 }
26428 Promote = true;
26429 break;
26430 }
26431 case ISD::SIGN_EXTEND:
26432 case ISD::ZERO_EXTEND:
26433 case ISD::ANY_EXTEND:
26434 Promote = true;
26435 break;
26436 case ISD::SHL:
26437 case ISD::SRL: {
26438 SDValue N0 = Op.getOperand(0);
26439 // Look out for (store (shl (load), x)).
26440 if (MayFoldLoad(N0) && MayFoldIntoStore(Op))
26441 return false;
26442 Promote = true;
26443 break;
26444 }
26445 case ISD::ADD:
26446 case ISD::MUL:
26447 case ISD::AND:
26448 case ISD::OR:
26449 case ISD::XOR:
26450 Commute = true;
26451 // fallthrough
26452 case ISD::SUB: {
26453 SDValue N0 = Op.getOperand(0);
26454 SDValue N1 = Op.getOperand(1);
26455 if (!Commute && MayFoldLoad(N1))
26456 return false;
26457 // Avoid disabling potential load folding opportunities.
26458 if (MayFoldLoad(N0) && (!isa<ConstantSDNode>(N1) || MayFoldIntoStore(Op)))
26459 return false;
26460 if (MayFoldLoad(N1) && (!isa<ConstantSDNode>(N0) || MayFoldIntoStore(Op)))
26461 return false;
26462 Promote = true;
26463 }
26464 }
26466 PVT = MVT::i32;
26467 return Promote;
26468 }
26470 //===----------------------------------------------------------------------===//
26471 // X86 Inline Assembly Support
26472 //===----------------------------------------------------------------------===//
26475 // Helper to match a string separated by whitespace.
26476 bool matchAsmImpl(StringRef s, ArrayRef<const StringRef *> args) {
26477 s = s.substr(s.find_first_not_of(" \t")); // Skip leading whitespace.
26479 for (unsigned i = 0, e = args.size(); i != e; ++i) {
26480 StringRef piece(*args[i]);
26481 if (!s.startswith(piece)) // Check if the piece matches.
26484 s = s.substr(piece.size());
26485 StringRef::size_type pos = s.find_first_not_of(" \t");
26486 if (pos == 0) // We matched a prefix.
26487 return false;
26489 s = s.substr(pos);
26490 }
26492 return s.empty();
26493 }
26494 const VariadicFunction1<bool, StringRef, StringRef, matchAsmImpl> matchAsm={};
26497 static bool clobbersFlagRegisters(const SmallVector<StringRef, 4> &AsmPieces) {
26499 if (AsmPieces.size() == 3 || AsmPieces.size() == 4) {
26500 if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{cc}") &&
26501 std::count(AsmPieces.begin(), AsmPieces.end(), "~{flags}") &&
26502 std::count(AsmPieces.begin(), AsmPieces.end(), "~{fpsr}")) {
26504 if (AsmPieces.size() == 3)
26505 return true;
26506 else if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{dirflag}"))
26507 return true;
26508 }
26509 }
26510 return false;
26511 }
26513 bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
26514 InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue());
26516 std::string AsmStr = IA->getAsmString();
26518 IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
26519 if (!Ty || Ty->getBitWidth() % 16 != 0)
26522 // TODO: should remove alternatives from the asmstring: "foo {a|b}" -> "foo a"
26523 SmallVector<StringRef, 4> AsmPieces;
26524 SplitString(AsmStr, AsmPieces, ";\n");
26526 switch (AsmPieces.size()) {
26527 default: return false;
26528 case 1:
26529 // FIXME: this should verify that we are targeting a 486 or better. If not,
26530 // we will turn this bswap into something that will be lowered to logical
26531 // ops instead of emitting the bswap asm. For now, we don't support 486 or
26532 // lower so don't worry about this.
26534 if (matchAsm(AsmPieces[0], "bswap", "$0") ||
26535 matchAsm(AsmPieces[0], "bswapl", "$0") ||
26536 matchAsm(AsmPieces[0], "bswapq", "$0") ||
26537 matchAsm(AsmPieces[0], "bswap", "${0:q}") ||
26538 matchAsm(AsmPieces[0], "bswapl", "${0:q}") ||
26539 matchAsm(AsmPieces[0], "bswapq", "${0:q}")) {
26540 // No need to check constraints, nothing other than the equivalent of
26541 // "=r,0" would be valid here.
26542 return IntrinsicLowering::LowerToByteSwap(CI);
26545 // rorw $$8, ${0:w} --> llvm.bswap.i16
26546 if (CI->getType()->isIntegerTy(16) &&
26547 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
26548 (matchAsm(AsmPieces[0], "rorw", "$$8,", "${0:w}") ||
26549 matchAsm(AsmPieces[0], "rolw", "$$8,", "${0:w}"))) {
26551 const std::string &ConstraintsStr = IA->getConstraintString();
26552 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
26553 array_pod_sort(AsmPieces.begin(), AsmPieces.end());
26554 if (clobbersFlagRegisters(AsmPieces))
26555 return IntrinsicLowering::LowerToByteSwap(CI);
26556 }
26557 break;
26558 case 3:
26559 if (CI->getType()->isIntegerTy(32) &&
26560 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
26561 matchAsm(AsmPieces[0], "rorw", "$$8,", "${0:w}") &&
26562 matchAsm(AsmPieces[1], "rorl", "$$16,", "$0") &&
26563 matchAsm(AsmPieces[2], "rorw", "$$8,", "${0:w}")) {
26565 const std::string &ConstraintsStr = IA->getConstraintString();
26566 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
26567 array_pod_sort(AsmPieces.begin(), AsmPieces.end());
26568 if (clobbersFlagRegisters(AsmPieces))
26569 return IntrinsicLowering::LowerToByteSwap(CI);
26572 if (CI->getType()->isIntegerTy(64)) {
26573 InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints();
26574 if (Constraints.size() >= 2 &&
26575 Constraints[0].Codes.size() == 1 && Constraints[0].Codes[0] == "A" &&
26576 Constraints[1].Codes.size() == 1 && Constraints[1].Codes[0] == "0") {
26577 // bswap %eax / bswap %edx / xchgl %eax, %edx -> llvm.bswap.i64
26578 if (matchAsm(AsmPieces[0], "bswap", "%eax") &&
26579 matchAsm(AsmPieces[1], "bswap", "%edx") &&
26580 matchAsm(AsmPieces[2], "xchgl", "%eax,", "%edx"))
26581 return IntrinsicLowering::LowerToByteSwap(CI);
26582 }
26583 }
26584 break;
26585 }
26586 return false;
26587 }
26589 /// getConstraintType - Given a constraint letter, return the type of
26590 /// constraint it is for this target.
26591 X86TargetLowering::ConstraintType
26592 X86TargetLowering::getConstraintType(const std::string &Constraint) const {
26593 if (Constraint.size() == 1) {
26594 switch (Constraint[0]) {
26595 case 'R':
26596 case 'q':
26597 case 'Q':
26598 case 'f':
26599 case 't':
26600 case 'u':
26601 case 'y':
26602 case 'x':
26603 case 'Y':
26604 case 'l':
26605 return C_RegisterClass;
26606 case 'a':
26607 case 'b':
26608 case 'c':
26609 case 'd':
26610 case 'S':
26611 case 'D':
26612 case 'A':
26613 return C_Register;
26614 case 'I':
26615 case 'J':
26616 case 'K':
26617 case 'L':
26618 case 'M':
26619 case 'N':
26620 case 'G':
26621 case 'C':
26622 case 'e':
26623 case 'Z':
26624 return C_Other;
26625 default:
26626 break;
26627 }
26628 }
26629 return TargetLowering::getConstraintType(Constraint);
26630 }

/// Examine constraint type and operand type and determine a weight value.
/// This object must already have been set up with the operand type
/// and the current alternative constraint selected.
TargetLowering::ConstraintWeight
X86TargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;
  Type *type = CallOperandVal->getType();
  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;
  case 'R': case 'q': case 'Q': case 'a': case 'b':
  case 'c': case 'd': case 'S': case 'D': case 'A':
    if (CallOperandVal->getType()->isIntegerTy())
      weight = CW_SpecificReg;
    break;
  case 'f': case 't': case 'u':
    if (type->isFloatingPointTy())
      weight = CW_SpecificReg;
    break;
  case 'y':
    if (type->isX86_MMXTy() && Subtarget->hasMMX())
      weight = CW_SpecificReg;
    break;
  case 'x': case 'Y':
    if (((type->getPrimitiveSizeInBits() == 128) && Subtarget->hasSSE1()) ||
        ((type->getPrimitiveSizeInBits() == 256) && Subtarget->hasFp256()))
      weight = CW_Register;
    break;
  case 'I':
    if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
      if (C->getZExtValue() <= 31)
        weight = CW_Constant;
    }
    break;
  case 'J':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 63)
        weight = CW_Constant;
    }
    break;
  case 'K':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getSExtValue() >= -0x80) && (C->getSExtValue() <= 0x7f))
        weight = CW_Constant;
    }
    break;
  case 'L':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getZExtValue() == 0xff) || (C->getZExtValue() == 0xffff))
        weight = CW_Constant;
    }
    break;
  case 'M':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 3)
        weight = CW_Constant;
    }
    break;
  case 'N':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 0xff)
        weight = CW_Constant;
    }
    break;
  case 'G':
  case 'C':
    if (dyn_cast<ConstantFP>(CallOperandVal)) {
      weight = CW_Constant;
    }
    break;
  case 'e':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getSExtValue() >= -0x80000000LL) &&
          (C->getSExtValue() <= 0x7fffffffLL))
        weight = CW_Constant;
    }
    break;
  case 'Z':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 0xffffffff)
        weight = CW_Constant;
    }
    break;
  }
  return weight;
}
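
// Illustrative example: with a multi-alternative constraint such as "Ir" and
// a constant operand of 5, the 'I' alternative above scores CW_Constant
// (5 <= 31), a better weight than the CW_Register score of the 'r'
// alternative, so the immediate form is preferred.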

/// LowerXConstraint - try to replace an X constraint, which matches anything,
/// with another that has more specific requirements based on the type of the
/// corresponding operand.
const char *X86TargetLowering::
LowerXConstraint(EVT ConstraintVT) const {
  // FP X constraints get lowered to SSE1/2 registers if available, otherwise
  // 'f' like normal targets.
  if (ConstraintVT.isFloatingPoint()) {
    if (Subtarget->hasSSE2())
      return "Y";
    if (Subtarget->hasSSE1())
      return "x";
  }

  return TargetLowering::LowerXConstraint(ConstraintVT);
}
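
// For example, an "X" constraint on a float operand becomes "Y" when SSE2 is
// available and "x" when only SSE1 is, steering the operand into an XMM
// register instead of the x87 stack.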

/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector. If it is invalid, don't add anything to Ops.
void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                     std::string &Constraint,
                                                     std::vector<SDValue>&Ops,
                                                     SelectionDAG &DAG) const {
  SDValue Result;

  // Only support length 1 constraints for now.
  if (Constraint.length() > 1) return;

  char ConstraintLetter = Constraint[0];
  switch (ConstraintLetter) {
  default: break;
  case 'I':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 31) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'J':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 63) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'K':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (isInt<8>(C->getSExtValue())) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'L':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() == 0xff || C->getZExtValue() == 0xffff ||
          (Subtarget->is64Bit() && C->getZExtValue() == 0xffffffff)) {
        Result = DAG.getTargetConstant(C->getSExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'M':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 3) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'N':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 255) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'O':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 127) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'e': {
    // 32-bit signed value
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
                                           C->getSExtValue())) {
        // Widen to 64 bits here to get it sign extended.
        Result = DAG.getTargetConstant(C->getSExtValue(), MVT::i64);
        break;
      }
    // FIXME gcc accepts some relocatable values here too, but only in certain
    // memory models; it's complicated.
    }
    return;
  }
  case 'Z': {
    // 32-bit unsigned value
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
                                           C->getZExtValue())) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    // FIXME gcc accepts some relocatable values here too, but only in certain
    // memory models; it's complicated.
    return;
  }
  case 'i': {
    // Literal immediates are always ok.
    if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) {
      // Widen to 64 bits here to get it sign extended.
      Result = DAG.getTargetConstant(CST->getSExtValue(), MVT::i64);
      break;
    }

    // In any sort of PIC mode addresses need to be computed at runtime by
    // adding in a register or some sort of table lookup. These can't
    // be used as immediates.
    if (Subtarget->isPICStyleGOT() || Subtarget->isPICStyleStubPIC())
      return;

    // If we are in non-pic codegen mode, we allow the address of a global (with
    // an optional displacement) to be used with 'i'.
    GlobalAddressSDNode *GA = nullptr;
    int64_t Offset = 0;

    // Match either (GA), (GA+C), (GA+C1+C2), etc.
    while (1) {
      if ((GA = dyn_cast<GlobalAddressSDNode>(Op))) {
        Offset += GA->getOffset();
        break;
      } else if (Op.getOpcode() == ISD::ADD) {
        if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
          Offset += C->getZExtValue();
          Op = Op.getOperand(0);
          continue;
        }
      } else if (Op.getOpcode() == ISD::SUB) {
        if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
          Offset += -C->getZExtValue();
          Op = Op.getOperand(0);
          continue;
        }
      }

      // Otherwise, this isn't something we can handle, reject it.
      return;
    }
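
    // For example (illustrative), an operand of the form (add (add GA, 4), 8)
    // is peeled by the loop above into the GlobalAddressSDNode GA with an
    // accumulated Offset of 12, which is emitted as GA+12 below.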

    const GlobalValue *GV = GA->getGlobal();
    // If we require an extra load to get this address, as in PIC mode, we
    // can't accept it.
    if (isGlobalStubReference(
            Subtarget->ClassifyGlobalReference(GV, DAG.getTarget())))
      return;

    Result = DAG.getTargetGlobalAddress(GV, SDLoc(Op),
                                        GA->getValueType(0), Offset);
    break;
  }
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }
  return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}
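
// Illustrative use of the 'N' path above (not from this file):
//   asm volatile("outb %0, %1" : : "a"(value), "N"(0x80));
// Here 0x80 fits the 0..255 range, so it is accepted and emitted as an
// immediate port number rather than being forced into a register.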

std::pair<unsigned, const TargetRegisterClass*>
X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                MVT VT) const {
  // First, see if this is a constraint that directly corresponds to an LLVM
  // register class.
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    default: break;
    // TODO: Slight differences here in allocation order and leaving
    // RIP in the class. Do they matter any more here than they do
    // in the normal allocation?
    case 'q':   // GENERAL_REGS in 64-bit mode, Q_REGS in 32-bit mode.
      if (Subtarget->is64Bit()) {
        if (VT == MVT::i32 || VT == MVT::f32)
          return std::make_pair(0U, &X86::GR32RegClass);
        if (VT == MVT::i16)
          return std::make_pair(0U, &X86::GR16RegClass);
        if (VT == MVT::i8 || VT == MVT::i1)
          return std::make_pair(0U, &X86::GR8RegClass);
        if (VT == MVT::i64 || VT == MVT::f64)
          return std::make_pair(0U, &X86::GR64RegClass);
        break;
      }
      // 32-bit fallthrough
    case 'Q':   // Q_REGS
      if (VT == MVT::i32 || VT == MVT::f32)
        return std::make_pair(0U, &X86::GR32_ABCDRegClass);
      if (VT == MVT::i16)
        return std::make_pair(0U, &X86::GR16_ABCDRegClass);
      if (VT == MVT::i8 || VT == MVT::i1)
        return std::make_pair(0U, &X86::GR8_ABCD_LRegClass);
      if (VT == MVT::i64)
        return std::make_pair(0U, &X86::GR64_ABCDRegClass);
      break;
    case 'r':   // GENERAL_REGS
    case 'l':   // INDEX_REGS
      if (VT == MVT::i8 || VT == MVT::i1)
        return std::make_pair(0U, &X86::GR8RegClass);
      if (VT == MVT::i16)
        return std::make_pair(0U, &X86::GR16RegClass);
      if (VT == MVT::i32 || VT == MVT::f32 || !Subtarget->is64Bit())
        return std::make_pair(0U, &X86::GR32RegClass);
      return std::make_pair(0U, &X86::GR64RegClass);
    case 'R':   // LEGACY_REGS
      if (VT == MVT::i8 || VT == MVT::i1)
        return std::make_pair(0U, &X86::GR8_NOREXRegClass);
      if (VT == MVT::i16)
        return std::make_pair(0U, &X86::GR16_NOREXRegClass);
      if (VT == MVT::i32 || !Subtarget->is64Bit())
        return std::make_pair(0U, &X86::GR32_NOREXRegClass);
      return std::make_pair(0U, &X86::GR64_NOREXRegClass);
    case 'f':   // FP Stack registers.
      // If SSE is enabled for this VT, use f80 to ensure the isel moves the
      // value to the correct fpstack register class.
      if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT))
        return std::make_pair(0U, &X86::RFP32RegClass);
      if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT))
        return std::make_pair(0U, &X86::RFP64RegClass);
      return std::make_pair(0U, &X86::RFP80RegClass);
    case 'y':   // MMX_REGS if MMX allowed.
      if (!Subtarget->hasMMX()) break;
      return std::make_pair(0U, &X86::VR64RegClass);
    case 'Y':   // SSE_REGS if SSE2 allowed
      if (!Subtarget->hasSSE2()) break;
      // FALL THROUGH.
    case 'x':   // SSE_REGS if SSE1 allowed or AVX_REGS if AVX allowed
      if (!Subtarget->hasSSE1()) break;

      switch (VT.SimpleTy) {
      default: break;
      // Scalar SSE types.
      case MVT::f32:
      case MVT::i32:
        return std::make_pair(0U, &X86::FR32RegClass);
      case MVT::f64:
      case MVT::i64:
        return std::make_pair(0U, &X86::FR64RegClass);
      // Vector types.
      case MVT::v16i8:
      case MVT::v8i16:
      case MVT::v4i32:
      case MVT::v2i64:
      case MVT::v4f32:
      case MVT::v2f64:
        return std::make_pair(0U, &X86::VR128RegClass);
      // AVX types.
      case MVT::v32i8:
      case MVT::v16i16:
      case MVT::v8i32:
      case MVT::v4i64:
      case MVT::v8f32:
      case MVT::v4f64:
        return std::make_pair(0U, &X86::VR256RegClass);
      case MVT::v8f64:
      case MVT::v16f32:
      case MVT::v16i32:
      case MVT::v8i64:
        return std::make_pair(0U, &X86::VR512RegClass);
      }
      break;
    }
  }
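
  // For example, constraint "x" with a v4f32 operand selects VR128 (an XMM
  // register) above, while the same constraint with a v8f32 operand selects
  // VR256 (a YMM register) when AVX makes that type legal.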

  // Use the default implementation in TargetLowering to convert the register
  // constraint into a member of a register class.
  std::pair<unsigned, const TargetRegisterClass*> Res;
  Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);

  // Not found as a standard register?
  if (!Res.second) {
    // Map st(0) -> st(7) -> ST0
    if (Constraint.size() == 7 && Constraint[0] == '{' &&
        tolower(Constraint[1]) == 's' &&
        tolower(Constraint[2]) == 't' &&
        Constraint[3] == '(' &&
        (Constraint[4] >= '0' && Constraint[4] <= '7') &&
        Constraint[5] == ')' &&
        Constraint[6] == '}') {
      Res.first = X86::FP0+Constraint[4]-'0';
      Res.second = &X86::RFP80RegClass;
      return Res;
    }

    // GCC allows "st(0)" to be called just plain "st".
    if (StringRef("{st}").equals_lower(Constraint)) {
      Res.first = X86::FP0;
      Res.second = &X86::RFP80RegClass;
      return Res;
    }

    // flags -> EFLAGS
    if (StringRef("{flags}").equals_lower(Constraint)) {
      Res.first = X86::EFLAGS;
      Res.second = &X86::CCRRegClass;
      return Res;
    }

    // 'A' means EAX + EDX.
    if (Constraint == "A") {
      Res.first = X86::EAX;
      Res.second = &X86::GR32_ADRegClass;
      return Res;
    }
    return Res;
  }

  // Otherwise, check to see if this is a register class of the wrong value
  // type. For example, we want to map "{ax},i32" -> {eax}, we don't want it to
  // turn into {ax},{dx}.
  if (Res.second->hasType(VT))
    return Res; // Correct type already, nothing to do.

  // All of the single-register GCC register classes map their values onto
  // 16-bit register pieces "ax","dx","cx","bx","si","di","bp","sp". If we
  // really want an 8-bit or 32-bit register, map to the appropriate register
  // class and return the appropriate register.
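  // For example, the constraint "{ax}" with an i32 operand is remapped below
  // to EAX in GR32, and with an i8 operand to AL in GR8, so the operand gets
  // a register of the width the asm actually needs.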
  if (Res.second == &X86::GR16RegClass) {
    if (VT == MVT::i8 || VT == MVT::i1) {
      unsigned DestReg = 0;
      switch (Res.first) {
      default: break;
      case X86::AX: DestReg = X86::AL; break;
      case X86::DX: DestReg = X86::DL; break;
      case X86::CX: DestReg = X86::CL; break;
      case X86::BX: DestReg = X86::BL; break;
      }
      if (DestReg) {
        Res.first = DestReg;
        Res.second = &X86::GR8RegClass;
      }
    } else if (VT == MVT::i32 || VT == MVT::f32) {
      unsigned DestReg = 0;
      switch (Res.first) {
      default: break;
      case X86::AX: DestReg = X86::EAX; break;
      case X86::DX: DestReg = X86::EDX; break;
      case X86::CX: DestReg = X86::ECX; break;
      case X86::BX: DestReg = X86::EBX; break;
      case X86::SI: DestReg = X86::ESI; break;
      case X86::DI: DestReg = X86::EDI; break;
      case X86::BP: DestReg = X86::EBP; break;
      case X86::SP: DestReg = X86::ESP; break;
      }
      if (DestReg) {
        Res.first = DestReg;
        Res.second = &X86::GR32RegClass;
      }
    } else if (VT == MVT::i64 || VT == MVT::f64) {
      unsigned DestReg = 0;
      switch (Res.first) {
      default: break;
      case X86::AX: DestReg = X86::RAX; break;
      case X86::DX: DestReg = X86::RDX; break;
      case X86::CX: DestReg = X86::RCX; break;
      case X86::BX: DestReg = X86::RBX; break;
      case X86::SI: DestReg = X86::RSI; break;
      case X86::DI: DestReg = X86::RDI; break;
      case X86::BP: DestReg = X86::RBP; break;
      case X86::SP: DestReg = X86::RSP; break;
      }
      if (DestReg) {
        Res.first = DestReg;
        Res.second = &X86::GR64RegClass;
      }
    }
  } else if (Res.second == &X86::FR32RegClass ||
             Res.second == &X86::FR64RegClass ||
             Res.second == &X86::VR128RegClass ||
             Res.second == &X86::VR256RegClass ||
             Res.second == &X86::FR32XRegClass ||
             Res.second == &X86::FR64XRegClass ||
             Res.second == &X86::VR128XRegClass ||
             Res.second == &X86::VR256XRegClass ||
             Res.second == &X86::VR512RegClass) {
    // Handle references to XMM physical registers that got mapped into the
    // wrong class. This can happen with constraints like {xmm0} where the
    // target independent register mapper will just pick the first match it can
    // find, ignoring the required type.
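    // For example, "{xmm0}" with an f64 operand is narrowed to FR64 below,
    // while a v4f32 operand keeps the full VR128 class.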

    if (VT == MVT::f32 || VT == MVT::i32)
      Res.second = &X86::FR32RegClass;
    else if (VT == MVT::f64 || VT == MVT::i64)
      Res.second = &X86::FR64RegClass;
    else if (X86::VR128RegClass.hasType(VT))
      Res.second = &X86::VR128RegClass;
    else if (X86::VR256RegClass.hasType(VT))
      Res.second = &X86::VR256RegClass;
    else if (X86::VR512RegClass.hasType(VT))
      Res.second = &X86::VR512RegClass;
  }

  return Res;
}

int X86TargetLowering::getScalingFactorCost(const AddrMode &AM,
                                            Type *Ty) const {
  // Scaling factors are not free at all.
  // An indexed folded instruction, i.e., inst (reg1, reg2, scale),
  // will take 2 allocations in the out of order engine instead of 1
  // for plain addressing mode, i.e. inst (reg1).
  // E.g.,
  // vaddps (%rsi,%rdx), %ymm0, %ymm1
  // Requires two allocations (one for the load, one for the computation)
  // whereas:
  // vaddps (%rsi), %ymm0, %ymm1
  // Requires just 1 allocation, i.e., freeing allocations for other operations
  // and having less micro operations to execute.
  //
  // For some X86 architectures, this is even worse because for instance for
  // stores, the complex addressing mode forces the instruction to use the
  // "load" ports instead of the dedicated "store" port.
  // E.g., on Haswell:
  // vmovaps %ymm1, (%r8, %rdi) can use port 2 or 3.
  // vmovaps %ymm1, (%r8) can use port 2, 3, or 7.
  if (isLegalAddressingMode(AM, Ty))
    // Scale represents reg2 * scale, thus account for 1
    // as soon as we use a second register.
    return AM.Scale != 0;
  return -1;
}
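
// Worked example (illustrative): an addressing mode like (%rsi,%rdx,4) is
// legal but uses an index register, so the cost above is 1; a plain (%rsi)
// mode costs 0; a mode that is not legal at all returns -1 to signal that it
// is unsupported.

/// isTargetFTOL - Return true if the target uses the MSVC _ftol2 routine
/// for fptoui; this is only the case for 32-bit Windows/MSVC targets.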
bool X86TargetLowering::isTargetFTOL() const {
  return Subtarget->isTargetKnownWindowsMSVC() && !Subtarget->is64Bit();
}