//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//
#include "X86ISelLowering.h"
#include "Utils/X86ShuffleDecode.h"
#include "X86CallingConv.h"
#include "X86FrameLowering.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86TargetMachine.h"
#include "X86TargetObjectFile.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/VariadicFunction.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
#include "X86IntrinsicsInfo.h"
using namespace llvm;

#define DEBUG_TYPE "x86-isel"

STATISTIC(NumTailCalls, "Number of tail calls");

static cl::opt<bool> ExperimentalVectorWideningLegalization(
    "x86-experimental-vector-widening-legalization", cl::init(false),
    cl::desc("Enable an experimental vector type legalization through widening "
             "rather than promotion."),
    cl::Hidden);

static cl::opt<bool> ExperimentalVectorShuffleLowering(
    "x86-experimental-vector-shuffle-lowering", cl::init(true),
    cl::desc("Enable an experimental vector shuffle lowering code path."),
    cl::Hidden);

static cl::opt<bool> ExperimentalVectorShuffleLegality(
    "x86-experimental-vector-shuffle-legality", cl::init(false),
    cl::desc("Enable experimental shuffle legality based on the experimental "
             "shuffle lowering. Should only be used with the experimental "
             "shuffle lowering."),
    cl::Hidden);

static cl::opt<int> ReciprocalEstimateRefinementSteps(
    "x86-recip-refinement-steps", cl::init(1),
    cl::desc("Specify the number of Newton-Raphson iterations applied to the "
             "result of the hardware reciprocal estimate instruction."),
    cl::NotHidden);
// Forward declarations.
static SDValue getMOVL(SelectionDAG &DAG, SDLoc dl, EVT VT, SDValue V1,
                       SDValue V2);

static SDValue ExtractSubVector(SDValue Vec, unsigned IdxVal,
                                SelectionDAG &DAG, SDLoc dl,
                                unsigned vectorWidth) {
  assert((vectorWidth == 128 || vectorWidth == 256) &&
         "Unsupported vector width");
  EVT VT = Vec.getValueType();
  EVT ElVT = VT.getVectorElementType();
  unsigned Factor = VT.getSizeInBits()/vectorWidth;
  EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT,
                                  VT.getVectorNumElements()/Factor);

  // Extract from UNDEF is UNDEF.
  if (Vec.getOpcode() == ISD::UNDEF)
    return DAG.getUNDEF(ResultVT);

  // Extract the relevant vectorWidth bits. Generate an EXTRACT_SUBVECTOR.
  unsigned ElemsPerChunk = vectorWidth / ElVT.getSizeInBits();

  // This is the index of the first element of the vectorWidth-bit chunk
  // we want.
  unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits()) / vectorWidth)
                               * ElemsPerChunk);

  // If the input is a buildvector just emit a smaller one.
  if (Vec.getOpcode() == ISD::BUILD_VECTOR)
    return DAG.getNode(ISD::BUILD_VECTOR, dl, ResultVT,
                       makeArrayRef(Vec->op_begin() + NormalizedIdxVal,
                                    ElemsPerChunk));

  SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal);
  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec, VecIdx);
}
/// Generate a DAG to grab 128-bits from a vector > 128 bits. This
/// sets things up to match to an AVX VEXTRACTF128 / VEXTRACTI128
/// or AVX-512 VEXTRACTF32x4 / VEXTRACTI32x4
/// instructions or a simple subregister reference. Idx is an index in the
/// 128 bits we want. It need not be aligned to a 128-bit boundary. That makes
/// lowering EXTRACT_VECTOR_ELT operations easier.
static SDValue Extract128BitVector(SDValue Vec, unsigned IdxVal,
                                   SelectionDAG &DAG, SDLoc dl) {
  assert((Vec.getValueType().is256BitVector() ||
          Vec.getValueType().is512BitVector()) && "Unexpected vector size!");
  return ExtractSubVector(Vec, IdxVal, DAG, dl, 128);
}

/// Generate a DAG to grab 256-bits from a 512-bit vector.
static SDValue Extract256BitVector(SDValue Vec, unsigned IdxVal,
                                   SelectionDAG &DAG, SDLoc dl) {
  assert(Vec.getValueType().is512BitVector() && "Unexpected vector size!");
  return ExtractSubVector(Vec, IdxVal, DAG, dl, 256);
}
static SDValue InsertSubVector(SDValue Result, SDValue Vec,
                               unsigned IdxVal, SelectionDAG &DAG,
                               SDLoc dl, unsigned vectorWidth) {
  assert((vectorWidth == 128 || vectorWidth == 256) &&
         "Unsupported vector width");
  // Inserting UNDEF is Result.
  if (Vec.getOpcode() == ISD::UNDEF)
    return Result;
  EVT VT = Vec.getValueType();
  EVT ElVT = VT.getVectorElementType();
  EVT ResultVT = Result.getValueType();

  // Insert the relevant vectorWidth bits.
  unsigned ElemsPerChunk = vectorWidth/ElVT.getSizeInBits();

  // This is the index of the first element of the vectorWidth-bit chunk
  // we want.
  unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits())/vectorWidth)
                               * ElemsPerChunk);

  SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal);
  return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec, VecIdx);
}
/// Generate a DAG to put 128-bits into a vector > 128 bits. This
/// sets things up to match to an AVX VINSERTF128/VINSERTI128 or
/// AVX-512 VINSERTF32x4/VINSERTI32x4 instructions or a
/// simple superregister reference. Idx is an index in the 128 bits
/// we want. It need not be aligned to a 128-bit boundary. That makes
/// lowering INSERT_VECTOR_ELT operations easier.
static SDValue Insert128BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
                                  SelectionDAG &DAG, SDLoc dl) {
  assert(Vec.getValueType().is128BitVector() && "Unexpected vector size!");
  return InsertSubVector(Result, Vec, IdxVal, DAG, dl, 128);
}

static SDValue Insert256BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
                                  SelectionDAG &DAG, SDLoc dl) {
  assert(Vec.getValueType().is256BitVector() && "Unexpected vector size!");
  return InsertSubVector(Result, Vec, IdxVal, DAG, dl, 256);
}
/// Concat two 128-bit vectors into a 256-bit vector using VINSERTF128
/// instructions. This is used because creating CONCAT_VECTOR nodes of
/// BUILD_VECTORS returns a larger BUILD_VECTOR while we're trying to lower
/// large BUILD_VECTORS.
static SDValue Concat128BitVectors(SDValue V1, SDValue V2, EVT VT,
                                   unsigned NumElems, SelectionDAG &DAG,
                                   SDLoc dl) {
  SDValue V = Insert128BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl);
  return Insert128BitVector(V, V2, NumElems/2, DAG, dl);
}

static SDValue Concat256BitVectors(SDValue V1, SDValue V2, EVT VT,
                                   unsigned NumElems, SelectionDAG &DAG,
                                   SDLoc dl) {
  SDValue V = Insert256BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl);
  return Insert256BitVector(V, V2, NumElems/2, DAG, dl);
}
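
// A minimal illustrative sketch (not referenced by the lowering code below):
// it shows how the subvector helpers above compose. Splitting a 256-bit
// vector into its 128-bit halves with Extract128BitVector and stitching them
// back together with Concat128BitVectors reproduces the original value. The
// helper name is hypothetical and the function exists purely as an example.
LLVM_ATTRIBUTE_UNUSED
static SDValue splitAndRejoin256BitVector(SDValue Vec, SelectionDAG &DAG,
                                          SDLoc dl) {
  EVT VT = Vec.getValueType();
  assert(VT.is256BitVector() && "Expected a 256-bit vector");
  unsigned NumElems = VT.getVectorNumElements();
  // The lower half starts at element 0, the upper half at NumElems / 2.
  SDValue Lo = Extract128BitVector(Vec, 0, DAG, dl);
  SDValue Hi = Extract128BitVector(Vec, NumElems / 2, DAG, dl);
  // Rebuilding the 256-bit value matches the VINSERTF128 patterns on AVX.
  return Concat128BitVectors(Lo, Hi, VT, NumElems, DAG, dl);
}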
X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
                                     const X86Subtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  X86ScalarSSEf64 = Subtarget->hasSSE2();
  X86ScalarSSEf32 = Subtarget->hasSSE1();
  TD = getDataLayout();

  // Set up the TargetLowering object.
  static const MVT IntVTs[] = { MVT::i8, MVT::i16, MVT::i32, MVT::i64 };

  // X86 is weird. It always uses i8 for shift amounts and setcc results.
  setBooleanContents(ZeroOrOneBooleanContent);
  // X86-SSE is even stranger. It uses -1 or 0 for vector masks.
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
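  // In practice a scalar SETCC therefore produces 0 or 1 in an i8 register
  // (via SETcc), while a vector compare such as PCMPEQD produces an all-ones
  // or all-zeros lane that can be used directly as a blend/select mask.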
  // For 64-bit, since we have so many registers, use the ILP scheduler.
  // For 32-bit, use the register pressure specific scheduling.
  // For Atom, always use ILP scheduling.
  if (Subtarget->isAtom())
    setSchedulingPreference(Sched::ILP);
  else if (Subtarget->is64Bit())
    setSchedulingPreference(Sched::ILP);
  else
    setSchedulingPreference(Sched::RegPressure);
  const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
  setStackPointerRegisterToSaveRestore(RegInfo->getStackRegister());
  // Bypass expensive divides on Atom when compiling with O2.
  if (TM.getOptLevel() >= CodeGenOpt::Default) {
    if (Subtarget->hasSlowDivide32())
      addBypassSlowDiv(32, 8);
    if (Subtarget->hasSlowDivide64() && Subtarget->is64Bit())
      addBypassSlowDiv(64, 16);
  }
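  // Roughly speaking, addBypassSlowDiv(32, 8) lets CodeGenPrepare insert a
  // runtime check and use an 8-bit divide when both operands of a 32-bit
  // divide happen to fit in 8 bits, which is much cheaper on these cores.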
  if (Subtarget->isTargetKnownWindowsMSVC()) {
    // Setup Windows compiler runtime calls.
    setLibcallName(RTLIB::SDIV_I64, "_alldiv");
    setLibcallName(RTLIB::UDIV_I64, "_aulldiv");
    setLibcallName(RTLIB::SREM_I64, "_allrem");
    setLibcallName(RTLIB::UREM_I64, "_aullrem");
    setLibcallName(RTLIB::MUL_I64, "_allmul");
    setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::SREM_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::UREM_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::X86_StdCall);

    // The _ftol2 runtime function has an unusual calling convention, which
    // is modeled by a special pseudo-instruction.
    setLibcallName(RTLIB::FPTOUINT_F64_I64, nullptr);
    setLibcallName(RTLIB::FPTOUINT_F32_I64, nullptr);
    setLibcallName(RTLIB::FPTOUINT_F64_I32, nullptr);
    setLibcallName(RTLIB::FPTOUINT_F32_I32, nullptr);
  }
  if (Subtarget->isTargetDarwin()) {
    // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
    setUseUnderscoreSetJmp(false);
    setUseUnderscoreLongJmp(false);
  } else if (Subtarget->isTargetWindowsGNU()) {
    // MS runtime is weird: it exports _setjmp, but longjmp!
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(false);
  } else {
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(true);
  }
  // Set up the register classes.
  addRegisterClass(MVT::i8, &X86::GR8RegClass);
  addRegisterClass(MVT::i16, &X86::GR16RegClass);
  addRegisterClass(MVT::i32, &X86::GR32RegClass);
  if (Subtarget->is64Bit())
    addRegisterClass(MVT::i64, &X86::GR64RegClass);

  for (MVT VT : MVT::integer_valuetypes())
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);

  // We don't accept any truncstore of integer registers.
  setTruncStoreAction(MVT::i64, MVT::i32, Expand);
  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8 , Expand);
  setTruncStoreAction(MVT::i32, MVT::i16, Expand);
  setTruncStoreAction(MVT::i32, MVT::i8 , Expand);
  setTruncStoreAction(MVT::i16, MVT::i8 , Expand);

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  // SETOEQ and SETUNE require checking two conditions.
  setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOEQ, MVT::f80, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f80, Expand);
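  // UCOMISS/UCOMISD set ZF for both "equal" and "unordered", so an ordered
  // compare-equal (SETOEQ) needs ZF set *and* PF clear, and SETUNE needs the
  // complementary pair; marking them Expand lets the legalizer rewrite each
  // one as two supported condition tests combined with and/or.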
  // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
  // operation.
  setOperationAction(ISD::UINT_TO_FP       , MVT::i1   , Promote);
  setOperationAction(ISD::UINT_TO_FP       , MVT::i8   , Promote);
  setOperationAction(ISD::UINT_TO_FP       , MVT::i16  , Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::UINT_TO_FP     , MVT::i32  , Promote);
    setOperationAction(ISD::UINT_TO_FP     , MVT::i64  , Custom);
  } else if (!TM.Options.UseSoftFloat) {
    // We have an algorithm for SSE2->double, and we turn this into a
    // 64-bit FILD followed by conditional FADD for other targets.
    setOperationAction(ISD::UINT_TO_FP     , MVT::i64  , Custom);
    // We have an algorithm for SSE2, and we turn this into a 64-bit
    // FILD for other targets.
    setOperationAction(ISD::UINT_TO_FP     , MVT::i32  , Custom);
  }
  // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::SINT_TO_FP       , MVT::i1   , Promote);
  setOperationAction(ISD::SINT_TO_FP       , MVT::i8   , Promote);

  if (!TM.Options.UseSoftFloat) {
    // SSE has no i16 to fp conversion, only i32.
    if (X86ScalarSSEf32) {
      setOperationAction(ISD::SINT_TO_FP   , MVT::i16  , Promote);
      // f32 and f64 cases are Legal, f80 case is not.
      setOperationAction(ISD::SINT_TO_FP   , MVT::i32  , Custom);
    } else {
      setOperationAction(ISD::SINT_TO_FP   , MVT::i16  , Custom);
      setOperationAction(ISD::SINT_TO_FP   , MVT::i32  , Custom);
    }
  } else {
    setOperationAction(ISD::SINT_TO_FP     , MVT::i16  , Promote);
    setOperationAction(ISD::SINT_TO_FP     , MVT::i32  , Promote);
  }
  // In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64
  // are Legal, f80 is custom lowered.
  setOperationAction(ISD::FP_TO_SINT       , MVT::i64  , Custom);
  setOperationAction(ISD::SINT_TO_FP       , MVT::i64  , Custom);

  // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINTs, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::FP_TO_SINT       , MVT::i1   , Promote);
  setOperationAction(ISD::FP_TO_SINT       , MVT::i8   , Promote);

  if (X86ScalarSSEf32) {
    setOperationAction(ISD::FP_TO_SINT     , MVT::i16  , Promote);
    // f32 and f64 cases are Legal, f80 case is not.
    setOperationAction(ISD::FP_TO_SINT     , MVT::i32  , Custom);
  } else {
    setOperationAction(ISD::FP_TO_SINT     , MVT::i16  , Custom);
    setOperationAction(ISD::FP_TO_SINT     , MVT::i32  , Custom);
  }
  // Handle FP_TO_UINT by promoting the destination to a larger signed
  // conversion.
  setOperationAction(ISD::FP_TO_UINT       , MVT::i1   , Promote);
  setOperationAction(ISD::FP_TO_UINT       , MVT::i8   , Promote);
  setOperationAction(ISD::FP_TO_UINT       , MVT::i16  , Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::FP_TO_UINT     , MVT::i64  , Expand);
    setOperationAction(ISD::FP_TO_UINT     , MVT::i32  , Promote);
  } else if (!TM.Options.UseSoftFloat) {
    // Since AVX is a superset of SSE3, only check for SSE here.
    if (Subtarget->hasSSE1() && !Subtarget->hasSSE3())
      // Expand FP_TO_UINT into a select.
      // FIXME: We would like to use a Custom expander here eventually to do
      // the optimal thing for SSE vs. the default expansion in the legalizer.
      setOperationAction(ISD::FP_TO_UINT   , MVT::i32  , Expand);
    else
      // With SSE3 we can use fisttpll to convert to a signed i64; without
      // SSE, we're stuck with a fistpll.
      setOperationAction(ISD::FP_TO_UINT   , MVT::i32  , Custom);
  }

  if (isTargetFTOL()) {
    // Use the _ftol2 runtime function, which has a pseudo-instruction
    // to handle its weird calling convention.
    setOperationAction(ISD::FP_TO_UINT     , MVT::i64  , Custom);
  }
  // TODO: when we have SSE, these could be more efficient, by using movd/movq.
  if (!X86ScalarSSEf64) {
    setOperationAction(ISD::BITCAST        , MVT::f32  , Expand);
    setOperationAction(ISD::BITCAST        , MVT::i32  , Expand);
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::BITCAST      , MVT::f64  , Expand);
      // Without SSE, i64->f64 goes through memory.
      setOperationAction(ISD::BITCAST      , MVT::i64  , Expand);
    }
  }

  // Scalar integer divide and remainder are lowered to use operations that
  // produce two results, to match the available instructions. This exposes
  // the two-result form to trivial CSE, which is able to combine x/y and x%y
  // into a single instruction.
  //
  // Scalar integer multiply-high is also lowered to use two-result
  // operations, to match the available instructions. However, plain multiply
  // (low) operations are left as Legal, as there are single-result
  // instructions for this in x86. Using the two-result multiply instructions
  // when both high and low results are needed must be arranged by dagcombine.
  for (unsigned i = 0; i != array_lengthof(IntVTs); ++i) {
    MVT VT = IntVTs[i];
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);

    // Add/Sub overflow ops with MVT::Glues are lowered to EFLAGS dependences.
    setOperationAction(ISD::ADDC, VT, Custom);
    setOperationAction(ISD::ADDE, VT, Custom);
    setOperationAction(ISD::SUBC, VT, Custom);
    setOperationAction(ISD::SUBE, VT, Custom);
  }
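  // For example, "x / y" and "x % y" in the same basic block both legalize
  // to the two-result ISD::SDIVREM node, CSE merges them, and the selector
  // emits a single IDIV that yields the quotient in EAX and the remainder
  // in EDX (for the i32 case).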
  setOperationAction(ISD::BR_JT            , MVT::Other, Expand);
  setOperationAction(ISD::BRCOND           , MVT::Other, Custom);
  setOperationAction(ISD::BR_CC            , MVT::f32,   Expand);
  setOperationAction(ISD::BR_CC            , MVT::f64,   Expand);
  setOperationAction(ISD::BR_CC            , MVT::f80,   Expand);
  setOperationAction(ISD::BR_CC            , MVT::i8,    Expand);
  setOperationAction(ISD::BR_CC            , MVT::i16,   Expand);
  setOperationAction(ISD::BR_CC            , MVT::i32,   Expand);
  setOperationAction(ISD::BR_CC            , MVT::i64,   Expand);
  setOperationAction(ISD::SELECT_CC        , MVT::f32,   Expand);
  setOperationAction(ISD::SELECT_CC        , MVT::f64,   Expand);
  setOperationAction(ISD::SELECT_CC        , MVT::f80,   Expand);
  setOperationAction(ISD::SELECT_CC        , MVT::i8,    Expand);
  setOperationAction(ISD::SELECT_CC        , MVT::i16,   Expand);
  setOperationAction(ISD::SELECT_CC        , MVT::i32,   Expand);
  setOperationAction(ISD::SELECT_CC        , MVT::i64,   Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16  , Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8   , Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1   , Expand);
  setOperationAction(ISD::FP_ROUND_INREG   , MVT::f32  , Expand);
  setOperationAction(ISD::FREM             , MVT::f32  , Expand);
  setOperationAction(ISD::FREM             , MVT::f64  , Expand);
  setOperationAction(ISD::FREM             , MVT::f80  , Expand);
  setOperationAction(ISD::FLT_ROUNDS_      , MVT::i32  , Custom);
  // Promote the i8 variants and force them on up to i32 which has a shorter
  // encoding.
  setOperationAction(ISD::CTTZ             , MVT::i8   , Promote);
  AddPromotedToType (ISD::CTTZ             , MVT::i8   , MVT::i32);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF  , MVT::i8   , Promote);
  AddPromotedToType (ISD::CTTZ_ZERO_UNDEF  , MVT::i8   , MVT::i32);
  if (Subtarget->hasBMI()) {
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16  , Expand);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32  , Expand);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
  } else {
    setOperationAction(ISD::CTTZ           , MVT::i16  , Custom);
    setOperationAction(ISD::CTTZ           , MVT::i32  , Custom);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTTZ         , MVT::i64  , Custom);
  }
  if (Subtarget->hasLZCNT()) {
    // When promoting the i8 variants, force them to i32 for a shorter
    // encoding.
    setOperationAction(ISD::CTLZ           , MVT::i8   , Promote);
    AddPromotedToType (ISD::CTLZ           , MVT::i8   , MVT::i32);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8   , Promote);
    AddPromotedToType (ISD::CTLZ_ZERO_UNDEF, MVT::i8   , MVT::i32);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16  , Expand);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32  , Expand);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);
  } else {
    setOperationAction(ISD::CTLZ           , MVT::i8   , Custom);
    setOperationAction(ISD::CTLZ           , MVT::i16  , Custom);
    setOperationAction(ISD::CTLZ           , MVT::i32  , Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8   , Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16  , Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32  , Custom);
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::CTLZ         , MVT::i64  , Custom);
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom);
    }
  }
  // Special handling for half-precision floating point conversions.
  // If we don't have F16C support, then lower half float conversions
  // into library calls.
  if (TM.Options.UseSoftFloat || !Subtarget->hasF16C()) {
    setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
    setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
  }

  // There's never any support for operations beyond MVT::f32.
  setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
  setOperationAction(ISD::FP16_TO_FP, MVT::f80, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f80, Expand);

  setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f80, MVT::f16, Expand);
  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  setTruncStoreAction(MVT::f80, MVT::f16, Expand);
  if (Subtarget->hasPOPCNT()) {
    setOperationAction(ISD::CTPOP          , MVT::i8   , Promote);
  } else {
    setOperationAction(ISD::CTPOP          , MVT::i8   , Expand);
    setOperationAction(ISD::CTPOP          , MVT::i16  , Expand);
    setOperationAction(ISD::CTPOP          , MVT::i32  , Expand);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTPOP        , MVT::i64  , Expand);
  }

  setOperationAction(ISD::READCYCLECOUNTER , MVT::i64  , Custom);

  if (!Subtarget->hasMOVBE())
    setOperationAction(ISD::BSWAP          , MVT::i16  , Expand);
  // These should be promoted to a larger select which is supported.
  setOperationAction(ISD::SELECT           , MVT::i1   , Promote);
  // X86 wants to expand cmov itself.
  setOperationAction(ISD::SELECT           , MVT::i8   , Custom);
  setOperationAction(ISD::SELECT           , MVT::i16  , Custom);
  setOperationAction(ISD::SELECT           , MVT::i32  , Custom);
  setOperationAction(ISD::SELECT           , MVT::f32  , Custom);
  setOperationAction(ISD::SELECT           , MVT::f64  , Custom);
  setOperationAction(ISD::SELECT           , MVT::f80  , Custom);
  setOperationAction(ISD::SETCC            , MVT::i8   , Custom);
  setOperationAction(ISD::SETCC            , MVT::i16  , Custom);
  setOperationAction(ISD::SETCC            , MVT::i32  , Custom);
  setOperationAction(ISD::SETCC            , MVT::f32  , Custom);
  setOperationAction(ISD::SETCC            , MVT::f64  , Custom);
  setOperationAction(ISD::SETCC            , MVT::f80  , Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SELECT         , MVT::i64  , Custom);
    setOperationAction(ISD::SETCC          , MVT::i64  , Custom);
  }
  setOperationAction(ISD::EH_RETURN       , MVT::Other, Custom);
  // NOTE: EH_SJLJ_SETJMP/_LONGJMP are NOT intended to support SjLj exception
  // handling; they are a light-weight setjmp/longjmp replacement used for
  // continuations, user-level threading, and so on. As a result, no other
  // SjLj exception interfaces are implemented; please don't build your own
  // exception handling on top of them.
  // LLVM/Clang supports zero-cost DWARF exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);

  setOperationAction(ISD::ConstantPool    , MVT::i32  , Custom);
  setOperationAction(ISD::JumpTable       , MVT::i32  , Custom);
  setOperationAction(ISD::GlobalAddress   , MVT::i32  , Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32  , Custom);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::ExternalSymbol  , MVT::i32  , Custom);
  setOperationAction(ISD::BlockAddress    , MVT::i32  , Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::ConstantPool  , MVT::i64  , Custom);
    setOperationAction(ISD::JumpTable     , MVT::i64  , Custom);
    setOperationAction(ISD::GlobalAddress , MVT::i64  , Custom);
    setOperationAction(ISD::ExternalSymbol, MVT::i64  , Custom);
    setOperationAction(ISD::BlockAddress  , MVT::i64  , Custom);
  }
  // 64-bit add, sub, shl, sra, srl (iff 32-bit x86).
  setOperationAction(ISD::SHL_PARTS       , MVT::i32  , Custom);
  setOperationAction(ISD::SRA_PARTS       , MVT::i32  , Custom);
  setOperationAction(ISD::SRL_PARTS       , MVT::i32  , Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SHL_PARTS     , MVT::i64  , Custom);
    setOperationAction(ISD::SRA_PARTS     , MVT::i64  , Custom);
    setOperationAction(ISD::SRL_PARTS     , MVT::i64  , Custom);
  }

  if (Subtarget->hasSSE1())
    setOperationAction(ISD::PREFETCH      , MVT::Other, Legal);
  setOperationAction(ISD::ATOMIC_FENCE    , MVT::Other, Custom);

  // Expand certain atomics.
  for (unsigned i = 0; i != array_lengthof(IntVTs); ++i) {
    MVT VT = IntVTs[i];
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);
    setOperationAction(ISD::ATOMIC_STORE, VT, Custom);
  }

  if (Subtarget->hasCmpxchg16b()) {
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i128, Custom);
  }

  // FIXME - use subtarget debug flags
  if (!Subtarget->isTargetDarwin() && !Subtarget->isTargetELF() &&
      !Subtarget->isTargetCygMing() && !Subtarget->isTargetWin64()) {
    setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
  }

  if (Subtarget->is64Bit()) {
    setExceptionPointerRegister(X86::RAX);
    setExceptionSelectorRegister(X86::RDX);
  } else {
    setExceptionPointerRegister(X86::EAX);
    setExceptionSelectorRegister(X86::EDX);
  }
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i64, Custom);

  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::TRAP, MVT::Other, Legal);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART          , MVT::Other, Custom);
  setOperationAction(ISD::VAEND            , MVT::Other, Expand);
  if (Subtarget->is64Bit() && !Subtarget->isTargetWin64()) {
    // TargetInfo::X86_64ABIBuiltinVaList
    setOperationAction(ISD::VAARG          , MVT::Other, Custom);
    setOperationAction(ISD::VACOPY         , MVT::Other, Custom);
  } else {
    // TargetInfo::CharPtrBuiltinVaList
    setOperationAction(ISD::VAARG          , MVT::Other, Expand);
    setOperationAction(ISD::VACOPY         , MVT::Other, Expand);
  }

  setOperationAction(ISD::STACKSAVE,          MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE,       MVT::Other, Expand);

  setOperationAction(ISD::DYNAMIC_STACKALLOC, getPointerTy(), Custom);
  if (!TM.Options.UseSoftFloat && X86ScalarSSEf64) {
    // f32 and f64 use SSE.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, &X86::FR32RegClass);
    addRegisterClass(MVT::f64, &X86::FR64RegClass);

    // Use ANDPD to simulate FABS.
    setOperationAction(ISD::FABS , MVT::f64, Custom);
    setOperationAction(ISD::FABS , MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG , MVT::f64, Custom);
    setOperationAction(ISD::FNEG , MVT::f32, Custom);

    // Use ANDPD and ORPD to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // Lower this to FGETSIGNx86 plus an AND.
    setOperationAction(ISD::FGETSIGN, MVT::i64, Custom);
    setOperationAction(ISD::FGETSIGN, MVT::i32, Custom);

    // We don't support sin/cos/fmod.
    setOperationAction(ISD::FSIN   , MVT::f64, Expand);
    setOperationAction(ISD::FCOS   , MVT::f64, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
    setOperationAction(ISD::FSIN   , MVT::f32, Expand);
    setOperationAction(ISD::FCOS   , MVT::f32, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f32, Expand);

    // Expand FP immediates into loads from the stack, except for the special
    // cases we handle.
    addLegalFPImmediate(APFloat(+0.0)); // xorpd
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
  } else if (!TM.Options.UseSoftFloat && X86ScalarSSEf32) {
    // Use SSE for f32, x87 for f64.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, &X86::FR32RegClass);
    addRegisterClass(MVT::f64, &X86::RFP64RegClass);

    // Use ANDPS to simulate FABS.
    setOperationAction(ISD::FABS , MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG , MVT::f32, Custom);

    setOperationAction(ISD::UNDEF,     MVT::f64, Expand);

    // Use ANDPS and ORPS to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod.
    setOperationAction(ISD::FSIN   , MVT::f32, Expand);
    setOperationAction(ISD::FCOS   , MVT::f32, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f32, Expand);

    // Special cases we handle for FP constants.
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
    addLegalFPImmediate(APFloat(+0.0)); // FLD0
    addLegalFPImmediate(APFloat(+1.0)); // FLD1
    addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS

    if (!TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FSIN   , MVT::f64, Expand);
      setOperationAction(ISD::FCOS   , MVT::f64, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
    }
  } else if (!TM.Options.UseSoftFloat) {
    // f32 and f64 in x87.
    // Set up the FP register classes.
    addRegisterClass(MVT::f64, &X86::RFP64RegClass);
    addRegisterClass(MVT::f32, &X86::RFP32RegClass);

    setOperationAction(ISD::UNDEF,     MVT::f64, Expand);
    setOperationAction(ISD::UNDEF,     MVT::f32, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

    if (!TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FSIN   , MVT::f64, Expand);
      setOperationAction(ISD::FSIN   , MVT::f32, Expand);
      setOperationAction(ISD::FCOS   , MVT::f64, Expand);
      setOperationAction(ISD::FCOS   , MVT::f32, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
    }
    addLegalFPImmediate(APFloat(+0.0)); // FLD0
    addLegalFPImmediate(APFloat(+1.0)); // FLD1
    addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS
    addLegalFPImmediate(APFloat(+0.0f)); // FLD0
    addLegalFPImmediate(APFloat(+1.0f)); // FLD1
    addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
  }
  // We don't support FMA.
  setOperationAction(ISD::FMA, MVT::f64, Expand);
  setOperationAction(ISD::FMA, MVT::f32, Expand);

  // Long double always uses X87.
  if (!TM.Options.UseSoftFloat) {
    addRegisterClass(MVT::f80, &X86::RFP80RegClass);
    setOperationAction(ISD::UNDEF,     MVT::f80, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
    {
      APFloat TmpFlt = APFloat::getZero(APFloat::x87DoubleExtended);
      addLegalFPImmediate(TmpFlt);  // FLD0
      TmpFlt.changeSign();
      addLegalFPImmediate(TmpFlt);  // FLD0/FCHS

      bool ignored;
      APFloat TmpFlt2(+1.0);
      TmpFlt2.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven,
                      &ignored);
      addLegalFPImmediate(TmpFlt2);  // FLD1
      TmpFlt2.changeSign();
      addLegalFPImmediate(TmpFlt2);  // FLD1/FCHS
    }

    if (!TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FSIN   , MVT::f80, Expand);
      setOperationAction(ISD::FCOS   , MVT::f80, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f80, Expand);
    }

    setOperationAction(ISD::FFLOOR, MVT::f80, Expand);
    setOperationAction(ISD::FCEIL,  MVT::f80, Expand);
    setOperationAction(ISD::FTRUNC, MVT::f80, Expand);
    setOperationAction(ISD::FRINT,  MVT::f80, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::f80, Expand);
    setOperationAction(ISD::FMA, MVT::f80, Expand);
  }
  // Always use a library call for pow.
  setOperationAction(ISD::FPOW             , MVT::f32  , Expand);
  setOperationAction(ISD::FPOW             , MVT::f64  , Expand);
  setOperationAction(ISD::FPOW             , MVT::f80  , Expand);

  setOperationAction(ISD::FLOG,    MVT::f80, Expand);
  setOperationAction(ISD::FLOG2,   MVT::f80, Expand);
  setOperationAction(ISD::FLOG10,  MVT::f80, Expand);
  setOperationAction(ISD::FEXP,    MVT::f80, Expand);
  setOperationAction(ISD::FEXP2,   MVT::f80, Expand);
  setOperationAction(ISD::FMINNUM, MVT::f80, Expand);
  setOperationAction(ISD::FMAXNUM, MVT::f80, Expand);

  // First set operation action for all vector types to either promote
  // (for widening) or expand (for scalarization). Then we will selectively
  // turn on ones that can be effectively codegen'd.
  for (MVT VT : MVT::vector_valuetypes()) {
    setOperationAction(ISD::ADD , VT, Expand);
    setOperationAction(ISD::SUB , VT, Expand);
    setOperationAction(ISD::FADD, VT, Expand);
    setOperationAction(ISD::FNEG, VT, Expand);
    setOperationAction(ISD::FSUB, VT, Expand);
    setOperationAction(ISD::MUL , VT, Expand);
    setOperationAction(ISD::FMUL, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::FDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::LOAD, VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Expand);
    setOperationAction(ISD::INSERT_SUBVECTOR, VT, Expand);
    setOperationAction(ISD::FABS, VT, Expand);
    setOperationAction(ISD::FSIN, VT, Expand);
    setOperationAction(ISD::FSINCOS, VT, Expand);
    setOperationAction(ISD::FCOS, VT, Expand);
    setOperationAction(ISD::FSINCOS, VT, Expand);
    setOperationAction(ISD::FREM, VT, Expand);
    setOperationAction(ISD::FMA,  VT, Expand);
    setOperationAction(ISD::FPOWI, VT, Expand);
    setOperationAction(ISD::FSQRT, VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, VT, Expand);
    setOperationAction(ISD::FFLOOR, VT, Expand);
    setOperationAction(ISD::FCEIL, VT, Expand);
    setOperationAction(ISD::FTRUNC, VT, Expand);
    setOperationAction(ISD::FRINT, VT, Expand);
    setOperationAction(ISD::FNEARBYINT, VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::SDIVREM, VT, Expand);
    setOperationAction(ISD::UDIVREM, VT, Expand);
    setOperationAction(ISD::FPOW, VT, Expand);
    setOperationAction(ISD::CTPOP, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
    setOperationAction(ISD::SHL, VT, Expand);
    setOperationAction(ISD::SRA, VT, Expand);
    setOperationAction(ISD::SRL, VT, Expand);
    setOperationAction(ISD::ROTL, VT, Expand);
    setOperationAction(ISD::ROTR, VT, Expand);
    setOperationAction(ISD::BSWAP, VT, Expand);
    setOperationAction(ISD::SETCC, VT, Expand);
    setOperationAction(ISD::FLOG, VT, Expand);
    setOperationAction(ISD::FLOG2, VT, Expand);
    setOperationAction(ISD::FLOG10, VT, Expand);
    setOperationAction(ISD::FEXP, VT, Expand);
    setOperationAction(ISD::FEXP2, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
    setOperationAction(ISD::TRUNCATE, VT, Expand);
    setOperationAction(ISD::SIGN_EXTEND, VT, Expand);
    setOperationAction(ISD::ZERO_EXTEND, VT, Expand);
    setOperationAction(ISD::ANY_EXTEND, VT, Expand);
    setOperationAction(ISD::VSELECT, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
    for (MVT InnerVT : MVT::vector_valuetypes()) {
      setTruncStoreAction(InnerVT, VT, Expand);

      setLoadExtAction(ISD::SEXTLOAD, InnerVT, VT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, InnerVT, VT, Expand);

      // N.b. ISD::EXTLOAD legality is basically ignored except for i1-like
      // types, we have to deal with them whether we ask for Expansion or not.
      // Setting Expand causes its own optimisation problems though, so leave
      // them legal.
      if (VT.getVectorElementType() == MVT::i1)
        setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);
    }
  }
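  // Everything below re-enables operations only for the vector ISAs that can
  // actually implement them, starting with MMX and working up through
  // SSE1/SSE2/SSE4.1 and the AVX register classes.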
  // FIXME: In order to prevent SSE instructions being expanded to MMX ones
  // with -msoft-float, disable use of MMX as well.
  if (!TM.Options.UseSoftFloat && Subtarget->hasMMX()) {
    addRegisterClass(MVT::x86mmx, &X86::VR64RegClass);
    // No operations on x86mmx supported, everything uses intrinsics.
  }
  // MMX-sized vectors (other than x86mmx) are expected to be expanded
  // into smaller operations.
  setOperationAction(ISD::MULHS,              MVT::v8i8,  Expand);
  setOperationAction(ISD::MULHS,              MVT::v4i16, Expand);
  setOperationAction(ISD::MULHS,              MVT::v2i32, Expand);
  setOperationAction(ISD::MULHS,              MVT::v1i64, Expand);
  setOperationAction(ISD::AND,                MVT::v8i8,  Expand);
  setOperationAction(ISD::AND,                MVT::v4i16, Expand);
  setOperationAction(ISD::AND,                MVT::v2i32, Expand);
  setOperationAction(ISD::AND,                MVT::v1i64, Expand);
  setOperationAction(ISD::OR,                 MVT::v8i8,  Expand);
  setOperationAction(ISD::OR,                 MVT::v4i16, Expand);
  setOperationAction(ISD::OR,                 MVT::v2i32, Expand);
  setOperationAction(ISD::OR,                 MVT::v1i64, Expand);
  setOperationAction(ISD::XOR,                MVT::v8i8,  Expand);
  setOperationAction(ISD::XOR,                MVT::v4i16, Expand);
  setOperationAction(ISD::XOR,                MVT::v2i32, Expand);
  setOperationAction(ISD::XOR,                MVT::v1i64, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR,   MVT::v8i8,  Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR,   MVT::v4i16, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR,   MVT::v2i32, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR,   MVT::v1i64, Expand);
  setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v1i64, Expand);
  setOperationAction(ISD::SELECT,             MVT::v8i8,  Expand);
  setOperationAction(ISD::SELECT,             MVT::v4i16, Expand);
  setOperationAction(ISD::SELECT,             MVT::v2i32, Expand);
  setOperationAction(ISD::SELECT,             MVT::v1i64, Expand);
  setOperationAction(ISD::BITCAST,            MVT::v8i8,  Expand);
  setOperationAction(ISD::BITCAST,            MVT::v4i16, Expand);
  setOperationAction(ISD::BITCAST,            MVT::v2i32, Expand);
  setOperationAction(ISD::BITCAST,            MVT::v1i64, Expand);
  if (!TM.Options.UseSoftFloat && Subtarget->hasSSE1()) {
    addRegisterClass(MVT::v4f32, &X86::VR128RegClass);

    setOperationAction(ISD::FADD,               MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB,               MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL,               MVT::v4f32, Legal);
    setOperationAction(ISD::FDIV,               MVT::v4f32, Legal);
    setOperationAction(ISD::FSQRT,              MVT::v4f32, Legal);
    setOperationAction(ISD::FNEG,               MVT::v4f32, Custom);
    setOperationAction(ISD::FABS,               MVT::v4f32, Custom);
    setOperationAction(ISD::LOAD,               MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR,       MVT::v4f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::SELECT,             MVT::v4f32, Custom);
    setOperationAction(ISD::UINT_TO_FP,         MVT::v4i32, Custom);
  }
  if (!TM.Options.UseSoftFloat && Subtarget->hasSSE2()) {
    addRegisterClass(MVT::v2f64, &X86::VR128RegClass);

    // FIXME: Unfortunately, -soft-float and -no-implicit-float mean XMM
    // registers cannot be used even for integer operations.
    addRegisterClass(MVT::v16i8, &X86::VR128RegClass);
    addRegisterClass(MVT::v8i16, &X86::VR128RegClass);
    addRegisterClass(MVT::v4i32, &X86::VR128RegClass);
    addRegisterClass(MVT::v2i64, &X86::VR128RegClass);

    setOperationAction(ISD::ADD,                MVT::v16i8, Legal);
    setOperationAction(ISD::ADD,                MVT::v8i16, Legal);
    setOperationAction(ISD::ADD,                MVT::v4i32, Legal);
    setOperationAction(ISD::ADD,                MVT::v2i64, Legal);
    setOperationAction(ISD::MUL,                MVT::v4i32, Custom);
    setOperationAction(ISD::MUL,                MVT::v2i64, Custom);
    setOperationAction(ISD::UMUL_LOHI,          MVT::v4i32, Custom);
    setOperationAction(ISD::SMUL_LOHI,          MVT::v4i32, Custom);
    setOperationAction(ISD::MULHU,              MVT::v8i16, Legal);
    setOperationAction(ISD::MULHS,              MVT::v8i16, Legal);
    setOperationAction(ISD::SUB,                MVT::v16i8, Legal);
    setOperationAction(ISD::SUB,                MVT::v8i16, Legal);
    setOperationAction(ISD::SUB,                MVT::v4i32, Legal);
    setOperationAction(ISD::SUB,                MVT::v2i64, Legal);
    setOperationAction(ISD::MUL,                MVT::v8i16, Legal);
    setOperationAction(ISD::FADD,               MVT::v2f64, Legal);
    setOperationAction(ISD::FSUB,               MVT::v2f64, Legal);
    setOperationAction(ISD::FMUL,               MVT::v2f64, Legal);
    setOperationAction(ISD::FDIV,               MVT::v2f64, Legal);
    setOperationAction(ISD::FSQRT,              MVT::v2f64, Legal);
    setOperationAction(ISD::FNEG,               MVT::v2f64, Custom);
    setOperationAction(ISD::FABS,               MVT::v2f64, Custom);

    setOperationAction(ISD::SETCC,              MVT::v2i64, Custom);
    setOperationAction(ISD::SETCC,              MVT::v16i8, Custom);
    setOperationAction(ISD::SETCC,              MVT::v8i16, Custom);
    setOperationAction(ISD::SETCC,              MVT::v4i32, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR,   MVT::v16i8, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR,   MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v4f32, Custom);

    // Only provide customized ctpop vector bit twiddling for vector types we
    // know to perform better than using the popcnt instructions on each vector
    // element. If popcnt isn't supported, always provide the custom version.
    if (!Subtarget->hasPOPCNT()) {
      setOperationAction(ISD::CTPOP,            MVT::v4i32, Custom);
      setOperationAction(ISD::CTPOP,            MVT::v2i64, Custom);
    }
    // Custom lower build_vector, vector_shuffle, and extract_vector_elt.
    for (int i = MVT::v16i8; i != MVT::v2i64; ++i) {
      MVT VT = (MVT::SimpleValueType)i;
      // Do not attempt to custom lower non-power-of-2 vectors.
      if (!isPowerOf2_32(VT.getVectorNumElements()))
        continue;
      // Do not attempt to custom lower non-128-bit vectors.
      if (!VT.is128BitVector())
        continue;
      setOperationAction(ISD::BUILD_VECTOR,       VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE,     VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    }
    // We support custom legalizing of sext and anyext loads for specific
    // memory vector types which we can load as a scalar (or sequence of
    // scalars) and extend in-register to a legal 128-bit vector type. For sext
    // loads these must work with a single scalar load.
    for (MVT VT : MVT::integer_vector_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i8,  Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i16, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v8i8,  Custom);
      setLoadExtAction(ISD::EXTLOAD,  VT, MVT::v2i8,  Custom);
      setLoadExtAction(ISD::EXTLOAD,  VT, MVT::v2i16, Custom);
      setLoadExtAction(ISD::EXTLOAD,  VT, MVT::v2i32, Custom);
      setLoadExtAction(ISD::EXTLOAD,  VT, MVT::v4i8,  Custom);
      setLoadExtAction(ISD::EXTLOAD,  VT, MVT::v4i16, Custom);
      setLoadExtAction(ISD::EXTLOAD,  VT, MVT::v8i8,  Custom);
    }
    setOperationAction(ISD::BUILD_VECTOR,       MVT::v2f64, Custom);
    setOperationAction(ISD::BUILD_VECTOR,       MVT::v2i64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v2f64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v2i64, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v2f64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);

    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v2i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
    }
    // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64.
    for (int i = MVT::v16i8; i != MVT::v2i64; ++i) {
      MVT VT = (MVT::SimpleValueType)i;

      // Do not attempt to promote non-128-bit vectors.
      if (!VT.is128BitVector())
        continue;

      setOperationAction(ISD::AND,    VT, Promote);
      AddPromotedToType (ISD::AND,    VT, MVT::v2i64);
      setOperationAction(ISD::OR,     VT, Promote);
      AddPromotedToType (ISD::OR,     VT, MVT::v2i64);
      setOperationAction(ISD::XOR,    VT, Promote);
      AddPromotedToType (ISD::XOR,    VT, MVT::v2i64);
      setOperationAction(ISD::LOAD,   VT, Promote);
      AddPromotedToType (ISD::LOAD,   VT, MVT::v2i64);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType (ISD::SELECT, VT, MVT::v2i64);
    }
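    // The promotion above is effectively a bitcast to v2i64 and back:
    // bitwise ops, whole-vector loads, and full-vector selects treat the
    // 128 bits as an opaque bag of bits, so a single set of v2i64 patterns
    // (pand/por/pxor, movdqa) covers all the 128-bit integer types.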
    // Custom lower v2i64 and v2f64 selects.
    setOperationAction(ISD::LOAD,               MVT::v2f64, Legal);
    setOperationAction(ISD::LOAD,               MVT::v2i64, Legal);
    setOperationAction(ISD::SELECT,             MVT::v2f64, Custom);
    setOperationAction(ISD::SELECT,             MVT::v2i64, Custom);

    setOperationAction(ISD::FP_TO_SINT,         MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP,         MVT::v4i32, Legal);

    setOperationAction(ISD::UINT_TO_FP,         MVT::v4i8,  Custom);
    setOperationAction(ISD::UINT_TO_FP,         MVT::v4i16, Custom);
    // As there is no 64-bit GPR available, we need to build a special custom
    // sequence to convert from v2i32 to v2f32.
    if (!Subtarget->is64Bit())
      setOperationAction(ISD::UINT_TO_FP,       MVT::v2f32, Custom);

    setOperationAction(ISD::FP_EXTEND,          MVT::v2f32, Custom);
    setOperationAction(ISD::FP_ROUND,           MVT::v2f32, Custom);

    for (MVT VT : MVT::fp_vector_valuetypes())
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2f32, Legal);

    setOperationAction(ISD::BITCAST,            MVT::v2i32, Custom);
    setOperationAction(ISD::BITCAST,            MVT::v4i16, Custom);
    setOperationAction(ISD::BITCAST,            MVT::v8i8,  Custom);
  }
  if (!TM.Options.UseSoftFloat && Subtarget->hasSSE41()) {
    setOperationAction(ISD::FFLOOR,             MVT::f32,   Legal);
    setOperationAction(ISD::FCEIL,              MVT::f32,   Legal);
    setOperationAction(ISD::FTRUNC,             MVT::f32,   Legal);
    setOperationAction(ISD::FRINT,              MVT::f32,   Legal);
    setOperationAction(ISD::FNEARBYINT,         MVT::f32,   Legal);
    setOperationAction(ISD::FFLOOR,             MVT::f64,   Legal);
    setOperationAction(ISD::FCEIL,              MVT::f64,   Legal);
    setOperationAction(ISD::FTRUNC,             MVT::f64,   Legal);
    setOperationAction(ISD::FRINT,              MVT::f64,   Legal);
    setOperationAction(ISD::FNEARBYINT,         MVT::f64,   Legal);

    setOperationAction(ISD::FFLOOR,             MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL,              MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC,             MVT::v4f32, Legal);
    setOperationAction(ISD::FRINT,              MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT,         MVT::v4f32, Legal);
    setOperationAction(ISD::FFLOOR,             MVT::v2f64, Legal);
    setOperationAction(ISD::FCEIL,              MVT::v2f64, Legal);
    setOperationAction(ISD::FTRUNC,             MVT::v2f64, Legal);
    setOperationAction(ISD::FRINT,              MVT::v2f64, Legal);
    setOperationAction(ISD::FNEARBYINT,         MVT::v2f64, Legal);

    // FIXME: Do we need to handle scalar-to-vector here?
    setOperationAction(ISD::MUL,                MVT::v4i32, Legal);

    setOperationAction(ISD::VSELECT,            MVT::v2f64, Custom);
    setOperationAction(ISD::VSELECT,            MVT::v2i64, Custom);
    setOperationAction(ISD::VSELECT,            MVT::v4i32, Custom);
    setOperationAction(ISD::VSELECT,            MVT::v4f32, Custom);
    setOperationAction(ISD::VSELECT,            MVT::v8i16, Custom);
    // There is no BLENDI for byte vectors. We don't need to custom lower
    // some vselects for now.
    setOperationAction(ISD::VSELECT,            MVT::v16i8, Legal);

    // SSE41 brings specific instructions for doing vector sign extend even in
    // cases where we don't have SRA.
    for (MVT VT : MVT::integer_vector_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i8,  Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i16, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i32, Custom);
    }

    // SSE41 also has vector sign/zero extending loads, PMOV[SZ]X
    setLoadExtAction(ISD::SEXTLOAD, MVT::v8i16, MVT::v8i8,  Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v4i32, MVT::v4i8,  Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v2i64, MVT::v2i8,  Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v4i32, MVT::v4i16, Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v2i64, MVT::v2i16, Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v2i64, MVT::v2i32, Legal);

    setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i16, MVT::v8i8,  Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i32, MVT::v4i8,  Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i8,  Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i32, MVT::v4i16, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i16, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i32, Legal);
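    // With these marked Legal, a sign-extending load of <8 x i8> into
    // <8 x i16> is matched directly to PMOVSXBW (and the zero-extending
    // forms to PMOVZX*), instead of a plain load plus a separate
    // in-register extend.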
    // i8 and i16 vectors are custom because the source register and source
    // memory operand types are not the same width. f32 vectors are custom
    // since the immediate controlling the insert encodes additional
    // information.
    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v16i8, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v4f32, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);

    // FIXME: these should be Legal, but that's only for the case where
    // the index is constant. For now custom expand to deal with that.
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v2i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
    }
  }
  if (Subtarget->hasSSE2()) {
    setOperationAction(ISD::SRL,               MVT::v8i16, Custom);
    setOperationAction(ISD::SRL,               MVT::v16i8, Custom);

    setOperationAction(ISD::SHL,               MVT::v8i16, Custom);
    setOperationAction(ISD::SHL,               MVT::v16i8, Custom);

    setOperationAction(ISD::SRA,               MVT::v8i16, Custom);
    setOperationAction(ISD::SRA,               MVT::v16i8, Custom);

    // In the customized shift lowering, the legal cases in AVX2 will be
    // recognized.
    setOperationAction(ISD::SRL,               MVT::v2i64, Custom);
    setOperationAction(ISD::SRL,               MVT::v4i32, Custom);

    setOperationAction(ISD::SHL,               MVT::v2i64, Custom);
    setOperationAction(ISD::SHL,               MVT::v4i32, Custom);

    setOperationAction(ISD::SRA,               MVT::v4i32, Custom);
  }
  if (!TM.Options.UseSoftFloat && Subtarget->hasFp256()) {
    addRegisterClass(MVT::v32i8,  &X86::VR256RegClass);
    addRegisterClass(MVT::v16i16, &X86::VR256RegClass);
    addRegisterClass(MVT::v8i32,  &X86::VR256RegClass);
    addRegisterClass(MVT::v8f32,  &X86::VR256RegClass);
    addRegisterClass(MVT::v4i64,  &X86::VR256RegClass);
    addRegisterClass(MVT::v4f64,  &X86::VR256RegClass);

    setOperationAction(ISD::LOAD,               MVT::v8f32, Legal);
    setOperationAction(ISD::LOAD,               MVT::v4f64, Legal);
    setOperationAction(ISD::LOAD,               MVT::v4i64, Legal);

    setOperationAction(ISD::FADD,               MVT::v8f32, Legal);
    setOperationAction(ISD::FSUB,               MVT::v8f32, Legal);
    setOperationAction(ISD::FMUL,               MVT::v8f32, Legal);
    setOperationAction(ISD::FDIV,               MVT::v8f32, Legal);
    setOperationAction(ISD::FSQRT,              MVT::v8f32, Legal);
    setOperationAction(ISD::FFLOOR,             MVT::v8f32, Legal);
    setOperationAction(ISD::FCEIL,              MVT::v8f32, Legal);
    setOperationAction(ISD::FTRUNC,             MVT::v8f32, Legal);
    setOperationAction(ISD::FRINT,              MVT::v8f32, Legal);
    setOperationAction(ISD::FNEARBYINT,         MVT::v8f32, Legal);
    setOperationAction(ISD::FNEG,               MVT::v8f32, Custom);
    setOperationAction(ISD::FABS,               MVT::v8f32, Custom);

    setOperationAction(ISD::FADD,               MVT::v4f64, Legal);
    setOperationAction(ISD::FSUB,               MVT::v4f64, Legal);
    setOperationAction(ISD::FMUL,               MVT::v4f64, Legal);
    setOperationAction(ISD::FDIV,               MVT::v4f64, Legal);
    setOperationAction(ISD::FSQRT,              MVT::v4f64, Legal);
    setOperationAction(ISD::FFLOOR,             MVT::v4f64, Legal);
    setOperationAction(ISD::FCEIL,              MVT::v4f64, Legal);
    setOperationAction(ISD::FTRUNC,             MVT::v4f64, Legal);
    setOperationAction(ISD::FRINT,              MVT::v4f64, Legal);
    setOperationAction(ISD::FNEARBYINT,         MVT::v4f64, Legal);
    setOperationAction(ISD::FNEG,               MVT::v4f64, Custom);
    setOperationAction(ISD::FABS,               MVT::v4f64, Custom);

    // (fp_to_int:v8i16 (v8f32 ..)) requires the result type to be promoted
    // even though v8i16 is a legal type.
    setOperationAction(ISD::FP_TO_SINT,         MVT::v8i16, Promote);
    setOperationAction(ISD::FP_TO_UINT,         MVT::v8i16, Promote);
    setOperationAction(ISD::FP_TO_SINT,         MVT::v8i32, Legal);

    setOperationAction(ISD::SINT_TO_FP,         MVT::v8i16, Promote);
    setOperationAction(ISD::SINT_TO_FP,         MVT::v8i32, Legal);
    setOperationAction(ISD::FP_ROUND,           MVT::v4f32, Legal);

    setOperationAction(ISD::UINT_TO_FP,         MVT::v8i8,  Custom);
    setOperationAction(ISD::UINT_TO_FP,         MVT::v8i16, Custom);

    for (MVT VT : MVT::fp_vector_valuetypes())
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4f32, Legal);

    setOperationAction(ISD::SRL,                MVT::v16i16, Custom);
    setOperationAction(ISD::SRL,                MVT::v32i8,  Custom);

    setOperationAction(ISD::SHL,                MVT::v16i16, Custom);
    setOperationAction(ISD::SHL,                MVT::v32i8,  Custom);

    setOperationAction(ISD::SRA,                MVT::v16i16, Custom);
    setOperationAction(ISD::SRA,                MVT::v32i8,  Custom);

    setOperationAction(ISD::SETCC,              MVT::v32i8,  Custom);
    setOperationAction(ISD::SETCC,              MVT::v16i16, Custom);
    setOperationAction(ISD::SETCC,              MVT::v8i32,  Custom);
    setOperationAction(ISD::SETCC,              MVT::v4i64,  Custom);

    setOperationAction(ISD::SELECT,             MVT::v4f64, Custom);
    setOperationAction(ISD::SELECT,             MVT::v4i64, Custom);
    setOperationAction(ISD::SELECT,             MVT::v8f32, Custom);

    setOperationAction(ISD::VSELECT,            MVT::v4f64, Custom);
    setOperationAction(ISD::VSELECT,            MVT::v4i64, Custom);
    setOperationAction(ISD::VSELECT,            MVT::v8i32, Custom);
    setOperationAction(ISD::VSELECT,            MVT::v8f32, Custom);

    setOperationAction(ISD::SIGN_EXTEND,        MVT::v4i64,  Custom);
    setOperationAction(ISD::SIGN_EXTEND,        MVT::v8i32,  Custom);
    setOperationAction(ISD::SIGN_EXTEND,        MVT::v16i16, Custom);
    setOperationAction(ISD::ZERO_EXTEND,        MVT::v4i64,  Custom);
    setOperationAction(ISD::ZERO_EXTEND,        MVT::v8i32,  Custom);
    setOperationAction(ISD::ZERO_EXTEND,        MVT::v16i16, Custom);
    setOperationAction(ISD::ANY_EXTEND,         MVT::v4i64,  Custom);
    setOperationAction(ISD::ANY_EXTEND,         MVT::v8i32,  Custom);
    setOperationAction(ISD::ANY_EXTEND,         MVT::v16i16, Custom);
    setOperationAction(ISD::TRUNCATE,           MVT::v16i8,  Custom);
    setOperationAction(ISD::TRUNCATE,           MVT::v8i16,  Custom);
    setOperationAction(ISD::TRUNCATE,           MVT::v4i32,  Custom);
    if (Subtarget->hasFMA() || Subtarget->hasFMA4()) {
      setOperationAction(ISD::FMA,             MVT::v8f32, Legal);
      setOperationAction(ISD::FMA,             MVT::v4f64, Legal);
      setOperationAction(ISD::FMA,             MVT::v4f32, Legal);
      setOperationAction(ISD::FMA,             MVT::v2f64, Legal);
      setOperationAction(ISD::FMA,             MVT::f32,   Legal);
      setOperationAction(ISD::FMA,             MVT::f64,   Legal);
    }
1275 if (Subtarget->hasInt256()) {
1276 setOperationAction(ISD::ADD, MVT::v4i64, Legal);
1277 setOperationAction(ISD::ADD, MVT::v8i32, Legal);
1278 setOperationAction(ISD::ADD, MVT::v16i16, Legal);
1279 setOperationAction(ISD::ADD, MVT::v32i8, Legal);
1281 setOperationAction(ISD::SUB, MVT::v4i64, Legal);
1282 setOperationAction(ISD::SUB, MVT::v8i32, Legal);
1283 setOperationAction(ISD::SUB, MVT::v16i16, Legal);
1284 setOperationAction(ISD::SUB, MVT::v32i8, Legal);
1286 setOperationAction(ISD::MUL, MVT::v4i64, Custom);
1287 setOperationAction(ISD::MUL, MVT::v8i32, Legal);
1288 setOperationAction(ISD::MUL, MVT::v16i16, Legal);
1289 // Don't lower v32i8 because there is no 128-bit byte mul
1291 setOperationAction(ISD::UMUL_LOHI, MVT::v8i32, Custom);
1292 setOperationAction(ISD::SMUL_LOHI, MVT::v8i32, Custom);
1293 setOperationAction(ISD::MULHU, MVT::v16i16, Legal);
1294 setOperationAction(ISD::MULHS, MVT::v16i16, Legal);
1296 setOperationAction(ISD::VSELECT, MVT::v16i16, Custom);
1297 setOperationAction(ISD::VSELECT, MVT::v32i8, Legal);
1299 // The custom lowering of UINT_TO_FP for v8i32 becomes interesting
1300 // when we have a 256-bit-wide blend with immediate.
1301 setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Custom);
1303 // Only provide customized ctpop vector bit twiddling for vector types we
1304 // know to perform better than using the popcnt instructions on each
1305 // vector element. If popcnt isn't supported, always provide the custom version.
1307 if (!Subtarget->hasPOPCNT())
1308 setOperationAction(ISD::CTPOP, MVT::v4i64, Custom);
1310 // Custom CTPOP always performs better on natively supported v8i32
1311 setOperationAction(ISD::CTPOP, MVT::v8i32, Custom);
1313 // AVX2 also has wider vector sign/zero extending loads, VPMOV[SZ]X
1314 setLoadExtAction(ISD::SEXTLOAD, MVT::v16i16, MVT::v16i8, Legal);
1315 setLoadExtAction(ISD::SEXTLOAD, MVT::v8i32, MVT::v8i8, Legal);
1316 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64, MVT::v4i8, Legal);
1317 setLoadExtAction(ISD::SEXTLOAD, MVT::v8i32, MVT::v8i16, Legal);
1318 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64, MVT::v4i16, Legal);
1319 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64, MVT::v4i32, Legal);
1321 setLoadExtAction(ISD::ZEXTLOAD, MVT::v16i16, MVT::v16i8, Legal);
1322 setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i32, MVT::v8i8, Legal);
1323 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64, MVT::v4i8, Legal);
1324 setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i32, MVT::v8i16, Legal);
1325 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64, MVT::v4i16, Legal);
1326 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64, MVT::v4i32, Legal);
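// For example, with AVX2 a zero-extending load of <8 x i8> that feeds a
// zext to <8 x i32> can be selected as a single vpmovzxbd with a memory
// operand instead of a plain load followed by a separate extend.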
1328 setOperationAction(ISD::ADD, MVT::v4i64, Custom);
1329 setOperationAction(ISD::ADD, MVT::v8i32, Custom);
1330 setOperationAction(ISD::ADD, MVT::v16i16, Custom);
1331 setOperationAction(ISD::ADD, MVT::v32i8, Custom);
1333 setOperationAction(ISD::SUB, MVT::v4i64, Custom);
1334 setOperationAction(ISD::SUB, MVT::v8i32, Custom);
1335 setOperationAction(ISD::SUB, MVT::v16i16, Custom);
1336 setOperationAction(ISD::SUB, MVT::v32i8, Custom);
1338 setOperationAction(ISD::MUL, MVT::v4i64, Custom);
1339 setOperationAction(ISD::MUL, MVT::v8i32, Custom);
1340 setOperationAction(ISD::MUL, MVT::v16i16, Custom);
1341 // Don't lower v32i8 because there is no 128-bit byte mul
1344 // In the customized shift lowering, the legal cases in AVX2 will be recognized.
1346 setOperationAction(ISD::SRL, MVT::v4i64, Custom);
1347 setOperationAction(ISD::SRL, MVT::v8i32, Custom);
1349 setOperationAction(ISD::SHL, MVT::v4i64, Custom);
1350 setOperationAction(ISD::SHL, MVT::v8i32, Custom);
1352 setOperationAction(ISD::SRA, MVT::v8i32, Custom);
1354 // Custom lower several nodes for 256-bit types.
1355 for (MVT VT : MVT::vector_valuetypes()) {
1356 if (VT.getScalarSizeInBits() >= 32) {
1357 setOperationAction(ISD::MLOAD, VT, Legal);
1358 setOperationAction(ISD::MSTORE, VT, Legal);
1360 // Extract subvector is special because the value type
1361 // (result) is 128-bit but the source is 256-bit wide.
1362 if (VT.is128BitVector()) {
1363 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1365 // Do not attempt to custom lower other non-256-bit vectors
1366 if (!VT.is256BitVector())
1369 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1370 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1371 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1372 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1373 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
1374 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1375 setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
1378 // Promote v32i8, v16i16, v8i32 select, and, or, xor to v4i64.
1379 for (int i = MVT::v32i8; i != MVT::v4i64; ++i) {
1380 MVT VT = (MVT::SimpleValueType)i;
1382 // Do not attempt to promote non-256-bit vectors
1383 if (!VT.is256BitVector())
1386 setOperationAction(ISD::AND, VT, Promote);
1387 AddPromotedToType (ISD::AND, VT, MVT::v4i64);
1388 setOperationAction(ISD::OR, VT, Promote);
1389 AddPromotedToType (ISD::OR, VT, MVT::v4i64);
1390 setOperationAction(ISD::XOR, VT, Promote);
1391 AddPromotedToType (ISD::XOR, VT, MVT::v4i64);
1392 setOperationAction(ISD::LOAD, VT, Promote);
1393 AddPromotedToType (ISD::LOAD, VT, MVT::v4i64);
1394 setOperationAction(ISD::SELECT, VT, Promote);
1395 AddPromotedToType (ISD::SELECT, VT, MVT::v4i64);
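// The promotion above is purely a bitcast trick: for example, an AND of two
// v8i32 values is bitcast to v4i64, performed there with a single 256-bit
// AND instruction, and bitcast back; the bit pattern is unchanged, so the
// result is identical.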
1399 if (!TM.Options.UseSoftFloat && Subtarget->hasAVX512()) {
1400 addRegisterClass(MVT::v16i32, &X86::VR512RegClass);
1401 addRegisterClass(MVT::v16f32, &X86::VR512RegClass);
1402 addRegisterClass(MVT::v8i64, &X86::VR512RegClass);
1403 addRegisterClass(MVT::v8f64, &X86::VR512RegClass);
1405 addRegisterClass(MVT::i1, &X86::VK1RegClass);
1406 addRegisterClass(MVT::v8i1, &X86::VK8RegClass);
1407 addRegisterClass(MVT::v16i1, &X86::VK16RegClass);
1409 for (MVT VT : MVT::fp_vector_valuetypes())
1410 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v8f32, Legal);
1412 setOperationAction(ISD::BR_CC, MVT::i1, Expand);
1413 setOperationAction(ISD::SETCC, MVT::i1, Custom);
1414 setOperationAction(ISD::XOR, MVT::i1, Legal);
1415 setOperationAction(ISD::OR, MVT::i1, Legal);
1416 setOperationAction(ISD::AND, MVT::i1, Legal);
1417 setOperationAction(ISD::LOAD, MVT::v16f32, Legal);
1418 setOperationAction(ISD::LOAD, MVT::v8f64, Legal);
1419 setOperationAction(ISD::LOAD, MVT::v8i64, Legal);
1420 setOperationAction(ISD::LOAD, MVT::v16i32, Legal);
1421 setOperationAction(ISD::LOAD, MVT::v16i1, Legal);
1423 setOperationAction(ISD::FADD, MVT::v16f32, Legal);
1424 setOperationAction(ISD::FSUB, MVT::v16f32, Legal);
1425 setOperationAction(ISD::FMUL, MVT::v16f32, Legal);
1426 setOperationAction(ISD::FDIV, MVT::v16f32, Legal);
1427 setOperationAction(ISD::FSQRT, MVT::v16f32, Legal);
1428 setOperationAction(ISD::FNEG, MVT::v16f32, Custom);
1430 setOperationAction(ISD::FADD, MVT::v8f64, Legal);
1431 setOperationAction(ISD::FSUB, MVT::v8f64, Legal);
1432 setOperationAction(ISD::FMUL, MVT::v8f64, Legal);
1433 setOperationAction(ISD::FDIV, MVT::v8f64, Legal);
1434 setOperationAction(ISD::FSQRT, MVT::v8f64, Legal);
1435 setOperationAction(ISD::FNEG, MVT::v8f64, Custom);
1436 setOperationAction(ISD::FMA, MVT::v8f64, Legal);
1437 setOperationAction(ISD::FMA, MVT::v16f32, Legal);
1439 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal);
1440 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
1441 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal);
1442 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal);
1443 if (Subtarget->is64Bit()) {
1444 setOperationAction(ISD::FP_TO_UINT, MVT::i64, Legal);
1445 setOperationAction(ISD::FP_TO_SINT, MVT::i64, Legal);
1446 setOperationAction(ISD::SINT_TO_FP, MVT::i64, Legal);
1447 setOperationAction(ISD::UINT_TO_FP, MVT::i64, Legal);
1449 setOperationAction(ISD::FP_TO_SINT, MVT::v16i32, Legal);
1450 setOperationAction(ISD::FP_TO_UINT, MVT::v16i32, Legal);
1451 setOperationAction(ISD::FP_TO_UINT, MVT::v8i32, Legal);
1452 setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
1453 setOperationAction(ISD::SINT_TO_FP, MVT::v16i32, Legal);
1454 setOperationAction(ISD::SINT_TO_FP, MVT::v8i1, Custom);
1455 setOperationAction(ISD::SINT_TO_FP, MVT::v16i1, Custom);
1456 setOperationAction(ISD::SINT_TO_FP, MVT::v16i8, Promote);
1457 setOperationAction(ISD::SINT_TO_FP, MVT::v16i16, Promote);
1458 setOperationAction(ISD::UINT_TO_FP, MVT::v16i32, Legal);
1459 setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Legal);
1460 setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
1461 setOperationAction(ISD::FP_ROUND, MVT::v8f32, Legal);
1462 setOperationAction(ISD::FP_EXTEND, MVT::v8f32, Legal);
1464 setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);
1465 setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom);
1466 setOperationAction(ISD::TRUNCATE, MVT::v8i32, Custom);
1467 setOperationAction(ISD::TRUNCATE, MVT::v8i1, Custom);
1468 setOperationAction(ISD::TRUNCATE, MVT::v16i1, Custom);
1469 setOperationAction(ISD::TRUNCATE, MVT::v16i16, Custom);
1470 setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom);
1471 setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64, Custom);
1472 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom);
1473 setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64, Custom);
1474 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i8, Custom);
1475 setOperationAction(ISD::SIGN_EXTEND, MVT::v8i16, Custom);
1476 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i16, Custom);
1478 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f64, Custom);
1479 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i64, Custom);
1480 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16f32, Custom);
1481 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i32, Custom);
1482 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i1, Custom);
1483 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i1, Legal);
1485 setOperationAction(ISD::SETCC, MVT::v16i1, Custom);
1486 setOperationAction(ISD::SETCC, MVT::v8i1, Custom);
1488 setOperationAction(ISD::MUL, MVT::v8i64, Custom);
1490 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i1, Custom);
1491 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i1, Custom);
1492 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i1, Custom);
1493 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i1, Custom);
1494 setOperationAction(ISD::BUILD_VECTOR, MVT::v8i1, Custom);
1495 setOperationAction(ISD::BUILD_VECTOR, MVT::v16i1, Custom);
1496 setOperationAction(ISD::SELECT, MVT::v8f64, Custom);
1497 setOperationAction(ISD::SELECT, MVT::v8i64, Custom);
1498 setOperationAction(ISD::SELECT, MVT::v16f32, Custom);
1500 setOperationAction(ISD::ADD, MVT::v8i64, Legal);
1501 setOperationAction(ISD::ADD, MVT::v16i32, Legal);
1503 setOperationAction(ISD::SUB, MVT::v8i64, Legal);
1504 setOperationAction(ISD::SUB, MVT::v16i32, Legal);
1506 setOperationAction(ISD::MUL, MVT::v16i32, Legal);
1508 setOperationAction(ISD::SRL, MVT::v8i64, Custom);
1509 setOperationAction(ISD::SRL, MVT::v16i32, Custom);
1511 setOperationAction(ISD::SHL, MVT::v8i64, Custom);
1512 setOperationAction(ISD::SHL, MVT::v16i32, Custom);
1514 setOperationAction(ISD::SRA, MVT::v8i64, Custom);
1515 setOperationAction(ISD::SRA, MVT::v16i32, Custom);
1517 setOperationAction(ISD::AND, MVT::v8i64, Legal);
1518 setOperationAction(ISD::OR, MVT::v8i64, Legal);
1519 setOperationAction(ISD::XOR, MVT::v8i64, Legal);
1520 setOperationAction(ISD::AND, MVT::v16i32, Legal);
1521 setOperationAction(ISD::OR, MVT::v16i32, Legal);
1522 setOperationAction(ISD::XOR, MVT::v16i32, Legal);
1524 if (Subtarget->hasCDI()) {
1525 setOperationAction(ISD::CTLZ, MVT::v8i64, Legal);
1526 setOperationAction(ISD::CTLZ, MVT::v16i32, Legal);
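// AVX-512 CDI provides per-element leading-zero-count instructions
// (vplzcntd/vplzcntq), which is why CTLZ can be marked Legal here.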
1529 // Custom lower several nodes.
1530 for (MVT VT : MVT::vector_valuetypes()) {
1531 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
1532 // Extract subvector is special because the value type
1533 // (result) is 256/128-bit but the source is 512-bit wide.
1534 if (VT.is128BitVector() || VT.is256BitVector()) {
1535 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1537 if (VT.getVectorElementType() == MVT::i1)
1538 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
1540 // Do not attempt to custom lower other non-512-bit vectors
1541 if (!VT.is512BitVector())
1544 if (EltSize >= 32) {
1545 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1546 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1547 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1548 setOperationAction(ISD::VSELECT, VT, Legal);
1549 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1550 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
1551 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1552 setOperationAction(ISD::MLOAD, VT, Legal);
1553 setOperationAction(ISD::MSTORE, VT, Legal);
1556 for (int i = MVT::v32i8; i != MVT::v8i64; ++i) {
1557 MVT VT = (MVT::SimpleValueType)i;
1559 // Do not attempt to promote non-512-bit vectors.
1560 if (!VT.is512BitVector())
1563 setOperationAction(ISD::SELECT, VT, Promote);
1564 AddPromotedToType (ISD::SELECT, VT, MVT::v8i64);
1568 if (!TM.Options.UseSoftFloat && Subtarget->hasBWI()) {
1569 addRegisterClass(MVT::v32i16, &X86::VR512RegClass);
1570 addRegisterClass(MVT::v64i8, &X86::VR512RegClass);
1572 addRegisterClass(MVT::v32i1, &X86::VK32RegClass);
1573 addRegisterClass(MVT::v64i1, &X86::VK64RegClass);
1575 setOperationAction(ISD::LOAD, MVT::v32i16, Legal);
1576 setOperationAction(ISD::LOAD, MVT::v64i8, Legal);
1577 setOperationAction(ISD::SETCC, MVT::v32i1, Custom);
1578 setOperationAction(ISD::SETCC, MVT::v64i1, Custom);
1579 setOperationAction(ISD::ADD, MVT::v32i16, Legal);
1580 setOperationAction(ISD::ADD, MVT::v64i8, Legal);
1581 setOperationAction(ISD::SUB, MVT::v32i16, Legal);
1582 setOperationAction(ISD::SUB, MVT::v64i8, Legal);
1583 setOperationAction(ISD::MUL, MVT::v32i16, Legal);
1585 for (int i = MVT::v32i8; i != MVT::v8i64; ++i) {
1586 const MVT VT = (MVT::SimpleValueType)i;
1588 const unsigned EltSize = VT.getVectorElementType().getSizeInBits();
1590 // Do not attempt to promote non-512-bit vectors.
1591 if (!VT.is512BitVector())
1595 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1596 setOperationAction(ISD::VSELECT, VT, Legal);
1601 if (!TM.Options.UseSoftFloat && Subtarget->hasVLX()) {
1602 addRegisterClass(MVT::v4i1, &X86::VK4RegClass);
1603 addRegisterClass(MVT::v2i1, &X86::VK2RegClass);
1605 setOperationAction(ISD::SETCC, MVT::v4i1, Custom);
1606 setOperationAction(ISD::SETCC, MVT::v2i1, Custom);
1607 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8i1, Legal);
1609 setOperationAction(ISD::AND, MVT::v8i32, Legal);
1610 setOperationAction(ISD::OR, MVT::v8i32, Legal);
1611 setOperationAction(ISD::XOR, MVT::v8i32, Legal);
1612 setOperationAction(ISD::AND, MVT::v4i32, Legal);
1613 setOperationAction(ISD::OR, MVT::v4i32, Legal);
1614 setOperationAction(ISD::XOR, MVT::v4i32, Legal);
1617 // SIGN_EXTEND_INREGs are evaluated by the extend type. Handle the expansion
1618 // of this type with custom code.
1619 for (MVT VT : MVT::vector_valuetypes())
1620 setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Custom);
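// As a reminder, SIGN_EXTEND_INREG sign-extends from a narrower type while
// staying in the wider register: e.g. a sign_extend_inreg from i8 replicates
// bit 7 of each element into that element's upper bits.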
1622 // We want to custom lower some of our intrinsics.
1623 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
1624 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
1625 setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
1626 if (!Subtarget->is64Bit())
1627 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
1629 // Only custom-lower 64-bit SADDO and friends on 64-bit because we don't
1630 // handle type legalization for these operations here.
1632 // FIXME: We really should do custom legalization for addition and
1633 // subtraction on x86-32 once PR3203 is fixed. We really can't do much better
1634 // than generic legalization for 64-bit multiplication-with-overflow, though.
1635 for (unsigned i = 0, e = 3+Subtarget->is64Bit(); i != e; ++i) {
1636 // Add/Sub/Mul with overflow operations are custom lowered.
1638 setOperationAction(ISD::SADDO, VT, Custom);
1639 setOperationAction(ISD::UADDO, VT, Custom);
1640 setOperationAction(ISD::SSUBO, VT, Custom);
1641 setOperationAction(ISD::USUBO, VT, Custom);
1642 setOperationAction(ISD::SMULO, VT, Custom);
1643 setOperationAction(ISD::UMULO, VT, Custom);
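// Roughly, the custom lowering turns e.g. SADDO into an X86 add node that
// also produces EFLAGS, and the overflow result is then read out of the
// overflow/carry flag with a setcc-style node, avoiding a separate compare.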
1647 if (!Subtarget->is64Bit()) {
1648 // These libcalls are not available in 32-bit.
1649 setLibcallName(RTLIB::SHL_I128, nullptr);
1650 setLibcallName(RTLIB::SRL_I128, nullptr);
1651 setLibcallName(RTLIB::SRA_I128, nullptr);
1654 // Combine sin / cos into one node or libcall if possible.
1655 if (Subtarget->hasSinCos()) {
1656 setLibcallName(RTLIB::SINCOS_F32, "sincosf");
1657 setLibcallName(RTLIB::SINCOS_F64, "sincos");
1658 if (Subtarget->isTargetDarwin()) {
1659 // For MacOSX, we don't want the normal expansion of a libcall to sincos.
1660 // We want to issue a libcall to __sincos_stret to avoid memory traffic.
1661 setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
1662 setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
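// __sincos_stret returns both results at once (for doubles on x86-64 the
// pair comes back in xmm0/xmm1), so a single call replaces separate calls
// to sin and cos.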
1666 if (Subtarget->isTargetWin64()) {
1667 setOperationAction(ISD::SDIV, MVT::i128, Custom);
1668 setOperationAction(ISD::UDIV, MVT::i128, Custom);
1669 setOperationAction(ISD::SREM, MVT::i128, Custom);
1670 setOperationAction(ISD::UREM, MVT::i128, Custom);
1671 setOperationAction(ISD::SDIVREM, MVT::i128, Custom);
1672 setOperationAction(ISD::UDIVREM, MVT::i128, Custom);
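// These are custom because the Win64 ABI has no native way to pass i128
// values; the lowering presumably ends up calling the usual 128-bit helper
// routines (__divti3 and friends) with the operands passed indirectly.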
1675 // We have target-specific dag combine patterns for the following nodes:
1676 setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
1677 setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
1678 setTargetDAGCombine(ISD::VSELECT);
1679 setTargetDAGCombine(ISD::SELECT);
1680 setTargetDAGCombine(ISD::SHL);
1681 setTargetDAGCombine(ISD::SRA);
1682 setTargetDAGCombine(ISD::SRL);
1683 setTargetDAGCombine(ISD::OR);
1684 setTargetDAGCombine(ISD::AND);
1685 setTargetDAGCombine(ISD::ADD);
1686 setTargetDAGCombine(ISD::FADD);
1687 setTargetDAGCombine(ISD::FSUB);
1688 setTargetDAGCombine(ISD::FMA);
1689 setTargetDAGCombine(ISD::SUB);
1690 setTargetDAGCombine(ISD::LOAD);
1691 setTargetDAGCombine(ISD::MLOAD);
1692 setTargetDAGCombine(ISD::STORE);
1693 setTargetDAGCombine(ISD::MSTORE);
1694 setTargetDAGCombine(ISD::ZERO_EXTEND);
1695 setTargetDAGCombine(ISD::ANY_EXTEND);
1696 setTargetDAGCombine(ISD::SIGN_EXTEND);
1697 setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
1698 setTargetDAGCombine(ISD::TRUNCATE);
1699 setTargetDAGCombine(ISD::SINT_TO_FP);
1700 setTargetDAGCombine(ISD::SETCC);
1701 setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
1702 setTargetDAGCombine(ISD::BUILD_VECTOR);
1703 setTargetDAGCombine(ISD::MUL);
1704 setTargetDAGCombine(ISD::XOR);
1706 computeRegisterProperties();
1708 // On Darwin, -Os means optimize for size without hurting performance,
1709 // so do not reduce the limit.
1710 MaxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores
1711 MaxStoresPerMemsetOptSize = Subtarget->isTargetDarwin() ? 16 : 8;
1712 MaxStoresPerMemcpy = 8; // For @llvm.memcpy -> sequence of stores
1713 MaxStoresPerMemcpyOptSize = Subtarget->isTargetDarwin() ? 8 : 4;
1714 MaxStoresPerMemmove = 8; // For @llvm.memmove -> sequence of stores
1715 MaxStoresPerMemmoveOptSize = Subtarget->isTargetDarwin() ? 8 : 4;
1716 setPrefLoopAlignment(4); // 2^4 bytes.
1718 // A predictable cmov doesn't hurt on Atom because it's in-order.
1719 PredictableSelectIsExpensive = !Subtarget->isAtom();
1720 EnableExtLdPromotion = true;
1721 setPrefFunctionAlignment(4); // 2^4 bytes.
1723 verifyIntrinsicTables();
1726 // This has so far only been implemented for 64-bit MachO.
1727 bool X86TargetLowering::useLoadStackGuardNode() const {
1728 return Subtarget->isTargetMachO() && Subtarget->is64Bit();
1731 TargetLoweringBase::LegalizeTypeAction
1732 X86TargetLowering::getPreferredVectorAction(EVT VT) const {
1733 if (ExperimentalVectorWideningLegalization &&
1734 VT.getVectorNumElements() != 1 &&
1735 VT.getVectorElementType().getSimpleVT() != MVT::i1)
1736 return TypeWidenVector;
1738 return TargetLoweringBase::getPreferredVectorAction(VT);
1741 EVT X86TargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
1743 return Subtarget->hasAVX512() ? MVT::i1 : MVT::i8;
1745 const unsigned NumElts = VT.getVectorNumElements();
1746 const EVT EltVT = VT.getVectorElementType();
1747 if (VT.is512BitVector()) {
1748 if (Subtarget->hasAVX512())
1749 if (EltVT == MVT::i32 || EltVT == MVT::i64 ||
1750 EltVT == MVT::f32 || EltVT == MVT::f64)
1752 case 8: return MVT::v8i1;
1753 case 16: return MVT::v16i1;
1755 if (Subtarget->hasBWI())
1756 if (EltVT == MVT::i8 || EltVT == MVT::i16)
1758 case 32: return MVT::v32i1;
1759 case 64: return MVT::v64i1;
1763 if (VT.is256BitVector() || VT.is128BitVector()) {
1764 if (Subtarget->hasVLX())
1765 if (EltVT == MVT::i32 || EltVT == MVT::i64 ||
1766 EltVT == MVT::f32 || EltVT == MVT::f64)
1768 case 2: return MVT::v2i1;
1769 case 4: return MVT::v4i1;
1770 case 8: return MVT::v8i1;
1772 if (Subtarget->hasBWI() && Subtarget->hasVLX())
1773 if (EltVT == MVT::i8 || EltVT == MVT::i16)
1775 case 8: return MVT::v8i1;
1776 case 16: return MVT::v16i1;
1777 case 32: return MVT::v32i1;
1781 return VT.changeVectorElementTypeToInteger();
1784 /// Helper for getByValTypeAlignment to determine
1785 /// the desired ByVal argument alignment.
1786 static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign) {
1789 if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
1790 if (VTy->getBitWidth() == 128)
1792 } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
1793 unsigned EltAlign = 0;
1794 getMaxByValAlign(ATy->getElementType(), EltAlign);
1795 if (EltAlign > MaxAlign)
1796 MaxAlign = EltAlign;
1797 } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
1798 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1799 unsigned EltAlign = 0;
1800 getMaxByValAlign(STy->getElementType(i), EltAlign);
1801 if (EltAlign > MaxAlign)
1802 MaxAlign = EltAlign;
1809 /// Return the desired alignment for ByVal aggregate
1810 /// function arguments in the caller parameter area. For X86, aggregates
1811 /// that contain SSE vectors are placed at 16-byte boundaries while the rest
1812 /// are at 4-byte boundaries.
1813 unsigned X86TargetLowering::getByValTypeAlignment(Type *Ty) const {
1814 if (Subtarget->is64Bit()) {
1815 // Max of 8 and alignment of type.
1816 unsigned TyAlign = TD->getABITypeAlignment(Ty);
1823 if (Subtarget->hasSSE1())
1824 getMaxByValAlign(Ty, Align);
1828 /// Returns the target specific optimal type for load
1829 /// and store operations as a result of memset, memcpy, and memmove
1830 /// lowering. If DstAlign is zero that means it's safe to destination
1831 /// alignment can satisfy any constraint. Similarly if SrcAlign is zero it
1832 /// means there isn't a need to check it against alignment requirement,
1833 /// probably because the source does not need to be loaded. If 'IsMemset' is
1834 /// true, that means it's expanding a memset. If 'ZeroMemset' is true, that
1835 /// means it's a memset of zero. 'MemcpyStrSrc' indicates whether the memcpy
1836 /// source is constant so it does not need to be loaded.
1837 /// It returns EVT::Other if the type should be determined using generic
1838 /// target-independent logic.
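/// For example, on an AVX2 target a large, sufficiently aligned memcpy is
/// given a 256-bit vector type here, so it is expanded into 32-byte
/// load/store pairs rather than scalar moves.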
1840 X86TargetLowering::getOptimalMemOpType(uint64_t Size,
1841 unsigned DstAlign, unsigned SrcAlign,
1842 bool IsMemset, bool ZeroMemset,
1844 MachineFunction &MF) const {
1845 const Function *F = MF.getFunction();
1846 if ((!IsMemset || ZeroMemset) &&
1847 !F->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
1848 Attribute::NoImplicitFloat)) {
1850 (Subtarget->isUnalignedMemAccessFast() ||
1851 ((DstAlign == 0 || DstAlign >= 16) &&
1852 (SrcAlign == 0 || SrcAlign >= 16)))) {
1854 if (Subtarget->hasInt256())
1856 if (Subtarget->hasFp256())
1859 if (Subtarget->hasSSE2())
1861 if (Subtarget->hasSSE1())
1863 } else if (!MemcpyStrSrc && Size >= 8 &&
1864 !Subtarget->is64Bit() &&
1865 Subtarget->hasSSE2()) {
1866 // Do not use f64 to lower memcpy if the source is a string constant. It's
1867 // better to use i32 to avoid the loads.
1871 if (Subtarget->is64Bit() && Size >= 8)
1876 bool X86TargetLowering::isSafeMemOpType(MVT VT) const {
1878 return X86ScalarSSEf32;
1879 else if (VT == MVT::f64)
1880 return X86ScalarSSEf64;
1885 X86TargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
1890 *Fast = Subtarget->isUnalignedMemAccessFast();
1894 /// Return the entry encoding for a jump table in the
1895 /// current function. The returned value is a member of the
1896 /// MachineJumpTableInfo::JTEntryKind enum.
1897 unsigned X86TargetLowering::getJumpTableEncoding() const {
1898 // In GOT PIC mode, each entry in the jump table is emitted as a @GOTOFF symbol.
1900 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
1901 Subtarget->isPICStyleGOT())
1902 return MachineJumpTableInfo::EK_Custom32;
1904 // Otherwise, use the normal jump table encoding heuristics.
1905 return TargetLowering::getJumpTableEncoding();
1909 X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
1910 const MachineBasicBlock *MBB,
1911 unsigned uid, MCContext &Ctx) const {
1912 assert(MBB->getParent()->getTarget().getRelocationModel() == Reloc::PIC_ &&
1913 Subtarget->isPICStyleGOT());
1914 // On 32-bit ELF systems, our jump table entries are formed with @GOTOFF relocations.
1916 return MCSymbolRefExpr::Create(MBB->getSymbol(),
1917 MCSymbolRefExpr::VK_GOTOFF, Ctx);
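// Each table entry is then emitted as something like ".long .LBB0_7@GOTOFF",
// i.e. an offset from the GOT base rather than an absolute address, which
// keeps the jump table position independent. (The label name here is just
// illustrative.)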
1920 /// Returns the relocation base for the given PIC jump table.
1921 SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table,
1922 SelectionDAG &DAG) const {
1923 if (!Subtarget->is64Bit())
1924 // This doesn't have SDLoc associated with it, but is not really the
1925 // same as a Register.
1926 return DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), getPointerTy());
1930 /// This returns the relocation base for the given PIC jumptable,
1931 /// the same as getPICJumpTableRelocBase, but as an MCExpr.
1932 const MCExpr *X86TargetLowering::
1933 getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI,
1934 MCContext &Ctx) const {
1935 // X86-64 uses RIP relative addressing based on the jump table label.
1936 if (Subtarget->isPICStyleRIPRel())
1937 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
1939 // Otherwise, the reference is relative to the PIC base.
1940 return MCSymbolRefExpr::Create(MF->getPICBaseSymbol(), Ctx);
1943 // FIXME: Why is this routine here? Move it to RegInfo!
1944 std::pair<const TargetRegisterClass*, uint8_t>
1945 X86TargetLowering::findRepresentativeClass(MVT VT) const {
1946 const TargetRegisterClass *RRC = nullptr;
1948 switch (VT.SimpleTy) {
1950 return TargetLowering::findRepresentativeClass(VT);
1951 case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64:
1952 RRC = Subtarget->is64Bit() ? &X86::GR64RegClass : &X86::GR32RegClass;
1955 RRC = &X86::VR64RegClass;
1957 case MVT::f32: case MVT::f64:
1958 case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
1959 case MVT::v4f32: case MVT::v2f64:
1960 case MVT::v32i8: case MVT::v8i32: case MVT::v4i64: case MVT::v8f32:
1962 RRC = &X86::VR128RegClass;
1965 return std::make_pair(RRC, Cost);
1968 bool X86TargetLowering::getStackCookieLocation(unsigned &AddressSpace,
1969 unsigned &Offset) const {
1970 if (!Subtarget->isTargetLinux())
1973 if (Subtarget->is64Bit()) {
1974 // %fs:0x28, unless we're using a Kernel code model, in which case it's %gs:
1976 if (getTargetMachine().getCodeModel() == CodeModel::Kernel)
1988 bool X86TargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
1989 unsigned DestAS) const {
1990 assert(SrcAS != DestAS && "Expected different address spaces!");
1992 return SrcAS < 256 && DestAS < 256;
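// Address spaces 256 and above are used on x86 for segment-relative
// addressing (e.g. 256 is %gs and 257 is %fs), where a cast really changes
// how the pointer is dereferenced; anything below 256 is treated as the
// ordinary flat address space, so the cast is a no-op.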
1995 //===----------------------------------------------------------------------===//
1996 // Return Value Calling Convention Implementation
1997 //===----------------------------------------------------------------------===//
1999 #include "X86GenCallingConv.inc"
2002 X86TargetLowering::CanLowerReturn(CallingConv::ID CallConv,
2003 MachineFunction &MF, bool isVarArg,
2004 const SmallVectorImpl<ISD::OutputArg> &Outs,
2005 LLVMContext &Context) const {
2006 SmallVector<CCValAssign, 16> RVLocs;
2007 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
2008 return CCInfo.CheckReturn(Outs, RetCC_X86);
2011 const MCPhysReg *X86TargetLowering::getScratchRegisters(CallingConv::ID) const {
2012 static const MCPhysReg ScratchRegs[] = { X86::R11, 0 };
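// R11 is a reasonable scratch register on x86-64: it is caller-saved and is
// not used for argument passing by either the SysV or the Win64 convention.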
2017 X86TargetLowering::LowerReturn(SDValue Chain,
2018 CallingConv::ID CallConv, bool isVarArg,
2019 const SmallVectorImpl<ISD::OutputArg> &Outs,
2020 const SmallVectorImpl<SDValue> &OutVals,
2021 SDLoc dl, SelectionDAG &DAG) const {
2022 MachineFunction &MF = DAG.getMachineFunction();
2023 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
2025 SmallVector<CCValAssign, 16> RVLocs;
2026 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, *DAG.getContext());
2027 CCInfo.AnalyzeReturn(Outs, RetCC_X86);
2030 SmallVector<SDValue, 6> RetOps;
2031 RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
2032 // Operand #1 = Bytes To Pop
2033 RetOps.push_back(DAG.getTargetConstant(FuncInfo->getBytesToPopOnReturn(),
2036 // Copy the result values into the output registers.
2037 for (unsigned i = 0; i != RVLocs.size(); ++i) {
2038 CCValAssign &VA = RVLocs[i];
2039 assert(VA.isRegLoc() && "Can only return in registers!");
2040 SDValue ValToCopy = OutVals[i];
2041 EVT ValVT = ValToCopy.getValueType();
2043 // Promote values to the appropriate types.
2044 if (VA.getLocInfo() == CCValAssign::SExt)
2045 ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), ValToCopy);
2046 else if (VA.getLocInfo() == CCValAssign::ZExt)
2047 ValToCopy = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), ValToCopy);
2048 else if (VA.getLocInfo() == CCValAssign::AExt)
2049 ValToCopy = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), ValToCopy);
2050 else if (VA.getLocInfo() == CCValAssign::BCvt)
2051 ValToCopy = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), ValToCopy);
2053 assert(VA.getLocInfo() != CCValAssign::FPExt &&
2054 "Unexpected FP-extend for return value.");
2056 // If this is x86-64, and we disabled SSE, we can't return FP values,
2057 // or SSE or MMX vectors.
2058 if ((ValVT == MVT::f32 || ValVT == MVT::f64 ||
2059 VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) &&
2060 (Subtarget->is64Bit() && !Subtarget->hasSSE1())) {
2061 report_fatal_error("SSE register return with SSE disabled");
2063 // Likewise we can't return F64 values with SSE1 only. gcc does so, but
2064 // llvm-gcc has never done it right and no one has noticed, so this
2065 // should be OK for now.
2066 if (ValVT == MVT::f64 &&
2067 (Subtarget->is64Bit() && !Subtarget->hasSSE2()))
2068 report_fatal_error("SSE2 register return with SSE2 disabled");
2070 // Returns in ST0/ST1 are handled specially: these are pushed as operands to
2071 // the RET instruction and handled by the FP Stackifier.
2072 if (VA.getLocReg() == X86::FP0 ||
2073 VA.getLocReg() == X86::FP1) {
2074 // If this is a copy from an xmm register to ST(0), use an FPExtend to
2075 // change the value to the FP stack register class.
2076 if (isScalarFPTypeInSSEReg(VA.getValVT()))
2077 ValToCopy = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f80, ValToCopy);
2078 RetOps.push_back(ValToCopy);
2079 // Don't emit a copytoreg.
2083 // 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for v1i64
2084 // which is returned in RAX / RDX.
2085 if (Subtarget->is64Bit()) {
2086 if (ValVT == MVT::x86mmx) {
2087 if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) {
2088 ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ValToCopy);
2089 ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
2091 // If we don't have SSE2 available, convert to v4f32 so the generated
2092 // register is legal.
2093 if (!Subtarget->hasSSE2())
2094 ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, ValToCopy);
2099 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), ValToCopy, Flag);
2100 Flag = Chain.getValue(1);
2101 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
2104 // The x86-64 ABIs require that, when returning a struct by value, we copy
2105 // the sret argument into %rax/%eax (depending on ABI) for the return.
2106 // Win32 requires us to put the sret argument into %eax as well.
2107 // We saved the argument into a virtual register in the entry block,
2108 // so now we copy the value out and into %rax/%eax.
2109 if (DAG.getMachineFunction().getFunction()->hasStructRetAttr() &&
2110 (Subtarget->is64Bit() || Subtarget->isTargetKnownWindowsMSVC())) {
2111 MachineFunction &MF = DAG.getMachineFunction();
2112 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
2113 unsigned Reg = FuncInfo->getSRetReturnReg();
2115 "SRetReturnReg should have been set in LowerFormalArguments().");
2116 SDValue Val = DAG.getCopyFromReg(Chain, dl, Reg, getPointerTy());
2119 = (Subtarget->is64Bit() && !Subtarget->isTarget64BitILP32()) ?
2120 X86::RAX : X86::EAX;
2121 Chain = DAG.getCopyToReg(Chain, dl, RetValReg, Val, Flag);
2122 Flag = Chain.getValue(1);
2124 // RAX/EAX now acts like a return value.
2125 RetOps.push_back(DAG.getRegister(RetValReg, getPointerTy()));
2128 RetOps[0] = Chain; // Update chain.
2130 // Add the flag if we have it.
2132 RetOps.push_back(Flag);
2134 return DAG.getNode(X86ISD::RET_FLAG, dl, MVT::Other, RetOps);
2137 bool X86TargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
2138 if (N->getNumValues() != 1)
2140 if (!N->hasNUsesOfValue(1, 0))
2143 SDValue TCChain = Chain;
2144 SDNode *Copy = *N->use_begin();
2145 if (Copy->getOpcode() == ISD::CopyToReg) {
2146 // If the copy has a glue operand, we conservatively assume it isn't safe to
2147 // perform a tail call.
2148 if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
2150 TCChain = Copy->getOperand(0);
2151 } else if (Copy->getOpcode() != ISD::FP_EXTEND)
2154 bool HasRet = false;
2155 for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end();
2157 if (UI->getOpcode() != X86ISD::RET_FLAG)
2159 // If we are returning more than one value, we can definitely
2160 // not make a tail call; see PR19530.
2161 if (UI->getNumOperands() > 4)
2163 if (UI->getNumOperands() == 4 &&
2164 UI->getOperand(UI->getNumOperands()-1).getValueType() != MVT::Glue)
2177 X86TargetLowering::getTypeForExtArgOrReturn(LLVMContext &Context, EVT VT,
2178 ISD::NodeType ExtendKind) const {
2180 // TODO: Is this also valid on 32-bit?
2181 if (Subtarget->is64Bit() && VT == MVT::i1 && ExtendKind == ISD::ZERO_EXTEND)
2182 ReturnMVT = MVT::i8;
2184 ReturnMVT = MVT::i32;
2186 EVT MinVT = getRegisterType(Context, ReturnMVT);
2187 return VT.bitsLT(MinVT) ? MinVT : VT;
2190 /// Lower the result values of a call into the
2191 /// appropriate copies out of appropriate physical registers.
2194 X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
2195 CallingConv::ID CallConv, bool isVarArg,
2196 const SmallVectorImpl<ISD::InputArg> &Ins,
2197 SDLoc dl, SelectionDAG &DAG,
2198 SmallVectorImpl<SDValue> &InVals) const {
2200 // Assign locations to each value returned by this call.
2201 SmallVector<CCValAssign, 16> RVLocs;
2202 bool Is64Bit = Subtarget->is64Bit();
2203 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
2205 CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
2207 // Copy all of the result registers out of their specified physreg.
2208 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
2209 CCValAssign &VA = RVLocs[i];
2210 EVT CopyVT = VA.getValVT();
2212 // If this is x86-64, and we disabled SSE, we can't return FP values
2213 if ((CopyVT == MVT::f32 || CopyVT == MVT::f64) &&
2214 ((Is64Bit || Ins[i].Flags.isInReg()) && !Subtarget->hasSSE1())) {
2215 report_fatal_error("SSE register return with SSE disabled");
2218 // If we prefer to use the value in xmm registers, copy it out as f80 and
2219 // use a truncate to move it from fp stack reg to xmm reg.
2220 if ((VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1) &&
2221 isScalarFPTypeInSSEReg(VA.getValVT()))
2224 Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(),
2225 CopyVT, InFlag).getValue(1);
2226 SDValue Val = Chain.getValue(0);
2228 if (CopyVT != VA.getValVT())
2229 Val = DAG.getNode(ISD::FP_ROUND, dl, VA.getValVT(), Val,
2230 // This truncation won't change the value.
2231 DAG.getIntPtrConstant(1));
2233 InFlag = Chain.getValue(2);
2234 InVals.push_back(Val);
2240 //===----------------------------------------------------------------------===//
2241 // C & StdCall & Fast Calling Convention implementation
2242 //===----------------------------------------------------------------------===//
2243 // The StdCall calling convention seems to be standard for many Windows API
2244 // routines. It differs from the C calling convention just a little: the
2245 // callee should clean up the stack, not the caller. Symbols should also be
2246 // decorated in some fancy way :) It doesn't support any vector arguments.
2247 // For info on fast calling convention see Fast Calling Convention (tail call)
2248 // implementation LowerX86_32FastCCCallTo.
2250 /// Determines whether a call uses struct return semantics.
2252 enum StructReturnType {
2257 static StructReturnType
2258 callIsStructReturn(const SmallVectorImpl<ISD::OutputArg> &Outs) {
2260 return NotStructReturn;
2262 const ISD::ArgFlagsTy &Flags = Outs[0].Flags;
2263 if (!Flags.isSRet())
2264 return NotStructReturn;
2265 if (Flags.isInReg())
2266 return RegStructReturn;
2267 return StackStructReturn;
2270 /// Determines whether a function uses struct return semantics.
2271 static StructReturnType
2272 argsAreStructReturn(const SmallVectorImpl<ISD::InputArg> &Ins) {
2274 return NotStructReturn;
2276 const ISD::ArgFlagsTy &Flags = Ins[0].Flags;
2277 if (!Flags.isSRet())
2278 return NotStructReturn;
2279 if (Flags.isInReg())
2280 return RegStructReturn;
2281 return StackStructReturn;
2284 /// Make a copy of an aggregate at the address specified by "Src" to the
2285 /// address "Dst", with size and alignment information given by the corresponding
2286 /// parameter attribute. The copy will be passed as a byval function parameter.
2288 CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
2289 ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
2291 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
2293 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
2294 /*isVolatile*/false, /*AlwaysInline=*/true,
2295 MachinePointerInfo(), MachinePointerInfo());
2298 /// Return true if the calling convention is one that
2299 /// supports tail call optimization.
2300 static bool IsTailCallConvention(CallingConv::ID CC) {
2301 return (CC == CallingConv::Fast || CC == CallingConv::GHC ||
2302 CC == CallingConv::HiPE);
2305 /// \brief Return true if the calling convention is a C calling convention.
2306 static bool IsCCallConvention(CallingConv::ID CC) {
2307 return (CC == CallingConv::C || CC == CallingConv::X86_64_Win64 ||
2308 CC == CallingConv::X86_64_SysV);
2311 bool X86TargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
2312 if (!CI->isTailCall() || getTargetMachine().Options.DisableTailCalls)
2316 CallingConv::ID CalleeCC = CS.getCallingConv();
2317 if (!IsTailCallConvention(CalleeCC) && !IsCCallConvention(CalleeCC))
2323 /// Return true if the function is being made into
2324 /// a tailcall target by changing its ABI.
2325 static bool FuncIsMadeTailCallSafe(CallingConv::ID CC,
2326 bool GuaranteedTailCallOpt) {
2327 return GuaranteedTailCallOpt && IsTailCallConvention(CC);
2331 X86TargetLowering::LowerMemArgument(SDValue Chain,
2332 CallingConv::ID CallConv,
2333 const SmallVectorImpl<ISD::InputArg> &Ins,
2334 SDLoc dl, SelectionDAG &DAG,
2335 const CCValAssign &VA,
2336 MachineFrameInfo *MFI,
2338 // Create the nodes corresponding to a load from this parameter slot.
2339 ISD::ArgFlagsTy Flags = Ins[i].Flags;
2340 bool AlwaysUseMutable = FuncIsMadeTailCallSafe(
2341 CallConv, DAG.getTarget().Options.GuaranteedTailCallOpt);
2342 bool isImmutable = !AlwaysUseMutable && !Flags.isByVal();
2345 // If the value is passed by pointer, we have the address passed instead of the value itself.
2347 if (VA.getLocInfo() == CCValAssign::Indirect)
2348 ValVT = VA.getLocVT();
2350 ValVT = VA.getValVT();
2352 // FIXME: For now, all byval parameter objects are marked mutable. This can be
2353 // changed with more analysis.
2354 // In case of tail call optimization, mark all arguments mutable, since they
2355 // could be overwritten by the lowering of arguments in case of a tail call.
2356 if (Flags.isByVal()) {
2357 unsigned Bytes = Flags.getByValSize();
2358 if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects.
2359 int FI = MFI->CreateFixedObject(Bytes, VA.getLocMemOffset(), isImmutable);
2360 return DAG.getFrameIndex(FI, getPointerTy());
2362 int FI = MFI->CreateFixedObject(ValVT.getSizeInBits()/8,
2363 VA.getLocMemOffset(), isImmutable);
2364 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
2365 return DAG.getLoad(ValVT, dl, Chain, FIN,
2366 MachinePointerInfo::getFixedStack(FI),
2367 false, false, false, 0);
2371 // FIXME: Get this from tablegen.
2372 static ArrayRef<MCPhysReg> get64BitArgumentGPRs(CallingConv::ID CallConv,
2373 const X86Subtarget *Subtarget) {
2374 assert(Subtarget->is64Bit());
2376 if (Subtarget->isCallingConvWin64(CallConv)) {
2377 static const MCPhysReg GPR64ArgRegsWin64[] = {
2378 X86::RCX, X86::RDX, X86::R8, X86::R9
2380 return makeArrayRef(std::begin(GPR64ArgRegsWin64), std::end(GPR64ArgRegsWin64));
2383 static const MCPhysReg GPR64ArgRegs64Bit[] = {
2384 X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
2386 return makeArrayRef(std::begin(GPR64ArgRegs64Bit), std::end(GPR64ArgRegs64Bit));
2389 // FIXME: Get this from tablegen.
2390 static ArrayRef<MCPhysReg> get64BitArgumentXMMs(MachineFunction &MF,
2391 CallingConv::ID CallConv,
2392 const X86Subtarget *Subtarget) {
2393 assert(Subtarget->is64Bit());
2394 if (Subtarget->isCallingConvWin64(CallConv)) {
2395 // The XMM registers which might contain vararg parameters are shadowed
2396 // by their paired GPRs, so we only need to save the GPRs to their home slots.
2398 // TODO: __vectorcall will change this.
2402 const Function *Fn = MF.getFunction();
2403 bool NoImplicitFloatOps = Fn->getAttributes().
2404 hasAttribute(AttributeSet::FunctionIndex, Attribute::NoImplicitFloat);
2405 assert(!(MF.getTarget().Options.UseSoftFloat && NoImplicitFloatOps) &&
2406 "SSE register cannot be used when SSE is disabled!");
2407 if (MF.getTarget().Options.UseSoftFloat || NoImplicitFloatOps ||
2408 !Subtarget->hasSSE1())
2409 // Kernel mode asks for SSE to be disabled, so there are no XMM argument registers.
2413 static const MCPhysReg XMMArgRegs64Bit[] = {
2414 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
2415 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
2417 return makeArrayRef(std::begin(XMMArgRegs64Bit), std::end(XMMArgRegs64Bit));
2421 X86TargetLowering::LowerFormalArguments(SDValue Chain,
2422 CallingConv::ID CallConv,
2424 const SmallVectorImpl<ISD::InputArg> &Ins,
2427 SmallVectorImpl<SDValue> &InVals)
2429 MachineFunction &MF = DAG.getMachineFunction();
2430 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
2432 const Function* Fn = MF.getFunction();
2433 if (Fn->hasExternalLinkage() &&
2434 Subtarget->isTargetCygMing() &&
2435 Fn->getName() == "main")
2436 FuncInfo->setForceFramePointer(true);
2438 MachineFrameInfo *MFI = MF.getFrameInfo();
2439 bool Is64Bit = Subtarget->is64Bit();
2440 bool IsWin64 = Subtarget->isCallingConvWin64(CallConv);
2442 assert(!(isVarArg && IsTailCallConvention(CallConv)) &&
2443 "Var args not supported with calling convention fastcc, ghc or hipe");
2445 // Assign locations to all of the incoming arguments.
2446 SmallVector<CCValAssign, 16> ArgLocs;
2447 CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
2449 // Allocate shadow area for Win64
2451 CCInfo.AllocateStack(32, 8);
2453 CCInfo.AnalyzeFormalArguments(Ins, CC_X86);
2455 unsigned LastVal = ~0U;
2457 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2458 CCValAssign &VA = ArgLocs[i];
2459 // TODO: If an arg is passed in two places (e.g. reg and stack), skip later
2461 assert(VA.getValNo() != LastVal &&
2462 "Don't support value assigned to multiple locs yet");
2464 LastVal = VA.getValNo();
2466 if (VA.isRegLoc()) {
2467 EVT RegVT = VA.getLocVT();
2468 const TargetRegisterClass *RC;
2469 if (RegVT == MVT::i32)
2470 RC = &X86::GR32RegClass;
2471 else if (Is64Bit && RegVT == MVT::i64)
2472 RC = &X86::GR64RegClass;
2473 else if (RegVT == MVT::f32)
2474 RC = &X86::FR32RegClass;
2475 else if (RegVT == MVT::f64)
2476 RC = &X86::FR64RegClass;
2477 else if (RegVT.is512BitVector())
2478 RC = &X86::VR512RegClass;
2479 else if (RegVT.is256BitVector())
2480 RC = &X86::VR256RegClass;
2481 else if (RegVT.is128BitVector())
2482 RC = &X86::VR128RegClass;
2483 else if (RegVT == MVT::x86mmx)
2484 RC = &X86::VR64RegClass;
2485 else if (RegVT == MVT::i1)
2486 RC = &X86::VK1RegClass;
2487 else if (RegVT == MVT::v8i1)
2488 RC = &X86::VK8RegClass;
2489 else if (RegVT == MVT::v16i1)
2490 RC = &X86::VK16RegClass;
2491 else if (RegVT == MVT::v32i1)
2492 RC = &X86::VK32RegClass;
2493 else if (RegVT == MVT::v64i1)
2494 RC = &X86::VK64RegClass;
2496 llvm_unreachable("Unknown argument type!");
2498 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
2499 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
2501 // If this is an 8 or 16-bit value, it is really passed promoted to 32
2502 // bits. Insert an assert[sz]ext to capture this, then truncate to the right size.
2504 if (VA.getLocInfo() == CCValAssign::SExt)
2505 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
2506 DAG.getValueType(VA.getValVT()));
2507 else if (VA.getLocInfo() == CCValAssign::ZExt)
2508 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
2509 DAG.getValueType(VA.getValVT()));
2510 else if (VA.getLocInfo() == CCValAssign::BCvt)
2511 ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
2513 if (VA.isExtInLoc()) {
2514 // Handle MMX values passed in XMM regs.
2515 if (RegVT.isVector())
2516 ArgValue = DAG.getNode(X86ISD::MOVDQ2Q, dl, VA.getValVT(), ArgValue);
2518 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
2521 assert(VA.isMemLoc());
2522 ArgValue = LowerMemArgument(Chain, CallConv, Ins, dl, DAG, VA, MFI, i);
2525 // If the value is passed via a pointer, do a load.
2526 if (VA.getLocInfo() == CCValAssign::Indirect)
2527 ArgValue = DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue,
2528 MachinePointerInfo(), false, false, false, 0);
2530 InVals.push_back(ArgValue);
2533 if (Subtarget->is64Bit() || Subtarget->isTargetKnownWindowsMSVC()) {
2534 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2535 // The x86-64 ABIs require that, when returning a struct by value, we copy
2536 // the sret argument into %rax/%eax (depending on ABI) for the return.
2537 // Win32 requires us to put the sret argument into %eax as well.
2538 // Save the argument into a virtual register so that we can access it
2539 // from the return points.
2540 if (Ins[i].Flags.isSRet()) {
2541 unsigned Reg = FuncInfo->getSRetReturnReg();
2543 MVT PtrTy = getPointerTy();
2544 Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrTy));
2545 FuncInfo->setSRetReturnReg(Reg);
2547 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[i]);
2548 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
2554 unsigned StackSize = CCInfo.getNextStackOffset();
2555 // Align stack specially for tail calls.
2556 if (FuncIsMadeTailCallSafe(CallConv,
2557 MF.getTarget().Options.GuaranteedTailCallOpt))
2558 StackSize = GetAlignedArgumentStackSize(StackSize, DAG);
2560 // If the function takes variable number of arguments, make a frame index for
2561 // the start of the first vararg value... for expansion of llvm.va_start. We
2562 // can skip this if there are no va_start calls.
2563 if (MFI->hasVAStart() &&
2564 (Is64Bit || (CallConv != CallingConv::X86_FastCall &&
2565 CallConv != CallingConv::X86_ThisCall))) {
2566 FuncInfo->setVarArgsFrameIndex(
2567 MFI->CreateFixedObject(1, StackSize, true));
2570 // Figure out if XMM registers are in use.
2571 assert(!(MF.getTarget().Options.UseSoftFloat &&
2572 Fn->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
2573 Attribute::NoImplicitFloat)) &&
2574 "SSE register cannot be used when SSE is disabled!");
2576 // 64-bit calling conventions support varargs and register parameters, so we
2577 // have to do extra work to spill them in the prologue.
2578 if (Is64Bit && isVarArg && MFI->hasVAStart()) {
2579 // Find the first unallocated GPR and XMM argument registers.
2580 ArrayRef<MCPhysReg> ArgGPRs = get64BitArgumentGPRs(CallConv, Subtarget);
2581 ArrayRef<MCPhysReg> ArgXMMs = get64BitArgumentXMMs(MF, CallConv, Subtarget);
2582 unsigned NumIntRegs =
2583 CCInfo.getFirstUnallocated(ArgGPRs.data(), ArgGPRs.size());
2584 unsigned NumXMMRegs =
2585 CCInfo.getFirstUnallocated(ArgXMMs.data(), ArgXMMs.size());
2586 assert(!(NumXMMRegs && !Subtarget->hasSSE1()) &&
2587 "SSE register cannot be used when SSE is disabled!");
2589 // Gather all the live in physical registers.
2590 SmallVector<SDValue, 6> LiveGPRs;
2591 SmallVector<SDValue, 8> LiveXMMRegs;
2593 for (MCPhysReg Reg : ArgGPRs.slice(NumIntRegs)) {
2594 unsigned GPR = MF.addLiveIn(Reg, &X86::GR64RegClass);
2596 DAG.getCopyFromReg(Chain, dl, GPR, MVT::i64));
2598 if (!ArgXMMs.empty()) {
2599 unsigned AL = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
2600 ALVal = DAG.getCopyFromReg(Chain, dl, AL, MVT::i8);
2601 for (MCPhysReg Reg : ArgXMMs.slice(NumXMMRegs)) {
2602 unsigned XMMReg = MF.addLiveIn(Reg, &X86::VR128RegClass);
2603 LiveXMMRegs.push_back(
2604 DAG.getCopyFromReg(Chain, dl, XMMReg, MVT::v4f32));
2609 const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
2610 // Get to the caller-allocated home save location. Add 8 to account
2611 // for the return address.
2612 int HomeOffset = TFI.getOffsetOfLocalArea() + 8;
2613 FuncInfo->setRegSaveFrameIndex(
2614 MFI->CreateFixedObject(1, NumIntRegs * 8 + HomeOffset, false));
2615 // Fixup to set vararg frame on shadow area (4 x i64).
2617 FuncInfo->setVarArgsFrameIndex(FuncInfo->getRegSaveFrameIndex());
2619 // For X86-64, if there are vararg parameters that are passed via
2620 // registers, then we must store them to their spots on the stack so
2621 // they may be loaded by dereferencing the result of va_next.
2622 FuncInfo->setVarArgsGPOffset(NumIntRegs * 8);
2623 FuncInfo->setVarArgsFPOffset(ArgGPRs.size() * 8 + NumXMMRegs * 16);
2624 FuncInfo->setRegSaveFrameIndex(MFI->CreateStackObject(
2625 ArgGPRs.size() * 8 + ArgXMMs.size() * 16, 16, false));
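// For the SysV x86-64 ABI this is the usual 176-byte register save area:
// 6 GPRs * 8 bytes plus 8 XMM registers * 16 bytes. va_arg later indexes
// into it using the gp_offset/fp_offset values recorded just above.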
2628 // Store the integer parameter registers.
2629 SmallVector<SDValue, 8> MemOps;
2630 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
2632 unsigned Offset = FuncInfo->getVarArgsGPOffset();
2633 for (SDValue Val : LiveGPRs) {
2634 SDValue FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), RSFIN,
2635 DAG.getIntPtrConstant(Offset));
2637 DAG.getStore(Val.getValue(1), dl, Val, FIN,
2638 MachinePointerInfo::getFixedStack(
2639 FuncInfo->getRegSaveFrameIndex(), Offset),
2641 MemOps.push_back(Store);
2645 if (!ArgXMMs.empty() && NumXMMRegs != ArgXMMs.size()) {
2646 // Now store the XMM (fp + vector) parameter registers.
2647 SmallVector<SDValue, 12> SaveXMMOps;
2648 SaveXMMOps.push_back(Chain);
2649 SaveXMMOps.push_back(ALVal);
2650 SaveXMMOps.push_back(DAG.getIntPtrConstant(
2651 FuncInfo->getRegSaveFrameIndex()));
2652 SaveXMMOps.push_back(DAG.getIntPtrConstant(
2653 FuncInfo->getVarArgsFPOffset()));
2654 SaveXMMOps.insert(SaveXMMOps.end(), LiveXMMRegs.begin(),
2656 MemOps.push_back(DAG.getNode(X86ISD::VASTART_SAVE_XMM_REGS, dl,
2657 MVT::Other, SaveXMMOps));
2660 if (!MemOps.empty())
2661 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
2664 if (isVarArg && MFI->hasMustTailInVarArgFunc()) {
2665 // Find the largest legal vector type.
2666 MVT VecVT = MVT::Other;
2667 // FIXME: Only some x86_32 calling conventions support AVX512.
2668 if (Subtarget->hasAVX512() &&
2669 (Is64Bit || (CallConv == CallingConv::X86_VectorCall ||
2670 CallConv == CallingConv::Intel_OCL_BI)))
2671 VecVT = MVT::v16f32;
2672 else if (Subtarget->hasAVX())
2674 else if (Subtarget->hasSSE2())
2677 // We forward some GPRs and some vector types.
2678 SmallVector<MVT, 2> RegParmTypes;
2679 MVT IntVT = Is64Bit ? MVT::i64 : MVT::i32;
2680 RegParmTypes.push_back(IntVT);
2681 if (VecVT != MVT::Other)
2682 RegParmTypes.push_back(VecVT);
2684 // Compute the set of forwarded registers. The rest are scratch.
2685 SmallVectorImpl<ForwardedRegister> &Forwards =
2686 FuncInfo->getForwardedMustTailRegParms();
2687 CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, CC_X86);
2689 // Conservatively forward AL on x86_64, since it might be used for varargs.
2690 if (Is64Bit && !CCInfo.isAllocated(X86::AL)) {
2691 unsigned ALVReg = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
2692 Forwards.push_back(ForwardedRegister(ALVReg, X86::AL, MVT::i8));
2695 // Copy all forwards from physical to virtual registers.
2696 for (ForwardedRegister &F : Forwards) {
2697 // FIXME: Can we use a less constrained schedule?
2698 SDValue RegVal = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT);
2699 F.VReg = MF.getRegInfo().createVirtualRegister(getRegClassFor(F.VT));
2700 Chain = DAG.getCopyToReg(Chain, dl, F.VReg, RegVal);
2704 // Some CCs need callee pop.
2705 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
2706 MF.getTarget().Options.GuaranteedTailCallOpt)) {
2707 FuncInfo->setBytesToPopOnReturn(StackSize); // Callee pops everything.
2709 FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing.
2710 // If this is an sret function, the return should pop the hidden pointer.
2711 if (!Is64Bit && !IsTailCallConvention(CallConv) &&
2712 !Subtarget->getTargetTriple().isOSMSVCRT() &&
2713 argsAreStructReturn(Ins) == StackStructReturn)
2714 FuncInfo->setBytesToPopOnReturn(4);
2718 // RegSaveFrameIndex is X86-64 only.
2719 FuncInfo->setRegSaveFrameIndex(0xAAAAAAA);
2720 if (CallConv == CallingConv::X86_FastCall ||
2721 CallConv == CallingConv::X86_ThisCall)
2722 // fastcc functions can't have varargs.
2723 FuncInfo->setVarArgsFrameIndex(0xAAAAAAA);
2726 FuncInfo->setArgumentStackSize(StackSize);
2732 X86TargetLowering::LowerMemOpCallTo(SDValue Chain,
2733 SDValue StackPtr, SDValue Arg,
2734 SDLoc dl, SelectionDAG &DAG,
2735 const CCValAssign &VA,
2736 ISD::ArgFlagsTy Flags) const {
2737 unsigned LocMemOffset = VA.getLocMemOffset();
2738 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
2739 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
2740 if (Flags.isByVal())
2741 return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl);
2743 return DAG.getStore(Chain, dl, Arg, PtrOff,
2744 MachinePointerInfo::getStack(LocMemOffset),
2748 /// Emit a load of the return address if tail call
2749 /// optimization is performed and it is required.
2751 X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG,
2752 SDValue &OutRetAddr, SDValue Chain,
2753 bool IsTailCall, bool Is64Bit,
2754 int FPDiff, SDLoc dl) const {
2755 // Adjust the Return address stack slot.
2756 EVT VT = getPointerTy();
2757 OutRetAddr = getReturnAddressFrameIndex(DAG);
2759 // Load the "old" Return address.
2760 OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, MachinePointerInfo(),
2761 false, false, false, 0);
2762 return SDValue(OutRetAddr.getNode(), 1);
2765 /// Emit a store of the return address if tail call
2766 /// optimization is performed and it is required (FPDiff!=0).
2767 static SDValue EmitTailCallStoreRetAddr(SelectionDAG &DAG, MachineFunction &MF,
2768 SDValue Chain, SDValue RetAddrFrIdx,
2769 EVT PtrVT, unsigned SlotSize,
2770 int FPDiff, SDLoc dl) {
2771 // Store the return address to the appropriate stack slot.
2772 if (!FPDiff) return Chain;
2773 // Calculate the new stack slot for the return address.
2774 int NewReturnAddrFI =
2775 MF.getFrameInfo()->CreateFixedObject(SlotSize, (int64_t)FPDiff - SlotSize,
2777 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, PtrVT);
2778 Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx,
2779 MachinePointerInfo::getFixedStack(NewReturnAddrFI),
2785 X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
2786 SmallVectorImpl<SDValue> &InVals) const {
2787 SelectionDAG &DAG = CLI.DAG;
2789 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
2790 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
2791 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
2792 SDValue Chain = CLI.Chain;
2793 SDValue Callee = CLI.Callee;
2794 CallingConv::ID CallConv = CLI.CallConv;
2795 bool &isTailCall = CLI.IsTailCall;
2796 bool isVarArg = CLI.IsVarArg;
2798 MachineFunction &MF = DAG.getMachineFunction();
2799 bool Is64Bit = Subtarget->is64Bit();
2800 bool IsWin64 = Subtarget->isCallingConvWin64(CallConv);
2801 StructReturnType SR = callIsStructReturn(Outs);
2802 bool IsSibcall = false;
2803 X86MachineFunctionInfo *X86Info = MF.getInfo<X86MachineFunctionInfo>();
2805 if (MF.getTarget().Options.DisableTailCalls)
2808 bool IsMustTail = CLI.CS && CLI.CS->isMustTailCall();
2810 // Force this to be a tail call. The verifier rules are enough to ensure
2811 // that we can lower this successfully without moving the return address around.
2814 } else if (isTailCall) {
2815 // Check if it's really possible to do a tail call.
2816 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
2817 isVarArg, SR != NotStructReturn,
2818 MF.getFunction()->hasStructRetAttr(), CLI.RetTy,
2819 Outs, OutVals, Ins, DAG);
2821 // Sibcalls are automatically detected tail calls which do not require any ABI changes.
2823 if (!MF.getTarget().Options.GuaranteedTailCallOpt && isTailCall)
2830 assert(!(isVarArg && IsTailCallConvention(CallConv)) &&
2831 "Var args not supported with calling convention fastcc, ghc or hipe");
2833 // Analyze operands of the call, assigning locations to each operand.
2834 SmallVector<CCValAssign, 16> ArgLocs;
2835 CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
2837 // Allocate shadow area for Win64
2839 CCInfo.AllocateStack(32, 8);
2841 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
2843 // Get a count of how many bytes are to be pushed on the stack.
2844 unsigned NumBytes = CCInfo.getNextStackOffset();
2846 // This is a sibcall. The memory operands are already available in the
2847 // caller's incoming argument area (its own caller's stack).
2849 else if (MF.getTarget().Options.GuaranteedTailCallOpt &&
2850 IsTailCallConvention(CallConv))
2851 NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
2854 if (isTailCall && !IsSibcall && !IsMustTail) {
2855 // Lower arguments at fp - stackoffset + fpdiff.
2856 unsigned NumBytesCallerPushed = X86Info->getBytesToPopOnReturn();
2858 FPDiff = NumBytesCallerPushed - NumBytes;
2860 // Set the delta of movement of the return address stack slot,
2861 // but only if this delta is lower (more negative) than the previous one.
2862 if (FPDiff < X86Info->getTCReturnAddrDelta())
2863 X86Info->setTCReturnAddrDelta(FPDiff);
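// Illustrative example (not from a test case): if the caller itself received 8
// bytes of stack arguments (so it pops 8 bytes on return) and this call needs
// 24 bytes of outgoing arguments, FPDiff is 8 - 24 = -16 and the return
// address slot has to be moved 16 bytes further down the stack.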
2866 unsigned NumBytesToPush = NumBytes;
2867 unsigned NumBytesToPop = NumBytes;
2869 // If we have an inalloca argument, all stack space has already been allocated
2870 // for us and is sitting right at the top of the stack. We don't support multiple
2871 // arguments passed in memory when using inalloca.
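// A rough IR sketch of the pattern handled here (names are illustrative):
//   %argmem = alloca inalloca <{ i32, i32 }>
//   ...
//   call void @callee(<{ i32, i32 }>* inalloca %argmem)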
2872 if (!Outs.empty() && Outs.back().Flags.isInAlloca()) {
2874 if (!ArgLocs.back().isMemLoc())
2875 report_fatal_error("cannot use inalloca attribute on a register "
2877 if (ArgLocs.back().getLocMemOffset() != 0)
2878 report_fatal_error("any parameter with the inalloca attribute must be "
2879 "the only memory argument");
2883 Chain = DAG.getCALLSEQ_START(
2884 Chain, DAG.getIntPtrConstant(NumBytesToPush, true), dl);
2886 SDValue RetAddrFrIdx;
2887 // Load return address for tail calls.
2888 if (isTailCall && FPDiff)
2889 Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, isTailCall,
2890 Is64Bit, FPDiff, dl);
2892 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
2893 SmallVector<SDValue, 8> MemOpChains;
2896 // Walk the register/memloc assignments, inserting copies/loads. In the case
2897 // of tail call optimization, arguments are handled later.
2898 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
2899 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2900 // Skip inalloca arguments, they have already been written.
2901 ISD::ArgFlagsTy Flags = Outs[i].Flags;
2902 if (Flags.isInAlloca())
2905 CCValAssign &VA = ArgLocs[i];
2906 EVT RegVT = VA.getLocVT();
2907 SDValue Arg = OutVals[i];
2908 bool isByVal = Flags.isByVal();
2910 // Promote the value if needed.
2911 switch (VA.getLocInfo()) {
2912 default: llvm_unreachable("Unknown loc info!");
2913 case CCValAssign::Full: break;
2914 case CCValAssign::SExt:
2915 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, RegVT, Arg);
2917 case CCValAssign::ZExt:
2918 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, RegVT, Arg);
2920 case CCValAssign::AExt:
2921 if (RegVT.is128BitVector()) {
2922 // Special case: passing MMX values in XMM registers.
2923 Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);
2924 Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg);
2925 Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg);
2927 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, RegVT, Arg);
2929 case CCValAssign::BCvt:
2930 Arg = DAG.getNode(ISD::BITCAST, dl, RegVT, Arg);
2932 case CCValAssign::Indirect: {
2933 // Store the argument.
2934 SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
2935 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
2936 Chain = DAG.getStore(Chain, dl, Arg, SpillSlot,
2937 MachinePointerInfo::getFixedStack(FI),
2944 if (VA.isRegLoc()) {
2945 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
2946 if (isVarArg && IsWin64) {
2947 // Win64 ABI requires argument XMM reg to be copied to the corresponding
2948 // shadow reg if callee is a varargs function.
2949 unsigned ShadowReg = 0;
2950 switch (VA.getLocReg()) {
2951 case X86::XMM0: ShadowReg = X86::RCX; break;
2952 case X86::XMM1: ShadowReg = X86::RDX; break;
2953 case X86::XMM2: ShadowReg = X86::R8; break;
2954 case X86::XMM3: ShadowReg = X86::R9; break;
2957 RegsToPass.push_back(std::make_pair(ShadowReg, Arg));
2959 } else if (!IsSibcall && (!isTailCall || isByVal)) {
2960 assert(VA.isMemLoc());
2961 if (!StackPtr.getNode())
2962 StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
2964 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
2965 dl, DAG, VA, Flags));
2969 if (!MemOpChains.empty())
2970 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
2972 if (Subtarget->isPICStyleGOT()) {
2973 // ELF / PIC requires GOT in the EBX register before function calls via PLT
2976 RegsToPass.push_back(std::make_pair(unsigned(X86::EBX),
2977 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), getPointerTy())));
2979 // If we are tail calling and generating PIC/GOT style code, load the
2980 // address of the callee into ECX. The value in ECX is used as the target of
2981 // the tail jump. This is done to circumvent the ebx/callee-saved problem
2982 // for tail calls on PIC/GOT architectures. Normally we would just put the
2983 // address of GOT into ebx and then call target@PLT. But for tail calls
2984 // ebx would be restored (since ebx is callee saved) before jumping to the
2987 // Note: The actual moving to ECX is done further down.
2988 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
2989 if (G && !G->getGlobal()->hasHiddenVisibility() &&
2990 !G->getGlobal()->hasProtectedVisibility())
2991 Callee = LowerGlobalAddress(Callee, DAG);
2992 else if (isa<ExternalSymbolSDNode>(Callee))
2993 Callee = LowerExternalSymbol(Callee, DAG);
2997 if (Is64Bit && isVarArg && !IsWin64 && !IsMustTail) {
2998 // From AMD64 ABI document:
2999 // For calls that may call functions that use varargs or stdargs
3000 // (prototype-less calls or calls to functions containing ellipsis (...) in
3001 // the declaration) %al is used as hidden argument to specify the number
3002 // of SSE registers used. The contents of %al do not need to match exactly
3003 // the number of registers, but must be an upper bound on the number of SSE
3004 // registers used and is in the range 0 - 8 inclusive.
3006 // Count the number of XMM registers allocated.
3007 static const MCPhysReg XMMArgRegs[] = {
3008 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
3009 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
3011 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);
3012 assert((Subtarget->hasSSE1() || !NumXMMRegs)
3013 && "SSE registers cannot be used when SSE is disabled");
3015 RegsToPass.push_back(std::make_pair(unsigned(X86::AL),
3016 DAG.getConstant(NumXMMRegs, MVT::i8)));
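// For illustration (assumed example, not from this file): a C call such as
//   printf("%f %f\n", x, y);   // x, y are doubles passed in XMM0 and XMM1
// ends up with NumXMMRegs == 2, so a "movb $2, %al" is emitted before the
// call; any upper bound up to 8 would also satisfy the ABI.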
3019 if (isVarArg && IsMustTail) {
3020 const auto &Forwards = X86Info->getForwardedMustTailRegParms();
3021 for (const auto &F : Forwards) {
3022 SDValue Val = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT);
3023 RegsToPass.push_back(std::make_pair(unsigned(F.PReg), Val));
3027 // For tail calls lower the arguments to the 'real' stack slots. Sibcalls
3028 // don't need this because the eligibility check rejects calls that require
3029 // shuffling arguments passed in memory.
3030 if (!IsSibcall && isTailCall) {
3031 // Force all the incoming stack arguments to be loaded from the stack
3032 // before any new outgoing arguments are stored to the stack, because the
3033 // outgoing stack slots may alias the incoming argument stack slots, and
3034 // the alias isn't otherwise explicit. This is slightly more conservative
3035 // than necessary, because it means that each store effectively depends
3036 // on every argument instead of just those arguments it would clobber.
3037 SDValue ArgChain = DAG.getStackArgumentTokenFactor(Chain);
3039 SmallVector<SDValue, 8> MemOpChains2;
3042 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3043 CCValAssign &VA = ArgLocs[i];
3046 assert(VA.isMemLoc());
3047 SDValue Arg = OutVals[i];
3048 ISD::ArgFlagsTy Flags = Outs[i].Flags;
3049 // Skip inalloca arguments. They don't require any work.
3050 if (Flags.isInAlloca())
3052 // Create frame index.
3053 int32_t Offset = VA.getLocMemOffset()+FPDiff;
3054 uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8;
3055 FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true);
3056 FIN = DAG.getFrameIndex(FI, getPointerTy());
3058 if (Flags.isByVal()) {
3059 // Copy relative to framepointer.
3060 SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset());
3061 if (!StackPtr.getNode())
3062 StackPtr = DAG.getCopyFromReg(Chain, dl,
3063 RegInfo->getStackRegister(),
3065 Source = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, Source);
3067 MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN,
3071 // Store relative to framepointer.
3072 MemOpChains2.push_back(
3073 DAG.getStore(ArgChain, dl, Arg, FIN,
3074 MachinePointerInfo::getFixedStack(FI),
3079 if (!MemOpChains2.empty())
3080 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);
3082 // Store the return address to the appropriate stack slot.
3083 Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx,
3084 getPointerTy(), RegInfo->getSlotSize(),
3088 // Build a sequence of copy-to-reg nodes chained together with token chain
3089 // and flag operands which copy the outgoing args into registers.
3091 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
3092 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
3093 RegsToPass[i].second, InFlag);
3094 InFlag = Chain.getValue(1);
3097 if (DAG.getTarget().getCodeModel() == CodeModel::Large) {
3098 assert(Is64Bit && "Large code model is only legal in 64-bit mode.");
3099 // In the 64-bit large code model, we have to make all calls
3100 // through a register, since the call instruction's 32-bit
3101 // pc-relative offset may not be large enough to hold the whole
3103 } else if (Callee->getOpcode() == ISD::GlobalAddress) {
3104 // If the callee is a GlobalAddress node (quite common, every direct call
3105 // is), turn it into a TargetGlobalAddress node so that legalize doesn't hack
3107 GlobalAddressSDNode* G = cast<GlobalAddressSDNode>(Callee);
3109 // We should use extra load for direct calls to dllimported functions in
3111 const GlobalValue *GV = G->getGlobal();
3112 if (!GV->hasDLLImportStorageClass()) {
3113 unsigned char OpFlags = 0;
3114 bool ExtraLoad = false;
3115 unsigned WrapperKind = ISD::DELETED_NODE;
3117 // On ELF targets, in both X86-64 and X86-32 mode, direct calls to
3118 // external symbols must go through the PLT in PIC mode. If the symbol
3119 // has hidden or protected visibility, or if it is static or local, then
3120 // we don't need to use the PLT - we can directly call it.
3121 if (Subtarget->isTargetELF() &&
3122 DAG.getTarget().getRelocationModel() == Reloc::PIC_ &&
3123 GV->hasDefaultVisibility() && !GV->hasLocalLinkage()) {
3124 OpFlags = X86II::MO_PLT;
3125 } else if (Subtarget->isPICStyleStubAny() &&
3126 (GV->isDeclaration() || GV->isWeakForLinker()) &&
3127 (!Subtarget->getTargetTriple().isMacOSX() ||
3128 Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) {
3129 // PC-relative references to external symbols should go through $stub,
3130 // unless we're building with the leopard linker or later, which
3131 // automatically synthesizes these stubs.
3132 OpFlags = X86II::MO_DARWIN_STUB;
3133 } else if (Subtarget->isPICStyleRIPRel() &&
3134 isa<Function>(GV) &&
3135 cast<Function>(GV)->getAttributes().
3136 hasAttribute(AttributeSet::FunctionIndex,
3137 Attribute::NonLazyBind)) {
3138 // If the function is marked as non-lazy, generate an indirect call
3139 // which loads from the GOT directly. This avoids runtime overhead
3140 // at the cost of eager binding (and one extra byte of encoding).
3141 OpFlags = X86II::MO_GOTPCREL;
3142 WrapperKind = X86ISD::WrapperRIP;
3146 Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(),
3147 G->getOffset(), OpFlags);
3149 // Add a wrapper if needed.
3150 if (WrapperKind != ISD::DELETED_NODE)
3151 Callee = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Callee);
3152 // Add extra indirection if needed.
3154 Callee = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Callee,
3155 MachinePointerInfo::getGOT(),
3156 false, false, false, 0);
3158 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
3159 unsigned char OpFlags = 0;
3161 // On ELF targets, in either X86-64 or X86-32 mode, direct calls to
3162 // external symbols should go through the PLT.
3163 if (Subtarget->isTargetELF() &&
3164 DAG.getTarget().getRelocationModel() == Reloc::PIC_) {
3165 OpFlags = X86II::MO_PLT;
3166 } else if (Subtarget->isPICStyleStubAny() &&
3167 (!Subtarget->getTargetTriple().isMacOSX() ||
3168 Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) {
3169 // PC-relative references to external symbols should go through $stub,
3170 // unless we're building with the leopard linker or later, which
3171 // automatically synthesizes these stubs.
3172 OpFlags = X86II::MO_DARWIN_STUB;
3175 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy(),
3177 } else if (Subtarget->isTarget64BitILP32() &&
3178 Callee->getValueType(0) == MVT::i32) {
3179 // Zero-extend the 32-bit Callee address to 64 bits, as required by the x32 ABI.
3180 Callee = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Callee);
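// E.g. on x32 an indirect call through a pointer held in a 32-bit GPR is
// widened here so the eventual 64-bit "callq *%r..." sees zeros in the upper
// 32 bits of the register (illustrative; the exact register is chosen later).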
3183 // Returns a chain & a flag for retval copy to use.
3184 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
3185 SmallVector<SDValue, 8> Ops;
3187 if (!IsSibcall && isTailCall) {
3188 Chain = DAG.getCALLSEQ_END(Chain,
3189 DAG.getIntPtrConstant(NumBytesToPop, true),
3190 DAG.getIntPtrConstant(0, true), InFlag, dl);
3191 InFlag = Chain.getValue(1);
3194 Ops.push_back(Chain);
3195 Ops.push_back(Callee);
3198 Ops.push_back(DAG.getConstant(FPDiff, MVT::i32));
3200 // Add argument registers to the end of the list so that they are known live
3202 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
3203 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
3204 RegsToPass[i].second.getValueType()));
3206 // Add a register mask operand representing the call-preserved registers.
3207 const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
3208 const uint32_t *Mask = TRI->getCallPreservedMask(CallConv);
3209 assert(Mask && "Missing call preserved mask for calling convention");
3210 Ops.push_back(DAG.getRegisterMask(Mask));
3212 if (InFlag.getNode())
3213 Ops.push_back(InFlag);
3217 //// If this is the first return lowered for this function, add the regs
3218 //// to the liveout set for the function.
3219 // This isn't right, although it's probably harmless on x86; liveouts
3220 // should be computed from returns not tail calls. Consider a void
3221 // function making a tail call to a function returning int.
3222 return DAG.getNode(X86ISD::TC_RETURN, dl, NodeTys, Ops);
3225 Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, Ops);
3226 InFlag = Chain.getValue(1);
3228 // Create the CALLSEQ_END node.
3229 unsigned NumBytesForCalleeToPop;
3230 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
3231 DAG.getTarget().Options.GuaranteedTailCallOpt))
3232 NumBytesForCalleeToPop = NumBytes; // Callee pops everything
3233 else if (!Is64Bit && !IsTailCallConvention(CallConv) &&
3234 !Subtarget->getTargetTriple().isOSMSVCRT() &&
3235 SR == StackStructReturn)
3236 // If this is a call to a struct-return function, the callee
3237 // pops the hidden struct pointer, so we have to push it back.
3238 // This is common for Darwin/X86, Linux & Mingw32 targets.
3239 // For MSVC Win32 targets, the caller pops the hidden struct pointer.
3240 NumBytesForCalleeToPop = 4;
3242 NumBytesForCalleeToPop = 0; // Callee pops nothing.
3244 // Returns a flag for retval copy to use.
3246 Chain = DAG.getCALLSEQ_END(Chain,
3247 DAG.getIntPtrConstant(NumBytesToPop, true),
3248 DAG.getIntPtrConstant(NumBytesForCalleeToPop,
3251 InFlag = Chain.getValue(1);
3254 // Handle result values, copying them out of physregs into vregs that we
3256 return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
3257 Ins, dl, DAG, InVals);
3260 //===----------------------------------------------------------------------===//
3261 // Fast Calling Convention (tail call) implementation
3262 //===----------------------------------------------------------------------===//
3264 // Like the stdcall convention, the callee cleans up its arguments, except that
3265 // ECX is reserved for storing the tail-called function's address. Only 2
3266 // registers are free for argument passing (inreg). Tail call optimization is
3267 // performed provided that:
3268 // * tailcallopt is enabled
3269 // * caller/callee are fastcc
3270 // On the X86-64 architecture, with GOT-style position-independent code, only
3271 // local (within-module) calls are supported at the moment.
3272 // To keep the stack aligned according to the platform ABI, the function
3273 // GetAlignedArgumentStackSize ensures that the argument delta is always a
3274 // multiple of the stack alignment. (Dynamic linkers need this - darwin's dyld, for example.)
3275 // If a tail-called callee has more arguments than the caller, the
3276 // caller needs to make sure that there is room to move the RETADDR to. This is
3277 // achieved by reserving an area the size of the argument delta right after the
3278 // original RETADDR, but before the saved framepointer or the spilled registers
3279 // e.g. caller(arg1, arg2) calls callee(arg1, arg2,arg3,arg4)
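// A rough sketch of the stack for that example (4-byte slots, offsets relative
// to the original return-address slot; illustrative only, the exact layout
// depends on the ABI in use):
//   original:                       after relocation for the tail call:
//     [arg2   ] at RETADDR+8          [callee arg4]
//     [arg1   ] at RETADDR+4          [callee arg3]
//     [RETADDR] at RETADDR+0          [callee arg2]
//     (reserved argument delta)       [callee arg1]
//                                     [RETADDR    ]  <- moved down by FPDiff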
3291 /// GetAlignedArgumentStackSize - Align the stack size, e.g. to 16n + 12 for a
3292 /// 16-byte alignment requirement.
3294 X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
3295 SelectionDAG& DAG) const {
3296 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
3297 const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
3298 unsigned StackAlignment = TFI.getStackAlignment();
3299 uint64_t AlignMask = StackAlignment - 1;
3300 int64_t Offset = StackSize;
3301 unsigned SlotSize = RegInfo->getSlotSize();
3302 if ( (Offset & AlignMask) <= (StackAlignment - SlotSize) ) {
3303 // The misalignment is at most (StackAlignment - SlotSize), so just add the difference.
3304 Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask));
3306 // Mask out the lower bits, then add the stack alignment once plus (StackAlignment - SlotSize).
3307 Offset = ((~AlignMask) & Offset) + StackAlignment +
3308 (StackAlignment-SlotSize);
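// Worked example (illustrative): with StackAlignment == 16 and SlotSize == 4,
// an incoming StackSize of 20 has (20 & 15) == 4 <= 12, so the first branch
// yields 20 + (12 - 4) == 28 == 16*1 + 12; a StackSize of 30 takes the second
// branch and yields (30 & ~15) + 16 + 12 == 44 == 16*2 + 12. Either way the
// result is congruent to (StackAlignment - SlotSize) modulo StackAlignment.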
3313 /// MatchingStackOffset - Return true if the given stack call argument is
3314 /// already available in the same relative position in the caller's
3315 /// incoming argument stack.
3317 bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
3318 MachineFrameInfo *MFI, const MachineRegisterInfo *MRI,
3319 const X86InstrInfo *TII) {
3320 unsigned Bytes = Arg.getValueType().getSizeInBits() / 8;
3322 if (Arg.getOpcode() == ISD::CopyFromReg) {
3323 unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
3324 if (!TargetRegisterInfo::isVirtualRegister(VR))
3326 MachineInstr *Def = MRI->getVRegDef(VR);
3329 if (!Flags.isByVal()) {
3330 if (!TII->isLoadFromStackSlot(Def, FI))
3333 unsigned Opcode = Def->getOpcode();
3334 if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r ||
3335 Opcode == X86::LEA64_32r) &&
3336 Def->getOperand(1).isFI()) {
3337 FI = Def->getOperand(1).getIndex();
3338 Bytes = Flags.getByValSize();
3342 } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
3343 if (Flags.isByVal())
3344 // ByVal argument is passed in as a pointer but it's now being
3345 // dereferenced. e.g.
3346 // define @foo(%struct.X* %A) {
3347 // tail call @bar(%struct.X* byval %A)
3350 SDValue Ptr = Ld->getBasePtr();
3351 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
3354 FI = FINode->getIndex();
3355 } else if (Arg.getOpcode() == ISD::FrameIndex && Flags.isByVal()) {
3356 FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Arg);
3357 FI = FINode->getIndex();
3358 Bytes = Flags.getByValSize();
3362 assert(FI != INT_MAX);
3363 if (!MFI->isFixedObjectIndex(FI))
3365 return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI);
3368 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
3369 /// for tail call optimization. Targets which want to do tail call
3370 /// optimization should implement this function.
3372 X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
3373 CallingConv::ID CalleeCC,
3375 bool isCalleeStructRet,
3376 bool isCallerStructRet,
3378 const SmallVectorImpl<ISD::OutputArg> &Outs,
3379 const SmallVectorImpl<SDValue> &OutVals,
3380 const SmallVectorImpl<ISD::InputArg> &Ins,
3381 SelectionDAG &DAG) const {
3382 if (!IsTailCallConvention(CalleeCC) && !IsCCallConvention(CalleeCC))
3385 // If -tailcallopt is specified, make fastcc functions tail-callable.
3386 const MachineFunction &MF = DAG.getMachineFunction();
3387 const Function *CallerF = MF.getFunction();
3389 // If the function return type is x86_fp80 and the callee return type is not,
3390 // then the FP_EXTEND of the call result is not a nop. It's not safe to
3391 // perform a tailcall optimization here.
3392 if (CallerF->getReturnType()->isX86_FP80Ty() && !RetTy->isX86_FP80Ty())
3395 CallingConv::ID CallerCC = CallerF->getCallingConv();
3396 bool CCMatch = CallerCC == CalleeCC;
3397 bool IsCalleeWin64 = Subtarget->isCallingConvWin64(CalleeCC);
3398 bool IsCallerWin64 = Subtarget->isCallingConvWin64(CallerCC);
3400 if (DAG.getTarget().Options.GuaranteedTailCallOpt) {
3401 if (IsTailCallConvention(CalleeCC) && CCMatch)
3406 // Look for obvious safe cases to perform tail call optimization that do not
3407 // require ABI changes. This is what gcc calls sibcall.
3409 // Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to
3410 // emit a special epilogue.
3411 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
3412 if (RegInfo->needsStackRealignment(MF))
3415 // Also avoid sibcall optimization if either caller or callee uses struct
3416 // return semantics.
3417 if (isCalleeStructRet || isCallerStructRet)
3420 // A stdcall/thiscall caller is expected to clean up its arguments; the
3421 // callee isn't going to do that.
3422 // FIXME: this is more restrictive than needed. We could produce a tailcall
3423 // when the stack adjustment matches. For example, with a thiscall that takes
3424 // only one argument.
3425 if (!CCMatch && (CallerCC == CallingConv::X86_StdCall ||
3426 CallerCC == CallingConv::X86_ThisCall))
3429 // Do not sibcall optimize vararg calls unless all arguments are passed via
3431 if (isVarArg && !Outs.empty()) {
3433 // Optimizing for varargs on Win64 is unlikely to be safe without
3434 // additional testing.
3435 if (IsCalleeWin64 || IsCallerWin64)
3438 SmallVector<CCValAssign, 16> ArgLocs;
3439 CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), ArgLocs,
3442 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
3443 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
3444 if (!ArgLocs[i].isRegLoc())
3448 // If the call result is in ST0 / ST1, it needs to be popped off the x87
3449 // stack. Therefore, if it's not used by the call it is not safe to optimize
3450 // this into a sibcall.
3451 bool Unused = false;
3452 for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
3459 SmallVector<CCValAssign, 16> RVLocs;
3460 CCState CCInfo(CalleeCC, false, DAG.getMachineFunction(), RVLocs,
3462 CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
3463 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
3464 CCValAssign &VA = RVLocs[i];
3465 if (VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1)
3470 // If the calling conventions do not match, then we'd better make sure the
3471 // results are returned in the same way as what the caller expects.
3473 SmallVector<CCValAssign, 16> RVLocs1;
3474 CCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(), RVLocs1,
3476 CCInfo1.AnalyzeCallResult(Ins, RetCC_X86);
3478 SmallVector<CCValAssign, 16> RVLocs2;
3479 CCState CCInfo2(CallerCC, false, DAG.getMachineFunction(), RVLocs2,
3481 CCInfo2.AnalyzeCallResult(Ins, RetCC_X86);
3483 if (RVLocs1.size() != RVLocs2.size())
3485 for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) {
3486 if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc())
3488 if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo())
3490 if (RVLocs1[i].isRegLoc()) {
3491 if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg())
3494 if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset())
3500 // If the callee takes no arguments then go on to check the results of the
3502 if (!Outs.empty()) {
3503 // Check if stack adjustment is needed. For now, do not do this if any
3504 // argument is passed on the stack.
3505 SmallVector<CCValAssign, 16> ArgLocs;
3506 CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), ArgLocs,
3509 // Allocate shadow area for Win64
3511 CCInfo.AllocateStack(32, 8);
3513 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
3514 if (CCInfo.getNextStackOffset()) {
3515 MachineFunction &MF = DAG.getMachineFunction();
3516 if (MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn())
3519 // Check if the arguments are already laid out in the same way as
3520 // the caller's fixed stack objects.
3521 MachineFrameInfo *MFI = MF.getFrameInfo();
3522 const MachineRegisterInfo *MRI = &MF.getRegInfo();
3523 const X86InstrInfo *TII = Subtarget->getInstrInfo();
3524 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3525 CCValAssign &VA = ArgLocs[i];
3526 SDValue Arg = OutVals[i];
3527 ISD::ArgFlagsTy Flags = Outs[i].Flags;
3528 if (VA.getLocInfo() == CCValAssign::Indirect)
3530 if (!VA.isRegLoc()) {
3531 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
3538 // If the tailcall address may be in a register, then make sure it's
3539 // possible to register allocate for it. In 32-bit, the call address can
3540 // only target EAX, EDX, or ECX since the tail call must be scheduled after
3541 // callee-saved registers are restored. These happen to be the same
3542 // registers used to pass 'inreg' arguments so watch out for those.
3543 if (!Subtarget->is64Bit() &&
3544 ((!isa<GlobalAddressSDNode>(Callee) &&
3545 !isa<ExternalSymbolSDNode>(Callee)) ||
3546 DAG.getTarget().getRelocationModel() == Reloc::PIC_)) {
3547 unsigned NumInRegs = 0;
3548 // In PIC we need an extra register to formulate the address computation
3550 unsigned MaxInRegs =
3551 (DAG.getTarget().getRelocationModel() == Reloc::PIC_) ? 2 : 3;
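// For example (illustrative): in 32-bit PIC mode only two of EAX/ECX/EDX can
// hold 'inreg' arguments, since the third is needed for the PIC address
// computation and the call target itself; a tail call that puts two arguments
// in these registers is therefore rejected by the loop below.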
3553 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3554 CCValAssign &VA = ArgLocs[i];
3557 unsigned Reg = VA.getLocReg();
3560 case X86::EAX: case X86::EDX: case X86::ECX:
3561 if (++NumInRegs == MaxInRegs)
3573 X86TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
3574 const TargetLibraryInfo *libInfo) const {
3575 return X86::createFastISel(funcInfo, libInfo);
3578 //===----------------------------------------------------------------------===//
3579 // Other Lowering Hooks
3580 //===----------------------------------------------------------------------===//
3582 static bool MayFoldLoad(SDValue Op) {
3583 return Op.hasOneUse() && ISD::isNormalLoad(Op.getNode());
3586 static bool MayFoldIntoStore(SDValue Op) {
3587 return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->use_begin());
3590 static bool isTargetShuffle(unsigned Opcode) {
3592 default: return false;
3593 case X86ISD::BLENDI:
3594 case X86ISD::PSHUFB:
3595 case X86ISD::PSHUFD:
3596 case X86ISD::PSHUFHW:
3597 case X86ISD::PSHUFLW:
3599 case X86ISD::PALIGNR:
3600 case X86ISD::MOVLHPS:
3601 case X86ISD::MOVLHPD:
3602 case X86ISD::MOVHLPS:
3603 case X86ISD::MOVLPS:
3604 case X86ISD::MOVLPD:
3605 case X86ISD::MOVSHDUP:
3606 case X86ISD::MOVSLDUP:
3607 case X86ISD::MOVDDUP:
3610 case X86ISD::UNPCKL:
3611 case X86ISD::UNPCKH:
3612 case X86ISD::VPERMILPI:
3613 case X86ISD::VPERM2X128:
3614 case X86ISD::VPERMI:
3619 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3620 SDValue V1, SelectionDAG &DAG) {
3622 default: llvm_unreachable("Unknown x86 shuffle node");
3623 case X86ISD::MOVSHDUP:
3624 case X86ISD::MOVSLDUP:
3625 case X86ISD::MOVDDUP:
3626 return DAG.getNode(Opc, dl, VT, V1);
3630 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3631 SDValue V1, unsigned TargetMask,
3632 SelectionDAG &DAG) {
3634 default: llvm_unreachable("Unknown x86 shuffle node");
3635 case X86ISD::PSHUFD:
3636 case X86ISD::PSHUFHW:
3637 case X86ISD::PSHUFLW:
3638 case X86ISD::VPERMILPI:
3639 case X86ISD::VPERMI:
3640 return DAG.getNode(Opc, dl, VT, V1, DAG.getConstant(TargetMask, MVT::i8));
3644 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3645 SDValue V1, SDValue V2, unsigned TargetMask,
3646 SelectionDAG &DAG) {
3648 default: llvm_unreachable("Unknown x86 shuffle node");
3649 case X86ISD::PALIGNR:
3650 case X86ISD::VALIGN:
3652 case X86ISD::VPERM2X128:
3653 return DAG.getNode(Opc, dl, VT, V1, V2,
3654 DAG.getConstant(TargetMask, MVT::i8));
3658 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3659 SDValue V1, SDValue V2, SelectionDAG &DAG) {
3661 default: llvm_unreachable("Unknown x86 shuffle node");
3662 case X86ISD::MOVLHPS:
3663 case X86ISD::MOVLHPD:
3664 case X86ISD::MOVHLPS:
3665 case X86ISD::MOVLPS:
3666 case X86ISD::MOVLPD:
3669 case X86ISD::UNPCKL:
3670 case X86ISD::UNPCKH:
3671 return DAG.getNode(Opc, dl, VT, V1, V2);
3675 SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
3676 MachineFunction &MF = DAG.getMachineFunction();
3677 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
3678 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
3679 int ReturnAddrIndex = FuncInfo->getRAIndex();
3681 if (ReturnAddrIndex == 0) {
3682 // Set up a frame object for the return address.
3683 unsigned SlotSize = RegInfo->getSlotSize();
3684 ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(SlotSize,
3687 FuncInfo->setRAIndex(ReturnAddrIndex);
3690 return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy());
3693 bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
3694 bool hasSymbolicDisplacement) {
3695 // Offset should fit into 32 bit immediate field.
3696 if (!isInt<32>(Offset))
3699 // If we don't have a symbolic displacement - we don't have any extra
3701 if (!hasSymbolicDisplacement)
3704 // FIXME: Some tweaks might be needed for medium code model.
3705 if (M != CodeModel::Small && M != CodeModel::Kernel)
3708 // For the small code model we assume that the last object lies 16MB before the
3709 // end of the 31-bit boundary. We may also accept fairly large negative constants,
3710 // knowing that all objects are in the positive half of the address space.
3711 if (M == CodeModel::Small && Offset < 16*1024*1024)
3714 // For the kernel code model we know that all objects reside in the negative
3715 // half of the 32-bit address space. We must not accept negative offsets, since
3716 // they may push the address just out of range, but we may accept fairly large positive ones.
3717 if (M == CodeModel::Kernel && Offset >= 0)
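// Illustrative examples for the two checks above: in the small code model an
// offset of 0x00F00000 (< 16MB) is accepted while 0x7F000000 is not; in the
// kernel code model an offset of 0x1000 is accepted while -0x10 is not.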
3723 /// isCalleePop - Determines whether the callee is required to pop its
3724 /// own arguments. Callee pop is necessary to support tail calls.
3725 bool X86::isCalleePop(CallingConv::ID CallingConv,
3726 bool is64Bit, bool IsVarArg, bool TailCallOpt) {
3727 switch (CallingConv) {
3730 case CallingConv::X86_StdCall:
3731 case CallingConv::X86_FastCall:
3732 case CallingConv::X86_ThisCall:
3734 case CallingConv::Fast:
3735 case CallingConv::GHC:
3736 case CallingConv::HiPE:
3743 /// \brief Return true if the condition is an unsigned comparison operation.
3744 static bool isX86CCUnsigned(unsigned X86CC) {
3746 default: llvm_unreachable("Invalid integer condition!");
3747 case X86::COND_E: return true;
3748 case X86::COND_G: return false;
3749 case X86::COND_GE: return false;
3750 case X86::COND_L: return false;
3751 case X86::COND_LE: return false;
3752 case X86::COND_NE: return true;
3753 case X86::COND_B: return true;
3754 case X86::COND_A: return true;
3755 case X86::COND_BE: return true;
3756 case X86::COND_AE: return true;
3758 llvm_unreachable("covered switch fell through?!");
3761 /// TranslateX86CC - Do a one-to-one translation of an ISD::CondCode to the X86
3762 /// specific condition code, returning the condition code and the LHS/RHS of the
3763 /// comparison to make.
3764 static unsigned TranslateX86CC(ISD::CondCode SetCCOpcode, bool isFP,
3765 SDValue &LHS, SDValue &RHS, SelectionDAG &DAG) {
3767 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
3768 if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
3769 // X > -1 -> X == 0, jump !sign.
3770 RHS = DAG.getConstant(0, RHS.getValueType());
3771 return X86::COND_NS;
3773 if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
3774 // X < 0 -> X == 0, jump on sign.
3777 if (SetCCOpcode == ISD::SETLT && RHSC->getZExtValue() == 1) {
3779 RHS = DAG.getConstant(0, RHS.getValueType());
3780 return X86::COND_LE;
3784 switch (SetCCOpcode) {
3785 default: llvm_unreachable("Invalid integer condition!");
3786 case ISD::SETEQ: return X86::COND_E;
3787 case ISD::SETGT: return X86::COND_G;
3788 case ISD::SETGE: return X86::COND_GE;
3789 case ISD::SETLT: return X86::COND_L;
3790 case ISD::SETLE: return X86::COND_LE;
3791 case ISD::SETNE: return X86::COND_NE;
3792 case ISD::SETULT: return X86::COND_B;
3793 case ISD::SETUGT: return X86::COND_A;
3794 case ISD::SETULE: return X86::COND_BE;
3795 case ISD::SETUGE: return X86::COND_AE;
3799 // First determine if it is required or is profitable to flip the operands.
3801 // If LHS is a foldable load, but RHS is not, flip the condition.
3802 if (ISD::isNON_EXTLoad(LHS.getNode()) &&
3803 !ISD::isNON_EXTLoad(RHS.getNode())) {
3804 SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode);
3805 std::swap(LHS, RHS);
3808 switch (SetCCOpcode) {
3814 std::swap(LHS, RHS);
3818 // On a floating point condition, the flags are set as follows:
3819 //  ZF  PF  CF   op
3820 //   0 | 0 | 0 | X > Y
3821 //   0 | 0 | 1 | X < Y
3822 //   1 | 0 | 0 | X == Y
3823 //   1 | 1 | 1 | unordered
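// For example (illustrative): after a "ucomiss %xmm1, %xmm0", an ordered
// greater-than compare (ISD::SETGT, after any operand swap above) maps to
// COND_A, i.e. the CF==0 && ZF==0 "ja" condition, while ISD::SETUO maps to
// COND_P (PF==1).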
3824 switch (SetCCOpcode) {
3825 default: llvm_unreachable("Condcode should be pre-legalized away");
3827 case ISD::SETEQ: return X86::COND_E;
3828 case ISD::SETOLT: // flipped
3830 case ISD::SETGT: return X86::COND_A;
3831 case ISD::SETOLE: // flipped
3833 case ISD::SETGE: return X86::COND_AE;
3834 case ISD::SETUGT: // flipped
3836 case ISD::SETLT: return X86::COND_B;
3837 case ISD::SETUGE: // flipped
3839 case ISD::SETLE: return X86::COND_BE;
3841 case ISD::SETNE: return X86::COND_NE;
3842 case ISD::SETUO: return X86::COND_P;
3843 case ISD::SETO: return X86::COND_NP;
3845 case ISD::SETUNE: return X86::COND_INVALID;
3849 /// hasFPCMov - is there a floating point cmov for the specific X86 condition
3850 /// code. The current x86 ISA includes the following FP cmov instructions:
3851 /// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
3852 static bool hasFPCMov(unsigned X86CC) {
3868 /// isFPImmLegal - Returns true if the target can instruction select the
3869 /// specified FP immediate natively. If false, the legalizer will
3870 /// materialize the FP immediate as a load from a constant pool.
3871 bool X86TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
3872 for (unsigned i = 0, e = LegalFPImmediates.size(); i != e; ++i) {
3873 if (Imm.bitwiseIsEqual(LegalFPImmediates[i]))
3879 bool X86TargetLowering::shouldReduceLoadWidth(SDNode *Load,
3880 ISD::LoadExtType ExtTy,
3882 // "ELF Handling for Thread-Local Storage" specifies that R_X86_64_GOTTPOFF
3883 // relocation target a movq or addq instruction: don't let the load shrink.
3884 SDValue BasePtr = cast<LoadSDNode>(Load)->getBasePtr();
3885 if (BasePtr.getOpcode() == X86ISD::WrapperRIP)
3886 if (const auto *GA = dyn_cast<GlobalAddressSDNode>(BasePtr.getOperand(0)))
3887 return GA->getTargetFlags() != X86II::MO_GOTTPOFF;
3891 /// \brief Returns true if it is beneficial to convert a load of a constant
3892 /// to just the constant itself.
3893 bool X86TargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
3895 assert(Ty->isIntegerTy());
3897 unsigned BitSize = Ty->getPrimitiveSizeInBits();
3898 if (BitSize == 0 || BitSize > 64)
3903 bool X86TargetLowering::isExtractSubvectorCheap(EVT ResVT,
3904 unsigned Index) const {
3905 if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
3908 return (Index == 0 || Index == ResVT.getVectorNumElements());
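// E.g. extracting either 128-bit half of a 256-bit vector (index 0 or 4 for a
// v8i32 -> v4i32 extract) is considered cheap, since it maps to reusing the
// low xmm register or a single vextractf128. (Illustrative interpretation.)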
3911 bool X86TargetLowering::isCheapToSpeculateCttz() const {
3912 // Speculate cttz only if we can directly use TZCNT.
3913 return Subtarget->hasBMI();
3916 bool X86TargetLowering::isCheapToSpeculateCtlz() const {
3917 // Speculate ctlz only if we can directly use LZCNT.
3918 return Subtarget->hasLZCNT();
3921 /// isUndefOrInRange - Return true if Val is undef or if its value falls within
3922 /// the specified range (L, H].
3923 static bool isUndefOrInRange(int Val, int Low, int Hi) {
3924 return (Val < 0) || (Val >= Low && Val < Hi);
3927 /// isUndefOrEqual - Val is either less than zero (undef) or equal to the
3928 /// specified value.
3929 static bool isUndefOrEqual(int Val, int CmpVal) {
3930 return (Val < 0 || Val == CmpVal);
3933 /// isSequentialOrUndefInRange - Return true if every element in Mask, beginning
3934 /// at position Pos and ending at Pos+Size, falls within the specified
3935 /// sequential range [Low, Low+Size), or is undef.
3936 static bool isSequentialOrUndefInRange(ArrayRef<int> Mask,
3937 unsigned Pos, unsigned Size, int Low) {
3938 for (unsigned i = Pos, e = Pos+Size; i != e; ++i, ++Low)
3939 if (!isUndefOrEqual(Mask[i], Low))
3944 /// isPSHUFDMask - Return true if the node specifies a shuffle of elements that
3945 /// is suitable for input to PSHUFD. That is, it doesn't reference the other
3946 /// operand - by default will match for first operand.
3947 static bool isPSHUFDMask(ArrayRef<int> Mask, MVT VT,
3948 bool TestSecondOperand = false) {
3949 if (VT != MVT::v4f32 && VT != MVT::v4i32 &&
3950 VT != MVT::v2f64 && VT != MVT::v2i64)
3953 unsigned NumElems = VT.getVectorNumElements();
3954 unsigned Lo = TestSecondOperand ? NumElems : 0;
3955 unsigned Hi = Lo + NumElems;
3957 for (unsigned i = 0; i < NumElems; ++i)
3958 if (!isUndefOrInRange(Mask[i], (int)Lo, (int)Hi))
3964 /// isPSHUFHWMask - Return true if the node specifies a shuffle of elements that
3965 /// is suitable for input to PSHUFHW.
3966 static bool isPSHUFHWMask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
3967 if (VT != MVT::v8i16 && (!HasInt256 || VT != MVT::v16i16))
3970 // Lower quadword copied in order or undef.
3971 if (!isSequentialOrUndefInRange(Mask, 0, 4, 0))
3974 // Upper quadword shuffled.
3975 for (unsigned i = 4; i != 8; ++i)
3976 if (!isUndefOrInRange(Mask[i], 4, 8))
3979 if (VT == MVT::v16i16) {
3980 // Lower quadword copied in order or undef.
3981 if (!isSequentialOrUndefInRange(Mask, 8, 4, 8))
3984 // Upper quadword shuffled.
3985 for (unsigned i = 12; i != 16; ++i)
3986 if (!isUndefOrInRange(Mask[i], 12, 16))
3993 /// isPSHUFLWMask - Return true if the node specifies a shuffle of elements that
3994 /// is suitable for input to PSHUFLW.
3995 static bool isPSHUFLWMask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
3996 if (VT != MVT::v8i16 && (!HasInt256 || VT != MVT::v16i16))
3999 // Upper quadword copied in order.
4000 if (!isSequentialOrUndefInRange(Mask, 4, 4, 4))
4003 // Lower quadword shuffled.
4004 for (unsigned i = 0; i != 4; ++i)
4005 if (!isUndefOrInRange(Mask[i], 0, 4))
4008 if (VT == MVT::v16i16) {
4009 // Upper quadword copied in order.
4010 if (!isSequentialOrUndefInRange(Mask, 12, 4, 12))
4013 // Lower quadword shuffled.
4014 for (unsigned i = 8; i != 12; ++i)
4015 if (!isUndefOrInRange(Mask[i], 8, 12))
4022 /// \brief Return true if the mask specifies a shuffle of elements that is
4023 /// suitable for input to intralane (palignr) or interlane (valign) vector
4025 static bool isAlignrMask(ArrayRef<int> Mask, MVT VT, bool InterLane) {
4026 unsigned NumElts = VT.getVectorNumElements();
4027 unsigned NumLanes = InterLane ? 1: VT.getSizeInBits()/128;
4028 unsigned NumLaneElts = NumElts/NumLanes;
4030 // Do not handle 64-bit element shuffles with palignr.
4031 if (NumLaneElts == 2)
4034 for (unsigned l = 0; l != NumElts; l+=NumLaneElts) {
4036 for (i = 0; i != NumLaneElts; ++i) {
4041 // Lane is all undef, go to next lane
4042 if (i == NumLaneElts)
4045 int Start = Mask[i+l];
4047 // Make sure it's in this lane in one of the sources
4048 if (!isUndefOrInRange(Start, l, l+NumLaneElts) &&
4049 !isUndefOrInRange(Start, l+NumElts, l+NumElts+NumLaneElts))
4052 // If not lane 0, then we must match lane 0
4053 if (l != 0 && Mask[i] >= 0 && !isUndefOrEqual(Start, Mask[i]+l))
4056 // Correct second source to be contiguous with first source
4057 if (Start >= (int)NumElts)
4058 Start -= NumElts - NumLaneElts;
4060 // Make sure we're shifting in the right direction.
4061 if (Start <= (int)(i+l))
4066 // Check the rest of the elements to see if they are consecutive.
4067 for (++i; i != NumLaneElts; ++i) {
4068 int Idx = Mask[i+l];
4070 // Make sure it's in this lane
4071 if (!isUndefOrInRange(Idx, l, l+NumLaneElts) &&
4072 !isUndefOrInRange(Idx, l+NumElts, l+NumElts+NumLaneElts))
4075 // If not lane 0, then we must match lane 0
4076 if (l != 0 && Mask[i] >= 0 && !isUndefOrEqual(Idx, Mask[i]+l))
4079 if (Idx >= (int)NumElts)
4080 Idx -= NumElts - NumLaneElts;
4082 if (!isUndefOrEqual(Idx, Start+i))
4091 /// \brief Return true if the node specifies a shuffle of elements that is
4092 /// suitable for input to PALIGNR.
4093 static bool isPALIGNRMask(ArrayRef<int> Mask, MVT VT,
4094 const X86Subtarget *Subtarget) {
4095 if ((VT.is128BitVector() && !Subtarget->hasSSSE3()) ||
4096 (VT.is256BitVector() && !Subtarget->hasInt256()) ||
4097 VT.is512BitVector())
4098 // FIXME: Add AVX512BW.
4101 return isAlignrMask(Mask, VT, false);
4104 /// \brief Return true if the node specifies a shuffle of elements that is
4105 /// suitable for input to VALIGN.
4106 static bool isVALIGNMask(ArrayRef<int> Mask, MVT VT,
4107 const X86Subtarget *Subtarget) {
4108 // FIXME: Add AVX512VL.
4109 if (!VT.is512BitVector() || !Subtarget->hasAVX512())
4111 return isAlignrMask(Mask, VT, true);
4114 /// CommuteVectorShuffleMask - Change values in a shuffle permute mask assuming
4115 /// the two vector operands have swapped position.
4116 static void CommuteVectorShuffleMask(SmallVectorImpl<int> &Mask,
4117 unsigned NumElems) {
4118 for (unsigned i = 0; i != NumElems; ++i) {
4122 else if (idx < (int)NumElems)
4123 Mask[i] = idx + NumElems;
4125 Mask[i] = idx - NumElems;
4129 /// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand
4130 /// specifies a shuffle of elements that is suitable for input to 128/256-bit
4131 /// SHUFPS and SHUFPD. If Commuted is true, then it checks for the sources to be the
4132 /// reverse of what x86 shuffles want.
4133 static bool isSHUFPMask(ArrayRef<int> Mask, MVT VT, bool Commuted = false) {
4135 unsigned NumElems = VT.getVectorNumElements();
4136 unsigned NumLanes = VT.getSizeInBits()/128;
4137 unsigned NumLaneElems = NumElems/NumLanes;
4139 if (NumLaneElems != 2 && NumLaneElems != 4)
4142 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4143 bool symetricMaskRequired =
4144 (VT.getSizeInBits() >= 256) && (EltSize == 32);
4146 // VSHUFPSY divides the resulting vector into 4 chunks.
4147 // The sources are also split into 4 chunks, and each destination
4148 // chunk must come from a different source chunk.
4150 // SRC1 => X7 X6 X5 X4 X3 X2 X1 X0
4151 // SRC2 => Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0
4153 // DST => Y7..Y4, Y7..Y4, X7..X4, X7..X4,
4154 // Y3..Y0, Y3..Y0, X3..X0, X3..X0
4156 // VSHUFPDY divides the resulting vector into 4 chunks.
4157 // The sources are also split into 4 chunks, and each destination
4158 // chunk must come from a different source chunk.
4160 // SRC1 => X3 X2 X1 X0
4161 // SRC2 => Y3 Y2 Y1 Y0
4163 // DST => Y3..Y2, X3..X2, Y1..Y0, X1..X0
4165 SmallVector<int, 4> MaskVal(NumLaneElems, -1);
4166 unsigned HalfLaneElems = NumLaneElems/2;
4167 for (unsigned l = 0; l != NumElems; l += NumLaneElems) {
4168 for (unsigned i = 0; i != NumLaneElems; ++i) {
4169 int Idx = Mask[i+l];
4170 unsigned RngStart = l + ((Commuted == (i<HalfLaneElems)) ? NumElems : 0);
4171 if (!isUndefOrInRange(Idx, RngStart, RngStart+NumLaneElems))
4173 // For VSHUFPSY, the mask of the second half must be the same as the
4174 // first but with the appropriate offsets. This works in the same way as
4175 // VPERMILPS works with masks.
4176 if (!symetricMaskRequired || Idx < 0)
4178 if (MaskVal[i] < 0) {
4179 MaskVal[i] = Idx - l;
4182 if ((signed)(Idx - l) != MaskVal[i])
4190 /// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand
4191 /// specifies a shuffle of elements that is suitable for input to MOVHLPS.
4192 static bool isMOVHLPSMask(ArrayRef<int> Mask, MVT VT) {
4193 if (!VT.is128BitVector())
4196 unsigned NumElems = VT.getVectorNumElements();
4201 // Expect bit0 == 6, bit1 == 7, bit2 == 2, bit3 == 3
4202 return isUndefOrEqual(Mask[0], 6) &&
4203 isUndefOrEqual(Mask[1], 7) &&
4204 isUndefOrEqual(Mask[2], 2) &&
4205 isUndefOrEqual(Mask[3], 3);
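// E.g. (illustrative) the IR shuffle
//   shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 6, i32 7, i32 2, i32 3>
// matches this predicate and can be selected as "movhlps %xmm1, %xmm0",
// assuming %a is in xmm0 and %b is in xmm1.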
4208 /// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form
4209 /// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef,
4211 static bool isMOVHLPS_v_undef_Mask(ArrayRef<int> Mask, MVT VT) {
4212 if (!VT.is128BitVector())
4215 unsigned NumElems = VT.getVectorNumElements();
4220 return isUndefOrEqual(Mask[0], 2) &&
4221 isUndefOrEqual(Mask[1], 3) &&
4222 isUndefOrEqual(Mask[2], 2) &&
4223 isUndefOrEqual(Mask[3], 3);
4226 /// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand
4227 /// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}.
4228 static bool isMOVLPMask(ArrayRef<int> Mask, MVT VT) {
4229 if (!VT.is128BitVector())
4232 unsigned NumElems = VT.getVectorNumElements();
4234 if (NumElems != 2 && NumElems != 4)
4237 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
4238 if (!isUndefOrEqual(Mask[i], i + NumElems))
4241 for (unsigned i = NumElems/2, e = NumElems; i != e; ++i)
4242 if (!isUndefOrEqual(Mask[i], i))
4248 /// isMOVLHPSMask - Return true if the specified VECTOR_SHUFFLE operand
4249 /// specifies a shuffle of elements that is suitable for input to MOVLHPS.
4250 static bool isMOVLHPSMask(ArrayRef<int> Mask, MVT VT) {
4251 if (!VT.is128BitVector())
4254 unsigned NumElems = VT.getVectorNumElements();
4256 if (NumElems != 2 && NumElems != 4)
4259 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
4260 if (!isUndefOrEqual(Mask[i], i))
4263 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
4264 if (!isUndefOrEqual(Mask[i + e], i + NumElems))
4270 /// isINSERTPSMask - Return true if the specified VECTOR_SHUFFLE operand
4271 /// specifies a shuffle of elements that is suitable for input to INSERTPS.
4272 /// i.e. if all but one element come from the same vector.
4273 static bool isINSERTPSMask(ArrayRef<int> Mask, MVT VT) {
4274 // TODO: Deal with AVX's VINSERTPS
4275 if (!VT.is128BitVector() || (VT != MVT::v4f32 && VT != MVT::v4i32))
4278 unsigned CorrectPosV1 = 0;
4279 unsigned CorrectPosV2 = 0;
4280 for (int i = 0, e = (int)VT.getVectorNumElements(); i != e; ++i) {
4281 if (Mask[i] == -1) {
4289 else if (Mask[i] == i + 4)
4293 if (CorrectPosV1 == 3 || CorrectPosV2 == 3)
4294 // We have 3 elements (undefs count as elements from any vector) from one
4295 // vector, and one from another.
4302 // Some special combinations that can be optimized.
4305 SDValue Compact8x32ShuffleNode(ShuffleVectorSDNode *SVOp,
4306 SelectionDAG &DAG) {
4307 MVT VT = SVOp->getSimpleValueType(0);
4310 if (VT != MVT::v8i32 && VT != MVT::v8f32)
4313 ArrayRef<int> Mask = SVOp->getMask();
4315 // These are the special masks that may be optimized.
4316 static const int MaskToOptimizeEven[] = {0, 8, 2, 10, 4, 12, 6, 14};
4317 static const int MaskToOptimizeOdd[] = {1, 9, 3, 11, 5, 13, 7, 15};
4318 bool MatchEvenMask = true;
4319 bool MatchOddMask = true;
4320 for (int i=0; i<8; ++i) {
4321 if (!isUndefOrEqual(Mask[i], MaskToOptimizeEven[i]))
4322 MatchEvenMask = false;
4323 if (!isUndefOrEqual(Mask[i], MaskToOptimizeOdd[i]))
4324 MatchOddMask = false;
4327 if (!MatchEvenMask && !MatchOddMask)
4330 SDValue UndefNode = DAG.getNode(ISD::UNDEF, dl, VT);
4332 SDValue Op0 = SVOp->getOperand(0);
4333 SDValue Op1 = SVOp->getOperand(1);
4335 if (MatchEvenMask) {
4336 // Shift the second operand right to 32 bits.
4337 static const int ShiftRightMask[] = {-1, 0, -1, 2, -1, 4, -1, 6 };
4338 Op1 = DAG.getVectorShuffle(VT, dl, Op1, UndefNode, ShiftRightMask);
4340 // Shift the first operand left to 32 bits.
4341 static const int ShiftLeftMask[] = {1, -1, 3, -1, 5, -1, 7, -1 };
4342 Op0 = DAG.getVectorShuffle(VT, dl, Op0, UndefNode, ShiftLeftMask);
4344 static const int BlendMask[] = {0, 9, 2, 11, 4, 13, 6, 15};
4345 return DAG.getVectorShuffle(VT, dl, Op0, Op1, BlendMask);
4348 /// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand
4349 /// specifies a shuffle of elements that is suitable for input to UNPCKL.
4350 static bool isUNPCKLMask(ArrayRef<int> Mask, MVT VT,
4351 bool HasInt256, bool V2IsSplat = false) {
4353 assert(VT.getSizeInBits() >= 128 &&
4354 "Unsupported vector type for unpckl");
4356 unsigned NumElts = VT.getVectorNumElements();
4357 if (VT.is256BitVector() && NumElts != 4 && NumElts != 8 &&
4358 (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4361 assert((!VT.is512BitVector() || VT.getScalarType().getSizeInBits() >= 32) &&
4362 "Unsupported vector type for unpckh");
4364 // AVX defines UNPCK* to operate independently on 128-bit lanes.
4365 unsigned NumLanes = VT.getSizeInBits()/128;
4366 unsigned NumLaneElts = NumElts/NumLanes;
4368 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4369 for (unsigned i = 0, j = l; i != NumLaneElts; i += 2, ++j) {
4370 int BitI = Mask[l+i];
4371 int BitI1 = Mask[l+i+1];
4372 if (!isUndefOrEqual(BitI, j))
4375 if (!isUndefOrEqual(BitI1, NumElts))
4378 if (!isUndefOrEqual(BitI1, j + NumElts))
4387 /// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand
4388 /// specifies a shuffle of elements that is suitable for input to UNPCKH.
4389 static bool isUNPCKHMask(ArrayRef<int> Mask, MVT VT,
4390 bool HasInt256, bool V2IsSplat = false) {
4391 assert(VT.getSizeInBits() >= 128 &&
4392 "Unsupported vector type for unpckh");
4394 unsigned NumElts = VT.getVectorNumElements();
4395 if (VT.is256BitVector() && NumElts != 4 && NumElts != 8 &&
4396 (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4399 assert((!VT.is512BitVector() || VT.getScalarType().getSizeInBits() >= 32) &&
4400 "Unsupported vector type for unpckh");
4402 // AVX defines UNPCK* to operate independently on 128-bit lanes.
4403 unsigned NumLanes = VT.getSizeInBits()/128;
4404 unsigned NumLaneElts = NumElts/NumLanes;
4406 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4407 for (unsigned i = 0, j = l+NumLaneElts/2; i != NumLaneElts; i += 2, ++j) {
4408 int BitI = Mask[l+i];
4409 int BitI1 = Mask[l+i+1];
4410 if (!isUndefOrEqual(BitI, j))
4413 if (isUndefOrEqual(BitI1, NumElts))
4416 if (!isUndefOrEqual(BitI1, j+NumElts))
4424 /// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form
4425 /// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef,
4427 static bool isUNPCKL_v_undef_Mask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
4428 unsigned NumElts = VT.getVectorNumElements();
4429 bool Is256BitVec = VT.is256BitVector();
4431 if (VT.is512BitVector())
4433 assert((VT.is128BitVector() || VT.is256BitVector()) &&
4434 "Unsupported vector type for unpckh");
4436 if (Is256BitVec && NumElts != 4 && NumElts != 8 &&
4437 (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4440 // For 256-bit i64/f64, use MOVDDUPY instead, so reject the matching pattern
4441 // FIXME: Need a better way to get rid of this, there's no latency difference
4442 // between UNPCKLPD and MOVDDUP, the latter should always be checked first and
4443 // the former later. We should also remove the "_undef" special mask.
4444 if (NumElts == 4 && Is256BitVec)
4447 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate
4448 // independently on 128-bit lanes.
4449 unsigned NumLanes = VT.getSizeInBits()/128;
4450 unsigned NumLaneElts = NumElts/NumLanes;
4452 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4453 for (unsigned i = 0, j = l; i != NumLaneElts; i += 2, ++j) {
4454 int BitI = Mask[l+i];
4455 int BitI1 = Mask[l+i+1];
4457 if (!isUndefOrEqual(BitI, j))
4459 if (!isUndefOrEqual(BitI1, j))
4467 /// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form
4468 /// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef,
4470 static bool isUNPCKH_v_undef_Mask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
4471 unsigned NumElts = VT.getVectorNumElements();
4473 if (VT.is512BitVector())
4476 assert((VT.is128BitVector() || VT.is256BitVector()) &&
4477 "Unsupported vector type for unpckh");
4479 if (VT.is256BitVector() && NumElts != 4 && NumElts != 8 &&
4480 (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4483 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate
4484 // independently on 128-bit lanes.
4485 unsigned NumLanes = VT.getSizeInBits()/128;
4486 unsigned NumLaneElts = NumElts/NumLanes;
4488 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4489 for (unsigned i = 0, j = l+NumLaneElts/2; i != NumLaneElts; i += 2, ++j) {
4490 int BitI = Mask[l+i];
4491 int BitI1 = Mask[l+i+1];
4492 if (!isUndefOrEqual(BitI, j))
4494 if (!isUndefOrEqual(BitI1, j))
4501 // Match for INSERTI64x4 INSERTF64x4 instructions (src0[0], src1[0]) or
4502 // (src1[0], src0[1]), manipulation with 256-bit sub-vectors
4503 static bool isINSERT64x4Mask(ArrayRef<int> Mask, MVT VT, unsigned int *Imm) {
4504 if (!VT.is512BitVector())
4507 unsigned NumElts = VT.getVectorNumElements();
4508 unsigned HalfSize = NumElts/2;
4509 if (isSequentialOrUndefInRange(Mask, 0, HalfSize, 0)) {
4510 if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, NumElts)) {
4515 if (isSequentialOrUndefInRange(Mask, 0, HalfSize, NumElts)) {
4516 if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, HalfSize)) {
4524 /// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand
4525 /// specifies a shuffle of elements that is suitable for input to MOVSS,
4526 /// MOVSD, and MOVD, i.e. setting the lowest element.
4527 static bool isMOVLMask(ArrayRef<int> Mask, EVT VT) {
4528 if (VT.getVectorElementType().getSizeInBits() < 32)
4530 if (!VT.is128BitVector())
4533 unsigned NumElts = VT.getVectorNumElements();
4535 if (!isUndefOrEqual(Mask[0], NumElts))
4538 for (unsigned i = 1; i != NumElts; ++i)
4539 if (!isUndefOrEqual(Mask[i], i))
4545 /// isVPERM2X128Mask - Match 256-bit shuffles where the elements are considered
4546 /// as permutations between 128-bit chunks or halves. As an example, the
4547 /// shuffle below selects halves of both operands:
4548 /// vector_shuffle <4, 5, 6, 7, 12, 13, 14, 15>
4549 /// The first half comes from the second half of V1 and the second half from
4550 /// the second half of V2.
4551 static bool isVPERM2X128Mask(ArrayRef<int> Mask, MVT VT, bool HasFp256) {
4552 if (!HasFp256 || !VT.is256BitVector())
4555 // The shuffle result is divided into half A and half B. In total the two
4556 // sources have 4 halves, namely: C, D, E, F. The final values of A and
4557 // B must come from C, D, E or F.
4558 unsigned HalfSize = VT.getVectorNumElements()/2;
4559 bool MatchA = false, MatchB = false;
4561 // Check if A comes from one of C, D, E, F.
4562 for (unsigned Half = 0; Half != 4; ++Half) {
4563 if (isSequentialOrUndefInRange(Mask, 0, HalfSize, Half*HalfSize)) {
4569 // Check if B comes from one of C, D, E, F.
4570 for (unsigned Half = 0; Half != 4; ++Half) {
4571 if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, Half*HalfSize)) {
4577 return MatchA && MatchB;
4580 /// getShuffleVPERM2X128Immediate - Return the appropriate immediate to shuffle
4581 /// the specified VECTOR_SHUFFLE mask with the VPERM2F128/VPERM2I128 instructions.
4582 static unsigned getShuffleVPERM2X128Immediate(ShuffleVectorSDNode *SVOp) {
4583 MVT VT = SVOp->getSimpleValueType(0);
4585 unsigned HalfSize = VT.getVectorNumElements()/2;
4587 unsigned FstHalf = 0, SndHalf = 0;
4588 for (unsigned i = 0; i < HalfSize; ++i) {
4589 if (SVOp->getMaskElt(i) > 0) {
4590 FstHalf = SVOp->getMaskElt(i)/HalfSize;
4594 for (unsigned i = HalfSize; i < HalfSize*2; ++i) {
4595 if (SVOp->getMaskElt(i) > 0) {
4596 SndHalf = SVOp->getMaskElt(i)/HalfSize;
4601 return (FstHalf | (SndHalf << 4));
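// E.g. for the v8i32 mask <4, 5, 6, 7, 12, 13, 14, 15> used as an example
// above, FstHalf == 1 and SndHalf == 3, giving the immediate 0x31 (vperm2f128
// selecting the upper halves of both sources). Illustrative only.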
4604 // Symmetric in-lane mask. Each lane has 4 elements (for imm8).
4605 static bool isPermImmMask(ArrayRef<int> Mask, MVT VT, unsigned& Imm8) {
4606 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4610 unsigned NumElts = VT.getVectorNumElements();
4612 if (VT.is128BitVector() || (VT.is256BitVector() && EltSize == 64)) {
4613 for (unsigned i = 0; i != NumElts; ++i) {
4616 Imm8 |= Mask[i] << (i*2);
4621 unsigned LaneSize = 4;
4622 SmallVector<int, 4> MaskVal(LaneSize, -1);
4624 for (unsigned l = 0; l != NumElts; l += LaneSize) {
4625 for (unsigned i = 0; i != LaneSize; ++i) {
4626 if (!isUndefOrInRange(Mask[i+l], l, l+LaneSize))
4630 if (MaskVal[i] < 0) {
4631 MaskVal[i] = Mask[i+l] - l;
4632 Imm8 |= MaskVal[i] << (i*2);
4635 if (Mask[i+l] != (signed)(MaskVal[i]+l))
4642 /// isVPERMILPMask - Return true if the specified VECTOR_SHUFFLE operand
4643 /// specifies a shuffle of elements that is suitable for input to VPERMILPD*.
4644 /// Note that VPERMIL mask matching differs depending on whether the underlying
4645 /// element type is 32 or 64 bits. For VPERMILPS the high half of the mask must
4646 /// select the same positions as the low half, but from the upper half of the source.
4647 /// For VPERMILPD the two lanes can be shuffled independently of each other,
4648 /// with the restriction that lanes can't be crossed. Also handles PSHUFDY.
4649 static bool isVPERMILPMask(ArrayRef<int> Mask, MVT VT) {
4650 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4651 if (VT.getSizeInBits() < 256 || EltSize < 32)
4653 bool symmetricMaskRequired = (EltSize == 32);
4654 unsigned NumElts = VT.getVectorNumElements();
4656 unsigned NumLanes = VT.getSizeInBits()/128;
4657 unsigned LaneSize = NumElts/NumLanes;
4658 // 2 or 4 elements in one lane
4660 SmallVector<int, 4> ExpectedMaskVal(LaneSize, -1);
4661 for (unsigned l = 0; l != NumElts; l += LaneSize) {
4662 for (unsigned i = 0; i != LaneSize; ++i) {
4663 if (!isUndefOrInRange(Mask[i+l], l, l+LaneSize))
4665 if (symmetricMaskRequired) {
4666 if (ExpectedMaskVal[i] < 0 && Mask[i+l] >= 0) {
4667 ExpectedMaskVal[i] = Mask[i+l] - l;
4670 if (!isUndefOrEqual(Mask[i+l], ExpectedMaskVal[i]+l))
4678 /// isCommutedMOVLMask - Returns true if the shuffle mask is the reverse of what
4679 /// x86 movss wants: here the lowest element must come from the lowest element of
4680 /// vector 1 and the remaining elements must come from vector 2 in order.
4681 static bool isCommutedMOVLMask(ArrayRef<int> Mask, MVT VT,
4682 bool V2IsSplat = false, bool V2IsUndef = false) {
4683 if (!VT.is128BitVector())
4686 unsigned NumOps = VT.getVectorNumElements();
4687 if (NumOps != 2 && NumOps != 4 && NumOps != 8 && NumOps != 16)
4690 if (!isUndefOrEqual(Mask[0], 0))
4693 for (unsigned i = 1; i != NumOps; ++i)
4694 if (!(isUndefOrEqual(Mask[i], i+NumOps) ||
4695 (V2IsUndef && isUndefOrInRange(Mask[i], NumOps, NumOps*2)) ||
4696 (V2IsSplat && isUndefOrEqual(Mask[i], NumOps))))
4702 /// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand
4703 /// specifies a shuffle of elements that is suitable for input to MOVSHDUP.
4704 /// Masks to match: <1, 1, 3, 3> or <1, 1, 3, 3, 5, 5, 7, 7>
4705 static bool isMOVSHDUPMask(ArrayRef<int> Mask, MVT VT,
4706 const X86Subtarget *Subtarget) {
4707 if (!Subtarget->hasSSE3())
4710 unsigned NumElems = VT.getVectorNumElements();
4712 if ((VT.is128BitVector() && NumElems != 4) ||
4713 (VT.is256BitVector() && NumElems != 8) ||
4714 (VT.is512BitVector() && NumElems != 16))
4717 // "i+1" is the value the indexed mask element must have
4718 for (unsigned i = 0; i != NumElems; i += 2)
4719 if (!isUndefOrEqual(Mask[i], i+1) ||
4720 !isUndefOrEqual(Mask[i+1], i+1))
4726 /// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand
4727 /// specifies a shuffle of elements that is suitable for input to MOVSLDUP.
4728 /// Masks to match: <0, 0, 2, 2> or <0, 0, 2, 2, 4, 4, 6, 6>
4729 static bool isMOVSLDUPMask(ArrayRef<int> Mask, MVT VT,
4730 const X86Subtarget *Subtarget) {
4731 if (!Subtarget->hasSSE3())
4734 unsigned NumElems = VT.getVectorNumElements();
4736 if ((VT.is128BitVector() && NumElems != 4) ||
4737 (VT.is256BitVector() && NumElems != 8) ||
4738 (VT.is512BitVector() && NumElems != 16))
4741 // "i" is the value the indexed mask element must have
4742 for (unsigned i = 0; i != NumElems; i += 2)
4743 if (!isUndefOrEqual(Mask[i], i) ||
4744 !isUndefOrEqual(Mask[i+1], i))
4750 /// isMOVDDUPYMask - Return true if the specified VECTOR_SHUFFLE operand
4751 /// specifies a shuffle of elements that is suitable for input to 256-bit
4752 /// version of MOVDDUP.
4753 static bool isMOVDDUPYMask(ArrayRef<int> Mask, MVT VT, bool HasFp256) {
4754 if (!HasFp256 || !VT.is256BitVector())
4757 unsigned NumElts = VT.getVectorNumElements();
4761 for (unsigned i = 0; i != NumElts/2; ++i)
4762 if (!isUndefOrEqual(Mask[i], 0))
4764 for (unsigned i = NumElts/2; i != NumElts; ++i)
4765 if (!isUndefOrEqual(Mask[i], NumElts/2))
4770 /// isMOVDDUPMask - Return true if the specified VECTOR_SHUFFLE operand
4771 /// specifies a shuffle of elements that is suitable for input to 128-bit
4772 /// version of MOVDDUP.
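/// For example (illustrative), the v2f64 mask <0, 0> matches: both result
/// elements take element 0 of the source, which is what MOVDDUP produces.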
4773 static bool isMOVDDUPMask(ArrayRef<int> Mask, MVT VT) {
4774 if (!VT.is128BitVector())
4777 unsigned e = VT.getVectorNumElements() / 2;
4778 for (unsigned i = 0; i != e; ++i)
4779 if (!isUndefOrEqual(Mask[i], i))
4781 for (unsigned i = 0; i != e; ++i)
4782 if (!isUndefOrEqual(Mask[e+i], i))
4787 /// isVEXTRACTIndex - Return true if the specified
4788 /// EXTRACT_SUBVECTOR operand specifies a vector extract that is
4789 /// suitable for instructions that extract 128- or 256-bit subvectors.
4790 static bool isVEXTRACTIndex(SDNode *N, unsigned vecWidth) {
4791 assert((vecWidth == 128 || vecWidth == 256) && "Unexpected vector width");
4792 if (!isa<ConstantSDNode>(N->getOperand(1).getNode()))
4795 // The index should be aligned on a vecWidth-bit boundary.
4797 cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
4799 MVT VT = N->getSimpleValueType(0);
4800 unsigned ElSize = VT.getVectorElementType().getSizeInBits();
4801 bool Result = (Index * ElSize) % vecWidth == 0;
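// Worked example (illustrative): for a v8f32 source and vecWidth == 128,
// extract indices 0 and 4 are accepted (0*32 and 4*32 are multiples of 128),
// while index 2 is rejected (2*32 == 64 is not 128-bit aligned).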
4806 /// isVINSERTIndex - Return true if the specified INSERT_SUBVECTOR
4807 /// operand specifies a subvector insert that is suitable for input to
4808 /// insertion of 128 or 256-bit subvectors
4809 static bool isVINSERTIndex(SDNode *N, unsigned vecWidth) {
4810 assert((vecWidth == 128 || vecWidth == 256) && "Unexpected vector width");
4811 if (!isa<ConstantSDNode>(N->getOperand(2).getNode()))
4813 // The index should be aligned on a vecWidth-bit boundary.
4815 cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
4817 MVT VT = N->getSimpleValueType(0);
4818 unsigned ElSize = VT.getVectorElementType().getSizeInBits();
4819 bool Result = (Index * ElSize) % vecWidth == 0;
4824 bool X86::isVINSERT128Index(SDNode *N) {
4825 return isVINSERTIndex(N, 128);
4828 bool X86::isVINSERT256Index(SDNode *N) {
4829 return isVINSERTIndex(N, 256);
4832 bool X86::isVEXTRACT128Index(SDNode *N) {
4833 return isVEXTRACTIndex(N, 128);
4836 bool X86::isVEXTRACT256Index(SDNode *N) {
4837 return isVEXTRACTIndex(N, 256);
4840 /// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle
4841 /// the specified VECTOR_SHUFFLE mask with PSHUF* and SHUFP* instructions.
4842 /// Handles 128-bit and 256-bit.
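/// Worked example (derived from the loop below): for the v4i32 mask
/// <3, 1, 2, 0>, the 2-bit fields are packed as 3 | (1 << 2) | (2 << 4) | (0 << 6),
/// i.e. the familiar PSHUFD immediate 0x27.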
4843 static unsigned getShuffleSHUFImmediate(ShuffleVectorSDNode *N) {
4844 MVT VT = N->getSimpleValueType(0);
4846 assert((VT.getSizeInBits() >= 128) &&
4847 "Unsupported vector type for PSHUF/SHUFP");
4849 // Handle 128 and 256-bit vector lengths. AVX defines PSHUF/SHUFP to operate
4850 // independently on 128-bit lanes.
4851 unsigned NumElts = VT.getVectorNumElements();
4852 unsigned NumLanes = VT.getSizeInBits()/128;
4853 unsigned NumLaneElts = NumElts/NumLanes;
4855 assert((NumLaneElts == 2 || NumLaneElts == 4 || NumLaneElts == 8) &&
4856 "Only supports 2, 4 or 8 elements per lane");
4858 unsigned Shift = (NumLaneElts >= 4) ? 1 : 0;
4860 for (unsigned i = 0; i != NumElts; ++i) {
4861 int Elt = N->getMaskElt(i);
4862 if (Elt < 0) continue;
4863 Elt &= NumLaneElts - 1;
4864 unsigned ShAmt = (i << Shift) % 8;
4865 Mask |= Elt << ShAmt;
4871 /// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle
4872 /// the specified VECTOR_SHUFFLE mask with the PSHUFHW instruction.
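/// Worked example (illustrative): the v8i16 mask <0, 1, 2, 3, 7, 6, 5, 4>
/// leaves the low half alone and reverses the high half, giving
/// 3 | (2 << 2) | (1 << 4) | (0 << 6) = 0x1B.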
4873 static unsigned getShufflePSHUFHWImmediate(ShuffleVectorSDNode *N) {
4874 MVT VT = N->getSimpleValueType(0);
4876 assert((VT == MVT::v8i16 || VT == MVT::v16i16) &&
4877 "Unsupported vector type for PSHUFHW");
4879 unsigned NumElts = VT.getVectorNumElements();
4882 for (unsigned l = 0; l != NumElts; l += 8) {
4883 // 8 nodes per lane, but we only care about the last 4.
4884 for (unsigned i = 0; i < 4; ++i) {
4885 int Elt = N->getMaskElt(l+i+4);
4886 if (Elt < 0) continue;
4887 Elt &= 0x3; // only 2-bits.
4888 Mask |= Elt << (i * 2);
4895 /// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle
4896 /// the specified VECTOR_SHUFFLE mask with the PSHUFLW instruction.
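/// Worked example (illustrative): the v8i16 mask <2, 3, 0, 1, 4, 5, 6, 7>
/// swaps the two low pairs and leaves the high half alone, giving
/// 2 | (3 << 2) | (0 << 4) | (1 << 6) = 0x4E.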
4897 static unsigned getShufflePSHUFLWImmediate(ShuffleVectorSDNode *N) {
4898 MVT VT = N->getSimpleValueType(0);
4900 assert((VT == MVT::v8i16 || VT == MVT::v16i16) &&
4901 "Unsupported vector type for PSHUFHW");
4903 unsigned NumElts = VT.getVectorNumElements();
4906 for (unsigned l = 0; l != NumElts; l += 8) {
4907 // 8 nodes per lane, but we only care about the first 4.
4908 for (unsigned i = 0; i < 4; ++i) {
4909 int Elt = N->getMaskElt(l+i);
4910 if (Elt < 0) continue;
4911 Elt &= 0x3; // only 2-bits
4912 Mask |= Elt << (i * 2);
4919 /// \brief Return the appropriate immediate to shuffle the specified
4920 /// VECTOR_SHUFFLE mask with the PALIGNR (if InterLane is false) or the
4921 /// VALIGN (if InterLane is true) instruction.
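/// Worked example (illustrative): for a v16i8 mask whose first defined element
/// is 4, e.g. <4, 5, ..., 18, 19>, EltSize is 1 byte and the immediate is
/// (4 - 0) * 1 = 4, i.e. PALIGNR $4.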
4922 static unsigned getShuffleAlignrImmediate(ShuffleVectorSDNode *SVOp,
4924 MVT VT = SVOp->getSimpleValueType(0);
4925 unsigned EltSize = InterLane ? 1 :
4926 VT.getVectorElementType().getSizeInBits() >> 3;
4928 unsigned NumElts = VT.getVectorNumElements();
4929 unsigned NumLanes = VT.is512BitVector() ? 1 : VT.getSizeInBits()/128;
4930 unsigned NumLaneElts = NumElts/NumLanes;
4934 for (i = 0; i != NumElts; ++i) {
4935 Val = SVOp->getMaskElt(i);
4939 if (Val >= (int)NumElts)
4940 Val -= NumElts - NumLaneElts;
4942 assert(Val - i > 0 && "PALIGNR imm should be positive");
4943 return (Val - i) * EltSize;
4946 /// \brief Return the appropriate immediate to shuffle the specified
4947 /// VECTOR_SHUFFLE mask with the PALIGNR instruction.
4948 static unsigned getShufflePALIGNRImmediate(ShuffleVectorSDNode *SVOp) {
4949 return getShuffleAlignrImmediate(SVOp, false);
4952 /// \brief Return the appropriate immediate to shuffle the specified
4953 /// VECTOR_SHUFFLE mask with the VALIGN instruction.
4954 static unsigned getShuffleVALIGNImmediate(ShuffleVectorSDNode *SVOp) {
4955 return getShuffleAlignrImmediate(SVOp, true);
4959 static unsigned getExtractVEXTRACTImmediate(SDNode *N, unsigned vecWidth) {
4960 assert((vecWidth == 128 || vecWidth == 256) && "Unsupported vector width");
4961 if (!isa<ConstantSDNode>(N->getOperand(1).getNode()))
4962 llvm_unreachable("Illegal extract subvector for VEXTRACT");
4965 cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
4967 MVT VecVT = N->getOperand(0).getSimpleValueType();
4968 MVT ElVT = VecVT.getVectorElementType();
4970 unsigned NumElemsPerChunk = vecWidth / ElVT.getSizeInBits();
4971 return Index / NumElemsPerChunk;
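// Worked example (illustrative): extracting the upper 128-bit half of a v8f32
// (EXTRACT_SUBVECTOR index 4, vecWidth == 128) gives NumElemsPerChunk = 4 and
// an immediate of 4 / 4 = 1, selecting the high lane.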
4974 static unsigned getInsertVINSERTImmediate(SDNode *N, unsigned vecWidth) {
4975 assert((vecWidth == 128 || vecWidth == 256) && "Unsupported vector width");
4976 if (!isa<ConstantSDNode>(N->getOperand(2).getNode()))
4977 llvm_unreachable("Illegal insert subvector for VINSERT");
4980 cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
4982 MVT VecVT = N->getSimpleValueType(0);
4983 MVT ElVT = VecVT.getVectorElementType();
4985 unsigned NumElemsPerChunk = vecWidth / ElVT.getSizeInBits();
4986 return Index / NumElemsPerChunk;
4989 /// getExtractVEXTRACT128Immediate - Return the appropriate immediate
4990 /// to extract the specified EXTRACT_SUBVECTOR index with VEXTRACTF128
4991 /// and VEXTRACTI128 instructions.
4992 unsigned X86::getExtractVEXTRACT128Immediate(SDNode *N) {
4993 return getExtractVEXTRACTImmediate(N, 128);
4996 /// getExtractVEXTRACT256Immediate - Return the appropriate immediate
4997 /// to extract the specified EXTRACT_SUBVECTOR index with VEXTRACTF64x4
4998 /// and VEXTRACTI64x4 instructions.
4999 unsigned X86::getExtractVEXTRACT256Immediate(SDNode *N) {
5000 return getExtractVEXTRACTImmediate(N, 256);
5003 /// getInsertVINSERT128Immediate - Return the appropriate immediate
5004 /// to insert at the specified INSERT_SUBVECTOR index with VINSERTF128
5005 /// and VINSERTI128 instructions.
5006 unsigned X86::getInsertVINSERT128Immediate(SDNode *N) {
5007 return getInsertVINSERTImmediate(N, 128);
5010 /// getInsertVINSERT256Immediate - Return the appropriate immediate
5011 /// to insert at the specified INSERT_SUBVECTOR index with VINSERTF64x4
5012 /// and VINSERTI64x4 instructions.
5013 unsigned X86::getInsertVINSERT256Immediate(SDNode *N) {
5014 return getInsertVINSERTImmediate(N, 256);
5017 /// isZero - Returns true if V is a constant integer zero.
5018 static bool isZero(SDValue V) {
5019 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V);
5020 return C && C->isNullValue();
5023 /// isZeroNode - Returns true if Elt is a constant zero or a floating point
5024 /// constant +0.0.
5025 bool X86::isZeroNode(SDValue Elt) {
5028 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Elt))
5029 return CFP->getValueAPF().isPosZero();
5033 /// ShouldXformToMOVHLPS - Return true if the node should be transformed to
5034 /// match movhlps. The lower half elements should come from the upper half of
5035 /// V1 (and in order), and the upper half elements should come from the upper
5036 /// half of V2 (and in order).
5037 static bool ShouldXformToMOVHLPS(ArrayRef<int> Mask, MVT VT) {
5038 if (!VT.is128BitVector())
5040 if (VT.getVectorNumElements() != 4)
5042 for (unsigned i = 0, e = 2; i != e; ++i)
5043 if (!isUndefOrEqual(Mask[i], i+2))
5045 for (unsigned i = 2; i != 4; ++i)
5046 if (!isUndefOrEqual(Mask[i], i+4))
5051 /// isScalarLoadToVector - Returns true if the node is a scalar load that
5052 /// is promoted to a vector. It also returns the LoadSDNode by reference if
5053 /// required.
5054 static bool isScalarLoadToVector(SDNode *N, LoadSDNode **LD = nullptr) {
5055 if (N->getOpcode() != ISD::SCALAR_TO_VECTOR)
5057 N = N->getOperand(0).getNode();
5058 if (!ISD::isNON_EXTLoad(N))
5061 *LD = cast<LoadSDNode>(N);
5065 // Test whether the given value is a vector value which will be legalized
5066 // into a constant pool load.
5067 static bool WillBeConstantPoolLoad(SDNode *N) {
5068 if (N->getOpcode() != ISD::BUILD_VECTOR)
5071 // Check for any non-constant elements.
5072 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
5073 switch (N->getOperand(i).getNode()->getOpcode()) {
5075 case ISD::ConstantFP:
5082 // Vectors of all-zeros and all-ones are materialized with special
5083 // instructions rather than being loaded.
5084 return !ISD::isBuildVectorAllZeros(N) &&
5085 !ISD::isBuildVectorAllOnes(N);
5088 /// ShouldXformToMOVLP{S|D} - Return true if the node should be transformed to
5089 /// match movlp{s|d}. The lower half elements should come from the lower half of
5090 /// V1 (and in order), and the upper half elements should come from the upper
5091 /// half of V2 (and in order). And since V1 will become the source of the
5092 /// MOVLP, it must be either a vector load or a scalar load to vector.
5093 static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2,
5094 ArrayRef<int> Mask, MVT VT) {
5095 if (!VT.is128BitVector())
5098 if (!ISD::isNON_EXTLoad(V1) && !isScalarLoadToVector(V1))
5100 // If V2 is a vector load, don't do this transformation. We will try to use
5101 // a load-folding shufps op instead.
5102 if (ISD::isNON_EXTLoad(V2) || WillBeConstantPoolLoad(V2))
5105 unsigned NumElems = VT.getVectorNumElements();
5107 if (NumElems != 2 && NumElems != 4)
5109 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
5110 if (!isUndefOrEqual(Mask[i], i))
5112 for (unsigned i = NumElems/2, e = NumElems; i != e; ++i)
5113 if (!isUndefOrEqual(Mask[i], i+NumElems))
5118 /// isZeroShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved
5119 /// to a zero vector.
5120 /// FIXME: move to dag combiner / method on ShuffleVectorSDNode
5121 static bool isZeroShuffle(ShuffleVectorSDNode *N) {
5122 SDValue V1 = N->getOperand(0);
5123 SDValue V2 = N->getOperand(1);
5124 unsigned NumElems = N->getValueType(0).getVectorNumElements();
5125 for (unsigned i = 0; i != NumElems; ++i) {
5126 int Idx = N->getMaskElt(i);
5127 if (Idx >= (int)NumElems) {
5128 unsigned Opc = V2.getOpcode();
5129 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V2.getNode()))
5131 if (Opc != ISD::BUILD_VECTOR ||
5132 !X86::isZeroNode(V2.getOperand(Idx-NumElems)))
5134 } else if (Idx >= 0) {
5135 unsigned Opc = V1.getOpcode();
5136 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V1.getNode()))
5138 if (Opc != ISD::BUILD_VECTOR ||
5139 !X86::isZeroNode(V1.getOperand(Idx)))
5146 /// getZeroVector - Returns a vector of specified type with all zero elements.
5148 static SDValue getZeroVector(EVT VT, const X86Subtarget *Subtarget,
5149 SelectionDAG &DAG, SDLoc dl) {
5150 assert(VT.isVector() && "Expected a vector type");
5152 // Always build SSE zero vectors as <4 x i32> bitcasted
5153 // to their dest type. This ensures they get CSE'd.
5155 if (VT.is128BitVector()) { // SSE
5156 if (Subtarget->hasSSE2()) { // SSE2
5157 SDValue Cst = DAG.getConstant(0, MVT::i32);
5158 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
5160 SDValue Cst = DAG.getConstantFP(+0.0, MVT::f32);
5161 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f32, Cst, Cst, Cst, Cst);
5163 } else if (VT.is256BitVector()) { // AVX
5164 if (Subtarget->hasInt256()) { // AVX2
5165 SDValue Cst = DAG.getConstant(0, MVT::i32);
5166 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5167 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops);
5169 // 256-bit logic and arithmetic instructions in AVX are all
5170 // floating-point, no support for integer ops. Emit fp zeroed vectors.
5171 SDValue Cst = DAG.getConstantFP(+0.0, MVT::f32);
5172 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5173 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8f32, Ops);
5175 } else if (VT.is512BitVector()) { // AVX-512
5176 SDValue Cst = DAG.getConstant(0, MVT::i32);
5177 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst,
5178 Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5179 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i32, Ops);
5180 } else if (VT.getScalarType() == MVT::i1) {
5181 assert(VT.getVectorNumElements() <= 16 && "Unexpected vector type");
5182 SDValue Cst = DAG.getConstant(0, MVT::i1);
5183 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
5184 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
5186 llvm_unreachable("Unexpected vector type");
5188 return DAG.getNode(ISD::BITCAST, dl, VT, Vec);
5191 /// getOnesVector - Returns a vector of specified type with all bits set.
5192 /// Always build ones vectors as <4 x i32> or <8 x i32>. For 256-bit types with
5193 /// no AVX2 support, use two <4 x i32> vectors inserted in a <8 x i32> appropriately.
5194 /// Then bitcast to their original type, ensuring they get CSE'd.
5195 static SDValue getOnesVector(MVT VT, bool HasInt256, SelectionDAG &DAG,
5197 assert(VT.isVector() && "Expected a vector type");
5199 SDValue Cst = DAG.getConstant(~0U, MVT::i32);
5201 if (VT.is256BitVector()) {
5202 if (HasInt256) { // AVX2
5203 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5204 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops);
5206 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
5207 Vec = Concat128BitVectors(Vec, Vec, MVT::v8i32, 8, DAG, dl);
5209 } else if (VT.is128BitVector()) {
5210 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
5212 llvm_unreachable("Unexpected vector type");
5214 return DAG.getNode(ISD::BITCAST, dl, VT, Vec);
5217 /// NormalizeMask - V2 is a splat, modify the mask (if needed) so all elements
5218 /// that point to V2 point to its first element.
5219 static void NormalizeMask(SmallVectorImpl<int> &Mask, unsigned NumElems) {
5220 for (unsigned i = 0; i != NumElems; ++i) {
5221 if (Mask[i] > (int)NumElems) {
5227 /// getMOVL - Returns a vector_shuffle node for a movs{s|d}, movd
5228 /// operation of the specified width.
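/// For a v4f32, for example, this builds the mask <4, 1, 2, 3>: element 0 of
/// the result comes from V2 and the rest from V1, which is the MOVSS/MOVSD pattern.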
5229 static SDValue getMOVL(SelectionDAG &DAG, SDLoc dl, EVT VT, SDValue V1,
5231 unsigned NumElems = VT.getVectorNumElements();
5232 SmallVector<int, 8> Mask;
5233 Mask.push_back(NumElems);
5234 for (unsigned i = 1; i != NumElems; ++i)
5236 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
5239 /// getUnpackl - Returns a vector_shuffle node for an unpackl operation.
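/// For a v4i32, for example, this produces the interleaving mask <0, 4, 1, 5>.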
5240 static SDValue getUnpackl(SelectionDAG &DAG, SDLoc dl, MVT VT, SDValue V1,
5242 unsigned NumElems = VT.getVectorNumElements();
5243 SmallVector<int, 8> Mask;
5244 for (unsigned i = 0, e = NumElems/2; i != e; ++i) {
5246 Mask.push_back(i + NumElems);
5248 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
5251 /// getUnpackh - Returns a vector_shuffle node for an unpackh operation.
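/// For a v4i32, for example, this produces the interleaving mask <2, 6, 3, 7>.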
5252 static SDValue getUnpackh(SelectionDAG &DAG, SDLoc dl, MVT VT, SDValue V1,
5254 unsigned NumElems = VT.getVectorNumElements();
5255 SmallVector<int, 8> Mask;
5256 for (unsigned i = 0, Half = NumElems/2; i != Half; ++i) {
5257 Mask.push_back(i + Half);
5258 Mask.push_back(i + NumElems + Half);
5260 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
5263 // PromoteSplati8i16 - i16 and i8 vector types can't be used directly by
5264 // a generic shuffle instruction because the target has no such instructions.
5265 // Generate shuffles which repeat the i16 and i8 elements several times until
5266 // they can be represented by v4f32 and then manipulated by target supported shuffles.
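// For example (illustrative): to splat element 5 of a v8i16, one unpack-high
// step duplicates the upper half, the splat index becomes 1, and the result can
// then be handled as a 4-element (v4f32-sized) splat by getLegalSplat.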
5267 static SDValue PromoteSplati8i16(SDValue V, SelectionDAG &DAG, int &EltNo) {
5268 MVT VT = V.getSimpleValueType();
5269 int NumElems = VT.getVectorNumElements();
5272 while (NumElems > 4) {
5273 if (EltNo < NumElems/2) {
5274 V = getUnpackl(DAG, dl, VT, V, V);
5276 V = getUnpackh(DAG, dl, VT, V, V);
5277 EltNo -= NumElems/2;
5284 /// getLegalSplat - Generate a legal splat with supported x86 shuffles
5285 static SDValue getLegalSplat(SelectionDAG &DAG, SDValue V, int EltNo) {
5286 MVT VT = V.getSimpleValueType();
5289 if (VT.is128BitVector()) {
5290 V = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V);
5291 int SplatMask[4] = { EltNo, EltNo, EltNo, EltNo };
5292 V = DAG.getVectorShuffle(MVT::v4f32, dl, V, DAG.getUNDEF(MVT::v4f32),
5294 } else if (VT.is256BitVector()) {
5295 // To use VPERMILPS to splat scalars, the second half of indices must
5296 // refer to the higher part, which is a duplication of the lower one,
5297 // because VPERMILPS can only handle in-lane permutations.
5298 int SplatMask[8] = { EltNo, EltNo, EltNo, EltNo,
5299 EltNo+4, EltNo+4, EltNo+4, EltNo+4 };
5301 V = DAG.getNode(ISD::BITCAST, dl, MVT::v8f32, V);
5302 V = DAG.getVectorShuffle(MVT::v8f32, dl, V, DAG.getUNDEF(MVT::v8f32),
5305 llvm_unreachable("Vector size not supported");
5307 return DAG.getNode(ISD::BITCAST, dl, VT, V);
5310 /// PromoteSplat - Splat is promoted to target supported vector shuffles.
5311 static SDValue PromoteSplat(ShuffleVectorSDNode *SV, SelectionDAG &DAG) {
5312 MVT SrcVT = SV->getSimpleValueType(0);
5313 SDValue V1 = SV->getOperand(0);
5316 int EltNo = SV->getSplatIndex();
5317 int NumElems = SrcVT.getVectorNumElements();
5318 bool Is256BitVec = SrcVT.is256BitVector();
5320 assert(((SrcVT.is128BitVector() && NumElems > 4) || Is256BitVec) &&
5321 "Unknown how to promote splat for type");
5323 // Extract the 128-bit part containing the splat element and update
5324 // the splat element index when it refers to the higher register.
5326 V1 = Extract128BitVector(V1, EltNo, DAG, dl);
5327 if (EltNo >= NumElems/2)
5328 EltNo -= NumElems/2;
5331 // All i16 and i8 vector types can't be used directly by a generic shuffle
5332 // instruction because the target has no such instruction. Generate shuffles
5333 // which repeat i16 and i8 several times until they fit in i32, and then can
5334 // be manipulated by target supported shuffles.
5335 MVT EltVT = SrcVT.getVectorElementType();
5336 if (EltVT == MVT::i8 || EltVT == MVT::i16)
5337 V1 = PromoteSplati8i16(V1, DAG, EltNo);
5339 // Recreate the 256-bit vector and place the same 128-bit vector
5340 // into the low and high part. This is necessary because we want
5341 // to use VPERM* to shuffle the vectors
5343 V1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, SrcVT, V1, V1);
5346 return getLegalSplat(DAG, V1, EltNo);
5349 /// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified
5350 /// vector and a zero or undef vector. This produces a shuffle where the low
5351 /// element of V2 is swizzled into the zero/undef vector, landing at element
5352 /// Idx. This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3).
5353 static SDValue getShuffleVectorZeroOrUndef(SDValue V2, unsigned Idx,
5355 const X86Subtarget *Subtarget,
5356 SelectionDAG &DAG) {
5357 MVT VT = V2.getSimpleValueType();
5359 ? getZeroVector(VT, Subtarget, DAG, SDLoc(V2)) : DAG.getUNDEF(VT);
5360 unsigned NumElems = VT.getVectorNumElements();
5361 SmallVector<int, 16> MaskVec;
5362 for (unsigned i = 0; i != NumElems; ++i)
5363 // If this is the insertion idx, put the low elt of V2 here.
5364 MaskVec.push_back(i == Idx ? NumElems : i);
5365 return DAG.getVectorShuffle(VT, SDLoc(V2), V1, V2, &MaskVec[0]);
5368 /// getTargetShuffleMask - Calculates the shuffle mask corresponding to the
5369 /// target specific opcode. Returns true if the Mask could be calculated. Sets
5370 /// IsUnary to true if it only uses one source. Note that this will set IsUnary for
5371 /// shuffles which use a single input multiple times, and in those cases it will
5372 /// adjust the mask to only have indices within that single input.
5373 static bool getTargetShuffleMask(SDNode *N, MVT VT,
5374 SmallVectorImpl<int> &Mask, bool &IsUnary) {
5375 unsigned NumElems = VT.getVectorNumElements();
5379 bool IsFakeUnary = false;
5380 switch(N->getOpcode()) {
5381 case X86ISD::BLENDI:
5382 ImmN = N->getOperand(N->getNumOperands()-1);
5383 DecodeBLENDMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5386 ImmN = N->getOperand(N->getNumOperands()-1);
5387 DecodeSHUFPMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5388 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5390 case X86ISD::UNPCKH:
5391 DecodeUNPCKHMask(VT, Mask);
5392 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5394 case X86ISD::UNPCKL:
5395 DecodeUNPCKLMask(VT, Mask);
5396 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5398 case X86ISD::MOVHLPS:
5399 DecodeMOVHLPSMask(NumElems, Mask);
5400 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5402 case X86ISD::MOVLHPS:
5403 DecodeMOVLHPSMask(NumElems, Mask);
5404 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5406 case X86ISD::PALIGNR:
5407 ImmN = N->getOperand(N->getNumOperands()-1);
5408 DecodePALIGNRMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5410 case X86ISD::PSHUFD:
5411 case X86ISD::VPERMILPI:
5412 ImmN = N->getOperand(N->getNumOperands()-1);
5413 DecodePSHUFMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5416 case X86ISD::PSHUFHW:
5417 ImmN = N->getOperand(N->getNumOperands()-1);
5418 DecodePSHUFHWMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5421 case X86ISD::PSHUFLW:
5422 ImmN = N->getOperand(N->getNumOperands()-1);
5423 DecodePSHUFLWMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5426 case X86ISD::PSHUFB: {
5428 SDValue MaskNode = N->getOperand(1);
5429 while (MaskNode->getOpcode() == ISD::BITCAST)
5430 MaskNode = MaskNode->getOperand(0);
5432 if (MaskNode->getOpcode() == ISD::BUILD_VECTOR) {
5433 // If we have a build-vector, then things are easy.
5434 EVT VT = MaskNode.getValueType();
5435 assert(VT.isVector() &&
5436 "Can't produce a non-vector with a build_vector!");
5437 if (!VT.isInteger())
5440 int NumBytesPerElement = VT.getVectorElementType().getSizeInBits() / 8;
5442 SmallVector<uint64_t, 32> RawMask;
5443 for (int i = 0, e = MaskNode->getNumOperands(); i < e; ++i) {
5444 SDValue Op = MaskNode->getOperand(i);
5445 if (Op->getOpcode() == ISD::UNDEF) {
5446 RawMask.push_back((uint64_t)SM_SentinelUndef);
5449 auto *CN = dyn_cast<ConstantSDNode>(Op.getNode());
5452 APInt MaskElement = CN->getAPIntValue();
5454 // We now have to decode the element which could be any integer size and
5455 // extract each byte of it.
5456 for (int j = 0; j < NumBytesPerElement; ++j) {
5457 // Note that this is x86 and so always little endian: the low byte is
5458 // the first byte of the mask.
5459 RawMask.push_back(MaskElement.getLoBits(8).getZExtValue());
5460 MaskElement = MaskElement.lshr(8);
5463 DecodePSHUFBMask(RawMask, Mask);
5467 auto *MaskLoad = dyn_cast<LoadSDNode>(MaskNode);
5471 SDValue Ptr = MaskLoad->getBasePtr();
5472 if (Ptr->getOpcode() == X86ISD::Wrapper)
5473 Ptr = Ptr->getOperand(0);
5475 auto *MaskCP = dyn_cast<ConstantPoolSDNode>(Ptr);
5476 if (!MaskCP || MaskCP->isMachineConstantPoolEntry())
5479 if (auto *C = dyn_cast<Constant>(MaskCP->getConstVal())) {
5480 DecodePSHUFBMask(C, Mask);
5486 case X86ISD::VPERMI:
5487 ImmN = N->getOperand(N->getNumOperands()-1);
5488 DecodeVPERMMask(cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5493 DecodeScalarMoveMask(VT, /* IsLoad */ false, Mask);
5495 case X86ISD::VPERM2X128:
5496 ImmN = N->getOperand(N->getNumOperands()-1);
5497 DecodeVPERM2X128Mask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5498 if (Mask.empty()) return false;
5500 case X86ISD::MOVSLDUP:
5501 DecodeMOVSLDUPMask(VT, Mask);
5504 case X86ISD::MOVSHDUP:
5505 DecodeMOVSHDUPMask(VT, Mask);
5508 case X86ISD::MOVDDUP:
5509 DecodeMOVDDUPMask(VT, Mask);
5512 case X86ISD::MOVLHPD:
5513 case X86ISD::MOVLPD:
5514 case X86ISD::MOVLPS:
5515 // Not yet implemented
5517 default: llvm_unreachable("unknown target shuffle node");
5520 // If we have a fake unary shuffle, the shuffle mask is spread across two
5521 // inputs that are actually the same node. Re-map the mask to always point
5522 // into the first input.
5525 if (M >= (int)Mask.size())
5531 /// getShuffleScalarElt - Returns the scalar element that will make up the ith
5532 /// element of the result of the vector shuffle.
5533 static SDValue getShuffleScalarElt(SDNode *N, unsigned Index, SelectionDAG &DAG,
5536 return SDValue(); // Limit search depth.
5538 SDValue V = SDValue(N, 0);
5539 EVT VT = V.getValueType();
5540 unsigned Opcode = V.getOpcode();
5542 // Recurse into ISD::VECTOR_SHUFFLE node to find scalars.
5543 if (const ShuffleVectorSDNode *SV = dyn_cast<ShuffleVectorSDNode>(N)) {
5544 int Elt = SV->getMaskElt(Index);
5547 return DAG.getUNDEF(VT.getVectorElementType());
5549 unsigned NumElems = VT.getVectorNumElements();
5550 SDValue NewV = (Elt < (int)NumElems) ? SV->getOperand(0)
5551 : SV->getOperand(1);
5552 return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG, Depth+1);
5555 // Recurse into target specific vector shuffles to find scalars.
5556 if (isTargetShuffle(Opcode)) {
5557 MVT ShufVT = V.getSimpleValueType();
5558 unsigned NumElems = ShufVT.getVectorNumElements();
5559 SmallVector<int, 16> ShuffleMask;
5562 if (!getTargetShuffleMask(N, ShufVT, ShuffleMask, IsUnary))
5565 int Elt = ShuffleMask[Index];
5567 return DAG.getUNDEF(ShufVT.getVectorElementType());
5569 SDValue NewV = (Elt < (int)NumElems) ? N->getOperand(0)
5571 return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG,
5575 // Actual nodes that may contain scalar elements
5576 if (Opcode == ISD::BITCAST) {
5577 V = V.getOperand(0);
5578 EVT SrcVT = V.getValueType();
5579 unsigned NumElems = VT.getVectorNumElements();
5581 if (!SrcVT.isVector() || SrcVT.getVectorNumElements() != NumElems)
5585 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR)
5586 return (Index == 0) ? V.getOperand(0)
5587 : DAG.getUNDEF(VT.getVectorElementType());
5589 if (V.getOpcode() == ISD::BUILD_VECTOR)
5590 return V.getOperand(Index);
5595 /// getNumOfConsecutiveZeros - Return the number of elements of a vector
5596 /// shuffle operation which consecutively come from a zero vector. The
5597 /// search can start in two different directions, from left or right.
5598 /// We count undefs as zeros until PreferredNum is reached.
5599 static unsigned getNumOfConsecutiveZeros(ShuffleVectorSDNode *SVOp,
5600 unsigned NumElems, bool ZerosFromLeft,
5602 unsigned PreferredNum = -1U) {
5603 unsigned NumZeros = 0;
5604 for (unsigned i = 0; i != NumElems; ++i) {
5605 unsigned Index = ZerosFromLeft ? i : NumElems - i - 1;
5606 SDValue Elt = getShuffleScalarElt(SVOp, Index, DAG, 0);
5610 if (X86::isZeroNode(Elt))
5612 else if (Elt.getOpcode() == ISD::UNDEF) // Undef as zero up to PreferredNum.
5613 NumZeros = std::min(NumZeros + 1, PreferredNum);
5621 /// isShuffleMaskConsecutive - Check if the shuffle mask indices [MaskI, MaskE)
5622 /// correspond consecutively to elements from one of the vector operands,
5623 /// starting from its index OpIdx. OpNum is set to tell which source operand was used.
5625 bool isShuffleMaskConsecutive(ShuffleVectorSDNode *SVOp,
5626 unsigned MaskI, unsigned MaskE, unsigned OpIdx,
5627 unsigned NumElems, unsigned &OpNum) {
5628 bool SeenV1 = false;
5629 bool SeenV2 = false;
5631 for (unsigned i = MaskI; i != MaskE; ++i, ++OpIdx) {
5632 int Idx = SVOp->getMaskElt(i);
5633 // Ignore undef indices
5637 if (Idx < (int)NumElems)
5642 // Only accept consecutive elements from the same vector
5643 if ((Idx % NumElems != OpIdx) || (SeenV1 && SeenV2))
5647 OpNum = SeenV1 ? 0 : 1;
5651 /// isVectorShiftRight - Returns true if the shuffle can be implemented as a
5652 /// logical right shift of a vector.
5653 static bool isVectorShiftRight(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
5654 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
5656 SVOp->getSimpleValueType(0).getVectorNumElements();
5657 unsigned NumZeros = getNumOfConsecutiveZeros(
5658 SVOp, NumElems, false /* check zeros from right */, DAG,
5659 SVOp->getMaskElt(0));
5665 // Considering the elements in the mask that are not consecutive zeros,
5666 // check if they consecutively come from only one of the source vectors.
5668 // V1 = {X, A, B, C} 0
5670 // vector_shuffle V1, V2 <1, 2, 3, X>
5672 if (!isShuffleMaskConsecutive(SVOp,
5673 0, // Mask Start Index
5674 NumElems-NumZeros, // Mask End Index(exclusive)
5675 NumZeros, // Where to start looking in the src vector
5676 NumElems, // Number of elements in vector
5677 OpSrc)) // Which source operand ?
5682 ShVal = SVOp->getOperand(OpSrc);
5686 /// isVectorShiftLeft - Returns true if the shuffle can be implemented as a
5687 /// logical left shift of a vector.
5688 static bool isVectorShiftLeft(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
5689 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
5691 SVOp->getSimpleValueType(0).getVectorNumElements();
5692 unsigned NumZeros = getNumOfConsecutiveZeros(
5693 SVOp, NumElems, true /* check zeros from left */, DAG,
5694 NumElems - SVOp->getMaskElt(NumElems - 1) - 1);
5700 // Considering the elements in the mask that are not consecutive zeros,
5701 // check if they consecutively come from only one of the source vectors.
5703 // 0 { A, B, X, X } = V2
5705 // vector_shuffle V1, V2 <X, X, 4, 5>
5707 if (!isShuffleMaskConsecutive(SVOp,
5708 NumZeros, // Mask Start Index
5709 NumElems, // Mask End Index(exclusive)
5710 0, // Where to start looking in the src vector
5711 NumElems, // Number of elements in vector
5712 OpSrc)) // Which source operand ?
5717 ShVal = SVOp->getOperand(OpSrc);
5721 /// isVectorShift - Returns true if the shuffle can be implemented as a
5722 /// logical left or right shift of a vector.
5723 static bool isVectorShift(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
5724 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
5725 // Although the logic below supports any bitwidth, there are no
5726 // shift instructions which handle more than 128-bit vectors.
5727 if (!SVOp->getSimpleValueType(0).is128BitVector())
5730 if (isVectorShiftLeft(SVOp, DAG, isLeft, ShVal, ShAmt) ||
5731 isVectorShiftRight(SVOp, DAG, isLeft, ShVal, ShAmt))
5737 /// LowerBuildVectorv16i8 - Custom lower build_vector of v16i8.
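/// The lowering below pairs adjacent bytes into i16 elements: for each odd i,
/// bytes i-1 and i are zero-extended and combined as (byte(i) << 8) | byte(i-1),
/// then inserted into v8i16 element i/2, so e.g. operands 2 and 3 end up in
/// element 1. The v8i16 is bitcast back to v16i8 at the end.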
5739 static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
5740 unsigned NumNonZero, unsigned NumZero,
5742 const X86Subtarget* Subtarget,
5743 const TargetLowering &TLI) {
5750 for (unsigned i = 0; i < 16; ++i) {
5751 bool ThisIsNonZero = (NonZeros & (1 << i)) != 0;
5752 if (ThisIsNonZero && First) {
5754 V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
5756 V = DAG.getUNDEF(MVT::v8i16);
5761 SDValue ThisElt, LastElt;
5762 bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0;
5763 if (LastIsNonZero) {
5764 LastElt = DAG.getNode(ISD::ZERO_EXTEND, dl,
5765 MVT::i16, Op.getOperand(i-1));
5767 if (ThisIsNonZero) {
5768 ThisElt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Op.getOperand(i));
5769 ThisElt = DAG.getNode(ISD::SHL, dl, MVT::i16,
5770 ThisElt, DAG.getConstant(8, MVT::i8));
5772 ThisElt = DAG.getNode(ISD::OR, dl, MVT::i16, ThisElt, LastElt);
5776 if (ThisElt.getNode())
5777 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, V, ThisElt,
5778 DAG.getIntPtrConstant(i/2));
5782 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V);
5785 /// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16.
5787 static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros,
5788 unsigned NumNonZero, unsigned NumZero,
5790 const X86Subtarget* Subtarget,
5791 const TargetLowering &TLI) {
5798 for (unsigned i = 0; i < 8; ++i) {
5799 bool isNonZero = (NonZeros & (1 << i)) != 0;
5803 V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
5805 V = DAG.getUNDEF(MVT::v8i16);
5808 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl,
5809 MVT::v8i16, V, Op.getOperand(i),
5810 DAG.getIntPtrConstant(i));
5817 /// LowerBuildVectorv4x32 - Custom lower build_vector of v4i32 or v4f32.
5818 static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG,
5819 const X86Subtarget *Subtarget,
5820 const TargetLowering &TLI) {
5821 // Find all zeroable elements.
5823 for (int i=0; i < 4; ++i) {
5824 SDValue Elt = Op->getOperand(i);
5825 Zeroable[i] = (Elt.getOpcode() == ISD::UNDEF || X86::isZeroNode(Elt));
5827 assert(std::count_if(&Zeroable[0], &Zeroable[4],
5828 [](bool M) { return !M; }) > 1 &&
5829 "We expect at least two non-zero elements!");
5831 // We only know how to deal with build_vector nodes where elements are either
5832 // zeroable or extract_vector_elt with constant index.
5833 SDValue FirstNonZero;
5834 unsigned FirstNonZeroIdx;
5835 for (unsigned i=0; i < 4; ++i) {
5838 SDValue Elt = Op->getOperand(i);
5839 if (Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
5840 !isa<ConstantSDNode>(Elt.getOperand(1)))
5842 // Make sure that this node is extracting from a 128-bit vector.
5843 MVT VT = Elt.getOperand(0).getSimpleValueType();
5844 if (!VT.is128BitVector())
5846 if (!FirstNonZero.getNode()) {
5848 FirstNonZeroIdx = i;
5852 assert(FirstNonZero.getNode() && "Unexpected build vector of all zeros!");
5853 SDValue V1 = FirstNonZero.getOperand(0);
5854 MVT VT = V1.getSimpleValueType();
5856 // See if this build_vector can be lowered as a blend with zero.
5858 unsigned EltMaskIdx, EltIdx;
5860 for (EltIdx = 0; EltIdx < 4; ++EltIdx) {
5861 if (Zeroable[EltIdx]) {
5862 // The zero vector will be on the right hand side.
5863 Mask[EltIdx] = EltIdx+4;
5867 Elt = Op->getOperand(EltIdx);
5868 // By construction, Elt is a EXTRACT_VECTOR_ELT with constant index.
5869 EltMaskIdx = cast<ConstantSDNode>(Elt.getOperand(1))->getZExtValue();
5870 if (Elt.getOperand(0) != V1 || EltMaskIdx != EltIdx)
5872 Mask[EltIdx] = EltIdx;
5876 // Let the shuffle legalizer deal with blend operations.
5877 SDValue VZero = getZeroVector(VT, Subtarget, DAG, SDLoc(Op));
5878 if (V1.getSimpleValueType() != VT)
5879 V1 = DAG.getNode(ISD::BITCAST, SDLoc(V1), VT, V1);
5880 return DAG.getVectorShuffle(VT, SDLoc(V1), V1, VZero, &Mask[0]);
5883 // See if we can lower this build_vector to an INSERTPS.
5884 if (!Subtarget->hasSSE41())
5887 SDValue V2 = Elt.getOperand(0);
5888 if (Elt == FirstNonZero && EltIdx == FirstNonZeroIdx)
5891 bool CanFold = true;
5892 for (unsigned i = EltIdx + 1; i < 4 && CanFold; ++i) {
5896 SDValue Current = Op->getOperand(i);
5897 SDValue SrcVector = Current->getOperand(0);
5900 CanFold = SrcVector == V1 &&
5901 cast<ConstantSDNode>(Current.getOperand(1))->getZExtValue() == i;
5907 assert(V1.getNode() && "Expected at least two non-zero elements!");
5908 if (V1.getSimpleValueType() != MVT::v4f32)
5909 V1 = DAG.getNode(ISD::BITCAST, SDLoc(V1), MVT::v4f32, V1);
5910 if (V2.getSimpleValueType() != MVT::v4f32)
5911 V2 = DAG.getNode(ISD::BITCAST, SDLoc(V2), MVT::v4f32, V2);
5913 // Ok, we can emit an INSERTPS instruction.
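// The INSERTPS immediate encodes, from high bits to low: the source element of
// V2 in bits [7:6], the destination slot in bits [5:4], and the zero mask in
// bits [3:0]. E.g. (illustrative) EltMaskIdx = 2, EltIdx = 1 and ZMask = 0b1000
// would give the immediate (2 << 6) | (1 << 4) | 0x8 = 0x98.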
5915 for (int i = 0; i < 4; ++i)
5919 unsigned InsertPSMask = EltMaskIdx << 6 | EltIdx << 4 | ZMask;
5920 assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
5921 SDValue Result = DAG.getNode(X86ISD::INSERTPS, SDLoc(Op), MVT::v4f32, V1, V2,
5922 DAG.getIntPtrConstant(InsertPSMask));
5923 return DAG.getNode(ISD::BITCAST, SDLoc(Op), VT, Result);
5926 /// Return a vector logical shift node.
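/// For example (illustrative), shifting a v4i32 right by one 32-bit element is
/// requested as getVShift(false, MVT::v4i32, V, 32, DAG, TLI, dl): the source is
/// bitcast to v2i64, an X86ISD::VSRLDQ node is built with that bit count, and
/// the result is bitcast back to v4i32.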
5927 static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp,
5928 unsigned NumBits, SelectionDAG &DAG,
5929 const TargetLowering &TLI, SDLoc dl) {
5930 assert(VT.is128BitVector() && "Unknown type for VShift");
5931 MVT ShVT = MVT::v2i64;
5932 unsigned Opc = isLeft ? X86ISD::VSHLDQ : X86ISD::VSRLDQ;
5933 SrcOp = DAG.getNode(ISD::BITCAST, dl, ShVT, SrcOp);
5934 MVT ScalarShiftTy = TLI.getScalarShiftAmountTy(SrcOp.getValueType());
5935 SDValue ShiftVal = DAG.getConstant(NumBits, ScalarShiftTy);
5936 return DAG.getNode(ISD::BITCAST, dl, VT,
5937 DAG.getNode(Opc, dl, ShVT, SrcOp, ShiftVal));
5940 static SDValue
5941 LowerAsSplatVectorLoad(SDValue SrcOp, MVT VT, SDLoc dl, SelectionDAG &DAG) {
5943 // Check if the scalar load can be widened into a vector load. And if
5944 // the address is "base + cst" see if the cst can be "absorbed" into
5945 // the shuffle mask.
5946 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(SrcOp)) {
5947 SDValue Ptr = LD->getBasePtr();
5948 if (!ISD::isNormalLoad(LD) || LD->isVolatile())
5950 EVT PVT = LD->getValueType(0);
5951 if (PVT != MVT::i32 && PVT != MVT::f32)
5956 if (FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr)) {
5957 FI = FINode->getIndex();
5959 } else if (DAG.isBaseWithConstantOffset(Ptr) &&
5960 isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
5961 FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
5962 Offset = Ptr.getConstantOperandVal(1);
5963 Ptr = Ptr.getOperand(0);
5968 // FIXME: 256-bit vector instructions don't require a strict alignment,
5969 // improve this code to support it better.
5970 unsigned RequiredAlign = VT.getSizeInBits()/8;
5971 SDValue Chain = LD->getChain();
5972 // Make sure the stack object alignment is at least 16 or 32.
5973 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
5974 if (DAG.InferPtrAlignment(Ptr) < RequiredAlign) {
5975 if (MFI->isFixedObjectIndex(FI)) {
5976 // Can't change the alignment. FIXME: It's possible to compute
5977 // the exact stack offset and reference FI + adjusted offset instead,
5978 // if someone *really* cares about this; that's the way to implement it.
5981 MFI->setObjectAlignment(FI, RequiredAlign);
5985 // (Offset % 16 or 32) must be a multiple of 4. The address is then
5986 // Ptr + (Offset & ~15).
5989 if ((Offset % RequiredAlign) & 3)
5991 int64_t StartOffset = Offset & ~(RequiredAlign-1);
5993 Ptr = DAG.getNode(ISD::ADD, SDLoc(Ptr), Ptr.getValueType(),
5994 Ptr,DAG.getConstant(StartOffset, Ptr.getValueType()));
5996 int EltNo = (Offset - StartOffset) >> 2;
5997 unsigned NumElems = VT.getVectorNumElements();
5999 EVT NVT = EVT::getVectorVT(*DAG.getContext(), PVT, NumElems);
6000 SDValue V1 = DAG.getLoad(NVT, dl, Chain, Ptr,
6001 LD->getPointerInfo().getWithOffset(StartOffset),
6002 false, false, false, 0);
6004 SmallVector<int, 8> Mask;
6005 for (unsigned i = 0; i != NumElems; ++i)
6006 Mask.push_back(EltNo);
6008 return DAG.getVectorShuffle(NVT, dl, V1, DAG.getUNDEF(NVT), &Mask[0]);
6014 /// Given the initializing elements 'Elts' of a vector of type 'VT', see if the
6015 /// elements can be replaced by a single large load which has the same value as
6016 /// a build_vector or insert_subvector whose loaded operands are 'Elts'.
6018 /// Example: <load i32 *a, load i32 *a+4, undef, undef> -> zextload a
6020 /// FIXME: we'd also like to handle the case where the last elements are zero
6021 /// rather than undef via VZEXT_LOAD, but we do not detect that case today.
6022 /// There's even a handy isZeroNode for that purpose.
6023 static SDValue EltsFromConsecutiveLoads(EVT VT, ArrayRef<SDValue> Elts,
6024 SDLoc &DL, SelectionDAG &DAG,
6025 bool isAfterLegalize) {
6026 unsigned NumElems = Elts.size();
6028 LoadSDNode *LDBase = nullptr;
6029 unsigned LastLoadedElt = -1U;
6031 // For each element in the initializer, see if we've found a load or an undef.
6032 // If we don't find an initial load element, or later load elements are
6033 // non-consecutive, bail out.
6034 for (unsigned i = 0; i < NumElems; ++i) {
6035 SDValue Elt = Elts[i];
6036 // Look through a bitcast.
6037 if (Elt.getNode() && Elt.getOpcode() == ISD::BITCAST)
6038 Elt = Elt.getOperand(0);
6039 if (!Elt.getNode() ||
6040 (Elt.getOpcode() != ISD::UNDEF && !ISD::isNON_EXTLoad(Elt.getNode())))
6043 if (Elt.getNode()->getOpcode() == ISD::UNDEF)
6045 LDBase = cast<LoadSDNode>(Elt.getNode());
6049 if (Elt.getOpcode() == ISD::UNDEF)
6052 LoadSDNode *LD = cast<LoadSDNode>(Elt);
6053 EVT LdVT = Elt.getValueType();
6054 // Each loaded element must be the correct fractional portion of the
6055 // requested vector load.
6056 if (LdVT.getSizeInBits() != VT.getSizeInBits() / NumElems)
6058 if (!DAG.isConsecutiveLoad(LD, LDBase, LdVT.getSizeInBits() / 8, i))
6063 // If we have found an entire vector of loads and undefs, then return a large
6064 // load of the entire vector width starting at the base pointer. If we found
6065 // consecutive loads for the low half, generate a vzext_load node.
6066 if (LastLoadedElt == NumElems - 1) {
6067 assert(LDBase && "Did not find base load for merging consecutive loads");
6068 EVT EltVT = LDBase->getValueType(0);
6069 // Ensure that the input vector size for the merged loads matches the
6070 // cumulative size of the input elements.
6071 if (VT.getSizeInBits() != EltVT.getSizeInBits() * NumElems)
6074 if (isAfterLegalize &&
6075 !DAG.getTargetLoweringInfo().isOperationLegal(ISD::LOAD, VT))
6078 SDValue NewLd = SDValue();
6080 NewLd = DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(),
6081 LDBase->getPointerInfo(), LDBase->isVolatile(),
6082 LDBase->isNonTemporal(), LDBase->isInvariant(),
6083 LDBase->getAlignment());
6085 if (LDBase->hasAnyUseOfValue(1)) {
6086 SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
6088 SDValue(NewLd.getNode(), 1));
6089 DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewChain);
6090 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(LDBase, 1),
6091 SDValue(NewLd.getNode(), 1));
6097 // TODO: The code below fires only for loading the low v2i32 / v2f32
6098 // of a v4i32 / v4f32. It's probably worth generalizing.
6099 EVT EltVT = VT.getVectorElementType();
6100 if (NumElems == 4 && LastLoadedElt == 1 && (EltVT.getSizeInBits() == 32) &&
6101 DAG.getTargetLoweringInfo().isTypeLegal(MVT::v2i64)) {
6102 SDVTList Tys = DAG.getVTList(MVT::v2i64, MVT::Other);
6103 SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() };
6105 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, Ops, MVT::i64,
6106 LDBase->getPointerInfo(),
6107 LDBase->getAlignment(),
6108 false/*isVolatile*/, true/*ReadMem*/,
6111 // Make sure the newly-created LOAD is in the same position as LDBase in
6112 // terms of dependency. We create a TokenFactor for LDBase and ResNode, and
6113 // update uses of LDBase's output chain to use the TokenFactor.
6114 if (LDBase->hasAnyUseOfValue(1)) {
6115 SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
6116 SDValue(LDBase, 1), SDValue(ResNode.getNode(), 1));
6117 DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewChain);
6118 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(LDBase, 1),
6119 SDValue(ResNode.getNode(), 1));
6122 return DAG.getNode(ISD::BITCAST, DL, VT, ResNode);
6127 /// LowerVectorBroadcast - Attempt to use the vbroadcast instruction
6128 /// to generate a splat value for the following cases:
6129 /// 1. A splat BUILD_VECTOR which uses a single scalar load, or a constant.
6130 /// 2. A splat shuffle which uses a scalar_to_vector node which comes from
6131 /// a scalar load, or a constant.
6132 /// The VBROADCAST node is returned when a pattern is found,
6133 /// or SDValue() otherwise.
6134 static SDValue LowerVectorBroadcast(SDValue Op, const X86Subtarget* Subtarget,
6135 SelectionDAG &DAG) {
6136 // VBROADCAST requires AVX.
6137 // TODO: Splats could be generated for non-AVX CPUs using SSE
6138 // instructions, but there's less potential gain for only 128-bit vectors.
6139 if (!Subtarget->hasAVX())
6142 MVT VT = Op.getSimpleValueType();
6145 assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
6146 "Unsupported vector type for broadcast.");
6151 switch (Op.getOpcode()) {
6153 // Unknown pattern found.
6156 case ISD::BUILD_VECTOR: {
6157 auto *BVOp = cast<BuildVectorSDNode>(Op.getNode());
6158 BitVector UndefElements;
6159 SDValue Splat = BVOp->getSplatValue(&UndefElements);
6161 // We need a splat of a single value to use broadcast, and it doesn't
6162 // make any sense if the value is only in one element of the vector.
6163 if (!Splat || (VT.getVectorNumElements() - UndefElements.count()) <= 1)
6167 ConstSplatVal = (Ld.getOpcode() == ISD::Constant ||
6168 Ld.getOpcode() == ISD::ConstantFP);
6170 // Make sure that all of the users of a non-constant load are from the
6171 // BUILD_VECTOR node.
6172 if (!ConstSplatVal && !BVOp->isOnlyUserOf(Ld.getNode()))
6177 case ISD::VECTOR_SHUFFLE: {
6178 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
6180 // Shuffles must have a splat mask where the first element is
6181 // the one being broadcast.
6182 if ((!SVOp->isSplat()) || SVOp->getMaskElt(0) != 0)
6185 SDValue Sc = Op.getOperand(0);
6186 if (Sc.getOpcode() != ISD::SCALAR_TO_VECTOR &&
6187 Sc.getOpcode() != ISD::BUILD_VECTOR) {
6189 if (!Subtarget->hasInt256())
6192 // Use the register form of the broadcast instruction available on AVX2.
6193 if (VT.getSizeInBits() >= 256)
6194 Sc = Extract128BitVector(Sc, 0, DAG, dl);
6195 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Sc);
6198 Ld = Sc.getOperand(0);
6199 ConstSplatVal = (Ld.getOpcode() == ISD::Constant ||
6200 Ld.getOpcode() == ISD::ConstantFP);
6202 // The scalar_to_vector node and the suspected
6203 // load node must have exactly one user.
6204 // Constants may have multiple users.
6206 // AVX-512 has a register version of the broadcast.
6207 bool hasRegVer = Subtarget->hasAVX512() && VT.is512BitVector() &&
6208 Ld.getValueType().getSizeInBits() >= 32;
6209 if (!ConstSplatVal && ((!Sc.hasOneUse() || !Ld.hasOneUse()) &&
6216 unsigned ScalarSize = Ld.getValueType().getSizeInBits();
6217 bool IsGE256 = (VT.getSizeInBits() >= 256);
6219 // When optimizing for size, generate up to 5 extra bytes for a broadcast
6220 // instruction to save 8 or more bytes of constant pool data.
6221 // TODO: If multiple splats are generated to load the same constant,
6222 // it may be detrimental to overall size. There needs to be a way to detect
6223 // that condition to know if this is truly a size win.
6224 const Function *F = DAG.getMachineFunction().getFunction();
6225 bool OptForSize = F->getAttributes().
6226 hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
6228 // Handle broadcasting a single constant scalar from the constant pool
6229 // into a vector.
6230 // On Sandybridge (no AVX2), it is still better to load a constant vector
6231 // from the constant pool and not to broadcast it from a scalar.
6232 // But override that restriction when optimizing for size.
6233 // TODO: Check if splatting is recommended for other AVX-capable CPUs.
6234 if (ConstSplatVal && (Subtarget->hasAVX2() || OptForSize)) {
6235 EVT CVT = Ld.getValueType();
6236 assert(!CVT.isVector() && "Must not broadcast a vector type");
6238 // Splat f32, i32, v4f64, v4i64 in all cases with AVX2.
6239 // For size optimization, also splat v2f64 and v2i64, and for size opt
6240 // with AVX2, also splat i8 and i16.
6241 // With pattern matching, the VBROADCAST node may become a VMOVDDUP.
6242 if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
6243 (OptForSize && (ScalarSize == 64 || Subtarget->hasAVX2()))) {
6244 const Constant *C = nullptr;
6245 if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Ld))
6246 C = CI->getConstantIntValue();
6247 else if (ConstantFPSDNode *CF = dyn_cast<ConstantFPSDNode>(Ld))
6248 C = CF->getConstantFPValue();
6250 assert(C && "Invalid constant type");
6252 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6253 SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy());
6254 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
6255 Ld = DAG.getLoad(CVT, dl, DAG.getEntryNode(), CP,
6256 MachinePointerInfo::getConstantPool(),
6257 false, false, false, Alignment);
6259 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6263 bool IsLoad = ISD::isNormalLoad(Ld.getNode());
6265 // Handle AVX2 in-register broadcasts.
6266 if (!IsLoad && Subtarget->hasInt256() &&
6267 (ScalarSize == 32 || (IsGE256 && ScalarSize == 64)))
6268 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6270 // The scalar source must be a normal load.
6274 if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
6275 (Subtarget->hasVLX() && ScalarSize == 64))
6276 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6278 // The integer check is needed for the 64-bit into 128-bit case, so that we
6279 // don't match double, since there is no vbroadcastsd xmm.
6280 if (Subtarget->hasInt256() && Ld.getValueType().isInteger()) {
6281 if (ScalarSize == 8 || ScalarSize == 16 || ScalarSize == 64)
6282 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6285 // Unsupported broadcast.
6289 /// \brief For an EXTRACT_VECTOR_ELT with a constant index return the real
6290 /// underlying vector and index.
6292 /// Modifies \p ExtractedFromVec to the real vector and returns the real
6293 /// index.
6294 static int getUnderlyingExtractedFromVec(SDValue &ExtractedFromVec,
6296 int Idx = cast<ConstantSDNode>(ExtIdx)->getZExtValue();
6297 if (!isa<ShuffleVectorSDNode>(ExtractedFromVec))
6300 // For 256-bit vectors, LowerEXTRACT_VECTOR_ELT_SSE4 may have already
6301 // lowered this:
6302 //   (extract_vector_elt (v8f32 %vreg1), Constant<6>)
6303 // to:
6304 //   (extract_vector_elt (vector_shuffle<2,u,u,u>
6305 //                          (extract_subvector (v8f32 %vreg0), Constant<4>),
6306 //                          undef),
6307 //                       Constant<0>)
6308 // In this case the vector is the extract_subvector expression and the index
6309 // is 2, as specified by the shuffle.
6310 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(ExtractedFromVec);
6311 SDValue ShuffleVec = SVOp->getOperand(0);
6312 MVT ShuffleVecVT = ShuffleVec.getSimpleValueType();
6313 assert(ShuffleVecVT.getVectorElementType() ==
6314 ExtractedFromVec.getSimpleValueType().getVectorElementType());
6316 int ShuffleIdx = SVOp->getMaskElt(Idx);
6317 if (isUndefOrInRange(ShuffleIdx, 0, ShuffleVecVT.getVectorNumElements())) {
6318 ExtractedFromVec = ShuffleVec;
6324 static SDValue buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) {
6325 MVT VT = Op.getSimpleValueType();
6327 // Skip if insert_vec_elt is not supported.
6328 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6329 if (!TLI.isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT))
6333 unsigned NumElems = Op.getNumOperands();
6337 SmallVector<unsigned, 4> InsertIndices;
6338 SmallVector<int, 8> Mask(NumElems, -1);
6340 for (unsigned i = 0; i != NumElems; ++i) {
6341 unsigned Opc = Op.getOperand(i).getOpcode();
6343 if (Opc == ISD::UNDEF)
6346 if (Opc != ISD::EXTRACT_VECTOR_ELT) {
6347 // Quit if more than 1 element needs inserting.
6348 if (InsertIndices.size() > 1)
6351 InsertIndices.push_back(i);
6355 SDValue ExtractedFromVec = Op.getOperand(i).getOperand(0);
6356 SDValue ExtIdx = Op.getOperand(i).getOperand(1);
6357 // Quit if non-constant index.
6358 if (!isa<ConstantSDNode>(ExtIdx))
6360 int Idx = getUnderlyingExtractedFromVec(ExtractedFromVec, ExtIdx);
6362 // Quit if extracted from vector of different type.
6363 if (ExtractedFromVec.getValueType() != VT)
6366 if (!VecIn1.getNode())
6367 VecIn1 = ExtractedFromVec;
6368 else if (VecIn1 != ExtractedFromVec) {
6369 if (!VecIn2.getNode())
6370 VecIn2 = ExtractedFromVec;
6371 else if (VecIn2 != ExtractedFromVec)
6372 // Quit if more than 2 vectors to shuffle
6376 if (ExtractedFromVec == VecIn1)
6378 else if (ExtractedFromVec == VecIn2)
6379 Mask[i] = Idx + NumElems;
6382 if (!VecIn1.getNode())
6385 VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(VT);
6386 SDValue NV = DAG.getVectorShuffle(VT, DL, VecIn1, VecIn2, &Mask[0]);
6387 for (unsigned i = 0, e = InsertIndices.size(); i != e; ++i) {
6388 unsigned Idx = InsertIndices[i];
6389 NV = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, NV, Op.getOperand(Idx),
6390 DAG.getIntPtrConstant(Idx));
6396 // Lower BUILD_VECTOR operation for v8i1 and v16i1 types.
6398 X86TargetLowering::LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG) const {
6400 MVT VT = Op.getSimpleValueType();
6401 assert((VT.getVectorElementType() == MVT::i1) && (VT.getSizeInBits() <= 16) &&
6402 "Unexpected type in LowerBUILD_VECTORvXi1!");
6405 if (ISD::isBuildVectorAllZeros(Op.getNode())) {
6406 SDValue Cst = DAG.getTargetConstant(0, MVT::i1);
6407 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
6408 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
6411 if (ISD::isBuildVectorAllOnes(Op.getNode())) {
6412 SDValue Cst = DAG.getTargetConstant(1, MVT::i1);
6413 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
6414 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
6417 bool AllContants = true;
6418 uint64_t Immediate = 0;
6419 int NonConstIdx = -1;
6420 bool IsSplat = true;
6421 unsigned NumNonConsts = 0;
6422 unsigned NumConsts = 0;
6423 for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) {
6424 SDValue In = Op.getOperand(idx);
6425 if (In.getOpcode() == ISD::UNDEF)
6427 if (!isa<ConstantSDNode>(In)) {
6428 AllContants = false;
6433 if (cast<ConstantSDNode>(In)->getZExtValue())
6434 Immediate |= (1ULL << idx);
6436 if (In != Op.getOperand(0))
6441 SDValue FullMask = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1,
6442 DAG.getConstant(Immediate, MVT::i16));
6443 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, FullMask,
6444 DAG.getIntPtrConstant(0));
6447 if (NumNonConsts == 1 && NonConstIdx != 0) {
6450 SDValue VecAsImm = DAG.getConstant(Immediate,
6451 MVT::getIntegerVT(VT.getSizeInBits()));
6452 DstVec = DAG.getNode(ISD::BITCAST, dl, VT, VecAsImm);
6455 DstVec = DAG.getUNDEF(VT);
6456 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DstVec,
6457 Op.getOperand(NonConstIdx),
6458 DAG.getIntPtrConstant(NonConstIdx));
6460 if (!IsSplat && (NonConstIdx != 0))
6461 llvm_unreachable("Unsupported BUILD_VECTOR operation");
6462 MVT SelectVT = (VT == MVT::v16i1)? MVT::i16 : MVT::i8;
6465 Select = DAG.getNode(ISD::SELECT, dl, SelectVT, Op.getOperand(0),
6466 DAG.getConstant(-1, SelectVT),
6467 DAG.getConstant(0, SelectVT));
6469 Select = DAG.getNode(ISD::SELECT, dl, SelectVT, Op.getOperand(0),
6470 DAG.getConstant((Immediate | 1), SelectVT),
6471 DAG.getConstant(Immediate, SelectVT));
6472 return DAG.getNode(ISD::BITCAST, dl, VT, Select);
6475 /// \brief Return true if \p N implements a horizontal binop and return the
6476 /// operands for the horizontal binop into V0 and V1.
6478 /// This is a helper function of PerformBUILD_VECTORCombine.
6479 /// This function checks that the build_vector \p N in input implements a
6480 /// horizontal operation. Parameter \p Opcode defines the kind of horizontal
6481 /// operation to match.
6482 /// For example, if \p Opcode is equal to ISD::ADD, then this function
6483 /// checks if \p N implements a horizontal arithmetic add; if instead \p Opcode
6484 /// is equal to ISD::SUB, then this function checks if this is a horizontal arithmetic sub.
6487 /// This function only analyzes elements of \p N whose indices are
6488 /// in range [BaseIdx, LastIdx).
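///
/// For example, when \p Opcode is ISD::FADD, the v4f32 node
///   (build_vector (fadd (extract_elt A, 0), (extract_elt A, 1)),
///                 (fadd (extract_elt A, 2), (extract_elt A, 3)),
///                 (fadd (extract_elt B, 0), (extract_elt B, 1)),
///                 (fadd (extract_elt B, 2), (extract_elt B, 3)))
/// is recognized as a horizontal add with V0 = A and V1 = B, i.e. HADDPS A, B.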
6489 static bool isHorizontalBinOp(const BuildVectorSDNode *N, unsigned Opcode,
6491 unsigned BaseIdx, unsigned LastIdx,
6492 SDValue &V0, SDValue &V1) {
6493 EVT VT = N->getValueType(0);
6495 assert(BaseIdx * 2 <= LastIdx && "Invalid Indices in input!");
6496 assert(VT.isVector() && VT.getVectorNumElements() >= LastIdx &&
6497 "Invalid Vector in input!");
6499 bool IsCommutable = (Opcode == ISD::ADD || Opcode == ISD::FADD);
6500 bool CanFold = true;
6501 unsigned ExpectedVExtractIdx = BaseIdx;
6502 unsigned NumElts = LastIdx - BaseIdx;
6503 V0 = DAG.getUNDEF(VT);
6504 V1 = DAG.getUNDEF(VT);
6506 // Check if N implements a horizontal binop.
6507 for (unsigned i = 0, e = NumElts; i != e && CanFold; ++i) {
6508 SDValue Op = N->getOperand(i + BaseIdx);
6511 if (Op->getOpcode() == ISD::UNDEF) {
6512 // Update the expected vector extract index.
6513 if (i * 2 == NumElts)
6514 ExpectedVExtractIdx = BaseIdx;
6515 ExpectedVExtractIdx += 2;
6519 CanFold = Op->getOpcode() == Opcode && Op->hasOneUse();
6524 SDValue Op0 = Op.getOperand(0);
6525 SDValue Op1 = Op.getOperand(1);
6527 // Try to match the following pattern:
6528 // (BINOP (extract_vector_elt A, I), (extract_vector_elt A, I+1))
6529 CanFold = (Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
6530 Op1.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
6531 Op0.getOperand(0) == Op1.getOperand(0) &&
6532 isa<ConstantSDNode>(Op0.getOperand(1)) &&
6533 isa<ConstantSDNode>(Op1.getOperand(1)));
6537 unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
6538 unsigned I1 = cast<ConstantSDNode>(Op1.getOperand(1))->getZExtValue();
6540 if (i * 2 < NumElts) {
6541 if (V0.getOpcode() == ISD::UNDEF)
6542 V0 = Op0.getOperand(0);
6544 if (V1.getOpcode() == ISD::UNDEF)
6545 V1 = Op0.getOperand(0);
6546 if (i * 2 == NumElts)
6547 ExpectedVExtractIdx = BaseIdx;
6550 SDValue Expected = (i * 2 < NumElts) ? V0 : V1;
6551 if (I0 == ExpectedVExtractIdx)
6552 CanFold = I1 == I0 + 1 && Op0.getOperand(0) == Expected;
6553 else if (IsCommutable && I1 == ExpectedVExtractIdx) {
6554 // Try to match the following dag sequence:
6555 // (BINOP (extract_vector_elt A, I+1), (extract_vector_elt A, I))
6556 CanFold = I0 == I1 + 1 && Op1.getOperand(0) == Expected;
6560 ExpectedVExtractIdx += 2;
6566 /// \brief Emit a sequence of two 128-bit horizontal add/sub followed by
6567 /// a concat_vector.
6569 /// This is a helper function of PerformBUILD_VECTORCombine.
6570 /// This function expects two 256-bit vectors called V0 and V1.
6571 /// At first, each vector is split into two separate 128-bit vectors.
6572 /// Then, the resulting 128-bit vectors are used to implement two
6573 /// horizontal binary operations.
6575 /// The kind of horizontal binary operation is defined by \p X86Opcode.
6577 /// \p Mode specifies how the 128-bit parts of V0 and V1 are passed in input to
6578 /// the two new horizontal binops.
6579 /// When Mode is set, the first horizontal binop dag node takes as input
6580 /// the lower 128 bits of V0 and the upper 128 bits of V0. The second
6581 /// horizontal binop dag node takes as input the lower 128 bits of V1
6582 /// and the upper 128 bits of V1.
6584 /// HADD V0_LO, V0_HI
6585 /// HADD V1_LO, V1_HI
6587 /// Otherwise, the first horizontal binop dag node takes as input the lower
6588 /// 128 bits of V0 and the lower 128 bits of V1, and the second horizontal binop
6589 /// dag node takes the upper 128 bits of V0 and the upper 128 bits of V1.
6591 /// HADD V0_LO, V1_LO
6592 /// HADD V0_HI, V1_HI
6594 /// If \p isUndefLO is set, then the algorithm propagates UNDEF to the lower
6595 /// 128-bits of the result. If \p isUndefHI is set, then UNDEF is propagated to
6596 /// the upper 128-bits of the result.
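///
/// For example, with \p Mode unset and \p X86Opcode equal to X86ISD::FHADD, a
/// v8f32 result is built as
///   (concat_vectors (FHADD V0_LO, V1_LO), (FHADD V0_HI, V1_HI)).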
6597 static SDValue ExpandHorizontalBinOp(const SDValue &V0, const SDValue &V1,
6598 SDLoc DL, SelectionDAG &DAG,
6599 unsigned X86Opcode, bool Mode,
6600 bool isUndefLO, bool isUndefHI) {
6601 EVT VT = V0.getValueType();
6602 assert(VT.is256BitVector() && VT == V1.getValueType() &&
6603 "Invalid nodes in input!");
6605 unsigned NumElts = VT.getVectorNumElements();
6606 SDValue V0_LO = Extract128BitVector(V0, 0, DAG, DL);
6607 SDValue V0_HI = Extract128BitVector(V0, NumElts/2, DAG, DL);
6608 SDValue V1_LO = Extract128BitVector(V1, 0, DAG, DL);
6609 SDValue V1_HI = Extract128BitVector(V1, NumElts/2, DAG, DL);
6610 EVT NewVT = V0_LO.getValueType();
6612 SDValue LO = DAG.getUNDEF(NewVT);
6613 SDValue HI = DAG.getUNDEF(NewVT);
6616 // Don't emit a horizontal binop if the result is expected to be UNDEF.
6617 if (!isUndefLO && V0->getOpcode() != ISD::UNDEF)
6618 LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V0_HI);
6619 if (!isUndefHI && V1->getOpcode() != ISD::UNDEF)
6620 HI = DAG.getNode(X86Opcode, DL, NewVT, V1_LO, V1_HI);
6622 // Don't emit a horizontal binop if the result is expected to be UNDEF.
6623 if (!isUndefLO && (V0_LO->getOpcode() != ISD::UNDEF ||
6624 V1_LO->getOpcode() != ISD::UNDEF))
6625 LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V1_LO);
6627 if (!isUndefHI && (V0_HI->getOpcode() != ISD::UNDEF ||
6628 V1_HI->getOpcode() != ISD::UNDEF))
6629 HI = DAG.getNode(X86Opcode, DL, NewVT, V0_HI, V1_HI);
6632 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LO, HI);
6635 /// \brief Try to fold a build_vector that performs an 'addsub' into the
6636 /// sequence of 'vadd + vsub + blendi'.
6637 static SDValue matchAddSub(const BuildVectorSDNode *BV, SelectionDAG &DAG,
6638 const X86Subtarget *Subtarget) {
6640 EVT VT = BV->getValueType(0);
6641 unsigned NumElts = VT.getVectorNumElements();
6642 SDValue InVec0 = DAG.getUNDEF(VT);
6643 SDValue InVec1 = DAG.getUNDEF(VT);
6645 assert((VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v4f32 ||
6646 VT == MVT::v2f64) && "build_vector with an invalid type found!");
6648 // Odd-numbered elements in the input build vector are obtained from
6649 // adding two integer/float elements.
6650 // Even-numbered elements in the input build vector are obtained from
6651 // subtracting two integer/float elements.
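// For example, the following v4f32 build_vector matches and is folded into a
// single (X86ISD::ADDSUB A, B), i.e. an ADDSUBPS:
//   (build_vector (fsub (extract_elt A, 0), (extract_elt B, 0)),
//                 (fadd (extract_elt A, 1), (extract_elt B, 1)),
//                 (fsub (extract_elt A, 2), (extract_elt B, 2)),
//                 (fadd (extract_elt A, 3), (extract_elt B, 3)))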
6652 unsigned ExpectedOpcode = ISD::FSUB;
6653 unsigned NextExpectedOpcode = ISD::FADD;
6654 bool AddFound = false;
6655 bool SubFound = false;
6657 for (unsigned i = 0, e = NumElts; i != e; i++) {
6658 SDValue Op = BV->getOperand(i);
6660 // Skip 'undef' values.
6661 unsigned Opcode = Op.getOpcode();
6662 if (Opcode == ISD::UNDEF) {
6663 std::swap(ExpectedOpcode, NextExpectedOpcode);
6667 // Early exit if we found an unexpected opcode.
6668 if (Opcode != ExpectedOpcode)
6671 SDValue Op0 = Op.getOperand(0);
6672 SDValue Op1 = Op.getOperand(1);
6674 // Try to match the following pattern:
6675 // (BINOP (extract_vector_elt A, i), (extract_vector_elt B, i))
6676 // Early exit if we cannot match that sequence.
6677 if (Op0.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
6678 Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
6679 !isa<ConstantSDNode>(Op0.getOperand(1)) ||
6680 !isa<ConstantSDNode>(Op1.getOperand(1)) ||
6681 Op0.getOperand(1) != Op1.getOperand(1))
6684 unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
6688 // We found a valid add/sub node. Update the information accordingly.
6694 // Update InVec0 and InVec1.
6695 if (InVec0.getOpcode() == ISD::UNDEF)
6696 InVec0 = Op0.getOperand(0);
6697 if (InVec1.getOpcode() == ISD::UNDEF)
6698 InVec1 = Op1.getOperand(0);
6700 // Make sure that operands in input to each add/sub node always
6701 // come from the same pair of vectors.
6702 if (InVec0 != Op0.getOperand(0)) {
6703 if (ExpectedOpcode == ISD::FSUB)
6706 // FADD is commutable. Try to commute the operands
6707 // and then test again.
6708 std::swap(Op0, Op1);
6709 if (InVec0 != Op0.getOperand(0))
6713 if (InVec1 != Op1.getOperand(0))
6716 // Update the pair of expected opcodes.
6717 std::swap(ExpectedOpcode, NextExpectedOpcode);
6720 // Don't try to fold this build_vector into an ADDSUB if the inputs are undef.
6721 if (AddFound && SubFound && InVec0.getOpcode() != ISD::UNDEF &&
6722 InVec1.getOpcode() != ISD::UNDEF)
6723 return DAG.getNode(X86ISD::ADDSUB, DL, VT, InVec0, InVec1);
6728 static SDValue PerformBUILD_VECTORCombine(SDNode *N, SelectionDAG &DAG,
6729 const X86Subtarget *Subtarget) {
6731 EVT VT = N->getValueType(0);
6732 unsigned NumElts = VT.getVectorNumElements();
6733 BuildVectorSDNode *BV = cast<BuildVectorSDNode>(N);
6734 SDValue InVec0, InVec1;
6736 // Try to match an ADDSUB.
6737 if ((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
6738 (Subtarget->hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))) {
6739 SDValue Value = matchAddSub(BV, DAG, Subtarget);
6740 if (Value.getNode())
6744 // Try to match horizontal ADD/SUB.
6745 unsigned NumUndefsLO = 0;
6746 unsigned NumUndefsHI = 0;
6747 unsigned Half = NumElts/2;
6749 // Count the number of UNDEF operands in the build_vector in input.
6750 for (unsigned i = 0, e = Half; i != e; ++i)
6751 if (BV->getOperand(i)->getOpcode() == ISD::UNDEF)
6754 for (unsigned i = Half, e = NumElts; i != e; ++i)
6755 if (BV->getOperand(i)->getOpcode() == ISD::UNDEF)
6758 // Early exit if this is either a build_vector of all UNDEFs or all but
6759 // one of the operands are UNDEF.
6760 if (NumUndefsLO + NumUndefsHI + 1 >= NumElts)
6763 if ((VT == MVT::v4f32 || VT == MVT::v2f64) && Subtarget->hasSSE3()) {
6764 // Try to match an SSE3 float HADD/HSUB.
6765 if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, NumElts, InVec0, InVec1))
6766 return DAG.getNode(X86ISD::FHADD, DL, VT, InVec0, InVec1);
6768 if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, NumElts, InVec0, InVec1))
6769 return DAG.getNode(X86ISD::FHSUB, DL, VT, InVec0, InVec1);
6770 } else if ((VT == MVT::v4i32 || VT == MVT::v8i16) && Subtarget->hasSSSE3()) {
6771 // Try to match an SSSE3 integer HADD/HSUB.
6772 if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
6773 return DAG.getNode(X86ISD::HADD, DL, VT, InVec0, InVec1);
6775 if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, NumElts, InVec0, InVec1))
6776 return DAG.getNode(X86ISD::HSUB, DL, VT, InVec0, InVec1);
6779 if (!Subtarget->hasAVX())
6782 if ((VT == MVT::v8f32 || VT == MVT::v4f64)) {
6783 // Try to match an AVX horizontal add/sub of packed single/double
6784 // precision floating point values from 256-bit vectors.
6785 SDValue InVec2, InVec3;
6786 if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, Half, InVec0, InVec1) &&
6787 isHorizontalBinOp(BV, ISD::FADD, DAG, Half, NumElts, InVec2, InVec3) &&
6788 ((InVec0.getOpcode() == ISD::UNDEF ||
6789 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
6790 ((InVec1.getOpcode() == ISD::UNDEF ||
6791 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
6792 return DAG.getNode(X86ISD::FHADD, DL, VT, InVec0, InVec1);
6794 if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, Half, InVec0, InVec1) &&
6795 isHorizontalBinOp(BV, ISD::FSUB, DAG, Half, NumElts, InVec2, InVec3) &&
6796 ((InVec0.getOpcode() == ISD::UNDEF ||
6797 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
6798 ((InVec1.getOpcode() == ISD::UNDEF ||
6799 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
6800 return DAG.getNode(X86ISD::FHSUB, DL, VT, InVec0, InVec1);
6801 } else if (VT == MVT::v8i32 || VT == MVT::v16i16) {
6802 // Try to match an AVX2 horizontal add/sub of signed integers.
6803 SDValue InVec2, InVec3;
6805 bool CanFold = true;
6807 if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, Half, InVec0, InVec1) &&
6808 isHorizontalBinOp(BV, ISD::ADD, DAG, Half, NumElts, InVec2, InVec3) &&
6809 ((InVec0.getOpcode() == ISD::UNDEF ||
6810 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
6811 ((InVec1.getOpcode() == ISD::UNDEF ||
6812 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
6813 X86Opcode = X86ISD::HADD;
6814 else if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, Half, InVec0, InVec1) &&
6815 isHorizontalBinOp(BV, ISD::SUB, DAG, Half, NumElts, InVec2, InVec3) &&
6816 ((InVec0.getOpcode() == ISD::UNDEF ||
6817 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
6818 ((InVec1.getOpcode() == ISD::UNDEF ||
6819 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
6820 X86Opcode = X86ISD::HSUB;
6825 // Fold this build_vector into a single horizontal add/sub.
6826 // Do this only if the target has AVX2.
6827 if (Subtarget->hasAVX2())
6828 return DAG.getNode(X86Opcode, DL, VT, InVec0, InVec1);
6830 // Do not try to expand this build_vector into a pair of horizontal
6831 // add/sub if we can emit a pair of scalar add/sub.
6832 if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
6835 // Convert this build_vector into a pair of horizontal binops followed by a concat vector.
6837 bool isUndefLO = NumUndefsLO == Half;
6838 bool isUndefHI = NumUndefsHI == Half;
6839 return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, false,
6840 isUndefLO, isUndefHI);
6844 if ((VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v8i32 ||
6845 VT == MVT::v16i16) && Subtarget->hasAVX()) {
6847 if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
6848 X86Opcode = X86ISD::HADD;
6849 else if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, NumElts, InVec0, InVec1))
6850 X86Opcode = X86ISD::HSUB;
6851 else if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, NumElts, InVec0, InVec1))
6852 X86Opcode = X86ISD::FHADD;
6853 else if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, NumElts, InVec0, InVec1))
6854 X86Opcode = X86ISD::FHSUB;
6858 // Don't try to expand this build_vector into a pair of horizontal add/sub
6859 // if we can simply emit a pair of scalar add/sub.
6860 if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
6863 // Convert this build_vector into two horizontal add/sub followed by a concat vector.
6865 bool isUndefLO = NumUndefsLO == Half;
6866 bool isUndefHI = NumUndefsHI == Half;
6867 return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, true,
6868 isUndefLO, isUndefHI);
6875 X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
6878 MVT VT = Op.getSimpleValueType();
6879 MVT ExtVT = VT.getVectorElementType();
6880 unsigned NumElems = Op.getNumOperands();
6882 // Generate vectors for predicate vectors.
6883 if (VT.getScalarType() == MVT::i1 && Subtarget->hasAVX512())
6884 return LowerBUILD_VECTORvXi1(Op, DAG);
6886 // Vectors containing all zeros can be matched by pxor and xorps later
6887 if (ISD::isBuildVectorAllZeros(Op.getNode())) {
6888 // Canonicalize this to <4 x i32> to 1) ensure the zero vectors are CSE'd
6889 // and 2) ensure that i64 scalars are eliminated on x86-32 hosts.
6890 if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32)
6893 return getZeroVector(VT, Subtarget, DAG, dl);
6896 // Vectors containing all ones can be matched by pcmpeqd on 128-bit width
6897 // vectors or broken into v4i32 operations on 256-bit vectors. AVX2 can use
6898 // vpcmpeqd on 256-bit vectors.
6899 if (Subtarget->hasSSE2() && ISD::isBuildVectorAllOnes(Op.getNode())) {
6900 if (VT == MVT::v4i32 || (VT == MVT::v8i32 && Subtarget->hasInt256()))
6903 if (!VT.is512BitVector())
6904 return getOnesVector(VT, Subtarget->hasInt256(), DAG, dl);
6907 SDValue Broadcast = LowerVectorBroadcast(Op, Subtarget, DAG);
6908 if (Broadcast.getNode())
6911 unsigned EVTBits = ExtVT.getSizeInBits();
6913 unsigned NumZero = 0;
6914 unsigned NumNonZero = 0;
6915 unsigned NonZeros = 0;
6916 bool IsAllConstants = true;
6917 SmallSet<SDValue, 8> Values;
6918 for (unsigned i = 0; i < NumElems; ++i) {
6919 SDValue Elt = Op.getOperand(i);
6920 if (Elt.getOpcode() == ISD::UNDEF)
6923 if (Elt.getOpcode() != ISD::Constant &&
6924 Elt.getOpcode() != ISD::ConstantFP)
6925 IsAllConstants = false;
6926 if (X86::isZeroNode(Elt))
6929 NonZeros |= (1 << i);
6934 // All undef vector. Return an UNDEF. All zero vectors were handled above.
6935 if (NumNonZero == 0)
6936 return DAG.getUNDEF(VT);
6938 // Special case for single non-zero, non-undef, element.
6939 if (NumNonZero == 1) {
6940 unsigned Idx = countTrailingZeros(NonZeros);
6941 SDValue Item = Op.getOperand(Idx);
6943 // If this is an insertion of an i64 value on x86-32, and if the top bits of
6944 // the value are obviously zero, truncate the value to i32 and do the
6945 // insertion that way. Only do this if the value is non-constant or if the
6946 // value is a constant being inserted into element 0. It is cheaper to do
6947 // a constant pool load than it is to do a movd + shuffle.
6948 if (ExtVT == MVT::i64 && !Subtarget->is64Bit() &&
6949 (!IsAllConstants || Idx == 0)) {
6950 if (DAG.MaskedValueIsZero(Item, APInt::getBitsSet(64, 32, 64))) {
6952 assert(VT == MVT::v2i64 && "Expected an SSE value type!");
6953 EVT VecVT = MVT::v4i32;
6954 unsigned VecElts = 4;
6956 // Truncate the value (which may itself be a constant) to i32, and
6957 // convert it to a vector with movd (S2V+shuffle to zero extend).
6958 Item = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Item);
6959 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Item);
6961 // If using the new shuffle lowering, just directly insert this.
6962 if (ExperimentalVectorShuffleLowering)
6964 ISD::BITCAST, dl, VT,
6965 getShuffleVectorZeroOrUndef(Item, Idx * 2, true, Subtarget, DAG));
6967 Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
6969 // Now we have our 32-bit value zero extended in the low element of
6970 // a vector. If Idx != 0, swizzle it into place.
6972 SmallVector<int, 4> Mask;
6973 Mask.push_back(Idx);
6974 for (unsigned i = 1; i != VecElts; ++i)
6976 Item = DAG.getVectorShuffle(VecVT, dl, Item, DAG.getUNDEF(VecVT),
6979 return DAG.getNode(ISD::BITCAST, dl, VT, Item);
6983 // If we have a constant or non-constant insertion into the low element of
6984 // a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into
6985 // the rest of the elements. This will be matched as movd/movq/movss/movsd
6986 // depending on what the source datatype is.
6989 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
6991 if (ExtVT == MVT::i32 || ExtVT == MVT::f32 || ExtVT == MVT::f64 ||
6992 (ExtVT == MVT::i64 && Subtarget->is64Bit())) {
6993 if (VT.is256BitVector() || VT.is512BitVector()) {
6994 SDValue ZeroVec = getZeroVector(VT, Subtarget, DAG, dl);
6995 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, ZeroVec,
6996 Item, DAG.getIntPtrConstant(0));
6998 assert(VT.is128BitVector() && "Expected an SSE value type!");
6999 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
7000 // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector.
7001 return getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
7004 if (ExtVT == MVT::i16 || ExtVT == MVT::i8) {
7005 Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item);
7006 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Item);
7007 if (VT.is256BitVector()) {
7008 SDValue ZeroVec = getZeroVector(MVT::v8i32, Subtarget, DAG, dl);
7009 Item = Insert128BitVector(ZeroVec, Item, 0, DAG, dl);
7011 assert(VT.is128BitVector() && "Expected an SSE value type!");
7012 Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
7014 return DAG.getNode(ISD::BITCAST, dl, VT, Item);
7018 // Is it a vector logical left shift?
7019 if (NumElems == 2 && Idx == 1 &&
7020 X86::isZeroNode(Op.getOperand(0)) &&
7021 !X86::isZeroNode(Op.getOperand(1))) {
7022 unsigned NumBits = VT.getSizeInBits();
7023 return getVShift(true, VT,
7024 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
7025 VT, Op.getOperand(1)),
7026 NumBits/2, DAG, *this, dl);
7029 if (IsAllConstants) // Otherwise, it's better to do a constpool load.
7032 // Otherwise, if this is a vector with i32 or f32 elements, and the element
7033 // is a non-constant being inserted into an element other than the low one,
7034 // we can't use a constant pool load. Instead, use SCALAR_TO_VECTOR (aka
7035 // movd/movss) to move this into the low element, then shuffle it into place.
7037 if (EVTBits == 32) {
7038 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
7040 // If using the new shuffle lowering, just directly insert this.
7041 if (ExperimentalVectorShuffleLowering)
7042 return getShuffleVectorZeroOrUndef(Item, Idx, NumZero > 0, Subtarget, DAG);
7044 // Turn it into a shuffle of zero and zero-extended scalar to vector.
7045 Item = getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0, Subtarget, DAG);
7046 SmallVector<int, 8> MaskVec;
7047 for (unsigned i = 0; i != NumElems; ++i)
7048 MaskVec.push_back(i == Idx ? 0 : 1);
7049 return DAG.getVectorShuffle(VT, dl, Item, DAG.getUNDEF(VT), &MaskVec[0]);
7053 // Splat is obviously ok. Let legalizer expand it to a shuffle.
7054 if (Values.size() == 1) {
7055 if (EVTBits == 32) {
7056 // Instead of a shuffle like this:
7057 // shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0>
7058 // Check if it's possible to issue this instead.
7059 // shuffle (vload ptr), undef, <1, 1, 1, 1>
7060 unsigned Idx = countTrailingZeros(NonZeros);
7061 SDValue Item = Op.getOperand(Idx);
7062 if (Op.getNode()->isOnlyUserOf(Item.getNode()))
7063 return LowerAsSplatVectorLoad(Item, VT, dl, DAG);
7068 // A vector full of immediates; various special cases are already
7069 // handled, so this is best done with a single constant-pool load.
7073 // For AVX-length vectors, see if we can use a vector load to get all of the
7074 // elements, otherwise build the individual 128-bit pieces and use
7075 // shuffles to put them in place.
7076 if (VT.is256BitVector() || VT.is512BitVector()) {
7077 SmallVector<SDValue, 64> V;
7078 for (unsigned i = 0; i != NumElems; ++i)
7079 V.push_back(Op.getOperand(i));
7081 // Check for a build vector of consecutive loads.
7082 if (SDValue LD = EltsFromConsecutiveLoads(VT, V, dl, DAG, false))
7085 EVT HVT = EVT::getVectorVT(*DAG.getContext(), ExtVT, NumElems/2);
7087 // Build both the lower and upper subvector.
7088 SDValue Lower = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT,
7089 makeArrayRef(&V[0], NumElems/2));
7090 SDValue Upper = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT,
7091 makeArrayRef(&V[NumElems / 2], NumElems/2));
7093 // Recreate the wider vector with the lower and upper part.
7094 if (VT.is256BitVector())
7095 return Concat128BitVectors(Lower, Upper, VT, NumElems, DAG, dl);
7096 return Concat256BitVectors(Lower, Upper, VT, NumElems, DAG, dl);
7099 // Let legalizer expand 2-wide build_vectors.
7100 if (EVTBits == 64) {
7101 if (NumNonZero == 1) {
7102 // One half is zero or undef.
7103 unsigned Idx = countTrailingZeros(NonZeros);
7104 SDValue V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT,
7105 Op.getOperand(Idx));
7106 return getShuffleVectorZeroOrUndef(V2, Idx, true, Subtarget, DAG);
7111 // If element VT is < 32 bits, convert it to inserts into a zero vector.
7112 if (EVTBits == 8 && NumElems == 16) {
7113 SDValue V = LowerBuildVectorv16i8(Op, NonZeros,NumNonZero,NumZero, DAG,
7115 if (V.getNode()) return V;
7118 if (EVTBits == 16 && NumElems == 8) {
7119 SDValue V = LowerBuildVectorv8i16(Op, NonZeros,NumNonZero,NumZero, DAG,
7121 if (V.getNode()) return V;
7124 // If element VT is == 32 bits and has 4 elems, try to generate an INSERTPS
7125 if (EVTBits == 32 && NumElems == 4) {
7126 SDValue V = LowerBuildVectorv4x32(Op, DAG, Subtarget, *this);
7131 // If element VT is == 32 bits, turn it into a number of shuffles.
7132 SmallVector<SDValue, 8> V(NumElems);
7133 if (NumElems == 4 && NumZero > 0) {
7134 for (unsigned i = 0; i < 4; ++i) {
7135 bool isZero = !(NonZeros & (1 << i));
7137 V[i] = getZeroVector(VT, Subtarget, DAG, dl);
7139 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
7142 for (unsigned i = 0; i < 2; ++i) {
7143 switch ((NonZeros & (0x3 << i*2)) >> (i*2)) {
7146 V[i] = V[i*2]; // Must be a zero vector.
7149 V[i] = getMOVL(DAG, dl, VT, V[i*2+1], V[i*2]);
7152 V[i] = getMOVL(DAG, dl, VT, V[i*2], V[i*2+1]);
7155 V[i] = getUnpackl(DAG, dl, VT, V[i*2], V[i*2+1]);
7160 bool Reverse1 = (NonZeros & 0x3) == 2;
7161 bool Reverse2 = ((NonZeros & (0x3 << 2)) >> 2) == 2;
7165 static_cast<int>(Reverse2 ? NumElems+1 : NumElems),
7166 static_cast<int>(Reverse2 ? NumElems : NumElems+1)
7168 return DAG.getVectorShuffle(VT, dl, V[0], V[1], &MaskVec[0]);
7171 if (Values.size() > 1 && VT.is128BitVector()) {
7172 // Check for a build vector of consecutive loads.
7173 for (unsigned i = 0; i < NumElems; ++i)
7174 V[i] = Op.getOperand(i);
7176 // Check for elements which are consecutive loads.
7177 SDValue LD = EltsFromConsecutiveLoads(VT, V, dl, DAG, false);
7181 // Check for a build vector built mostly from a shuffle plus a few element insertions.
7182 SDValue Sh = buildFromShuffleMostly(Op, DAG);
7186 // For SSE 4.1, use insertps to put the high elements into the low element.
7187 if (Subtarget->hasSSE41()) {
7189 if (Op.getOperand(0).getOpcode() != ISD::UNDEF)
7190 Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0));
7192 Result = DAG.getUNDEF(VT);
7194 for (unsigned i = 1; i < NumElems; ++i) {
7195 if (Op.getOperand(i).getOpcode() == ISD::UNDEF) continue;
7196 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result,
7197 Op.getOperand(i), DAG.getIntPtrConstant(i));
7202 // Otherwise, expand into a number of unpckl*, start by extending each of
7203 // our (non-undef) elements to the full vector width with the element in the
7204 // bottom slot of the vector (which generates no code for SSE).
7205 for (unsigned i = 0; i < NumElems; ++i) {
7206 if (Op.getOperand(i).getOpcode() != ISD::UNDEF)
7207 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
7209 V[i] = DAG.getUNDEF(VT);
7212 // Next, we iteratively mix elements, e.g. for v4f32:
7213 // Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0>
7214 // : unpcklps 1, 3 ==> Y: <?, ?, 3, 1>
7215 // Step 2: unpcklps X, Y ==> <3, 2, 1, 0>
7216 unsigned EltStride = NumElems >> 1;
7217 while (EltStride != 0) {
7218 for (unsigned i = 0; i < EltStride; ++i) {
7219 // If V[i+EltStride] is undef and this is the first round of mixing,
7220 // then it is safe to just drop this shuffle: V[i] is already in the
7221 // right place, the one element (since it's the first round) being
7222 // inserted as undef can be dropped. This isn't safe for successive
7223 // rounds because they will permute elements within both vectors.
7224 if (V[i+EltStride].getOpcode() == ISD::UNDEF &&
7225 EltStride == NumElems/2)
7228 V[i] = getUnpackl(DAG, dl, VT, V[i], V[i + EltStride]);
7237 // LowerAVXCONCAT_VECTORS - 256-bit AVX can use the vinsertf128 instruction
7238 // to create 256-bit vectors from two other 128-bit ones.
7239 static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
7241 MVT ResVT = Op.getSimpleValueType();
7243 assert((ResVT.is256BitVector() ||
7244 ResVT.is512BitVector()) && "Value type must be 256-/512-bit wide");
7246 SDValue V1 = Op.getOperand(0);
7247 SDValue V2 = Op.getOperand(1);
7248 unsigned NumElems = ResVT.getVectorNumElements();
7249 if(ResVT.is256BitVector())
7250 return Concat128BitVectors(V1, V2, ResVT, NumElems, DAG, dl);
7252 if (Op.getNumOperands() == 4) {
7253 MVT HalfVT = MVT::getVectorVT(ResVT.getScalarType(),
7254 ResVT.getVectorNumElements()/2);
7255 SDValue V3 = Op.getOperand(2);
7256 SDValue V4 = Op.getOperand(3);
7257 return Concat256BitVectors(Concat128BitVectors(V1, V2, HalfVT, NumElems/2, DAG, dl),
7258 Concat128BitVectors(V3, V4, HalfVT, NumElems/2, DAG, dl), ResVT, NumElems, DAG, dl);
7260 return Concat256BitVectors(V1, V2, ResVT, NumElems, DAG, dl);
7263 static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
7264 MVT LLVM_ATTRIBUTE_UNUSED VT = Op.getSimpleValueType();
7265 assert((VT.is256BitVector() && Op.getNumOperands() == 2) ||
7266 (VT.is512BitVector() && (Op.getNumOperands() == 2 ||
7267 Op.getNumOperands() == 4)));
7269 // AVX can use the vinsertf128 instruction to create 256-bit vectors
7270 // from two other 128-bit ones.
7272 // 512-bit vector may contain 2 256-bit vectors or 4 128-bit vectors
7273 return LowerAVXCONCAT_VECTORS(Op, DAG);
7277 //===----------------------------------------------------------------------===//
7278 // Vector shuffle lowering
7280 // This is an experimental code path for lowering vector shuffles on x86. It is
7281 // designed to handle arbitrary vector shuffles and blends, gracefully
7282 // degrading performance as necessary. It works hard to recognize idiomatic
7283 // shuffles and lower them to optimal instruction patterns without leaving
7284 // a framework that allows reasonably efficient handling of all vector shuffle permutations.
7286 //===----------------------------------------------------------------------===//
7288 /// \brief Tiny helper function to identify a no-op mask.
7290 /// This is a somewhat boring predicate function. It checks whether the mask
7291 /// array input, which is assumed to be a single-input shuffle mask of the kind
7292 /// used by the X86 shuffle instructions (not a fully general
7293 /// ShuffleVectorSDNode mask) requires any shuffles to occur. Both undef and an
7294 /// in-place shuffle are 'no-op's.
7295 static bool isNoopShuffleMask(ArrayRef<int> Mask) {
7296 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7297 if (Mask[i] != -1 && Mask[i] != i)
7302 /// \brief Helper function to classify a mask as a single-input mask.
7304 /// This isn't a generic single-input test because in the vector shuffle
7305 /// lowering we canonicalize single inputs to be the first input operand. This
7306 /// means we can more quickly test for a single input by only checking whether
7307 /// an input from the second operand exists. We also assume that the size of
7308 /// the mask corresponds to the size of the input vectors, which isn't true in
7309 /// the fully general case.
7310 static bool isSingleInputShuffleMask(ArrayRef<int> Mask) {
7312 if (M >= (int)Mask.size())
7317 /// \brief Test whether there are elements crossing 128-bit lanes in this shuffle mask.
7320 /// X86 divides up its shuffles into in-lane and cross-lane shuffle operations
7321 /// and we routinely test for these.
7322 static bool is128BitLaneCrossingShuffleMask(MVT VT, ArrayRef<int> Mask) {
7323 int LaneSize = 128 / VT.getScalarSizeInBits();
7324 int Size = Mask.size();
7325 for (int i = 0; i < Size; ++i)
7326 if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
7331 /// \brief Test whether a shuffle mask is equivalent within each 128-bit lane.
7333 /// This checks a shuffle mask to see if it is performing the same
7334 /// 128-bit lane-relative shuffle in each 128-bit lane. This trivially implies
7335 /// that it is also not lane-crossing. It may however involve a blend from the
7336 /// same lane of a second vector.
7338 /// The specific repeated shuffle mask is populated in \p RepeatedMask, as it is
7339 /// non-trivial to compute in the face of undef lanes. The representation is
7340 /// *not* suitable for use with existing 128-bit shuffles as it will contain
7341 /// entries from both V1 and V2 inputs to the wider mask.
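///
/// For example, the v8f32 mask <0, 3, 8, 11, 4, 7, 12, 15> is repeated with
/// the per-lane pattern <0, 3, 8, 11>: each 128-bit lane of the result takes
/// elements 0 and 3 of the corresponding lane of V1 followed by elements 0 and
/// 3 of the corresponding lane of V2.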
7343 is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
7344 SmallVectorImpl<int> &RepeatedMask) {
7345 int LaneSize = 128 / VT.getScalarSizeInBits();
7346 RepeatedMask.resize(LaneSize, -1);
7347 int Size = Mask.size();
7348 for (int i = 0; i < Size; ++i) {
7351 if ((Mask[i] % Size) / LaneSize != i / LaneSize)
7352 // This entry crosses lanes, so there is no way to model this shuffle.
7355 // Ok, handle the in-lane shuffles by detecting if and when they repeat.
7356 if (RepeatedMask[i % LaneSize] == -1)
7357 // This is the first non-undef entry in this slot of a 128-bit lane.
7358 RepeatedMask[i % LaneSize] =
7359 Mask[i] < Size ? Mask[i] % LaneSize : Mask[i] % LaneSize + Size;
7360 else if (RepeatedMask[i % LaneSize] + (i / LaneSize) * LaneSize != Mask[i])
7361 // Found a mismatch with the repeated mask.
7367 // Hide this symbol with an anonymous namespace instead of 'static' so that MSVC
7368 // 2013 will allow us to use it as a non-type template parameter.
7371 /// \brief Implementation of the \c isShuffleEquivalent variadic functor.
7373 /// See its documentation for details.
7374 bool isShuffleEquivalentImpl(ArrayRef<int> Mask, ArrayRef<const int *> Args) {
7375 if (Mask.size() != Args.size())
7377 for (int i = 0, e = Mask.size(); i < e; ++i) {
7378 assert(*Args[i] >= 0 && "Arguments must be positive integers!");
7379 if (Mask[i] != -1 && Mask[i] != *Args[i])
7387 /// \brief Checks whether a shuffle mask is equivalent to an explicit list of arguments.
7390 /// This is a fast way to test a shuffle mask against a fixed pattern:
7392 /// if (isShuffleEquivalent(Mask, 3, 2, 1, 0)) { ... }
7394 /// It returns true if the mask is exactly as wide as the argument list, and
7395 /// each element of the mask is either -1 (signifying undef) or the value given
7396 /// in the argument.
7397 static const VariadicFunction1<
7398 bool, ArrayRef<int>, int, isShuffleEquivalentImpl> isShuffleEquivalent = {};
7400 /// \brief Get a 4-lane 8-bit shuffle immediate for a mask.
7402 /// This helper function produces an 8-bit shuffle immediate corresponding to
7403 /// the ubiquitous shuffle encoding scheme used in x86 instructions for
7404 /// shuffling 4 lanes. It can be used with most of the PSHUF instructions, for example PSHUFD, PSHUFLW, and PSHUFHW.
7407 /// NB: We rely heavily on "undef" masks preserving the input lane.
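///
/// For example, the mask <3, 2, 1, 0> yields the immediate
/// 3 | (2 << 2) | (1 << 4) | (0 << 6) == 0x1B.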
7408 static SDValue getV4X86ShuffleImm8ForMask(ArrayRef<int> Mask,
7409 SelectionDAG &DAG) {
7410 assert(Mask.size() == 4 && "Only 4-lane shuffle masks");
7411 assert(Mask[0] >= -1 && Mask[0] < 4 && "Out of bound mask element!");
7412 assert(Mask[1] >= -1 && Mask[1] < 4 && "Out of bound mask element!");
7413 assert(Mask[2] >= -1 && Mask[2] < 4 && "Out of bound mask element!");
7414 assert(Mask[3] >= -1 && Mask[3] < 4 && "Out of bound mask element!");
7417 Imm |= (Mask[0] == -1 ? 0 : Mask[0]) << 0;
7418 Imm |= (Mask[1] == -1 ? 1 : Mask[1]) << 2;
7419 Imm |= (Mask[2] == -1 ? 2 : Mask[2]) << 4;
7420 Imm |= (Mask[3] == -1 ? 3 : Mask[3]) << 6;
7421 return DAG.getConstant(Imm, MVT::i8);
7424 /// \brief Try to emit a blend instruction for a shuffle.
7426 /// This doesn't do any checks for the availability of instructions for blending
7427 /// these values. It relies on the availability of the X86ISD::BLENDI pattern to
7428 /// be matched in the backend with the type given. What it does check for is
7429 /// that the shuffle mask is in fact a blend.
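///
/// For example, for v4f32 the mask <0, 5, 2, 7> is a blend taking elements 1
/// and 3 from V2, so the resulting BLENDI immediate is 0b1010.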
7430 static SDValue lowerVectorShuffleAsBlend(SDLoc DL, MVT VT, SDValue V1,
7431 SDValue V2, ArrayRef<int> Mask,
7432 const X86Subtarget *Subtarget,
7433 SelectionDAG &DAG) {
7435 unsigned BlendMask = 0;
7436 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7437 if (Mask[i] >= Size) {
7438 if (Mask[i] != i + Size)
7439 return SDValue(); // Shuffled V2 input!
7440 BlendMask |= 1u << i;
7443 if (Mask[i] >= 0 && Mask[i] != i)
7444 return SDValue(); // Shuffled V1 input!
7446 switch (VT.SimpleTy) {
7451 return DAG.getNode(X86ISD::BLENDI, DL, VT, V1, V2,
7452 DAG.getConstant(BlendMask, MVT::i8));
7456 assert(Subtarget->hasAVX2() && "256-bit integer blends require AVX2!");
7460 // If we have AVX2 it is faster to use VPBLENDD when the shuffle fits into
7461 // that instruction.
7462 if (Subtarget->hasAVX2()) {
7463 // Scale the blend by the number of 32-bit dwords per element.
7464 int Scale = VT.getScalarSizeInBits() / 32;
7466 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7467 if (Mask[i] >= Size)
7468 for (int j = 0; j < Scale; ++j)
7469 BlendMask |= 1u << (i * Scale + j);
7471 MVT BlendVT = VT.getSizeInBits() > 128 ? MVT::v8i32 : MVT::v4i32;
7472 V1 = DAG.getNode(ISD::BITCAST, DL, BlendVT, V1);
7473 V2 = DAG.getNode(ISD::BITCAST, DL, BlendVT, V2);
7474 return DAG.getNode(ISD::BITCAST, DL, VT,
7475 DAG.getNode(X86ISD::BLENDI, DL, BlendVT, V1, V2,
7476 DAG.getConstant(BlendMask, MVT::i8)));
7480 // For integer shuffles we need to expand the mask and cast the inputs to
7481 // v8i16s prior to blending.
7482 int Scale = 8 / VT.getVectorNumElements();
7484 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7485 if (Mask[i] >= Size)
7486 for (int j = 0; j < Scale; ++j)
7487 BlendMask |= 1u << (i * Scale + j);
7489 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1);
7490 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V2);
7491 return DAG.getNode(ISD::BITCAST, DL, VT,
7492 DAG.getNode(X86ISD::BLENDI, DL, MVT::v8i16, V1, V2,
7493 DAG.getConstant(BlendMask, MVT::i8)));
7497 assert(Subtarget->hasAVX2() && "256-bit integer blends require AVX2!");
7498 SmallVector<int, 8> RepeatedMask;
7499 if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
7500 // We can lower these with PBLENDW which is mirrored across 128-bit lanes.
7501 assert(RepeatedMask.size() == 8 && "Repeated mask size doesn't match!");
7503 for (int i = 0; i < 8; ++i)
7504 if (RepeatedMask[i] >= 16)
7505 BlendMask |= 1u << i;
7506 return DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
7507 DAG.getConstant(BlendMask, MVT::i8));
7512 assert(Subtarget->hasAVX2() && "256-bit integer blends require AVX2!");
7513 // Scale the blend by the number of bytes per element.
7514 int Scale = VT.getScalarSizeInBits() / 8;
7515 assert(Mask.size() * Scale == 32 && "Not a 256-bit vector!");
7517 // Compute the VSELECT mask. Note that VSELECT is really confusing in the
7518 // mix of LLVM's code generator and the x86 backend. We tell the code
7519 // generator that boolean values in the elements of an x86 vector register
7520 // are -1 for true and 0 for false. We then use the LLVM semantics of 'true'
7521 // mapping a select to operand #1, and 'false' mapping to operand #2. The
7522 // reality in x86 is that vector masks (pre-AVX-512) use only the high bit
7523 // of the element (the remaining are ignored) and 0 in that high bit would
7524 // mean operand #1 while 1 in the high bit would mean operand #2. So while
7525 // the LLVM model for boolean values in vector elements gets the relevant
7526 // bit set, it is set backwards and over constrained relative to x86's actual model.
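// For this v32i8 blend, a mask element taken from V1 therefore becomes the
// all-ones constant (-1) in the VSELECT condition vector (selecting operand
// #1), and an element taken from V2 becomes 0.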
7528 SDValue VSELECTMask[32];
7529 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7530 for (int j = 0; j < Scale; ++j)
7531 VSELECTMask[Scale * i + j] =
7532 Mask[i] < 0 ? DAG.getUNDEF(MVT::i8)
7533 : DAG.getConstant(Mask[i] < Size ? -1 : 0, MVT::i8);
7535 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v32i8, V1);
7536 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v32i8, V2);
7538 ISD::BITCAST, DL, VT,
7539 DAG.getNode(ISD::VSELECT, DL, MVT::v32i8,
7540 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, VSELECTMask),
7545 llvm_unreachable("Not a supported integer vector type!");
7549 /// \brief Generic routine to lower a shuffle and blend as a decomposed set of
7550 /// unblended shuffles followed by an unshuffled blend.
7552 /// This matches the extremely common pattern for handling combined
7553 /// shuffle+blend operations on newer X86 ISAs where we have very fast blend operations.
7555 static SDValue lowerVectorShuffleAsDecomposedShuffleBlend(SDLoc DL, MVT VT,
7559 SelectionDAG &DAG) {
7560 // Shuffle the input elements into the desired positions in V1 and V2 and
7561 // blend them together.
7562 SmallVector<int, 32> V1Mask(Mask.size(), -1);
7563 SmallVector<int, 32> V2Mask(Mask.size(), -1);
7564 SmallVector<int, 32> BlendMask(Mask.size(), -1);
7565 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7566 if (Mask[i] >= 0 && Mask[i] < Size) {
7567 V1Mask[i] = Mask[i];
7569 } else if (Mask[i] >= Size) {
7570 V2Mask[i] = Mask[i] - Size;
7571 BlendMask[i] = i + Size;
7574 V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
7575 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
7576 return DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
7579 /// \brief Try to lower a vector shuffle as a byte rotation.
7581 /// SSSE3 has a generic PALIGNR instruction in x86 that will do an arbitrary
7582 /// byte-rotation of the concatenation of two vectors; pre-SSSE3 can use
7583 /// a PSRLDQ/PSLLDQ/POR pattern to get a similar effect. This routine will
7584 /// try to generically lower a vector shuffle through such a pattern. It
7585 /// does not check for the profitability of lowering either as PALIGNR or
7586 /// PSRLDQ/PSLLDQ/POR, only whether the mask is valid to lower in that form.
7587 /// This matches shuffle vectors that look like:
7589 /// v8i16 [11, 12, 13, 14, 15, 0, 1, 2]
7591 /// Essentially it concatenates V1 and V2, shifts right by some number of
7592 /// elements, and takes the low elements as the result. Note that while this is
7593 /// specified as a *right shift* because x86 is little-endian, it is a *left
7594 /// rotate* of the vector lanes.
7596 /// Note that this only handles 128-bit vector widths currently.
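///
/// For example, the v8i16 mask shown above is matched with a rotation of 3
/// elements, which on SSSE3 lowers to a single PALIGNR with a byte immediate
/// of 6 (3 elements * 2 bytes).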
7597 static SDValue lowerVectorShuffleAsByteRotate(SDLoc DL, MVT VT, SDValue V1,
7600 const X86Subtarget *Subtarget,
7601 SelectionDAG &DAG) {
7602 assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
7604 // We need to detect various ways of spelling a rotation:
7605 // [11, 12, 13, 14, 15, 0, 1, 2]
7606 // [-1, 12, 13, 14, -1, -1, 1, -1]
7607 // [-1, -1, -1, -1, -1, -1, 1, 2]
7608 // [ 3, 4, 5, 6, 7, 8, 9, 10]
7609 // [-1, 4, 5, 6, -1, -1, 9, -1]
7610 // [-1, 4, 5, 6, -1, -1, -1, -1]
7613 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7616 assert(Mask[i] >= 0 && "Only -1 is a valid negative mask element!");
7618 // Based on the mod-Size value of this mask element determine where
7619 // a rotated vector would have started.
7620 int StartIdx = i - (Mask[i] % Size);
7622 // The identity rotation isn't interesting, stop.
7625 // If we found the tail of a vector (StartIdx < 0), the rotation is the number of
7626 // elements missing from its front. If we found the head, it starts at result position StartIdx, so the rotation is Size - StartIdx.
7627 int CandidateRotation = StartIdx < 0 ? -StartIdx : Size - StartIdx;
7630 Rotation = CandidateRotation;
7631 else if (Rotation != CandidateRotation)
7632 // The rotations don't match, so we can't match this mask.
7635 // Compute which value this mask is pointing at.
7636 SDValue MaskV = Mask[i] < Size ? V1 : V2;
7638 // Compute which of the two target values this index should be assigned to.
7639 // This reflects whether the high elements are remaining or the low elements are coming in.
7641 SDValue &TargetV = StartIdx < 0 ? Hi : Lo;
7643 // Either set up this value if we've not encountered it before, or check
7644 // that it remains consistent.
7647 else if (TargetV != MaskV)
7648 // This may be a rotation, but it pulls from the inputs in some
7649 // unsupported interleaving.
7653 // Check that we successfully analyzed the mask, and normalize the results.
7654 assert(Rotation != 0 && "Failed to locate a viable rotation!");
7655 assert((Lo || Hi) && "Failed to find a rotated input vector!");
7661 assert(VT.getSizeInBits() == 128 &&
7662 "Rotate-based lowering only supports 128-bit lowering!");
7663 assert(Mask.size() <= 16 &&
7664 "Can shuffle at most 16 bytes in a 128-bit vector!");
7666 // The actual rotate instruction rotates bytes, so we need to scale the
7667 // rotation based on how many bytes are in the vector.
7668 int Scale = 16 / Mask.size();
7670 // SSSE3 targets can use the palignr instruction
7671 if (Subtarget->hasSSSE3()) {
7672 // Cast the inputs to v16i8 to match PALIGNR.
7673 Lo = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Lo);
7674 Hi = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Hi);
7676 return DAG.getNode(ISD::BITCAST, DL, VT,
7677 DAG.getNode(X86ISD::PALIGNR, DL, MVT::v16i8, Hi, Lo,
7678 DAG.getConstant(Rotation * Scale, MVT::i8)));
7681 // Default SSE2 implementation
7682 int LoByteShift = 16 - Rotation * Scale;
7683 int HiByteShift = Rotation * Scale;
7685 // Cast the inputs to v2i64 to match PSLLDQ/PSRLDQ.
7686 Lo = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Lo);
7687 Hi = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Hi);
7689 SDValue LoShift = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v2i64, Lo,
7690 DAG.getConstant(8 * LoByteShift, MVT::i8));
7691 SDValue HiShift = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v2i64, Hi,
7692 DAG.getConstant(8 * HiByteShift, MVT::i8));
7693 return DAG.getNode(ISD::BITCAST, DL, VT,
7694 DAG.getNode(ISD::OR, DL, MVT::v2i64, LoShift, HiShift));
7697 /// \brief Compute whether each element of a shuffle is zeroable.
7699 /// A "zeroable" vector shuffle element is one which can be lowered to zero.
7700 /// Either it is an undef element in the shuffle mask, the element of the input
7701 /// referenced is undef, or the element of the input referenced is known to be
7702 /// zero. Many x86 shuffles can zero lanes cheaply and we often want to handle
7703 /// as many lanes with this technique as possible to simplify the remaining shuffle.
7705 static SmallBitVector computeZeroableShuffleElements(ArrayRef<int> Mask,
7706 SDValue V1, SDValue V2) {
7707 SmallBitVector Zeroable(Mask.size(), false);
7709 bool V1IsZero = ISD::isBuildVectorAllZeros(V1.getNode());
7710 bool V2IsZero = ISD::isBuildVectorAllZeros(V2.getNode());
7712 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7714 // Handle the easy cases.
7715 if (M < 0 || (M >= 0 && M < Size && V1IsZero) || (M >= Size && V2IsZero)) {
7720 // If this is an index into a build_vector node, dig out the input value and check it.
7722 SDValue V = M < Size ? V1 : V2;
7723 if (V.getOpcode() != ISD::BUILD_VECTOR)
7726 SDValue Input = V.getOperand(M % Size);
7727 // The UNDEF opcode check really should be dead code here, but not quite
7728 // worth asserting on (it isn't invalid, just unexpected).
7729 if (Input.getOpcode() == ISD::UNDEF || X86::isZeroNode(Input))
7736 /// \brief Try to emit a bitmask instruction for a shuffle.
7738 /// This handles cases where we can model a blend exactly as a bitmask due to
7739 /// one of the inputs being zeroable.
7740 static SDValue lowerVectorShuffleAsBitMask(SDLoc DL, MVT VT, SDValue V1,
7741 SDValue V2, ArrayRef<int> Mask,
7742 SelectionDAG &DAG) {
7743 MVT EltVT = VT.getScalarType();
7744 int NumEltBits = EltVT.getSizeInBits();
7745 MVT IntEltVT = MVT::getIntegerVT(NumEltBits);
7746 SDValue Zero = DAG.getConstant(0, IntEltVT);
7747 SDValue AllOnes = DAG.getConstant(APInt::getAllOnesValue(NumEltBits), IntEltVT);
7748 if (EltVT.isFloatingPoint()) {
7749 Zero = DAG.getNode(ISD::BITCAST, DL, EltVT, Zero);
7750 AllOnes = DAG.getNode(ISD::BITCAST, DL, EltVT, AllOnes);
7752 SmallVector<SDValue, 16> VMaskOps(Mask.size(), Zero);
7753 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
7755 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7758 if (Mask[i] % Size != i)
7759 return SDValue(); // Not a blend.
7761 V = Mask[i] < Size ? V1 : V2;
7762 else if (V != (Mask[i] < Size ? V1 : V2))
7763 return SDValue(); // Can only let one input through the mask.
7765 VMaskOps[i] = AllOnes;
7768 return SDValue(); // No non-zeroable elements!
7770 SDValue VMask = DAG.getNode(ISD::BUILD_VECTOR, DL, VT, VMaskOps);
7771 V = DAG.getNode(VT.isFloatingPoint()
7772 ? (unsigned) X86ISD::FAND : (unsigned) ISD::AND,
7777 /// \brief Try to lower a vector shuffle as a byte shift (shifts in zeros).
7779 /// Attempts to match a shuffle mask against the PSRLDQ and PSLLDQ SSE2
7780 /// byte-shift instructions. The mask must consist of a shifted sequential
7781 /// shuffle from one of the input vectors and zeroable elements for the
7782 /// remaining 'shifted in' elements.
7784 /// Note that this only handles 128-bit vector widths currently.
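///
/// For example, the v8i16 mask <2, 3, 4, 5, 6, 7, zz, zz> (where zz denotes a
/// zeroable element) is a right shift by 2 elements and lowers to a PSRLDQ of
/// V1 by 4 bytes.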
7785 static SDValue lowerVectorShuffleAsByteShift(SDLoc DL, MVT VT, SDValue V1,
7786 SDValue V2, ArrayRef<int> Mask,
7787 SelectionDAG &DAG) {
7788 assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
7790 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
7792 int Size = Mask.size();
7793 int Scale = 16 / Size;
7795 for (int Shift = 1; Shift < Size; Shift++) {
7796 int ByteShift = Shift * Scale;
7798 // PSRLDQ : (little-endian) right byte shift
7799 // [ 5, 6, 7, zz, zz, zz, zz, zz]
7800 // [ -1, 5, 6, 7, zz, zz, zz, zz]
7801 // [ 1, 2, -1, -1, -1, -1, zz, zz]
7802 bool ZeroableRight = true;
7803 for (int i = Size - Shift; i < Size; i++) {
7804 ZeroableRight &= Zeroable[i];
7807 if (ZeroableRight) {
7808 bool ValidShiftRight1 =
7809 isSequentialOrUndefInRange(Mask, 0, Size - Shift, Shift);
7810 bool ValidShiftRight2 =
7811 isSequentialOrUndefInRange(Mask, 0, Size - Shift, Size + Shift);
7813 if (ValidShiftRight1 || ValidShiftRight2) {
7814 // Cast the inputs to v2i64 to match PSRLDQ.
7815 SDValue &TargetV = ValidShiftRight1 ? V1 : V2;
7816 SDValue V = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, TargetV);
7817 SDValue Shifted = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v2i64, V,
7818 DAG.getConstant(ByteShift * 8, MVT::i8));
7819 return DAG.getNode(ISD::BITCAST, DL, VT, Shifted);
7823 // PSLLDQ : (little-endian) left byte shift
7824 // [ zz, 0, 1, 2, 3, 4, 5, 6]
7825 // [ zz, zz, -1, -1, 2, 3, 4, -1]
7826 // [ zz, zz, zz, zz, zz, zz, -1, 1]
7827 bool ZeroableLeft = true;
7828 for (int i = 0; i < Shift; i++) {
7829 ZeroableLeft &= Zeroable[i];
7833 bool ValidShiftLeft1 =
7834 isSequentialOrUndefInRange(Mask, Shift, Size - Shift, 0);
7835 bool ValidShiftLeft2 =
7836 isSequentialOrUndefInRange(Mask, Shift, Size - Shift, Size);
7838 if (ValidShiftLeft1 || ValidShiftLeft2) {
7839 // Cast the inputs to v2i64 to match PSLLDQ.
7840 SDValue &TargetV = ValidShiftLeft1 ? V1 : V2;
7841 SDValue V = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, TargetV);
7842 SDValue Shifted = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v2i64, V,
7843 DAG.getConstant(ByteShift * 8, MVT::i8));
7844 return DAG.getNode(ISD::BITCAST, DL, VT, Shifted);
7852 /// \brief Try to lower a vector shuffle as a bit shift (shifts in zeros).
7854 /// Attempts to match a shuffle mask against the PSRL(W/D/Q) and PSLL(W/D/Q)
7855 /// SSE2 and AVX2 logical bit-shift instructions. The function matches
7856 /// elements from one of the input vectors shuffled to the left or right
7857 /// with zeroable elements 'shifted in'.
7858 static SDValue lowerVectorShuffleAsBitShift(SDLoc DL, MVT VT, SDValue V1,
7859 SDValue V2, ArrayRef<int> Mask,
7860 SelectionDAG &DAG) {
7861 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
7863 int Size = Mask.size();
7864 assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
7866 // PSRL : (little-endian) right bit shift.
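// [ 1, zz, 3, zz]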
7869 // PSLL : (little-endian) left bit shift.
7871 // [ -1, 4, zz, -1 ]
7872 auto MatchBitShift = [&](int Shift, int Scale) -> SDValue {
7873 MVT ShiftSVT = MVT::getIntegerVT(VT.getScalarSizeInBits() * Scale);
7874 MVT ShiftVT = MVT::getVectorVT(ShiftSVT, Size / Scale);
7875 assert(DAG.getTargetLoweringInfo().isTypeLegal(ShiftVT) &&
7876 "Illegal integer vector type");
7878 bool MatchLeft = true, MatchRight = true;
7879 for (int i = 0; i != Size; i += Scale) {
7880 for (int j = 0; j != Shift; j++) {
7881 MatchLeft &= Zeroable[i + j];
7883 for (int j = Scale - Shift; j != Scale; j++) {
7884 MatchRight &= Zeroable[i + j];
7887 if (!(MatchLeft || MatchRight))
7890 bool MatchV1 = true, MatchV2 = true;
7891 for (int i = 0; i != Size; i += Scale) {
7892 unsigned Pos = MatchLeft ? i + Shift : i;
7893 unsigned Low = MatchLeft ? i : i + Shift;
7894 unsigned Len = Scale - Shift;
7895 MatchV1 &= isSequentialOrUndefInRange(Mask, Pos, Len, Low);
7896 MatchV2 &= isSequentialOrUndefInRange(Mask, Pos, Len, Low + Size);
7898 if (!(MatchV1 || MatchV2))
7901 // Cast the inputs to ShiftVT to match VSRLI/VSHLI and back again.
7902 unsigned OpCode = MatchLeft ? X86ISD::VSHLI : X86ISD::VSRLI;
7903 int ShiftAmt = Shift * VT.getScalarSizeInBits();
7904 SDValue V = MatchV1 ? V1 : V2;
7905 V = DAG.getNode(ISD::BITCAST, DL, ShiftVT, V);
7906 V = DAG.getNode(OpCode, DL, ShiftVT, V, DAG.getConstant(ShiftAmt, MVT::i8));
7907 return DAG.getNode(ISD::BITCAST, DL, VT, V);
7910 // SSE/AVX supports logical shifts up to 64-bit integers - so we can just
7911 // keep doubling the size of the integer elements up to that. We can
7912 // then shift the elements of the integer vector by whole multiples of
7913 // their width within the elements of the larger integer vector. Test each
7914 // multiple to see if we can find a match with the moved element indices
7915 // and that the shifted in elements are all zeroable.
7916 for (int Scale = 2; Scale * VT.getScalarSizeInBits() <= 64; Scale *= 2)
7917 for (int Shift = 1; Shift != Scale; Shift++)
7918 if (SDValue BitShift = MatchBitShift(Shift, Scale))
7925 /// \brief Lower a vector shuffle as a zero or any extension.
7927 /// Given a specific number of elements, element bit width, and extension
7928 /// stride, produce either a zero or any extension based on the available
7929 /// features of the subtarget.
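///
/// For example, with Scale == 2 a v8i16 input has its low four i16 elements
/// zero extended to i32; on SSE4.1 this lowers to a single PMOVZXWD
/// (X86ISD::VZEXT) whose v4i32 result is bitcast back to the 128-bit result
/// type.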
7930 static SDValue lowerVectorShuffleAsSpecificZeroOrAnyExtend(
7931 SDLoc DL, MVT VT, int Scale, bool AnyExt, SDValue InputV,
7932 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
7933 assert(Scale > 1 && "Need a scale to extend.");
7934 int NumElements = VT.getVectorNumElements();
7935 int EltBits = VT.getScalarSizeInBits();
7936 assert((EltBits == 8 || EltBits == 16 || EltBits == 32) &&
7937 "Only 8, 16, and 32 bit elements can be extended.");
7938 assert(Scale * EltBits <= 64 && "Cannot zero extend past 64 bits.");
7940 // Found a valid zext mask! Try various lowering strategies based on the
7941 // input type and available ISA extensions.
7942 if (Subtarget->hasSSE41()) {
7943 MVT ExtVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits * Scale),
7944 NumElements / Scale);
7945 return DAG.getNode(ISD::BITCAST, DL, VT,
7946 DAG.getNode(X86ISD::VZEXT, DL, ExtVT, InputV));
7949 // For any extends we can cheat for larger element sizes and use shuffle
7950 // instructions that can fold with a load and/or copy.
7951 if (AnyExt && EltBits == 32) {
7952 int PSHUFDMask[4] = {0, -1, 1, -1};
7954 ISD::BITCAST, DL, VT,
7955 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
7956 DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, InputV),
7957 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
7959 if (AnyExt && EltBits == 16 && Scale > 2) {
7960 int PSHUFDMask[4] = {0, -1, 0, -1};
7961 InputV = DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
7962 DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, InputV),
7963 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG));
7964 int PSHUFHWMask[4] = {1, -1, -1, -1};
7966 ISD::BITCAST, DL, VT,
7967 DAG.getNode(X86ISD::PSHUFHW, DL, MVT::v8i16,
7968 DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, InputV),
7969 getV4X86ShuffleImm8ForMask(PSHUFHWMask, DAG)));
7972 // If this would require more than 2 unpack instructions to expand, use
7973 // pshufb when available. We can only use more than 2 unpack instructions
7974 // when zero extending i8 elements which also makes it easier to use pshufb.
7975 if (Scale > 4 && EltBits == 8 && Subtarget->hasSSSE3()) {
7976 assert(NumElements == 16 && "Unexpected byte vector width!");
7977 SDValue PSHUFBMask[16];
7978 for (int i = 0; i < 16; ++i)
7980 DAG.getConstant((i % Scale == 0) ? i / Scale : 0x80, MVT::i8);
7981 InputV = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, InputV);
7982 return DAG.getNode(ISD::BITCAST, DL, VT,
7983 DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, InputV,
7984 DAG.getNode(ISD::BUILD_VECTOR, DL,
7985 MVT::v16i8, PSHUFBMask)));
7988 // Otherwise emit a sequence of unpacks.
7990 MVT InputVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits), NumElements);
7991 SDValue Ext = AnyExt ? DAG.getUNDEF(InputVT)
7992 : getZeroVector(InputVT, Subtarget, DAG, DL);
7993 InputV = DAG.getNode(ISD::BITCAST, DL, InputVT, InputV);
7994 InputV = DAG.getNode(X86ISD::UNPCKL, DL, InputVT, InputV, Ext);
7998 } while (Scale > 1);
7999 return DAG.getNode(ISD::BITCAST, DL, VT, InputV);
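// Illustration only (assuming a pre-SSE4.1 target so the unpack path above is
// taken): zero extending the low four i8 elements of a v16i8 to i32 uses two
// rounds of UNPCKL against an all-zero vector, first at byte and then at word
// granularity:
//
//   [b0, b1, ..., b15] -> [b0, 0, b1, 0, ..., b7, 0]
//                      -> [b0, 0, 0, 0, b1, 0, 0, 0, b2, 0, 0, 0, b3, 0, 0, 0]
//
// which is the desired v4i32 zero extension of elements 0..3.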
8002 /// \brief Try to lower a vector shuffle as a zero extension on any microarch.
8004 /// This routine will try to do everything in its power to cleverly lower
8005 /// a shuffle which happens to match the pattern of a zero extend. It doesn't
8006 /// check for the profitability of this lowering; it tries to aggressively
8007 /// match this pattern. It will use all of the micro-architectural details it
8008 /// can to emit an efficient lowering. It handles both blends with all-zero
8009 /// inputs to explicitly zero-extend and undef-lanes (sometimes undef due to
8010 /// masking out later).
8012 /// The reason we have dedicated lowering for zext-style shuffles is that they
8013 /// are both incredibly common and often quite performance sensitive.
8014 static SDValue lowerVectorShuffleAsZeroOrAnyExtend(
8015 SDLoc DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
8016 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
8017 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
8019 int Bits = VT.getSizeInBits();
8020 int NumElements = VT.getVectorNumElements();
8021 assert(VT.getScalarSizeInBits() <= 32 &&
8022 "Exceeds 32-bit integer zero extension limit");
8023 assert((int)Mask.size() == NumElements && "Unexpected shuffle mask size");
8025 // Define a helper function to check a particular ext-scale and lower to it if
8027 auto Lower = [&](int Scale) -> SDValue {
8030 for (int i = 0; i < NumElements; ++i) {
8032 continue; // Valid anywhere but doesn't tell us anything.
8033 if (i % Scale != 0) {
8034 // Each of the extended elements needs to be zeroable.
8038 // We no longer are in the anyext case.
8043 // The base elements need to be consecutive indices into the
8044 // same input vector.
8045 SDValue V = Mask[i] < NumElements ? V1 : V2;
8048 else if (InputV != V)
8049 return SDValue(); // Flip-flopping inputs.
8051 if (Mask[i] % NumElements != i / Scale)
8052 return SDValue(); // Non-consecutive strided elements.
8055 // If we fail to find an input, we have a zero-shuffle which should always
8056 // have already been handled.
8057 // FIXME: Maybe handle this here in case during blending we end up with one?
8061 return lowerVectorShuffleAsSpecificZeroOrAnyExtend(
8062 DL, VT, Scale, AnyExt, InputV, Subtarget, DAG);
8065 // The widest scale possible for extending is to a 64-bit integer.
8066 assert(Bits % 64 == 0 &&
8067 "The number of bits in a vector must be divisible by 64 on x86!");
8068 int NumExtElements = Bits / 64;
8070 // Each iteration, try extending the elements half as much, but into twice as
8072 for (; NumExtElements < NumElements; NumExtElements *= 2) {
8073 assert(NumElements % NumExtElements == 0 &&
8074 "The input vector size must be divisible by the extended size.");
8075 if (SDValue V = Lower(NumElements / NumExtElements))
8079 // General extends failed, but 128-bit vectors may be able to use MOVQ.
8083 // Returns one of the source operands if the shuffle can be reduced to a
8084 // MOVQ, copying the lower 64-bits and zero-extending to the upper 64-bits.
8085 auto CanZExtLowHalf = [&]() {
8086 for (int i = NumElements / 2; i != NumElements; i++)
8089 if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, 0))
8091 if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, NumElements))
8096 if (SDValue V = CanZExtLowHalf()) {
8097 V = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, V);
8098 V = DAG.getNode(X86ISD::VZEXT_MOVL, DL, MVT::v2i64, V);
8099 return DAG.getNode(ISD::BITCAST, DL, VT, V);
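// For example (illustrative only): the v4i32 mask [0, 1, Z, Z] with both high
// lanes zeroable is handled by CanZExtLowHalf above. The low half is the
// sequence 0..1 from V1, so V1 is bitcast to v2i64, lowered with VZEXT_MOVL
// (a MOVQ copying the low 64 bits and zeroing the rest), and bitcast back.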
8102 // No viable ext lowering found.
8106 /// \brief Try to get a scalar value for a specific element of a vector.
8108 /// Looks through BUILD_VECTOR and SCALAR_TO_VECTOR nodes to find a scalar.
8109 static SDValue getScalarValueForVectorElement(SDValue V, int Idx,
8110 SelectionDAG &DAG) {
8111 MVT VT = V.getSimpleValueType();
8112 MVT EltVT = VT.getVectorElementType();
8113 while (V.getOpcode() == ISD::BITCAST)
8114 V = V.getOperand(0);
8115 // If the bitcasts shift the element size, we can't extract an equivalent
8117 MVT NewVT = V.getSimpleValueType();
8118 if (!NewVT.isVector() || NewVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
8121 if (V.getOpcode() == ISD::BUILD_VECTOR ||
8122 (Idx == 0 && V.getOpcode() == ISD::SCALAR_TO_VECTOR))
8123 return DAG.getNode(ISD::BITCAST, SDLoc(V), EltVT, V.getOperand(Idx));
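// For example (illustrative only): if V is (v4f32 (bitcast (v4i32
// build_vector a, b, c, d))) and Idx is 2, the bitcast is peeled, the scalar
// sizes still match (32 bits), and the result is (f32 (bitcast c)).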
8128 /// \brief Helper to test for a load that can be folded with x86 shuffles.
8130 /// This is particularly important because the set of instructions varies
8131 /// significantly based on whether the operand is a load or not.
8132 static bool isShuffleFoldableLoad(SDValue V) {
8133 while (V.getOpcode() == ISD::BITCAST)
8134 V = V.getOperand(0);
8136 return ISD::isNON_EXTLoad(V.getNode());
8139 /// \brief Try to lower insertion of a single element into a zero vector.
8141 /// This is a common pattern for which we have especially efficient lowerings
8142 /// across all subtarget feature sets.
8143 static SDValue lowerVectorShuffleAsElementInsertion(
8144 MVT VT, SDLoc DL, SDValue V1, SDValue V2, ArrayRef<int> Mask,
8145 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
8146 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
8148 MVT EltVT = VT.getVectorElementType();
8150 int V2Index = std::find_if(Mask.begin(), Mask.end(),
8151 [&Mask](int M) { return M >= (int)Mask.size(); }) -
8153 bool IsV1Zeroable = true;
8154 for (int i = 0, Size = Mask.size(); i < Size; ++i)
8155 if (i != V2Index && !Zeroable[i]) {
8156 IsV1Zeroable = false;
8160 // Check for a single input from a SCALAR_TO_VECTOR node.
8161 // FIXME: All of this should be canonicalized into INSERT_VECTOR_ELT and
8162 // all the smarts here sunk into that routine. However, the current
8163 // lowering of BUILD_VECTOR makes that nearly impossible until the old
8164 // vector shuffle lowering is dead.
8165 if (SDValue V2S = getScalarValueForVectorElement(
8166 V2, Mask[V2Index] - Mask.size(), DAG)) {
8167 // We need to zext the scalar if it is smaller than an i32.
8168 V2S = DAG.getNode(ISD::BITCAST, DL, EltVT, V2S);
8169 if (EltVT == MVT::i8 || EltVT == MVT::i16) {
8170 // Using zext to expand a narrow element won't work for non-zero
8175 // Zero-extend directly to i32.
8177 V2S = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, V2S);
8179 V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, ExtVT, V2S);
8180 } else if (Mask[V2Index] != (int)Mask.size() || EltVT == MVT::i8 ||
8181 EltVT == MVT::i16) {
8182 // Either not inserting from the low element of the input or the input
8183 // element size is too small to use VZEXT_MOVL to clear the high bits.
8187 if (!IsV1Zeroable) {
8188 // If V1 can't be treated as a zero vector we have fewer options to lower
8189 // this. We can't support integer vectors or non-zero targets cheaply, and
8190 // the V1 elements can't be permuted in any way.
8191 assert(VT == ExtVT && "Cannot change extended type when non-zeroable!");
8192 if (!VT.isFloatingPoint() || V2Index != 0)
8194 SmallVector<int, 8> V1Mask(Mask.begin(), Mask.end());
8195 V1Mask[V2Index] = -1;
8196 if (!isNoopShuffleMask(V1Mask))
8198 // This is essentially a special case blend operation, but if we have
8199 // general purpose blend operations, they are always faster. Bail and let
8200 // the rest of the lowering handle these as blends.
8201 if (Subtarget->hasSSE41())
8204 // Otherwise, use MOVSD or MOVSS.
8205 assert((EltVT == MVT::f32 || EltVT == MVT::f64) &&
8206 "Only two types of floating point element types to handle!");
8207 return DAG.getNode(EltVT == MVT::f32 ? X86ISD::MOVSS : X86ISD::MOVSD, DL,
8211 V2 = DAG.getNode(X86ISD::VZEXT_MOVL, DL, ExtVT, V2);
8213 V2 = DAG.getNode(ISD::BITCAST, DL, VT, V2);
8216 // If we have 4 or fewer lanes we can cheaply shuffle the element into
8217 // the desired position. Otherwise it is more efficient to do a vector
8218 // shift left. We know that we can do a vector shift left because all
8219 // the inputs are zero.
8220 if (VT.isFloatingPoint() || VT.getVectorNumElements() <= 4) {
8221 SmallVector<int, 4> V2Shuffle(Mask.size(), 1);
8222 V2Shuffle[V2Index] = 0;
8223 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Shuffle);
8225 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, V2);
8227 X86ISD::VSHLDQ, DL, MVT::v2i64, V2,
8229 V2Index * EltVT.getSizeInBits(),
8230 DAG.getTargetLoweringInfo().getScalarShiftAmountTy(MVT::v2i64)));
8231 V2 = DAG.getNode(ISD::BITCAST, DL, VT, V2);
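// Worked example for the small-vector path above (illustration only): for a
// v4f32 mask [Z, 4, Z, Z] with a fully zeroable V1, VZEXT_MOVL leaves
// V2 = [s, 0, 0, 0]. V2Index is 1, so V2Shuffle becomes [1, 0, 1, 1]: lane 1
// takes the scalar and every other lane re-reads the already-zeroed lane 1,
// giving [0, s, 0, 0].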
8237 /// \brief Try to lower broadcast of a single element.
8239 /// For convenience, this code also bundles all of the subtarget feature set
8240 /// filtering. While a little annoying to re-dispatch on type here, there isn't
8241 /// a convenient way to factor it out.
8242 static SDValue lowerVectorShuffleAsBroadcast(MVT VT, SDLoc DL, SDValue V,
8244 const X86Subtarget *Subtarget,
8245 SelectionDAG &DAG) {
8246 if (!Subtarget->hasAVX())
8248 if (VT.isInteger() && !Subtarget->hasAVX2())
8251 // Check that the mask is a broadcast.
8252 int BroadcastIdx = -1;
8254 if (M >= 0 && BroadcastIdx == -1)
8256 else if (M >= 0 && M != BroadcastIdx)
8259 assert(BroadcastIdx < (int)Mask.size() && "We only expect to be called with "
8260 "a sorted mask where the broadcast "
8263 // Go up the chain of (vector) values to try and find a scalar load that
8264 // we can combine with the broadcast.
8266 switch (V.getOpcode()) {
8267 case ISD::CONCAT_VECTORS: {
8268 int OperandSize = Mask.size() / V.getNumOperands();
8269 V = V.getOperand(BroadcastIdx / OperandSize);
8270 BroadcastIdx %= OperandSize;
8274 case ISD::INSERT_SUBVECTOR: {
8275 SDValue VOuter = V.getOperand(0), VInner = V.getOperand(1);
8276 auto ConstantIdx = dyn_cast<ConstantSDNode>(V.getOperand(2));
8280 int BeginIdx = (int)ConstantIdx->getZExtValue();
8282 BeginIdx + (int)VInner.getValueType().getVectorNumElements();
8283 if (BroadcastIdx >= BeginIdx && BroadcastIdx < EndIdx) {
8284 BroadcastIdx -= BeginIdx;
8295 // Check if this is a broadcast of a scalar. We special-case lowering
8296 // for scalars so that we can more effectively fold with loads.
8297 if (V.getOpcode() == ISD::BUILD_VECTOR ||
8298 (V.getOpcode() == ISD::SCALAR_TO_VECTOR && BroadcastIdx == 0)) {
8299 V = V.getOperand(BroadcastIdx);
8301 // If the scalar isn't a load we can't broadcast from it in AVX1, only with
8303 if (!Subtarget->hasAVX2() && !isShuffleFoldableLoad(V))
8305 } else if (BroadcastIdx != 0 || !Subtarget->hasAVX2()) {
8306 // We can't broadcast from a vector register w/o AVX2, and we can only
8307 // broadcast from the zero-element of a vector register.
8311 return DAG.getNode(X86ISD::VBROADCAST, DL, VT, V);
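// For example (purely illustrative): a broadcast of element 4 of a v8f32 that
// was built as (concat_vectors A, B), with A and B v4f32, is peeled by the
// CONCAT_VECTORS case above to V = B and BroadcastIdx = 0, so on AVX2 it
// becomes a single VBROADCAST of B's low element.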
8314 // Check for whether we can use INSERTPS to perform the shuffle. We only use
8315 // INSERTPS when the V1 elements are already in the correct locations
8316 // because otherwise we can just always use two SHUFPS instructions which
8317 // are much smaller to encode than a SHUFPS and an INSERTPS. We can also
8318 // perform INSERTPS if a single V1 element is out of place and all V2
8319 // elements are zeroable.
8320 static SDValue lowerVectorShuffleAsInsertPS(SDValue Op, SDValue V1, SDValue V2,
8322 SelectionDAG &DAG) {
8323 assert(Op.getSimpleValueType() == MVT::v4f32 && "Bad shuffle type!");
8324 assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
8325 assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
8326 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
8328 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
8331 int V1DstIndex = -1;
8332 int V2DstIndex = -1;
8333 bool V1UsedInPlace = false;
8335 for (int i = 0; i < 4; i++) {
8336 // Synthesize a zero mask from the zeroable elements (includes undefs).
8342 // Flag if we use any V1 inputs in place.
8344 V1UsedInPlace = true;
8348 // We can only insert a single non-zeroable element.
8349 if (V1DstIndex != -1 || V2DstIndex != -1)
8353 // V1 input out of place for insertion.
8356 // V2 input for insertion.
8361 // Don't bother if we have no (non-zeroable) element for insertion.
8362 if (V1DstIndex == -1 && V2DstIndex == -1)
8365 // Determine element insertion src/dst indices. The src index is from the
8366 // start of the inserted vector, not the start of the concatenated vector.
8367 unsigned V2SrcIndex = 0;
8368 if (V1DstIndex != -1) {
8369 // If we have a V1 input out of place, we insert that element from V1 itself
8370 // and don't use the original V2 at all.
8371 V2SrcIndex = Mask[V1DstIndex];
8372 V2DstIndex = V1DstIndex;
8375 V2SrcIndex = Mask[V2DstIndex] - 4;
8378 // If no V1 inputs are used in place, then the result is created only from
8379 // the zero mask and the V2 insertion - so remove V1 dependency.
8381 V1 = DAG.getUNDEF(MVT::v4f32);
8383 unsigned InsertPSMask = V2SrcIndex << 6 | V2DstIndex << 4 | ZMask;
8384 assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
8386 // Insert the V2 element into the desired position.
8388 return DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
8389 DAG.getConstant(InsertPSMask, MVT::i8));
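// Immediate encoding example (for illustration): the v4f32 mask [0, 1, 6, 3]
// has a single V2 element targeting lane 2, so V2SrcIndex = 6 - 4 = 2 and
// V2DstIndex = 2. With no zeroable lanes ZMask = 0, giving
// InsertPSMask = (2 << 6) | (2 << 4) = 0xA0.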
8392 /// \brief Handle lowering of 2-lane 64-bit floating point shuffles.
8394 /// This is the basis function for the 2-lane 64-bit shuffles as we have full
8395 /// support for floating point shuffles but not integer shuffles. These
8396 /// instructions will incur a domain crossing penalty on some chips though so
8397 /// it is better to avoid lowering through this for integer vectors where
8399 static SDValue lowerV2F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
8400 const X86Subtarget *Subtarget,
8401 SelectionDAG &DAG) {
8403 assert(Op.getSimpleValueType() == MVT::v2f64 && "Bad shuffle type!");
8404 assert(V1.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
8405 assert(V2.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
8406 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8407 ArrayRef<int> Mask = SVOp->getMask();
8408 assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
8410 if (isSingleInputShuffleMask(Mask)) {
8411 // Use low duplicate instructions for masks that match their pattern.
8412 if (Subtarget->hasSSE3())
8413 if (isShuffleEquivalent(Mask, 0, 0))
8414 return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v2f64, V1);
8416 // Straight shuffle of a single input vector. Simulate this by using the
8417 // single input as both of the "inputs" to this instruction.
8418 unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1);
8420 if (Subtarget->hasAVX()) {
8421 // If we have AVX, we can use VPERMILPS which will allow folding a load
8422 // into the shuffle.
8423 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v2f64, V1,
8424 DAG.getConstant(SHUFPDMask, MVT::i8));
8427 return DAG.getNode(X86ISD::SHUFP, SDLoc(Op), MVT::v2f64, V1, V1,
8428 DAG.getConstant(SHUFPDMask, MVT::i8));
8430 assert(Mask[0] >= 0 && Mask[0] < 2 && "Non-canonicalized blend!");
8431 assert(Mask[1] >= 2 && "Non-canonicalized blend!");
8433 // Use dedicated unpack instructions for masks that match their pattern.
8434 if (isShuffleEquivalent(Mask, 0, 2))
8435 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2f64, V1, V2);
8436 if (isShuffleEquivalent(Mask, 1, 3))
8437 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v2f64, V1, V2);
8439 // If we have a single input, insert that into V1 if we can do so cheaply.
8440 if ((Mask[0] >= 2) + (Mask[1] >= 2) == 1) {
8441 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
8442 MVT::v2f64, DL, V1, V2, Mask, Subtarget, DAG))
8444 // Try inverting the insertion since for v2 masks it is easy to do and we
8445 // can't reliably sort the mask one way or the other.
8446 int InverseMask[2] = {Mask[0] < 0 ? -1 : (Mask[0] ^ 2),
8447 Mask[1] < 0 ? -1 : (Mask[1] ^ 2)};
8448 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
8449 MVT::v2f64, DL, V2, V1, InverseMask, Subtarget, DAG))
8453 // Try to use one of the special instruction patterns to handle two common
8454 // blend patterns if a zero-blend above didn't work.
8455 if (isShuffleEquivalent(Mask, 0, 3) || isShuffleEquivalent(Mask, 1, 3))
8456 if (SDValue V1S = getScalarValueForVectorElement(V1, Mask[0], DAG))
8457 // We can either use a special instruction to load over the low double or
8458 // to move just the low double.
8460 isShuffleFoldableLoad(V1S) ? X86ISD::MOVLPD : X86ISD::MOVSD,
8462 DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64, V1S));
8464 if (Subtarget->hasSSE41())
8465 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v2f64, V1, V2, Mask,
8469 unsigned SHUFPDMask = (Mask[0] == 1) | (((Mask[1] - 2) == 1) << 1);
8470 return DAG.getNode(X86ISD::SHUFP, SDLoc(Op), MVT::v2f64, V1, V2,
8471 DAG.getConstant(SHUFPDMask, MVT::i8));
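// For example (illustrative): the canonicalized blend mask [1, 3] selects the
// high element of V1 and the high element of V2, so SHUFPDMask becomes
// (1) | (1 << 1) = 3 and the SHUFPD picks lane 1 from each source.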
8474 /// \brief Handle lowering of 2-lane 64-bit integer shuffles.
8476 /// Tries to lower a 2-lane 64-bit shuffle using shuffle operations provided by
8477 /// the integer unit to minimize domain crossing penalties. However, for blends
8478 /// it falls back to the floating point shuffle operation with appropriate bit
8480 static SDValue lowerV2I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
8481 const X86Subtarget *Subtarget,
8482 SelectionDAG &DAG) {
8484 assert(Op.getSimpleValueType() == MVT::v2i64 && "Bad shuffle type!");
8485 assert(V1.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
8486 assert(V2.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
8487 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8488 ArrayRef<int> Mask = SVOp->getMask();
8489 assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
8491 if (isSingleInputShuffleMask(Mask)) {
8492 // Check for being able to broadcast a single element.
8493 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v2i64, DL, V1,
8494 Mask, Subtarget, DAG))
8497 // Straight shuffle of a single input vector. For everything from SSE2
8498 // onward this has a single fast instruction with no scary immediates.
8499 // We have to map the mask as it is actually a v4i32 shuffle instruction.
8500 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V1);
8501 int WidenedMask[4] = {
8502 std::max(Mask[0], 0) * 2, std::max(Mask[0], 0) * 2 + 1,
8503 std::max(Mask[1], 0) * 2, std::max(Mask[1], 0) * 2 + 1};
8505 ISD::BITCAST, DL, MVT::v2i64,
8506 DAG.getNode(X86ISD::PSHUFD, SDLoc(Op), MVT::v4i32, V1,
8507 getV4X86ShuffleImm8ForMask(WidenedMask, DAG)));
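// For instance (not additional logic): the v2i64 mask [1, 0] widens to the
// v4i32 mask [2, 3, 0, 1], i.e. a single PSHUFD with immediate 0x4E that
// swaps the two 64-bit halves.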
8510 // Try to use byte shift instructions.
8511 if (SDValue Shift = lowerVectorShuffleAsByteShift(
8512 DL, MVT::v2i64, V1, V2, Mask, DAG))
8515 // If we have a single input from V2 insert that into V1 if we can do so
8517 if ((Mask[0] >= 2) + (Mask[1] >= 2) == 1) {
8518 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
8519 MVT::v2i64, DL, V1, V2, Mask, Subtarget, DAG))
8521 // Try inverting the insertion since for v2 masks it is easy to do and we
8522 // can't reliably sort the mask one way or the other.
8523 int InverseMask[2] = {Mask[0] < 0 ? -1 : (Mask[0] ^ 2),
8524 Mask[1] < 0 ? -1 : (Mask[1] ^ 2)};
8525 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
8526 MVT::v2i64, DL, V2, V1, InverseMask, Subtarget, DAG))
8530 // Use dedicated unpack instructions for masks that match their pattern.
8531 if (isShuffleEquivalent(Mask, 0, 2))
8532 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2i64, V1, V2);
8533 if (isShuffleEquivalent(Mask, 1, 3))
8534 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v2i64, V1, V2);
8536 if (Subtarget->hasSSE41())
8537 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v2i64, V1, V2, Mask,
8541 // Try to use byte rotation instructions.
8542 // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
8543 if (Subtarget->hasSSSE3())
8544 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
8545 DL, MVT::v2i64, V1, V2, Mask, Subtarget, DAG))
8548 // We implement this with SHUFPD which is pretty lame because it will likely
8549 // incur 2 cycles of stall for integer vectors on Nehalem and older chips.
8550 // However, all the alternatives are still more cycles and newer chips don't
8551 // have this problem. It would be really nice if x86 had better shuffles here.
8552 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v2f64, V1);
8553 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v2f64, V2);
8554 return DAG.getNode(ISD::BITCAST, DL, MVT::v2i64,
8555 DAG.getVectorShuffle(MVT::v2f64, DL, V1, V2, Mask));
8558 /// \brief Lower a vector shuffle using the SHUFPS instruction.
8560 /// This is a helper routine dedicated to lowering vector shuffles using SHUFPS.
8561 /// It makes no assumptions about whether this is the *best* lowering; it simply
8563 static SDValue lowerVectorShuffleWithSHUFPS(SDLoc DL, MVT VT,
8564 ArrayRef<int> Mask, SDValue V1,
8565 SDValue V2, SelectionDAG &DAG) {
8566 SDValue LowV = V1, HighV = V2;
8567 int NewMask[4] = {Mask[0], Mask[1], Mask[2], Mask[3]};
8570 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
8572 if (NumV2Elements == 1) {
8574 std::find_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; }) -
8577 // Compute the index adjacent to V2Index and in the same half by toggling
8579 int V2AdjIndex = V2Index ^ 1;
8581 if (Mask[V2AdjIndex] == -1) {
8582 // Handles all the cases where we have a single V2 element and an undef.
8583 // This will only ever happen in the high lanes because we commute the
8584 // vector otherwise.
8586 std::swap(LowV, HighV);
8587 NewMask[V2Index] -= 4;
8589 // Handle the case where the V2 element ends up adjacent to a V1 element.
8590 // To make this work, blend them together as the first step.
8591 int V1Index = V2AdjIndex;
8592 int BlendMask[4] = {Mask[V2Index] - 4, 0, Mask[V1Index], 0};
8593 V2 = DAG.getNode(X86ISD::SHUFP, DL, VT, V2, V1,
8594 getV4X86ShuffleImm8ForMask(BlendMask, DAG));
8596 // Now proceed to reconstruct the final blend as we have the necessary
8597 // high or low half formed.
8604 NewMask[V1Index] = 2; // We put the V1 element in V2[2].
8605 NewMask[V2Index] = 0; // We shifted the V2 element into V2[0].
8607 } else if (NumV2Elements == 2) {
8608 if (Mask[0] < 4 && Mask[1] < 4) {
8609 // Handle the easy case where we have V1 in the low lanes and V2 in the
8613 } else if (Mask[2] < 4 && Mask[3] < 4) {
8614 // We also handle the reversed case because this utility may get called
8615 // when we detect a SHUFPS pattern but can't easily commute the shuffle to
8616 // arrange things in the right direction.
8622 // We have a mixture of V1 and V2 in both low and high lanes. Rather than
8623 // trying to place elements directly, just blend them and set up the final
8624 // shuffle to place them.
8626 // The first two blend mask elements are for V1, the second two are for
8628 int BlendMask[4] = {Mask[0] < 4 ? Mask[0] : Mask[1],
8629 Mask[2] < 4 ? Mask[2] : Mask[3],
8630 (Mask[0] >= 4 ? Mask[0] : Mask[1]) - 4,
8631 (Mask[2] >= 4 ? Mask[2] : Mask[3]) - 4};
8632 V1 = DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2,
8633 getV4X86ShuffleImm8ForMask(BlendMask, DAG));
8635 // Now we do a normal shuffle of V1 by giving V1 as both operands to
8638 NewMask[0] = Mask[0] < 4 ? 0 : 2;
8639 NewMask[1] = Mask[0] < 4 ? 2 : 0;
8640 NewMask[2] = Mask[2] < 4 ? 1 : 3;
8641 NewMask[3] = Mask[2] < 4 ? 3 : 1;
8644 return DAG.getNode(X86ISD::SHUFP, DL, VT, LowV, HighV,
8645 getV4X86ShuffleImm8ForMask(NewMask, DAG));
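// Worked example (illustration only): the mask [0, 4, 2, 6] has one V1 and one
// V2 element in each half, so it takes the final branch above:
//
//   BlendMask = [0, 2, 0, 2]  ->  V1' = SHUFPS(V1, V2) = [a0, a2, b0, b2]
//   NewMask   = [0, 2, 1, 3]  ->  SHUFPS(V1', V1')     = [a0, b0, a2, b2]
//
// which is exactly the requested interleave of the even elements of V1 (a)
// and V2 (b).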
8648 /// \brief Lower 4-lane 32-bit floating point shuffles.
8650 /// Uses instructions exclusively from the floating point unit to minimize
8651 /// domain crossing penalties, as these are sufficient to implement all v4f32
8653 static SDValue lowerV4F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
8654 const X86Subtarget *Subtarget,
8655 SelectionDAG &DAG) {
8657 assert(Op.getSimpleValueType() == MVT::v4f32 && "Bad shuffle type!");
8658 assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
8659 assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
8660 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8661 ArrayRef<int> Mask = SVOp->getMask();
8662 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
8665 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
8667 if (NumV2Elements == 0) {
8668 // Check for being able to broadcast a single element.
8669 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4f32, DL, V1,
8670 Mask, Subtarget, DAG))
8673 // Use even/odd duplicate instructions for masks that match their pattern.
8674 if (Subtarget->hasSSE3()) {
8675 if (isShuffleEquivalent(Mask, 0, 0, 2, 2))
8676 return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v4f32, V1);
8677 if (isShuffleEquivalent(Mask, 1, 1, 3, 3))
8678 return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v4f32, V1);
8681 if (Subtarget->hasAVX()) {
8682 // If we have AVX, we can use VPERMILPS which will allow folding a load
8683 // into the shuffle.
8684 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f32, V1,
8685 getV4X86ShuffleImm8ForMask(Mask, DAG));
8688 // Otherwise, use a straight shuffle of a single input vector. We pass the
8689 // input vector to both operands to simulate this with a SHUFPS.
8690 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f32, V1, V1,
8691 getV4X86ShuffleImm8ForMask(Mask, DAG));
8694 // Use dedicated unpack instructions for masks that match their pattern.
8695 if (isShuffleEquivalent(Mask, 0, 4, 1, 5))
8696 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4f32, V1, V2);
8697 if (isShuffleEquivalent(Mask, 2, 6, 3, 7))
8698 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4f32, V1, V2);
8700 // There are special ways we can lower some single-element blends. However, we
8701 // have custom ways we can lower more complex single-element blends below that
8702 // we defer to if both this and BLENDPS fail to match, so restrict this to
8703 // when the V2 input is targeting element 0 of the mask -- that is the fast
8705 if (NumV2Elements == 1 && Mask[0] >= 4)
8706 if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v4f32, DL, V1, V2,
8707 Mask, Subtarget, DAG))
8710 if (Subtarget->hasSSE41()) {
8711 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4f32, V1, V2, Mask,
8715 // Use INSERTPS if we can complete the shuffle efficiently.
8716 if (SDValue V = lowerVectorShuffleAsInsertPS(Op, V1, V2, Mask, DAG))
8720 // Otherwise fall back to a SHUFPS lowering strategy.
8721 return lowerVectorShuffleWithSHUFPS(DL, MVT::v4f32, Mask, V1, V2, DAG);
8724 /// \brief Lower 4-lane i32 vector shuffles.
8726 /// We try to handle these with integer-domain shuffles where we can, but for
8727 /// blends we use the floating point domain blend instructions.
8728 static SDValue lowerV4I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
8729 const X86Subtarget *Subtarget,
8730 SelectionDAG &DAG) {
8732 assert(Op.getSimpleValueType() == MVT::v4i32 && "Bad shuffle type!");
8733 assert(V1.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
8734 assert(V2.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
8735 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8736 ArrayRef<int> Mask = SVOp->getMask();
8737 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
8739 // Whenever we can lower this as a zext, that instruction is strictly faster
8740 // than any alternative. It also allows us to fold memory operands into the
8741 // shuffle in many cases.
8742 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v4i32, V1, V2,
8743 Mask, Subtarget, DAG))
8747 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
8749 if (NumV2Elements == 0) {
8750 // Check for being able to broadcast a single element.
8751 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4i32, DL, V1,
8752 Mask, Subtarget, DAG))
8755 // Straight shuffle of a single input vector. For everything from SSE2
8756 // onward this has a single fast instruction with no scary immediates.
8757 // We coerce the shuffle pattern to be compatible with UNPCK instructions
8758 // but we aren't actually going to use the UNPCK instruction because doing
8759 // so prevents folding a load into this instruction or making a copy.
8760 const int UnpackLoMask[] = {0, 0, 1, 1};
8761 const int UnpackHiMask[] = {2, 2, 3, 3};
8762 if (isShuffleEquivalent(Mask, 0, 0, 1, 1))
8763 Mask = UnpackLoMask;
8764 else if (isShuffleEquivalent(Mask, 2, 2, 3, 3))
8765 Mask = UnpackHiMask;
8767 return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
8768 getV4X86ShuffleImm8ForMask(Mask, DAG));
8771 // Try to use bit shift instructions.
8772 if (SDValue Shift = lowerVectorShuffleAsBitShift(
8773 DL, MVT::v4i32, V1, V2, Mask, DAG))
8776 // Try to use byte shift instructions.
8777 if (SDValue Shift = lowerVectorShuffleAsByteShift(
8778 DL, MVT::v4i32, V1, V2, Mask, DAG))
8781 // There are special ways we can lower some single-element blends.
8782 if (NumV2Elements == 1)
8783 if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v4i32, DL, V1, V2,
8784 Mask, Subtarget, DAG))
8787 if (Subtarget->hasSSE41())
8788 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4i32, V1, V2, Mask,
8792 if (SDValue Masked =
8793 lowerVectorShuffleAsBitMask(DL, MVT::v4i32, V1, V2, Mask, DAG))
8796 // Use dedicated unpack instructions for masks that match their pattern.
8797 if (isShuffleEquivalent(Mask, 0, 4, 1, 5))
8798 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4i32, V1, V2);
8799 if (isShuffleEquivalent(Mask, 2, 6, 3, 7))
8800 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4i32, V1, V2);
8802 // Try to use byte rotation instructions.
8803 // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
8804 if (Subtarget->hasSSSE3())
8805 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
8806 DL, MVT::v4i32, V1, V2, Mask, Subtarget, DAG))
8809 // We implement this with SHUFPS because it can blend from two vectors.
8810 // Because we're going to eventually use SHUFPS, we use SHUFPS even to build
8811 // up the inputs, bypassing domain shift penalties that we would incur if we
8812 // directly used PSHUFD on Nehalem and older. For newer chips, this isn't
8814 return DAG.getNode(ISD::BITCAST, DL, MVT::v4i32,
8815 DAG.getVectorShuffle(
8817 DAG.getNode(ISD::BITCAST, DL, MVT::v4f32, V1),
8818 DAG.getNode(ISD::BITCAST, DL, MVT::v4f32, V2), Mask));
8821 /// \brief Lowering of single-input v8i16 shuffles is the cornerstone of SSE2
8822 /// shuffle lowering, and the most complex part.
8824 /// The lowering strategy is to try to form pairs of input lanes which are
8825 /// targeted at the same half of the final vector, and then use a dword shuffle
8826 /// to place them onto the right half, and finally unpack the paired lanes into
8827 /// their final position.
8829 /// The exact breakdown of how to form these dword pairs and align them on the
8830 /// correct sides is really tricky. See the comments within the function for
8831 /// more of the details.
8832 static SDValue lowerV8I16SingleInputVectorShuffle(
8833 SDLoc DL, SDValue V, MutableArrayRef<int> Mask,
8834 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
8835 assert(V.getSimpleValueType() == MVT::v8i16 && "Bad input type!");
8836 MutableArrayRef<int> LoMask = Mask.slice(0, 4);
8837 MutableArrayRef<int> HiMask = Mask.slice(4, 4);
8839 SmallVector<int, 4> LoInputs;
8840 std::copy_if(LoMask.begin(), LoMask.end(), std::back_inserter(LoInputs),
8841 [](int M) { return M >= 0; });
8842 std::sort(LoInputs.begin(), LoInputs.end());
8843 LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()), LoInputs.end());
8844 SmallVector<int, 4> HiInputs;
8845 std::copy_if(HiMask.begin(), HiMask.end(), std::back_inserter(HiInputs),
8846 [](int M) { return M >= 0; });
8847 std::sort(HiInputs.begin(), HiInputs.end());
8848 HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()), HiInputs.end());
8850 std::lower_bound(LoInputs.begin(), LoInputs.end(), 4) - LoInputs.begin();
8851 int NumHToL = LoInputs.size() - NumLToL;
8853 std::lower_bound(HiInputs.begin(), HiInputs.end(), 4) - HiInputs.begin();
8854 int NumHToH = HiInputs.size() - NumLToH;
8855 MutableArrayRef<int> LToLInputs(LoInputs.data(), NumLToL);
8856 MutableArrayRef<int> LToHInputs(HiInputs.data(), NumLToH);
8857 MutableArrayRef<int> HToLInputs(LoInputs.data() + NumLToL, NumHToL);
8858 MutableArrayRef<int> HToHInputs(HiInputs.data() + NumLToH, NumHToH);
8860 // Check for being able to broadcast a single element.
8861 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v8i16, DL, V,
8862 Mask, Subtarget, DAG))
8865 // Try to use bit shift instructions.
8866 if (SDValue Shift = lowerVectorShuffleAsBitShift(
8867 DL, MVT::v8i16, V, V, Mask, DAG))
8870 // Try to use byte shift instructions.
8871 if (SDValue Shift = lowerVectorShuffleAsByteShift(
8872 DL, MVT::v8i16, V, V, Mask, DAG))
8875 // Use dedicated unpack instructions for masks that match their pattern.
8876 if (isShuffleEquivalent(Mask, 0, 0, 1, 1, 2, 2, 3, 3))
8877 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i16, V, V);
8878 if (isShuffleEquivalent(Mask, 4, 4, 5, 5, 6, 6, 7, 7))
8879 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i16, V, V);
8881 // Try to use byte rotation instructions.
8882 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
8883 DL, MVT::v8i16, V, V, Mask, Subtarget, DAG))
8886 // Simplify the 1-into-3 and 3-into-1 cases with a single pshufd. For all
8887 // such inputs we can swap two of the dwords across the half mark and end up
8888 // with <=2 inputs to each half. Once there, we can fall through
8889 // to the generic code below. For example:
8891 // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
8892 // Mask: [0, 1, 2, 7, 4, 5, 6, 3] -----------------> [0, 1, 4, 7, 2, 3, 6, 5]
8894 // However in some very rare cases we have a 1-into-3 or 3-into-1 on one half
8895 // and an existing 2-into-2 on the other half. In this case we may have to
8896 // pre-shuffle the 2-into-2 half to avoid turning it into a 3-into-1 or
8897 // 1-into-3 which could cause us to cycle endlessly fixing each side in turn.
8898 // Fortunately, we don't have to handle anything but a 2-into-2 pattern
8899 // because any other situation (including a 3-into-1 or 1-into-3 in the other
8900 // half than the one we target for fixing) will be fixed when we re-enter this
8901 // path. We will also combine away any sequence of PSHUFD instructions that
8902 // results into a single instruction. Here is an example of the tricky case:
8904 // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
8905 // Mask: [3, 7, 1, 0, 2, 7, 3, 5] -THIS-IS-BAD!!!!-> [5, 7, 1, 0, 4, 7, 5, 3]
8907 // This now has a 1-into-3 in the high half! Instead, we do two shuffles:
8909 // Input: [a, b, c, d, e, f, g, h] PSHUFHW[0,2,1,3]-> [a, b, c, d, e, g, f, h]
8910 // Mask: [3, 7, 1, 0, 2, 7, 3, 5] -----------------> [3, 7, 1, 0, 2, 7, 3, 6]
8912 // Input: [a, b, c, d, e, g, f, h] -PSHUFD[0,2,1,3]-> [a, b, e, g, c, d, f, h]
8913 // Mask: [3, 7, 1, 0, 2, 7, 3, 6] -----------------> [5, 7, 1, 0, 4, 7, 5, 6]
8915 // The result is fine to be handled by the generic logic.
8916 auto balanceSides = [&](ArrayRef<int> AToAInputs, ArrayRef<int> BToAInputs,
8917 ArrayRef<int> BToBInputs, ArrayRef<int> AToBInputs,
8918 int AOffset, int BOffset) {
8919 assert((AToAInputs.size() == 3 || AToAInputs.size() == 1) &&
8920 "Must call this with A having 3 or 1 inputs from the A half.");
8921 assert((BToAInputs.size() == 1 || BToAInputs.size() == 3) &&
8922 "Must call this with B having 1 or 3 inputs from the B half.");
8923 assert(AToAInputs.size() + BToAInputs.size() == 4 &&
8924 "Must call this with either 3:1 or 1:3 inputs (summing to 4).");
8926 // Compute the index of the dword with only one word among the three inputs in
8927 // a half by taking the sum of the half with three inputs and subtracting
8928 // the sum of the actual three inputs. The difference is the remaining
8931 int &TripleDWord = AToAInputs.size() == 3 ? ADWord : BDWord;
8932 int &OneInputDWord = AToAInputs.size() == 3 ? BDWord : ADWord;
8933 int TripleInputOffset = AToAInputs.size() == 3 ? AOffset : BOffset;
8934 ArrayRef<int> TripleInputs = AToAInputs.size() == 3 ? AToAInputs : BToAInputs;
8935 int OneInput = AToAInputs.size() == 3 ? BToAInputs[0] : AToAInputs[0];
8936 int TripleInputSum = 0 + 1 + 2 + 3 + (4 * TripleInputOffset);
8937 int TripleNonInputIdx =
8938 TripleInputSum - std::accumulate(TripleInputs.begin(), TripleInputs.end(), 0);
8939 TripleDWord = TripleNonInputIdx / 2;
8941 // We use xor with one to compute the adjacent DWord to whichever one the
8943 OneInputDWord = (OneInput / 2) ^ 1;
8945 // Check for one tricky case: We're fixing a 3<-1 or a 1<-3 shuffle for AToA
8946 // and BToA inputs. If there is also such a problem with the BToB and AToB
8947 // inputs, we don't try to fix it necessarily -- we'll recurse and see it in
8948 // the next pass. However, if we have a 2<-2 in the BToB and AToB inputs, it
8949 // is essential that we don't *create* a 3<-1 as then we might oscillate.
8950 if (BToBInputs.size() == 2 && AToBInputs.size() == 2) {
8951 // Compute how many inputs will be flipped by swapping these DWords. We
8953 // to balance this to ensure we don't form a 3-1 shuffle in the other
8955 int NumFlippedAToBInputs =
8956 std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord) +
8957 std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord + 1);
8958 int NumFlippedBToBInputs =
8959 std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord) +
8960 std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord + 1);
8961 if ((NumFlippedAToBInputs == 1 &&
8962 (NumFlippedBToBInputs == 0 || NumFlippedBToBInputs == 2)) ||
8963 (NumFlippedBToBInputs == 1 &&
8964 (NumFlippedAToBInputs == 0 || NumFlippedAToBInputs == 2))) {
8965 // We choose whether to fix the A half or B half based on whether that
8966 // half has zero flipped inputs. At zero, we may not be able to fix it
8967 // with that half. We also bias towards fixing the B half because that
8968 // will more commonly be the high half, and we have to bias one way.
8969 auto FixFlippedInputs = [&V, &DL, &Mask, &DAG](int PinnedIdx, int DWord,
8970 ArrayRef<int> Inputs) {
8971 int FixIdx = PinnedIdx ^ 1; // The adjacent slot to the pinned slot.
8972 bool IsFixIdxInput = std::find(Inputs.begin(), Inputs.end(),
8973 PinnedIdx ^ 1) != Inputs.end();
8974 // Determine whether the free index is in the flipped dword or the
8975 // unflipped dword based on where the pinned index is. We use this bit
8976 // in an xor to conditionally select the adjacent dword.
8977 int FixFreeIdx = 2 * (DWord ^ (PinnedIdx / 2 == DWord));
8978 bool IsFixFreeIdxInput = std::find(Inputs.begin(), Inputs.end(),
8979 FixFreeIdx) != Inputs.end();
8980 if (IsFixIdxInput == IsFixFreeIdxInput)
8982 IsFixFreeIdxInput = std::find(Inputs.begin(), Inputs.end(),
8983 FixFreeIdx) != Inputs.end();
8984 assert(IsFixIdxInput != IsFixFreeIdxInput &&
8985 "We need to be changing the number of flipped inputs!");
8986 int PSHUFHalfMask[] = {0, 1, 2, 3};
8987 std::swap(PSHUFHalfMask[FixFreeIdx % 4], PSHUFHalfMask[FixIdx % 4]);
8988 V = DAG.getNode(FixIdx < 4 ? X86ISD::PSHUFLW : X86ISD::PSHUFHW, DL,
8990 getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DAG));
8993 if (M != -1 && M == FixIdx)
8995 else if (M != -1 && M == FixFreeIdx)
8998 if (NumFlippedBToBInputs != 0) {
9000 BToAInputs.size() == 3 ? TripleNonInputIdx : OneInput;
9001 FixFlippedInputs(BPinnedIdx, BDWord, BToBInputs);
9003 assert(NumFlippedAToBInputs != 0 && "Impossible given predicates!");
9005 AToAInputs.size() == 3 ? TripleNonInputIdx : OneInput;
9006 FixFlippedInputs(APinnedIdx, ADWord, AToBInputs);
9011 int PSHUFDMask[] = {0, 1, 2, 3};
9012 PSHUFDMask[ADWord] = BDWord;
9013 PSHUFDMask[BDWord] = ADWord;
9014 V = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
9015 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
9016 DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V),
9017 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
9019 // Adjust the mask to match the new locations of A and B.
9021 if (M != -1 && M/2 == ADWord)
9022 M = 2 * BDWord + M % 2;
9023 else if (M != -1 && M/2 == BDWord)
9024 M = 2 * ADWord + M % 2;
9026 // Recurse back into this routine to re-compute state now that this isn't
9027 // a 3 and 1 problem.
9028 return DAG.getVectorShuffle(MVT::v8i16, DL, V, DAG.getUNDEF(MVT::v8i16),
9031 if ((NumLToL == 3 && NumHToL == 1) || (NumLToL == 1 && NumHToL == 3))
9032 return balanceSides(LToLInputs, HToLInputs, HToHInputs, LToHInputs, 0, 4);
9033 else if ((NumHToH == 3 && NumLToH == 1) || (NumHToH == 1 && NumLToH == 3))
9034 return balanceSides(HToHInputs, LToHInputs, LToLInputs, HToLInputs, 4, 0);
9036 // At this point there are at most two inputs to the low and high halves from
9037 // each half. That means the inputs can always be grouped into dwords and
9038 // those dwords can then be moved to the correct half with a dword shuffle.
9039 // We use at most one low and one high word shuffle to collect these paired
9040 // inputs into dwords, and finally a dword shuffle to place them.
9041 int PSHUFLMask[4] = {-1, -1, -1, -1};
9042 int PSHUFHMask[4] = {-1, -1, -1, -1};
9043 int PSHUFDMask[4] = {-1, -1, -1, -1};
9045 // First fix the masks for all the inputs that are staying in their
9046 // original halves. This will then dictate the targets of the cross-half
9048 auto fixInPlaceInputs =
9049 [&PSHUFDMask](ArrayRef<int> InPlaceInputs, ArrayRef<int> IncomingInputs,
9050 MutableArrayRef<int> SourceHalfMask,
9051 MutableArrayRef<int> HalfMask, int HalfOffset) {
9052 if (InPlaceInputs.empty())
9054 if (InPlaceInputs.size() == 1) {
9055 SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
9056 InPlaceInputs[0] - HalfOffset;
9057 PSHUFDMask[InPlaceInputs[0] / 2] = InPlaceInputs[0] / 2;
9060 if (IncomingInputs.empty()) {
9061 // Just fix all of the in place inputs.
9062 for (int Input : InPlaceInputs) {
9063 SourceHalfMask[Input - HalfOffset] = Input - HalfOffset;
9064 PSHUFDMask[Input / 2] = Input / 2;
9069 assert(InPlaceInputs.size() == 2 && "Cannot handle 3 or 4 inputs!");
9070 SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
9071 InPlaceInputs[0] - HalfOffset;
9072 // Put the second input next to the first so that they are packed into
9073 // a dword. We find the adjacent index by toggling the low bit.
9074 int AdjIndex = InPlaceInputs[0] ^ 1;
9075 SourceHalfMask[AdjIndex - HalfOffset] = InPlaceInputs[1] - HalfOffset;
9076 std::replace(HalfMask.begin(), HalfMask.end(), InPlaceInputs[1], AdjIndex);
9077 PSHUFDMask[AdjIndex / 2] = AdjIndex / 2;
9079 fixInPlaceInputs(LToLInputs, HToLInputs, PSHUFLMask, LoMask, 0);
9080 fixInPlaceInputs(HToHInputs, LToHInputs, PSHUFHMask, HiMask, 4);
9082 // Now gather the cross-half inputs and place them into a free dword of
9083 // their target half.
9084 // FIXME: This operation could almost certainly be simplified dramatically to
9085 // look more like the 3-1 fixing operation.
9086 auto moveInputsToRightHalf = [&PSHUFDMask](
9087 MutableArrayRef<int> IncomingInputs, ArrayRef<int> ExistingInputs,
9088 MutableArrayRef<int> SourceHalfMask, MutableArrayRef<int> HalfMask,
9089 MutableArrayRef<int> FinalSourceHalfMask, int SourceOffset,
9091 auto isWordClobbered = [](ArrayRef<int> SourceHalfMask, int Word) {
9092 return SourceHalfMask[Word] != -1 && SourceHalfMask[Word] != Word;
9094 auto isDWordClobbered = [&isWordClobbered](ArrayRef<int> SourceHalfMask,
9096 int LowWord = Word & ~1;
9097 int HighWord = Word | 1;
9098 return isWordClobbered(SourceHalfMask, LowWord) ||
9099 isWordClobbered(SourceHalfMask, HighWord);
9102 if (IncomingInputs.empty())
9105 if (ExistingInputs.empty()) {
9106 // Map any dwords with inputs from them into the right half.
9107 for (int Input : IncomingInputs) {
9108 // If the source half mask maps over the inputs, turn those into
9109 // swaps and use the swapped lane.
9110 if (isWordClobbered(SourceHalfMask, Input - SourceOffset)) {
9111 if (SourceHalfMask[SourceHalfMask[Input - SourceOffset]] == -1) {
9112 SourceHalfMask[SourceHalfMask[Input - SourceOffset]] =
9113 Input - SourceOffset;
9114 // We have to swap the uses in our half mask in one sweep.
9115 for (int &M : HalfMask)
9116 if (M == SourceHalfMask[Input - SourceOffset] + SourceOffset)
9118 else if (M == Input)
9119 M = SourceHalfMask[Input - SourceOffset] + SourceOffset;
9121 assert(SourceHalfMask[SourceHalfMask[Input - SourceOffset]] ==
9122 Input - SourceOffset &&
9123 "Previous placement doesn't match!");
9125 // Note that this correctly re-maps both when we do a swap and when
9126 // we observe the other side of the swap above. We rely on that to
9127 // avoid swapping the members of the input list directly.
9128 Input = SourceHalfMask[Input - SourceOffset] + SourceOffset;
9131 // Map the input's dword into the correct half.
9132 if (PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] == -1)
9133 PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] = Input / 2;
9135 assert(PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] ==
9137 "Previous placement doesn't match!");
9140 // And just directly shift any other-half mask elements to be same-half
9141 // as we will have mirrored the dword containing the element into the
9142 // same position within that half.
9143 for (int &M : HalfMask)
9144 if (M >= SourceOffset && M < SourceOffset + 4) {
9145 M = M - SourceOffset + DestOffset;
9146 assert(M >= 0 && "This should never wrap below zero!");
9151 // Ensure we have the input in a viable dword of its current half. This
9152 // is particularly tricky because the original position may be clobbered
9153 // by inputs being moved and *staying* in that half.
9154 if (IncomingInputs.size() == 1) {
9155 if (isWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
9156 int InputFixed = std::find(std::begin(SourceHalfMask),
9157 std::end(SourceHalfMask), -1) -
9158 std::begin(SourceHalfMask) + SourceOffset;
9159 SourceHalfMask[InputFixed - SourceOffset] =
9160 IncomingInputs[0] - SourceOffset;
9161 std::replace(HalfMask.begin(), HalfMask.end(), IncomingInputs[0],
9163 IncomingInputs[0] = InputFixed;
9165 } else if (IncomingInputs.size() == 2) {
9166 if (IncomingInputs[0] / 2 != IncomingInputs[1] / 2 ||
9167 isDWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
9168 // We have two non-adjacent or clobbered inputs we need to extract from
9169 // the source half. To do this, we need to map them into some adjacent
9170 // dword slot in the source mask.
9171 int InputsFixed[2] = {IncomingInputs[0] - SourceOffset,
9172 IncomingInputs[1] - SourceOffset};
9174 // If there is a free slot in the source half mask adjacent to one of
9175 // the inputs, place the other input in it. We use (Index XOR 1) to
9176 // compute an adjacent index.
9177 if (!isWordClobbered(SourceHalfMask, InputsFixed[0]) &&
9178 SourceHalfMask[InputsFixed[0] ^ 1] == -1) {
9179 SourceHalfMask[InputsFixed[0]] = InputsFixed[0];
9180 SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
9181 InputsFixed[1] = InputsFixed[0] ^ 1;
9182 } else if (!isWordClobbered(SourceHalfMask, InputsFixed[1]) &&
9183 SourceHalfMask[InputsFixed[1] ^ 1] == -1) {
9184 SourceHalfMask[InputsFixed[1]] = InputsFixed[1];
9185 SourceHalfMask[InputsFixed[1] ^ 1] = InputsFixed[0];
9186 InputsFixed[0] = InputsFixed[1] ^ 1;
9187 } else if (SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] == -1 &&
9188 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] == -1) {
9189 // The two inputs are in the same DWord but it is clobbered and the
9190 // adjacent DWord isn't used at all. Move both inputs to the free
9192 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] = InputsFixed[0];
9193 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] = InputsFixed[1];
9194 InputsFixed[0] = 2 * ((InputsFixed[0] / 2) ^ 1);
9195 InputsFixed[1] = 2 * ((InputsFixed[0] / 2) ^ 1) + 1;
9197 // The only way we hit this point is if there is no clobbering
9198 // (because there are no off-half inputs to this half) and there is no
9199 // free slot adjacent to one of the inputs. In this case, we have to
9200 // swap an input with a non-input.
9201 for (int i = 0; i < 4; ++i)
9202 assert((SourceHalfMask[i] == -1 || SourceHalfMask[i] == i) &&
9203 "We can't handle any clobbers here!");
9204 assert(InputsFixed[1] != (InputsFixed[0] ^ 1) &&
9205 "Cannot have adjacent inputs here!");
9207 SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
9208 SourceHalfMask[InputsFixed[1]] = InputsFixed[0] ^ 1;
9210 // We also have to update the final source mask in this case because
9211 // it may need to undo the above swap.
9212 for (int &M : FinalSourceHalfMask)
9213 if (M == (InputsFixed[0] ^ 1) + SourceOffset)
9214 M = InputsFixed[1] + SourceOffset;
9215 else if (M == InputsFixed[1] + SourceOffset)
9216 M = (InputsFixed[0] ^ 1) + SourceOffset;
9218 InputsFixed[1] = InputsFixed[0] ^ 1;
9221 // Point everything at the fixed inputs.
9222 for (int &M : HalfMask)
9223 if (M == IncomingInputs[0])
9224 M = InputsFixed[0] + SourceOffset;
9225 else if (M == IncomingInputs[1])
9226 M = InputsFixed[1] + SourceOffset;
9228 IncomingInputs[0] = InputsFixed[0] + SourceOffset;
9229 IncomingInputs[1] = InputsFixed[1] + SourceOffset;
9232 llvm_unreachable("Unhandled input size!");
9235 // Now hoist the DWord down to the right half.
9236 int FreeDWord = (PSHUFDMask[DestOffset / 2] == -1 ? 0 : 1) + DestOffset / 2;
9237 assert(PSHUFDMask[FreeDWord] == -1 && "DWord not free");
9238 PSHUFDMask[FreeDWord] = IncomingInputs[0] / 2;
9239 for (int &M : HalfMask)
9240 for (int Input : IncomingInputs)
9242 M = FreeDWord * 2 + Input % 2;
9244 moveInputsToRightHalf(HToLInputs, LToLInputs, PSHUFHMask, LoMask, HiMask,
9245 /*SourceOffset*/ 4, /*DestOffset*/ 0);
9246 moveInputsToRightHalf(LToHInputs, HToHInputs, PSHUFLMask, HiMask, LoMask,
9247 /*SourceOffset*/ 0, /*DestOffset*/ 4);
9249 // Now enact all the shuffles we've computed to move the inputs into their
9251 if (!isNoopShuffleMask(PSHUFLMask))
9252 V = DAG.getNode(X86ISD::PSHUFLW, DL, MVT::v8i16, V,
9253 getV4X86ShuffleImm8ForMask(PSHUFLMask, DAG));
9254 if (!isNoopShuffleMask(PSHUFHMask))
9255 V = DAG.getNode(X86ISD::PSHUFHW, DL, MVT::v8i16, V,
9256 getV4X86ShuffleImm8ForMask(PSHUFHMask, DAG));
9257 if (!isNoopShuffleMask(PSHUFDMask))
9258 V = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
9259 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
9260 DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V),
9261 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
9263 // At this point, each half should contain all its inputs, and we can then
9264 // just shuffle them into their final position.
9265 assert(std::count_if(LoMask.begin(), LoMask.end(),
9266 [](int M) { return M >= 4; }) == 0 &&
9267 "Failed to lift all the high half inputs to the low mask!");
9268 assert(std::count_if(HiMask.begin(), HiMask.end(),
9269 [](int M) { return M >= 0 && M < 4; }) == 0 &&
9270 "Failed to lift all the low half inputs to the high mask!");
9272 // Do a half shuffle for the low mask.
9273 if (!isNoopShuffleMask(LoMask))
9274 V = DAG.getNode(X86ISD::PSHUFLW, DL, MVT::v8i16, V,
9275 getV4X86ShuffleImm8ForMask(LoMask, DAG));
9277 // Do a half shuffle with the high mask after shifting its values down.
9278 for (int &M : HiMask)
9281 if (!isNoopShuffleMask(HiMask))
9282 V = DAG.getNode(X86ISD::PSHUFHW, DL, MVT::v8i16, V,
9283 getV4X86ShuffleImm8ForMask(HiMask, DAG));
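// E.g. (illustration only): a remaining HiMask of [5, 4, 7, 6] is shifted down
// to [1, 0, 3, 2] so it can be encoded as a PSHUFHW immediate, swapping each
// pair of words within the high half.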
9288 /// \brief Detect whether the mask pattern should be lowered through
9291 /// This essentially tests whether viewing the mask as an interleaving of two
9292 /// sub-sequences reduces the cross-input traffic of a blend operation. If so,
9293 /// lowering it through interleaving is a significantly better strategy.
9294 static bool shouldLowerAsInterleaving(ArrayRef<int> Mask) {
9295 int NumEvenInputs[2] = {0, 0};
9296 int NumOddInputs[2] = {0, 0};
9297 int NumLoInputs[2] = {0, 0};
9298 int NumHiInputs[2] = {0, 0};
9299 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
9303 int InputIdx = Mask[i] >= Size;
9306 ++NumLoInputs[InputIdx];
9308 ++NumHiInputs[InputIdx];
9311 ++NumEvenInputs[InputIdx];
9313 ++NumOddInputs[InputIdx];
9316 // The minimum number of cross-input results for both the interleaved and
9317 // split cases. If interleaving results in fewer cross-input results, return
9319 int InterleavedCrosses = std::min(NumEvenInputs[1] + NumOddInputs[0],
9320 NumEvenInputs[0] + NumOddInputs[1]);
9321 int SplitCrosses = std::min(NumLoInputs[1] + NumHiInputs[0],
9322 NumLoInputs[0] + NumHiInputs[1]);
9323 return InterleavedCrosses < SplitCrosses;
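// Worked example (illustrative): for the v8i16 blend mask
// [0, 8, 1, 9, 2, 10, 3, 11] every even output comes from V1 and every odd
// output from V2, so InterleavedCrosses = min(0 + 0, 4 + 4) = 0 while
// SplitCrosses = min(2 + 2, 2 + 2) = 4, and interleaving (an unpack) wins.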
9326 /// \brief Blend two v8i16 vectors using a naive unpack strategy.
9328 /// This strategy only works when the inputs from each vector fit into a single
9329 /// half of that vector, and generally there are few enough inputs that the
9330 /// required in-place shuffles are not highly constrained (and thus expensive). It
9331 /// shifts all the inputs into a single side of both input vectors and then
9332 /// uses an unpack to interleave these inputs in a single vector. At that
9333 /// point, we will fall back on the generic single input shuffle lowering.
9334 static SDValue lowerV8I16BasicBlendVectorShuffle(SDLoc DL, SDValue V1,
9336 MutableArrayRef<int> Mask,
9337 const X86Subtarget *Subtarget,
9338 SelectionDAG &DAG) {
9339 assert(V1.getSimpleValueType() == MVT::v8i16 && "Bad input type!");
9340 assert(V2.getSimpleValueType() == MVT::v8i16 && "Bad input type!");
9341 SmallVector<int, 3> LoV1Inputs, HiV1Inputs, LoV2Inputs, HiV2Inputs;
9342 for (int i = 0; i < 8; ++i)
9343 if (Mask[i] >= 0 && Mask[i] < 4)
9344 LoV1Inputs.push_back(i);
9345 else if (Mask[i] >= 4 && Mask[i] < 8)
9346 HiV1Inputs.push_back(i);
9347 else if (Mask[i] >= 8 && Mask[i] < 12)
9348 LoV2Inputs.push_back(i);
9349 else if (Mask[i] >= 12)
9350 HiV2Inputs.push_back(i);
9352 int NumV1Inputs = LoV1Inputs.size() + HiV1Inputs.size();
9353 int NumV2Inputs = LoV2Inputs.size() + HiV2Inputs.size();
9356 assert(NumV1Inputs > 0 && NumV1Inputs <= 3 && "At most 3 inputs supported");
9357 assert(NumV2Inputs > 0 && NumV2Inputs <= 3 && "At most 3 inputs supported");
9358 assert(NumV1Inputs + NumV2Inputs <= 4 && "At most 4 combined inputs");
9360 bool MergeFromLo = LoV1Inputs.size() + LoV2Inputs.size() >=
9361 HiV1Inputs.size() + HiV2Inputs.size();
9363 auto moveInputsToHalf = [&](SDValue V, ArrayRef<int> LoInputs,
9364 ArrayRef<int> HiInputs, bool MoveToLo,
9366 ArrayRef<int> GoodInputs = MoveToLo ? LoInputs : HiInputs;
9367 ArrayRef<int> BadInputs = MoveToLo ? HiInputs : LoInputs;
9368 if (BadInputs.empty())
9371 int MoveMask[] = {-1, -1, -1, -1, -1, -1, -1, -1};
9372 int MoveOffset = MoveToLo ? 0 : 4;
9374 if (GoodInputs.empty()) {
9375 for (int BadInput : BadInputs) {
9376 MoveMask[Mask[BadInput] % 4 + MoveOffset] = Mask[BadInput] - MaskOffset;
9377 Mask[BadInput] = Mask[BadInput] % 4 + MoveOffset + MaskOffset;
9380 if (GoodInputs.size() == 2) {
9381 // If the low inputs are spread across two dwords, pack them into
9383 MoveMask[MoveOffset] = Mask[GoodInputs[0]] - MaskOffset;
9384 MoveMask[MoveOffset + 1] = Mask[GoodInputs[1]] - MaskOffset;
9385 Mask[GoodInputs[0]] = MoveOffset + MaskOffset;
9386 Mask[GoodInputs[1]] = MoveOffset + 1 + MaskOffset;
9388 // Otherwise pin the good inputs.
9389 for (int GoodInput : GoodInputs)
9390 MoveMask[Mask[GoodInput] - MaskOffset] = Mask[GoodInput] - MaskOffset;
9393 if (BadInputs.size() == 2) {
9394 // If we have two bad inputs then there may be either one or two good
9395 // inputs fixed in place. Find a fixed input, and then find the *other*
9396 // two adjacent indices by using modular arithmetic.
9397 int GoodMaskIdx =
9398 std::find_if(std::begin(MoveMask) + MoveOffset, std::end(MoveMask),
9399 [](int M) { return M >= 0; }) -
9400 std::begin(MoveMask);
9401 int MoveMaskIdx =
9402 ((((GoodMaskIdx - MoveOffset) & ~1) + 2) % 4) + MoveOffset;
9403 assert(MoveMask[MoveMaskIdx] == -1 && "Expected empty slot");
9404 assert(MoveMask[MoveMaskIdx + 1] == -1 && "Expected empty slot");
9405 MoveMask[MoveMaskIdx] = Mask[BadInputs[0]] - MaskOffset;
9406 MoveMask[MoveMaskIdx + 1] = Mask[BadInputs[1]] - MaskOffset;
9407 Mask[BadInputs[0]] = MoveMaskIdx + MaskOffset;
9408 Mask[BadInputs[1]] = MoveMaskIdx + 1 + MaskOffset;
9409 } else {
9410 assert(BadInputs.size() == 1 && "All sizes handled");
9411 int MoveMaskIdx = std::find(std::begin(MoveMask) + MoveOffset,
9412 std::end(MoveMask), -1) -
9413 std::begin(MoveMask);
9414 MoveMask[MoveMaskIdx] = Mask[BadInputs[0]] - MaskOffset;
9415 Mask[BadInputs[0]] = MoveMaskIdx + MaskOffset;
9416 }
9417 }
9419 return DAG.getVectorShuffle(MVT::v8i16, DL, V, DAG.getUNDEF(MVT::v8i16),
9420 MoveMask);
9421 };
9422 V1 = moveInputsToHalf(V1, LoV1Inputs, HiV1Inputs, MergeFromLo,
9423 /*MaskOffset*/ 0);
9424 V2 = moveInputsToHalf(V2, LoV2Inputs, HiV2Inputs, MergeFromLo,
9425 /*MaskOffset*/ 8);
9427 // FIXME: Select an interleaving of the merge of V1 and V2 that minimizes
9428 // cross-half traffic in the final shuffle.
9430 // Munge the mask to be a single-input mask after the unpack merges the
9431 // results.
9432 for (int &M : Mask)
9433 if (M != -1)
9434 M = 2 * (M % 4) + (M / 8);
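// Illustration for the MergeFromLo case (example value, not from the code
// above): a mask entry of 9 (lane 1 of V2) becomes 2 * (9 % 4) + 9 / 8 = 3,
// which is exactly where UNPCKL places lane 1 of its second v8i16 operand.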
9436 return DAG.getVectorShuffle(
9437 MVT::v8i16, DL, DAG.getNode(MergeFromLo ? X86ISD::UNPCKL : X86ISD::UNPCKH,
9438 DL, MVT::v8i16, V1, V2),
9439 DAG.getUNDEF(MVT::v8i16), Mask);
9442 /// \brief Generic lowering of 8-lane i16 shuffles.
9444 /// This handles both single-input shuffles and combined shuffle/blends with
9445 /// two inputs. The single input shuffles are immediately delegated to
9446 /// a dedicated lowering routine.
9448 /// The blends are lowered in one of three fundamental ways. If there are few
9449 /// enough inputs, it delegates to a basic UNPCK-based strategy. If the shuffle
9450 /// of the input is significantly cheaper when lowered as an interleaving of
9451 /// the two inputs, try to interleave them. Otherwise, blend the low and high
9452 /// halves of the inputs separately (making them have relatively few inputs)
9453 /// and then concatenate them.
9454 static SDValue lowerV8I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
9455 const X86Subtarget *Subtarget,
9456 SelectionDAG &DAG) {
9458 assert(Op.getSimpleValueType() == MVT::v8i16 && "Bad shuffle type!");
9459 assert(V1.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
9460 assert(V2.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
9461 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
9462 ArrayRef<int> OrigMask = SVOp->getMask();
9463 int MaskStorage[8] = {OrigMask[0], OrigMask[1], OrigMask[2], OrigMask[3],
9464 OrigMask[4], OrigMask[5], OrigMask[6], OrigMask[7]};
9465 MutableArrayRef<int> Mask(MaskStorage);
9467 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
9469 // Whenever we can lower this as a zext, that instruction is strictly faster
9470 // than any alternative.
9471 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
9472 DL, MVT::v8i16, V1, V2, OrigMask, Subtarget, DAG))
9475 auto isV1 = [](int M) { return M >= 0 && M < 8; };
9476 auto isV2 = [](int M) { return M >= 8; };
9478 int NumV1Inputs = std::count_if(Mask.begin(), Mask.end(), isV1);
9479 int NumV2Inputs = std::count_if(Mask.begin(), Mask.end(), isV2);
9481 if (NumV2Inputs == 0)
9482 return lowerV8I16SingleInputVectorShuffle(DL, V1, Mask, Subtarget, DAG);
9484 assert(NumV1Inputs > 0 && "All single-input shuffles should be canonicalized "
9485 "to be V1-input shuffles.");
9487 // Try to use bit shift instructions.
9488 if (SDValue Shift = lowerVectorShuffleAsBitShift(
9489 DL, MVT::v8i16, V1, V2, Mask, DAG))
9492 // Try to use byte shift instructions.
9493 if (SDValue Shift = lowerVectorShuffleAsByteShift(
9494 DL, MVT::v8i16, V1, V2, Mask, DAG))
9497 // There are special ways we can lower some single-element blends.
9498 if (NumV2Inputs == 1)
9499 if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v8i16, DL, V1, V2,
9500 Mask, Subtarget, DAG))
9503 if (Subtarget->hasSSE41())
9504 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8i16, V1, V2, Mask,
9508 if (SDValue Masked =
9509 lowerVectorShuffleAsBitMask(DL, MVT::v8i16, V1, V2, Mask, DAG))
9512 // Use dedicated unpack instructions for masks that match their pattern.
9513 if (isShuffleEquivalent(Mask, 0, 8, 1, 9, 2, 10, 3, 11))
9514 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i16, V1, V2);
9515 if (isShuffleEquivalent(Mask, 4, 12, 5, 13, 6, 14, 7, 15))
9516 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i16, V1, V2);
9518 // Try to use byte rotation instructions.
9519 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
9520 DL, MVT::v8i16, V1, V2, Mask, Subtarget, DAG))
9523 if (NumV1Inputs + NumV2Inputs <= 4)
9524 return lowerV8I16BasicBlendVectorShuffle(DL, V1, V2, Mask, Subtarget, DAG);
9526 // Check whether an interleaving lowering is likely to be more efficient.
9527 // This isn't perfect but it is a strong heuristic that tends to work well on
9528 // the kinds of shuffles that show up in practice.
9530 // FIXME: Handle 1x, 2x, and 4x interleaving.
9531 if (shouldLowerAsInterleaving(Mask)) {
9532 // FIXME: Figure out whether we should pack these into the low or high
9533 // halves.
9535 int EMask[8], OMask[8];
9536 for (int i = 0; i < 4; ++i) {
9537 EMask[i] = Mask[2*i];
9538 OMask[i] = Mask[2*i + 1];
9539 EMask[i + 4] = -1;
9540 OMask[i + 4] = -1;
9541 }
9543 SDValue Evens = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, EMask);
9544 SDValue Odds = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, OMask);
9546 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i16, Evens, Odds);
9549 int LoBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9550 int HiBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9552 for (int i = 0; i < 4; ++i) {
9553 LoBlendMask[i] = Mask[i];
9554 HiBlendMask[i] = Mask[i + 4];
9557 SDValue LoV = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, LoBlendMask);
9558 SDValue HiV = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, HiBlendMask);
9559 LoV = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, LoV);
9560 HiV = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, HiV);
9562 return DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
9563 DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2i64, LoV, HiV));
9566 /// \brief Check whether a compaction lowering can be done by dropping even
9567 /// elements and compute how many times even elements must be dropped.
9569 /// This handles shuffles which take every Nth element where N is a power of
9570 /// two. Example shuffle masks:
9572 /// N = 1: 0, 2, 4, 6, 8, 10, 12, 14, 0, 2, 4, 6, 8, 10, 12, 14
9573 /// N = 1: 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
9574 /// N = 2: 0, 4, 8, 12, 0, 4, 8, 12, 0, 4, 8, 12, 0, 4, 8, 12
9575 /// N = 2: 0, 4, 8, 12, 16, 20, 24, 28, 0, 4, 8, 12, 16, 20, 24, 28
9576 /// N = 3: 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8
9577 /// N = 3: 0, 8, 16, 24, 0, 8, 16, 24, 0, 8, 16, 24, 0, 8, 16, 24
9579 /// Any of these lanes can of course be undef.
9581 /// This routine only supports N <= 3.
9582 /// FIXME: Evaluate whether either AVX or AVX-512 have any opportunities here
9585 /// \returns N above, or the number of times even elements must be dropped if
9586 /// there is such a number. Otherwise returns zero.
9587 static int canLowerByDroppingEvenElements(ArrayRef<int> Mask) {
9588 // Figure out whether we're looping over two inputs or just one.
9589 bool IsSingleInput = isSingleInputShuffleMask(Mask);
9591 // The modulus for the shuffle vector entries is based on whether this is
9592 // a single input or not.
9593 int ShuffleModulus = Mask.size() * (IsSingleInput ? 1 : 2);
9594 assert(isPowerOf2_32((uint32_t)ShuffleModulus) &&
9595 "We should only be called with masks with a power-of-2 size!");
9597 uint64_t ModMask = (uint64_t)ShuffleModulus - 1;
9599 // We track whether the input is viable for all power-of-2 strides 2^1, 2^2,
9600 // and 2^3 simultaneously. This is because we may have ambiguity with
9601 // partially undef inputs.
9602 bool ViableForN[3] = {true, true, true};
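// Illustration with example numbers: a single-input v16i8 mask has
// ShuffleModulus 16, so for N = 1 element i = 9 must equal (9 << 1) & 15 = 2,
// matching the first N = 1 example mask in the comment above.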
9604 for (int i = 0, e = Mask.size(); i < e; ++i) {
9605 // Ignore undef lanes, we'll optimistically collapse them to the pattern we
9606 // find.
9607 if (Mask[i] == -1)
9608 continue;
9610 bool IsAnyViable = false;
9611 for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
9612 if (ViableForN[j]) {
9613 uint64_t N = j + 1;
9615 // The shuffle mask must be equal to (i * 2^N) % M.
9616 if ((uint64_t)Mask[i] == (((uint64_t)i << N) & ModMask))
9617 IsAnyViable = true;
9618 else
9619 ViableForN[j] = false;
9620 }
9621 // Early exit if we exhaust the possible powers of two.
9622 if (!IsAnyViable)
9623 break;
9624 }
9626 for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
9627 if (ViableForN[j])
9628 return j + 1;
9630 // Return 0 as there is no viable power of two.
9631 return 0;
9632 }
9634 /// \brief Generic lowering of v16i8 shuffles.
9636 /// This is a hybrid strategy to lower v16i8 vectors. It first attempts to
9637 /// detect any complexity reducing interleaving. If that doesn't help, it uses
9638 /// UNPCK to spread the i8 elements across two i16-element vectors, and uses
9639 /// the existing lowering for v8i16 blends on each half, finally PACK-ing them
9640 /// back together.
9641 static SDValue lowerV16I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
9642 const X86Subtarget *Subtarget,
9643 SelectionDAG &DAG) {
9645 assert(Op.getSimpleValueType() == MVT::v16i8 && "Bad shuffle type!");
9646 assert(V1.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
9647 assert(V2.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
9648 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
9649 ArrayRef<int> OrigMask = SVOp->getMask();
9650 assert(OrigMask.size() == 16 && "Unexpected mask size for v16 shuffle!");
9652 // Try to use bit shift instructions.
9653 if (SDValue Shift = lowerVectorShuffleAsBitShift(
9654 DL, MVT::v16i8, V1, V2, OrigMask, DAG))
9657 // Try to use byte shift instructions.
9658 if (SDValue Shift = lowerVectorShuffleAsByteShift(
9659 DL, MVT::v16i8, V1, V2, OrigMask, DAG))
9662 // Try to use byte rotation instructions.
9663 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
9664 DL, MVT::v16i8, V1, V2, OrigMask, Subtarget, DAG))
9667 // Try to use a zext lowering.
9668 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
9669 DL, MVT::v16i8, V1, V2, OrigMask, Subtarget, DAG))
9672 int MaskStorage[16] = {
9673 OrigMask[0], OrigMask[1], OrigMask[2], OrigMask[3],
9674 OrigMask[4], OrigMask[5], OrigMask[6], OrigMask[7],
9675 OrigMask[8], OrigMask[9], OrigMask[10], OrigMask[11],
9676 OrigMask[12], OrigMask[13], OrigMask[14], OrigMask[15]};
9677 MutableArrayRef<int> Mask(MaskStorage);
9678 MutableArrayRef<int> LoMask = Mask.slice(0, 8);
9679 MutableArrayRef<int> HiMask = Mask.slice(8, 8);
9681 int NumV2Elements =
9682 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 16; });
9684 // For single-input shuffles, there are some nicer lowering tricks we can use.
9685 if (NumV2Elements == 0) {
9686 // Check for being able to broadcast a single element.
9687 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v16i8, DL, V1,
9688 Mask, Subtarget, DAG))
9691 // Check whether we can widen this to an i16 shuffle by duplicating bytes.
9692 // Notably, this handles splat and partial-splat shuffles more efficiently.
9693 // However, it only makes sense if the pre-duplication shuffle simplifies
9694 // things significantly. Currently, this means we need to be able to
9695 // express the pre-duplication shuffle as an i16 shuffle.
9697 // FIXME: We should check for other patterns which can be widened into an
9698 // i16 shuffle as well.
9699 auto canWidenViaDuplication = [](ArrayRef<int> Mask) {
9700 for (int i = 0; i < 16; i += 2)
9701 if (Mask[i] != -1 && Mask[i + 1] != -1 && Mask[i] != Mask[i + 1])
9702 return false;
9704 return true;
9705 };
9706 auto tryToWidenViaDuplication = [&]() -> SDValue {
9707 if (!canWidenViaDuplication(Mask))
9708 return SDValue();
9709 SmallVector<int, 4> LoInputs;
9710 std::copy_if(Mask.begin(), Mask.end(), std::back_inserter(LoInputs),
9711 [](int M) { return M >= 0 && M < 8; });
9712 std::sort(LoInputs.begin(), LoInputs.end());
9713 LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()),
9715 SmallVector<int, 4> HiInputs;
9716 std::copy_if(Mask.begin(), Mask.end(), std::back_inserter(HiInputs),
9717 [](int M) { return M >= 8; });
9718 std::sort(HiInputs.begin(), HiInputs.end());
9719 HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()),
9722 bool TargetLo = LoInputs.size() >= HiInputs.size();
9723 ArrayRef<int> InPlaceInputs = TargetLo ? LoInputs : HiInputs;
9724 ArrayRef<int> MovingInputs = TargetLo ? HiInputs : LoInputs;
9726 int PreDupI16Shuffle[] = {-1, -1, -1, -1, -1, -1, -1, -1};
9727 SmallDenseMap<int, int, 8> LaneMap;
9728 for (int I : InPlaceInputs) {
9729 PreDupI16Shuffle[I/2] = I/2;
9730 LaneMap[I] = I;
9731 }
9732 int j = TargetLo ? 0 : 4, je = j + 4;
9733 for (int i = 0, ie = MovingInputs.size(); i < ie; ++i) {
9734 // Check if j is already a shuffle of this input. This happens when
9735 // there are two adjacent bytes after we move the low one.
9736 if (PreDupI16Shuffle[j] != MovingInputs[i] / 2) {
9737 // If we haven't yet mapped the input, search for a slot into which
9738 // we can map it.
9739 while (j < je && PreDupI16Shuffle[j] != -1)
9740 ++j;
9742 if (j == je)
9743 // We can't place the inputs into a single half with a simple i16 shuffle, so bail.
9744 return SDValue();
9746 // Map this input with the i16 shuffle.
9747 PreDupI16Shuffle[j] = MovingInputs[i] / 2;
9750 // Update the lane map based on the mapping we ended up with.
9751 LaneMap[MovingInputs[i]] = 2 * j + MovingInputs[i] % 2;
9752 }
9753 V1 = DAG.getNode(
9754 ISD::BITCAST, DL, MVT::v16i8,
9755 DAG.getVectorShuffle(MVT::v8i16, DL,
9756 DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1),
9757 DAG.getUNDEF(MVT::v8i16), PreDupI16Shuffle));
9759 // Unpack the bytes to form the i16s that will be shuffled into place.
9760 V1 = DAG.getNode(TargetLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
9761 MVT::v16i8, V1, V1);
9763 int PostDupI16Shuffle[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9764 for (int i = 0; i < 16; ++i)
9765 if (Mask[i] != -1) {
9766 int MappedMask = LaneMap[Mask[i]] - (TargetLo ? 0 : 8);
9767 assert(MappedMask < 8 && "Invalid v8 shuffle mask!");
9768 if (PostDupI16Shuffle[i / 2] == -1)
9769 PostDupI16Shuffle[i / 2] = MappedMask;
9770 else
9771 assert(PostDupI16Shuffle[i / 2] == MappedMask &&
9772 "Conflicting entries in the original shuffle!");
9773 }
9774 return DAG.getNode(
9775 ISD::BITCAST, DL, MVT::v16i8,
9776 DAG.getVectorShuffle(MVT::v8i16, DL,
9777 DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1),
9778 DAG.getUNDEF(MVT::v8i16), PostDupI16Shuffle));
9780 if (SDValue V = tryToWidenViaDuplication())
9784 // Check whether an interleaving lowering is likely to be more efficient.
9785 // This isn't perfect but it is a strong heuristic that tends to work well on
9786 // the kinds of shuffles that show up in practice.
9788 // FIXME: We need to handle other interleaving widths (i16, i32, ...).
9789 if (shouldLowerAsInterleaving(Mask)) {
9790 int NumLoHalf = std::count_if(Mask.begin(), Mask.end(), [](int M) {
9791 return (M >= 0 && M < 8) || (M >= 16 && M < 24);
9792 });
9793 int NumHiHalf = std::count_if(Mask.begin(), Mask.end(), [](int M) {
9794 return (M >= 8 && M < 16) || M >= 24;
9795 });
9796 int EMask[16] = {-1, -1, -1, -1, -1, -1, -1, -1,
9797 -1, -1, -1, -1, -1, -1, -1, -1};
9798 int OMask[16] = {-1, -1, -1, -1, -1, -1, -1, -1,
9799 -1, -1, -1, -1, -1, -1, -1, -1};
9800 bool UnpackLo = NumLoHalf >= NumHiHalf;
9801 MutableArrayRef<int> TargetEMask(UnpackLo ? EMask : EMask + 8, 8);
9802 MutableArrayRef<int> TargetOMask(UnpackLo ? OMask : OMask + 8, 8);
9803 for (int i = 0; i < 8; ++i) {
9804 TargetEMask[i] = Mask[2 * i];
9805 TargetOMask[i] = Mask[2 * i + 1];
9808 SDValue Evens = DAG.getVectorShuffle(MVT::v16i8, DL, V1, V2, EMask);
9809 SDValue Odds = DAG.getVectorShuffle(MVT::v16i8, DL, V1, V2, OMask);
9811 return DAG.getNode(UnpackLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
9812 MVT::v16i8, Evens, Odds);
9815 // Check for SSSE3 which lets us lower all v16i8 shuffles much more directly
9816 // with PSHUFB. It is important to do this before we attempt to generate any
9817 // blends but after all of the single-input lowerings. If the single input
9818 // lowerings can find an instruction sequence that is faster than a PSHUFB, we
9819 // want to preserve that and we can DAG combine any longer sequences into
9820 // a PSHUFB in the end. But once we start blending from multiple inputs,
9821 // the complexity of DAG combining bad patterns back into PSHUFB is too high,
9822 // and there are *very* few patterns that would actually be faster than the
9823 // PSHUFB approach because of its ability to zero lanes.
9825 // FIXME: The only exceptions to the above are blends which are exact
9826 // interleavings with direct instructions supporting them. We currently don't
9827 // handle those well here.
9828 if (Subtarget->hasSSSE3()) {
9829 SDValue V1Mask[16];
9830 SDValue V2Mask[16];
9831 bool V1InUse = false;
9832 bool V2InUse = false;
9833 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
9835 for (int i = 0; i < 16; ++i) {
9836 if (Mask[i] == -1) {
9837 V1Mask[i] = V2Mask[i] = DAG.getUNDEF(MVT::i8);
9838 } else {
9839 const int ZeroMask = 0x80;
9840 int V1Idx = (Mask[i] < 16 ? Mask[i] : ZeroMask);
9841 int V2Idx = (Mask[i] < 16 ? ZeroMask : Mask[i] - 16);
9842 if (Zeroable[i])
9843 V1Idx = V2Idx = ZeroMask;
9844 V1Mask[i] = DAG.getConstant(V1Idx, MVT::i8);
9845 V2Mask[i] = DAG.getConstant(V2Idx, MVT::i8);
9846 V1InUse |= (ZeroMask != V1Idx);
9847 V2InUse |= (ZeroMask != V2Idx);
9848 }
9849 }
9851 if (V1InUse)
9852 V1 = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, V1,
9853 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, V1Mask));
9854 if (V2InUse)
9855 V2 = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, V2,
9856 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, V2Mask));
9858 // If we need shuffled inputs from both, blend the two.
9859 if (V1InUse && V2InUse)
9860 return DAG.getNode(ISD::OR, DL, MVT::v16i8, V1, V2);
9861 if (V1InUse)
9862 return V1; // Single inputs are easy.
9863 if (V2InUse)
9864 return V2; // Single inputs are easy.
9865 // Shuffling to a zeroable vector.
9866 return getZeroVector(MVT::v16i8, Subtarget, DAG, DL);
9869 // There are special ways we can lower some single-element blends.
9870 if (NumV2Elements == 1)
9871 if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v16i8, DL, V1, V2,
9872 Mask, Subtarget, DAG))
9875 // Check whether a compaction lowering can be done. This handles shuffles
9876 // which take every Nth element for some even N. See the helper function for
9877 // details.
9879 // We special case these as they can be particularly efficiently handled with
9880 // the PACKUSWB instruction on x86 and they show up in common patterns of
9881 // rearranging bytes to truncate wide elements.
9882 if (int NumEvenDrops = canLowerByDroppingEvenElements(Mask)) {
9883 // NumEvenDrops is the power of two stride of the elements. Another way of
9884 // thinking about it is that we need to drop the even elements this many
9885 // times to get the original input.
9886 bool IsSingleInput = isSingleInputShuffleMask(Mask);
9888 // First we need to zero all the dropped bytes.
9889 assert(NumEvenDrops <= 3 &&
9890 "No support for dropping even elements more than 3 times.");
9891 // We use the mask type to pick which bytes are preserved based on how many
9892 // elements are dropped.
9893 MVT MaskVTs[] = { MVT::v8i16, MVT::v4i32, MVT::v2i64 };
9894 SDValue ByteClearMask =
9895 DAG.getNode(ISD::BITCAST, DL, MVT::v16i8,
9896 DAG.getConstant(0xFF, MaskVTs[NumEvenDrops - 1]));
9897 V1 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V1, ByteClearMask);
9899 V2 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V2, ByteClearMask);
9901 // Now pack things back together.
9902 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1);
9903 V2 = IsSingleInput ? V1 : DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V2);
9904 SDValue Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, V1, V2);
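// Worked example (values for illustration): with NumEvenDrops == 1 the mask
// type is v8i16, so ByteClearMask keeps only the low byte of each i16; PACKUS
// then yields bytes <0, 2, ..., 14> of V1 followed by those of V2, which is
// exactly the N = 1 two-input pattern accepted above.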
9905 for (int i = 1; i < NumEvenDrops; ++i) {
9906 Result = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, Result);
9907 Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, Result, Result);
9908 }
9910 return Result;
9911 }
9913 int V1LoBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9914 int V1HiBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9915 int V2LoBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9916 int V2HiBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9918 auto buildBlendMasks = [](MutableArrayRef<int> HalfMask,
9919 MutableArrayRef<int> V1HalfBlendMask,
9920 MutableArrayRef<int> V2HalfBlendMask) {
9921 for (int i = 0; i < 8; ++i)
9922 if (HalfMask[i] >= 0 && HalfMask[i] < 16) {
9923 V1HalfBlendMask[i] = HalfMask[i];
9924 HalfMask[i] = i;
9925 } else if (HalfMask[i] >= 16) {
9926 V2HalfBlendMask[i] = HalfMask[i] - 16;
9927 HalfMask[i] = i + 8;
9930 buildBlendMasks(LoMask, V1LoBlendMask, V2LoBlendMask);
9931 buildBlendMasks(HiMask, V1HiBlendMask, V2HiBlendMask);
9933 SDValue Zero = getZeroVector(MVT::v8i16, Subtarget, DAG, DL);
9935 auto buildLoAndHiV8s = [&](SDValue V, MutableArrayRef<int> LoBlendMask,
9936 MutableArrayRef<int> HiBlendMask) {
9937 SDValue V1, V2;
9938 // Check if any of the odd lanes in the v16i8 are used. If not, we can mask
9939 // them out and avoid using UNPCK{L,H} to extract the elements of V as
9940 // v8i16s.
9941 if (std::none_of(LoBlendMask.begin(), LoBlendMask.end(),
9942 [](int M) { return M >= 0 && M % 2 == 1; }) &&
9943 std::none_of(HiBlendMask.begin(), HiBlendMask.end(),
9944 [](int M) { return M >= 0 && M % 2 == 1; })) {
9945 // Use a mask to drop the high bytes.
9946 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V);
9947 V1 = DAG.getNode(ISD::AND, DL, MVT::v8i16, V1,
9948 DAG.getConstant(0x00FF, MVT::v8i16));
9950 // This will be a single vector shuffle instead of a blend so nuke V2.
9951 V2 = DAG.getUNDEF(MVT::v8i16);
9953 // Squash the masks to point directly into V1.
9954 for (int &M : LoBlendMask)
9955 if (M >= 0)
9956 M /= 2;
9957 for (int &M : HiBlendMask)
9958 if (M >= 0)
9959 M /= 2;
9960 } else {
9961 // Otherwise just unpack the low half of V into V1 and the high half into
9962 // V2 so that we can blend them as i16s.
9963 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
9964 DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i8, V, Zero));
9965 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
9966 DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i8, V, Zero));
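// Note: unpacking against the zero vector places a zero byte above every byte
// of V, so the low (resp. high) eight bytes of V become eight zero-extended
// i16 lanes that the v8i16 blends below can address directly.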
9969 SDValue BlendedLo = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, LoBlendMask);
9970 SDValue BlendedHi = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, HiBlendMask);
9971 return std::make_pair(BlendedLo, BlendedHi);
9973 SDValue V1Lo, V1Hi, V2Lo, V2Hi;
9974 std::tie(V1Lo, V1Hi) = buildLoAndHiV8s(V1, V1LoBlendMask, V1HiBlendMask);
9975 std::tie(V2Lo, V2Hi) = buildLoAndHiV8s(V2, V2LoBlendMask, V2HiBlendMask);
9977 SDValue LoV = DAG.getVectorShuffle(MVT::v8i16, DL, V1Lo, V2Lo, LoMask);
9978 SDValue HiV = DAG.getVectorShuffle(MVT::v8i16, DL, V1Hi, V2Hi, HiMask);
9980 return DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, LoV, HiV);
9983 /// \brief Dispatching routine to lower various 128-bit x86 vector shuffles.
9985 /// This routine breaks down the specific type of 128-bit shuffle and
9986 /// dispatches to the lowering routines accordingly.
9987 static SDValue lower128BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
9988 MVT VT, const X86Subtarget *Subtarget,
9989 SelectionDAG &DAG) {
9990 switch (VT.SimpleTy) {
9991 case MVT::v2i64:
9992 return lowerV2I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
9993 case MVT::v2f64:
9994 return lowerV2F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
9995 case MVT::v4i32:
9996 return lowerV4I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
9997 case MVT::v4f32:
9998 return lowerV4F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
9999 case MVT::v8i16:
10000 return lowerV8I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
10001 case MVT::v16i8:
10002 return lowerV16I8VectorShuffle(Op, V1, V2, Subtarget, DAG);
10004 default:
10005 llvm_unreachable("Unimplemented!");
10009 /// \brief Helper function to test whether a shuffle mask could be
10010 /// simplified by widening the elements being shuffled.
10012 /// Appends the mask for wider elements in WidenedMask if valid. Otherwise
10013 /// leaves it in an unspecified state.
10015 /// NOTE: This must handle normal vector shuffle masks and *target* vector
10016 /// shuffle masks. The latter have the special property of a '-2' representing
10017 /// a zero-ed lane of a vector.
10018 static bool canWidenShuffleElements(ArrayRef<int> Mask,
10019 SmallVectorImpl<int> &WidenedMask) {
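// For illustration (example masks, not from a particular caller): the v4i32
// mask <2, 3, -1, -1> widens to the v2i64 mask <1, -1>, while <1, 2, 3, 0>
// cannot be widened because the pair (1, 2) does not start at an even element.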
10020 for (int i = 0, Size = Mask.size(); i < Size; i += 2) {
10021 // If both elements are undef, its trivial.
10022 if (Mask[i] == SM_SentinelUndef && Mask[i + 1] == SM_SentinelUndef) {
10023 WidenedMask.push_back(SM_SentinelUndef);
10027 // Check for an undef mask and a mask value properly aligned to fit with
10028 // a pair of values. If we find such a case, use the non-undef mask's value.
10029 if (Mask[i] == SM_SentinelUndef && Mask[i + 1] >= 0 && Mask[i + 1] % 2 == 1) {
10030 WidenedMask.push_back(Mask[i + 1] / 2);
10033 if (Mask[i + 1] == SM_SentinelUndef && Mask[i] >= 0 && Mask[i] % 2 == 0) {
10034 WidenedMask.push_back(Mask[i] / 2);
10038 // When zeroing, we need to spread the zeroing across both lanes to widen.
10039 if (Mask[i] == SM_SentinelZero || Mask[i + 1] == SM_SentinelZero) {
10040 if ((Mask[i] == SM_SentinelZero || Mask[i] == SM_SentinelUndef) &&
10041 (Mask[i + 1] == SM_SentinelZero || Mask[i + 1] == SM_SentinelUndef)) {
10042 WidenedMask.push_back(SM_SentinelZero);
10048 // Finally check if the two mask values are adjacent and aligned with
10050 if (Mask[i] != SM_SentinelUndef && Mask[i] % 2 == 0 && Mask[i] + 1 == Mask[i + 1]) {
10051 WidenedMask.push_back(Mask[i] / 2);
10052 continue;
10053 }
10055 // Otherwise we can't safely widen the elements used in this shuffle.
10056 return false;
10057 }
10058 assert(WidenedMask.size() == Mask.size() / 2 &&
10059 "Incorrect size of mask after widening the elements!");
10061 return true;
10062 }
10064 /// \brief Generic routine to split a vector shuffle into half-sized shuffles.
10066 /// This routine just extracts two subvectors, shuffles them independently, and
10067 /// then concatenates them back together. This should work effectively with all
10068 /// AVX vector shuffle types.
10069 static SDValue splitAndLowerVectorShuffle(SDLoc DL, MVT VT, SDValue V1,
10070 SDValue V2, ArrayRef<int> Mask,
10071 SelectionDAG &DAG) {
10072 assert(VT.getSizeInBits() >= 256 &&
10073 "Only for 256-bit or wider vector shuffles!");
10074 assert(V1.getSimpleValueType() == VT && "Bad operand type!");
10075 assert(V2.getSimpleValueType() == VT && "Bad operand type!");
10077 ArrayRef<int> LoMask = Mask.slice(0, Mask.size() / 2);
10078 ArrayRef<int> HiMask = Mask.slice(Mask.size() / 2);
10080 int NumElements = VT.getVectorNumElements();
10081 int SplitNumElements = NumElements / 2;
10082 MVT ScalarVT = VT.getScalarType();
10083 MVT SplitVT = MVT::getVectorVT(ScalarVT, NumElements / 2);
10085 SDValue LoV1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, V1,
10086 DAG.getIntPtrConstant(0));
10087 SDValue HiV1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, V1,
10088 DAG.getIntPtrConstant(SplitNumElements));
10089 SDValue LoV2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, V2,
10090 DAG.getIntPtrConstant(0));
10091 SDValue HiV2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, V2,
10092 DAG.getIntPtrConstant(SplitNumElements));
10094 // Now create two 4-way blends of these half-width vectors.
10095 auto HalfBlend = [&](ArrayRef<int> HalfMask) {
10096 bool UseLoV1 = false, UseHiV1 = false, UseLoV2 = false, UseHiV2 = false;
10097 SmallVector<int, 32> V1BlendMask, V2BlendMask, BlendMask;
10098 for (int i = 0; i < SplitNumElements; ++i) {
10099 int M = HalfMask[i];
10100 if (M >= NumElements) {
10101 if (M >= NumElements + SplitNumElements)
10102 UseHiV2 = true;
10103 else
10104 UseLoV2 = true;
10105 V2BlendMask.push_back(M - NumElements);
10106 V1BlendMask.push_back(-1);
10107 BlendMask.push_back(SplitNumElements + i);
10108 } else if (M >= 0) {
10109 if (M >= SplitNumElements)
10110 UseHiV1 = true;
10111 else
10112 UseLoV1 = true;
10113 V2BlendMask.push_back(-1);
10114 V1BlendMask.push_back(M);
10115 BlendMask.push_back(i);
10116 } else {
10117 V2BlendMask.push_back(-1);
10118 V1BlendMask.push_back(-1);
10119 BlendMask.push_back(-1);
10123 // Because the lowering happens after all combining takes place, we need to
10124 // manually combine these blend masks as much as possible so that we create
10125 // a minimal number of high-level vector shuffle nodes.
10127 // First try just blending the halves of V1 or V2.
10128 if (!UseLoV1 && !UseHiV1 && !UseLoV2 && !UseHiV2)
10129 return DAG.getUNDEF(SplitVT);
10130 if (!UseLoV2 && !UseHiV2)
10131 return DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
10132 if (!UseLoV1 && !UseHiV1)
10133 return DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
10135 SDValue V1Blend, V2Blend;
10136 if (UseLoV1 && UseHiV1) {
10137 V1Blend =
10138 DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
10139 } else {
10140 // We only use half of V1 so map the usage down into the final blend mask.
10141 V1Blend = UseLoV1 ? LoV1 : HiV1;
10142 for (int i = 0; i < SplitNumElements; ++i)
10143 if (BlendMask[i] >= 0 && BlendMask[i] < SplitNumElements)
10144 BlendMask[i] = V1BlendMask[i] - (UseLoV1 ? 0 : SplitNumElements);
10146 if (UseLoV2 && UseHiV2) {
10147 V2Blend =
10148 DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
10149 } else {
10150 // We only use half of V2 so map the usage down into the final blend mask.
10151 V2Blend = UseLoV2 ? LoV2 : HiV2;
10152 for (int i = 0; i < SplitNumElements; ++i)
10153 if (BlendMask[i] >= SplitNumElements)
10154 BlendMask[i] = V2BlendMask[i] + (UseLoV2 ? SplitNumElements : 0);
10156 return DAG.getVectorShuffle(SplitVT, DL, V1Blend, V2Blend, BlendMask);
10158 SDValue Lo = HalfBlend(LoMask);
10159 SDValue Hi = HalfBlend(HiMask);
10160 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
10163 /// \brief Either split a vector in halves or decompose the shuffles and the
10164 /// blends.
10166 /// This is provided as a good fallback for many lowerings of non-single-input
10167 /// shuffles with more than one 128-bit lane. In those cases, we want to select
10168 /// between splitting the shuffle into 128-bit components and stitching those
10169 /// back together vs. extracting the single-input shuffles and blending those
10170 /// results.
10171 static SDValue lowerVectorShuffleAsSplitOrBlend(SDLoc DL, MVT VT, SDValue V1,
10172 SDValue V2, ArrayRef<int> Mask,
10173 SelectionDAG &DAG) {
10174 assert(!isSingleInputShuffleMask(Mask) && "This routine must not be used to "
10175 "lower single-input shuffles as it "
10176 "could then recurse on itself.");
10177 int Size = Mask.size();
10179 // If this can be modeled as a broadcast of two elements followed by a blend,
10180 // prefer that lowering. This is especially important because broadcasts can
10181 // often fold with memory operands.
10182 auto DoBothBroadcast = [&] {
10183 int V1BroadcastIdx = -1, V2BroadcastIdx = -1;
10184 for (int M : Mask)
10185 if (M >= Size) {
10186 if (V2BroadcastIdx == -1)
10187 V2BroadcastIdx = M - Size;
10188 else if (M - Size != V2BroadcastIdx)
10189 return false;
10190 } else if (M >= 0) {
10191 if (V1BroadcastIdx == -1)
10192 V1BroadcastIdx = M;
10193 else if (M != V1BroadcastIdx)
10194 return false;
10195 }
10196 return true;
10197 };
10198 if (DoBothBroadcast())
10199 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask,
10202 // If the inputs all stem from a single 128-bit lane of each input, then we
10203 // split them rather than blending because the split will decompose to
10204 // unusually few instructions.
10205 int LaneCount = VT.getSizeInBits() / 128;
10206 int LaneSize = Size / LaneCount;
10207 SmallBitVector LaneInputs[2];
10208 LaneInputs[0].resize(LaneCount, false);
10209 LaneInputs[1].resize(LaneCount, false);
10210 for (int i = 0; i < Size; ++i)
10211 if (Mask[i] >= 0)
10212 LaneInputs[Mask[i] / Size][(Mask[i] % Size) / LaneSize] = true;
10213 if (LaneInputs[0].count() <= 1 && LaneInputs[1].count() <= 1)
10214 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
10216 // Otherwise, just fall back to decomposed shuffles and a blend. This requires
10217 // that the decomposed single-input shuffles don't end up here.
10218 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask, DAG);
10221 /// \brief Lower a vector shuffle crossing multiple 128-bit lanes as
10222 /// a permutation and blend of those lanes.
10224 /// This essentially blends the out-of-lane inputs to each lane into the lane
10225 /// from a permuted copy of the vector. This lowering strategy results in four
10226 /// instructions in the worst case for a single-input cross lane shuffle which
10227 /// is lower than any other fully general cross-lane shuffle strategy I'm aware
10228 /// of. Special cases for each particular shuffle pattern should be handled
10229 /// prior to trying this lowering.
10230 static SDValue lowerVectorShuffleAsLanePermuteAndBlend(SDLoc DL, MVT VT,
10231 SDValue V1, SDValue V2,
10232 ArrayRef<int> Mask,
10233 SelectionDAG &DAG) {
10234 // FIXME: This should probably be generalized for 512-bit vectors as well.
10235 assert(VT.getSizeInBits() == 256 && "Only for 256-bit vector shuffles!");
10236 int LaneSize = Mask.size() / 2;
10238 // If there are only inputs from one 128-bit lane, splitting will in fact be
10239 // less expensive. The flags track whether the given lane contains an element
10240 // that crosses to another lane.
10241 bool LaneCrossing[2] = {false, false};
10242 for (int i = 0, Size = Mask.size(); i < Size; ++i)
10243 if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
10244 LaneCrossing[(Mask[i] % Size) / LaneSize] = true;
10245 if (!LaneCrossing[0] || !LaneCrossing[1])
10246 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
10248 if (isSingleInputShuffleMask(Mask)) {
10249 SmallVector<int, 32> FlippedBlendMask;
10250 for (int i = 0, Size = Mask.size(); i < Size; ++i)
10251 FlippedBlendMask.push_back(
10252 Mask[i] < 0 ? -1 : (((Mask[i] % Size) / LaneSize == i / LaneSize)
10253 ? Mask[i]
10254 : Mask[i] % LaneSize +
10255 (i / LaneSize) * LaneSize + Size));
10257 // Flip the vector, and blend the results which should now be in-lane. The
10258 // VPERM2X128 mask uses the low 2 bits for the low source and bits 4 and
10259 // 5 for the high source. The value 3 selects the high half of source 2 and
10260 // the value 2 selects the low half of source 2. We only use source 2 to
10261 // allow folding it into a memory operand.
10262 unsigned PERMMask = 3 | 2 << 4;
10263 SDValue Flipped = DAG.getNode(X86ISD::VPERM2X128, DL, VT, DAG.getUNDEF(VT),
10264 V1, DAG.getConstant(PERMMask, MVT::i8));
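// With this immediate (0x23), the low 128 bits of Flipped take source 2's high
// half and the high 128 bits take source 2's low half, so Flipped is simply V1
// with its two 128-bit halves swapped.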
10265 return DAG.getVectorShuffle(VT, DL, V1, Flipped, FlippedBlendMask);
10268 // This now reduces to two single-input shuffles of V1 and V2 which at worst
10269 // will be handled by the above logic and a blend of the results, much like
10270 // other patterns in AVX.
10271 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask, DAG);
10274 /// \brief Handle lowering 2-lane 128-bit shuffles.
10275 static SDValue lowerV2X128VectorShuffle(SDLoc DL, MVT VT, SDValue V1,
10276 SDValue V2, ArrayRef<int> Mask,
10277 const X86Subtarget *Subtarget,
10278 SelectionDAG &DAG) {
10279 // Blends are faster and handle all the non-lane-crossing cases.
10280 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, VT, V1, V2, Mask,
10284 MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(),
10285 VT.getVectorNumElements() / 2);
10286 // Check for patterns which can be matched with a single insert of a 128-bit
10287 // subvector.
10288 if (isShuffleEquivalent(Mask, 0, 1, 0, 1) ||
10289 isShuffleEquivalent(Mask, 0, 1, 4, 5)) {
10290 SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
10291 DAG.getIntPtrConstant(0));
10292 SDValue HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT,
10293 Mask[2] < 4 ? V1 : V2, DAG.getIntPtrConstant(0));
10294 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LoV, HiV);
10296 if (isShuffleEquivalent(Mask, 0, 1, 6, 7)) {
10297 SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
10298 DAG.getIntPtrConstant(0));
10299 SDValue HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V2,
10300 DAG.getIntPtrConstant(2));
10301 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LoV, HiV);
10304 // Otherwise form a 128-bit permutation.
10305 // FIXME: Detect zero-vector inputs and use the VPERM2X128 to zero that half.
10306 unsigned PermMask = Mask[0] / 2 | (Mask[2] / 2) << 4;
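// Example for illustration: the mask <2, 3, 6, 7> (V1's high half, then V2's
// high half) gives PermMask = 2/2 | (6/2) << 4 = 0x31, selecting V1's high 128
// bits for the low half of the result and V2's high 128 bits for the high half.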
10307 return DAG.getNode(X86ISD::VPERM2X128, DL, VT, V1, V2,
10308 DAG.getConstant(PermMask, MVT::i8));
10311 /// \brief Lower a vector shuffle by first fixing the 128-bit lanes and then
10312 /// shuffling each lane.
10314 /// This will only succeed when the result of fixing the 128-bit lanes results
10315 /// in a single-input non-lane-crossing shuffle with a repeating shuffle mask in
10316 /// each 128-bit lane. This handles many cases where we can quickly blend away
10317 /// the lane crosses early and then use simpler shuffles within each lane.
10319 /// FIXME: It might be worthwhile at some point to support this without
10320 /// requiring the 128-bit lane-relative shuffles to be repeating, but currently
10321 /// in x86 only floating point has interesting non-repeating shuffles, and even
10322 /// those are still *marginally* more expensive.
10323 static SDValue lowerVectorShuffleByMerging128BitLanes(
10324 SDLoc DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
10325 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
10326 assert(!isSingleInputShuffleMask(Mask) &&
10327 "This is only useful with multiple inputs.");
10329 int Size = Mask.size();
10330 int LaneSize = 128 / VT.getScalarSizeInBits();
10331 int NumLanes = Size / LaneSize;
10332 assert(NumLanes > 1 && "Only handles 256-bit and wider shuffles.");
10334 // See if we can build a hypothetical 128-bit lane-fixing shuffle mask. Also
10335 // check whether the in-128-bit lane shuffles share a repeating pattern.
10336 SmallVector<int, 4> Lanes;
10337 Lanes.resize(NumLanes, -1);
10338 SmallVector<int, 4> InLaneMask;
10339 InLaneMask.resize(LaneSize, -1);
10340 for (int i = 0; i < Size; ++i) {
10341 if (Mask[i] < 0)
10342 continue;
10344 int j = i / LaneSize;
10346 if (Lanes[j] < 0) {
10347 // First entry we've seen for this lane.
10348 Lanes[j] = Mask[i] / LaneSize;
10349 } else if (Lanes[j] != Mask[i] / LaneSize) {
10350 // This doesn't match the lane selected previously!
10354 // Check that within each lane we have a consistent shuffle mask.
10355 int k = i % LaneSize;
10356 if (InLaneMask[k] < 0) {
10357 InLaneMask[k] = Mask[i] % LaneSize;
10358 } else if (InLaneMask[k] != Mask[i] % LaneSize) {
10359 // This doesn't fit a repeating in-lane mask.
10364 // First shuffle the lanes into place.
10365 MVT LaneVT = MVT::getVectorVT(VT.isFloatingPoint() ? MVT::f64 : MVT::i64,
10366 VT.getSizeInBits() / 64);
10367 SmallVector<int, 8> LaneMask;
10368 LaneMask.resize(NumLanes * 2, -1);
10369 for (int i = 0; i < NumLanes; ++i)
10370 if (Lanes[i] >= 0) {
10371 LaneMask[2 * i + 0] = 2*Lanes[i] + 0;
10372 LaneMask[2 * i + 1] = 2*Lanes[i] + 1;
10375 V1 = DAG.getNode(ISD::BITCAST, DL, LaneVT, V1);
10376 V2 = DAG.getNode(ISD::BITCAST, DL, LaneVT, V2);
10377 SDValue LaneShuffle = DAG.getVectorShuffle(LaneVT, DL, V1, V2, LaneMask);
10379 // Cast it back to the type we actually want.
10380 LaneShuffle = DAG.getNode(ISD::BITCAST, DL, VT, LaneShuffle);
10382 // Now do a simple shuffle that isn't lane crossing.
10383 SmallVector<int, 8> NewMask;
10384 NewMask.resize(Size, -1);
10385 for (int i = 0; i < Size; ++i)
10386 if (Mask[i] >= 0)
10387 NewMask[i] = (i / LaneSize) * LaneSize + Mask[i] % LaneSize;
10388 assert(!is128BitLaneCrossingShuffleMask(VT, NewMask) &&
10389 "Must not introduce lane crosses at this point!");
10391 return DAG.getVectorShuffle(VT, DL, LaneShuffle, DAG.getUNDEF(VT), NewMask);
10394 /// \brief Test whether the specified input (0 or 1) is in-place blended by the
10395 /// given mask.
10397 /// This returns true if the elements from a particular input are already in the
10398 /// slot required by the given mask and require no permutation.
10399 static bool isShuffleMaskInputInPlace(int Input, ArrayRef<int> Mask) {
10400 assert((Input == 0 || Input == 1) && "Only two inputs to shuffles.");
10401 int Size = Mask.size();
10402 for (int i = 0; i < Size; ++i)
10403 if (Mask[i] >= 0 && Mask[i] / Size == Input && Mask[i] % Size != i)
10404 return false;
10406 return true;
10407 }
10409 /// \brief Handle lowering of 4-lane 64-bit floating point shuffles.
10411 /// Also ends up handling lowering of 4-lane 64-bit integer shuffles when AVX2
10412 /// isn't available.
10413 static SDValue lowerV4F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10414 const X86Subtarget *Subtarget,
10415 SelectionDAG &DAG) {
10417 assert(V1.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
10418 assert(V2.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
10419 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10420 ArrayRef<int> Mask = SVOp->getMask();
10421 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
10423 SmallVector<int, 4> WidenedMask;
10424 if (canWidenShuffleElements(Mask, WidenedMask))
10425 return lowerV2X128VectorShuffle(DL, MVT::v4f64, V1, V2, Mask, Subtarget,
10428 if (isSingleInputShuffleMask(Mask)) {
10429 // Check for being able to broadcast a single element.
10430 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4f64, DL, V1,
10431 Mask, Subtarget, DAG))
10434 // Use low duplicate instructions for masks that match their pattern.
10435 if (isShuffleEquivalent(Mask, 0, 0, 2, 2))
10436 return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v4f64, V1);
10438 if (!is128BitLaneCrossingShuffleMask(MVT::v4f64, Mask)) {
10439 // Non-half-crossing single input shuffles can be lowered with an
10440 // interleaved permutation.
10441 unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
10442 ((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3);
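// Example with illustrative values: the mask <0, 0, 3, 2> produces the
// immediate 0b0100 (only bit 2 set), asking VPERMILPD for the high element in
// result lane 2 and the low element of the respective 128-bit half elsewhere.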
10443 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f64, V1,
10444 DAG.getConstant(VPERMILPMask, MVT::i8));
10447 // With AVX2 we have direct support for this permutation.
10448 if (Subtarget->hasAVX2())
10449 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4f64, V1,
10450 getV4X86ShuffleImm8ForMask(Mask, DAG));
10452 // Otherwise, fall back.
10453 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v4f64, V1, V2, Mask,
10457 // X86 has dedicated unpack instructions that can handle specific blend
10458 // operations: UNPCKH and UNPCKL.
10459 if (isShuffleEquivalent(Mask, 0, 4, 2, 6))
10460 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4f64, V1, V2);
10461 if (isShuffleEquivalent(Mask, 1, 5, 3, 7))
10462 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4f64, V1, V2);
10464 // If we have a single input to the zero element, insert that into V1 if we
10465 // can do so cheaply.
10466 int NumV2Elements =
10467 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
10468 if (NumV2Elements == 1 && Mask[0] >= 4)
10469 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
10470 MVT::v4f64, DL, V1, V2, Mask, Subtarget, DAG))
10473 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4f64, V1, V2, Mask,
10477 // Check if the blend happens to exactly fit that of SHUFPD.
10478 if ((Mask[0] == -1 || Mask[0] < 2) &&
10479 (Mask[1] == -1 || (Mask[1] >= 4 && Mask[1] < 6)) &&
10480 (Mask[2] == -1 || (Mask[2] >= 2 && Mask[2] < 4)) &&
10481 (Mask[3] == -1 || Mask[3] >= 6)) {
10482 unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 5) << 1) |
10483 ((Mask[2] == 3) << 2) | ((Mask[3] == 7) << 3);
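// For instance (illustrative mask): <0, 5, 2, 7> fits this pattern and yields
// SHUFPDMask 0b1010, i.e. take the low element of V1 and the high element of
// V2 within each 128-bit half.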
10484 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f64, V1, V2,
10485 DAG.getConstant(SHUFPDMask, MVT::i8));
10487 if ((Mask[0] == -1 || (Mask[0] >= 4 && Mask[0] < 6)) &&
10488 (Mask[1] == -1 || Mask[1] < 2) &&
10489 (Mask[2] == -1 || Mask[2] >= 6) &&
10490 (Mask[3] == -1 || (Mask[3] >= 2 && Mask[3] < 4))) {
10491 unsigned SHUFPDMask = (Mask[0] == 5) | ((Mask[1] == 1) << 1) |
10492 ((Mask[2] == 7) << 2) | ((Mask[3] == 3) << 3);
10493 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f64, V2, V1,
10494 DAG.getConstant(SHUFPDMask, MVT::i8));
10497 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10498 // shuffle. However, if we have AVX2 and either input is already in place,
10499 // we will be able to shuffle the other input across lanes in a single
10500 // instruction, so skip this pattern.
10501 if (!(Subtarget->hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) ||
10502 isShuffleMaskInputInPlace(1, Mask))))
10503 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10504 DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
10507 // If we have AVX2 then we always want to lower with a blend because at v4 we
10508 // can fully permute the elements.
10509 if (Subtarget->hasAVX2())
10510 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4f64, V1, V2,
10513 // Otherwise fall back on generic lowering.
10514 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v4f64, V1, V2, Mask, DAG);
10517 /// \brief Handle lowering of 4-lane 64-bit integer shuffles.
10519 /// This routine is only called when we have AVX2 and thus a reasonable
10520 /// instruction set for v4i64 shuffling.
10521 static SDValue lowerV4I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10522 const X86Subtarget *Subtarget,
10523 SelectionDAG &DAG) {
10525 assert(V1.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
10526 assert(V2.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
10527 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10528 ArrayRef<int> Mask = SVOp->getMask();
10529 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
10530 assert(Subtarget->hasAVX2() && "We can only lower v4i64 with AVX2!");
10532 SmallVector<int, 4> WidenedMask;
10533 if (canWidenShuffleElements(Mask, WidenedMask))
10534 return lowerV2X128VectorShuffle(DL, MVT::v4i64, V1, V2, Mask, Subtarget,
10537 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4i64, V1, V2, Mask,
10541 // Check for being able to broadcast a single element.
10542 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4i64, DL, V1,
10543 Mask, Subtarget, DAG))
10546 // When the shuffle is mirrored between the 128-bit lanes of the unit, we can
10547 // use lower latency instructions that will operate on both 128-bit lanes.
10548 SmallVector<int, 2> RepeatedMask;
10549 if (is128BitLaneRepeatedShuffleMask(MVT::v4i64, Mask, RepeatedMask)) {
10550 if (isSingleInputShuffleMask(Mask)) {
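// When the 64-bit mask repeats across lanes we can express it as a 32-bit
// PSHUFD; e.g. (illustration) RepeatedMask <1, 0> expands to PSHUFDMask
// <2, 3, 0, 1>, swapping the two i64 elements within each 128-bit lane.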
10551 int PSHUFDMask[] = {-1, -1, -1, -1};
10552 for (int i = 0; i < 2; ++i)
10553 if (RepeatedMask[i] >= 0) {
10554 PSHUFDMask[2 * i] = 2 * RepeatedMask[i];
10555 PSHUFDMask[2 * i + 1] = 2 * RepeatedMask[i] + 1;
10557 return DAG.getNode(
10558 ISD::BITCAST, DL, MVT::v4i64,
10559 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32,
10560 DAG.getNode(ISD::BITCAST, DL, MVT::v8i32, V1),
10561 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
10564 // Use dedicated unpack instructions for masks that match their pattern.
10565 if (isShuffleEquivalent(Mask, 0, 4, 2, 6))
10566 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4i64, V1, V2);
10567 if (isShuffleEquivalent(Mask, 1, 5, 3, 7))
10568 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4i64, V1, V2);
10571 // AVX2 provides a direct instruction for permuting a single input across
10572 // lanes.
10573 if (isSingleInputShuffleMask(Mask))
10574 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4i64, V1,
10575 getV4X86ShuffleImm8ForMask(Mask, DAG));
10577 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10578 // shuffle. However, if we have AVX2 and either input is already in place,
10579 // we will be able to shuffle the other input across lanes in a single
10580 // instruction, so skip this pattern.
10581 if (!(Subtarget->hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) ||
10582 isShuffleMaskInputInPlace(1, Mask))))
10583 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10584 DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG))
10587 // Otherwise fall back on generic blend lowering.
10588 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4i64, V1, V2,
10592 /// \brief Handle lowering of 8-lane 32-bit floating point shuffles.
10594 /// Also ends up handling lowering of 8-lane 32-bit integer shuffles when AVX2
10595 /// isn't available.
10596 static SDValue lowerV8F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10597 const X86Subtarget *Subtarget,
10598 SelectionDAG &DAG) {
10600 assert(V1.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
10601 assert(V2.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
10602 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10603 ArrayRef<int> Mask = SVOp->getMask();
10604 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
10606 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8f32, V1, V2, Mask,
10610 // Check for being able to broadcast a single element.
10611 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v8f32, DL, V1,
10612 Mask, Subtarget, DAG))
10615 // If the shuffle mask is repeated in each 128-bit lane, we have many more
10616 // options to efficiently lower the shuffle.
10617 SmallVector<int, 4> RepeatedMask;
10618 if (is128BitLaneRepeatedShuffleMask(MVT::v8f32, Mask, RepeatedMask)) {
10619 assert(RepeatedMask.size() == 4 &&
10620 "Repeated masks must be half the mask width!");
10622 // Use even/odd duplicate instructions for masks that match their pattern.
10623 if (isShuffleEquivalent(Mask, 0, 0, 2, 2, 4, 4, 6, 6))
10624 return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v8f32, V1);
10625 if (isShuffleEquivalent(Mask, 1, 1, 3, 3, 5, 5, 7, 7))
10626 return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v8f32, V1);
10628 if (isSingleInputShuffleMask(Mask))
10629 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f32, V1,
10630 getV4X86ShuffleImm8ForMask(RepeatedMask, DAG));
10632 // Use dedicated unpack instructions for masks that match their pattern.
10633 if (isShuffleEquivalent(Mask, 0, 8, 1, 9, 4, 12, 5, 13))
10634 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8f32, V1, V2);
10635 if (isShuffleEquivalent(Mask, 2, 10, 3, 11, 6, 14, 7, 15))
10636 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8f32, V1, V2);
10638 // Otherwise, fall back to a SHUFPS sequence. Here it is important that we
10639 // have already handled any direct blends. We also need to squash the
10640 // repeated mask into a simulated v4f32 mask.
10641 for (int i = 0; i < 4; ++i)
10642 if (RepeatedMask[i] >= 8)
10643 RepeatedMask[i] -= 4;
10644 return lowerVectorShuffleWithSHUFPS(DL, MVT::v8f32, RepeatedMask, V1, V2, DAG);
10647 // If we have a single input shuffle with different shuffle patterns in the
10648 // two 128-bit lanes, use a variable shuffle mask with VPERMILPS.
10649 if (isSingleInputShuffleMask(Mask)) {
10650 SDValue VPermMask[8];
10651 for (int i = 0; i < 8; ++i)
10652 VPermMask[i] = Mask[i] < 0 ? DAG.getUNDEF(MVT::i32)
10653 : DAG.getConstant(Mask[i], MVT::i32);
10654 if (!is128BitLaneCrossingShuffleMask(MVT::v8f32, Mask))
10655 return DAG.getNode(
10656 X86ISD::VPERMILPV, DL, MVT::v8f32, V1,
10657 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i32, VPermMask));
10659 if (Subtarget->hasAVX2())
10660 return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8f32,
10661 DAG.getNode(ISD::BITCAST, DL, MVT::v8f32,
10662 DAG.getNode(ISD::BUILD_VECTOR, DL,
10663 MVT::v8i32, VPermMask)),
10666 // Otherwise, fall back.
10667 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v8f32, V1, V2, Mask,
10671 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10672 // shuffle.
10673 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10674 DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
10677 // If we have AVX2 then we always want to lower with a blend because at v8 we
10678 // can fully permute the elements.
10679 if (Subtarget->hasAVX2())
10680 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8f32, V1, V2,
10683 // Otherwise fall back on generic lowering.
10684 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v8f32, V1, V2, Mask, DAG);
10687 /// \brief Handle lowering of 8-lane 32-bit integer shuffles.
10689 /// This routine is only called when we have AVX2 and thus a reasonable
10690 /// instruction set for v8i32 shuffling.
10691 static SDValue lowerV8I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10692 const X86Subtarget *Subtarget,
10693 SelectionDAG &DAG) {
10695 assert(V1.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
10696 assert(V2.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
10697 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10698 ArrayRef<int> Mask = SVOp->getMask();
10699 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
10700 assert(Subtarget->hasAVX2() && "We can only lower v8i32 with AVX2!");
10702 // Whenever we can lower this as a zext, that instruction is strictly faster
10703 // than any alternative. It also allows us to fold memory operands into the
10704 // shuffle in many cases.
10705 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v8i32, V1, V2,
10706 Mask, Subtarget, DAG))
10709 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8i32, V1, V2, Mask,
10713 // Check for being able to broadcast a single element.
10714 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v8i32, DL, V1,
10715 Mask, Subtarget, DAG))
10718 // If the shuffle mask is repeated in each 128-bit lane we can use more
10719 // efficient instructions that mirror the shuffles across the two 128-bit
10720 // lanes.
10721 SmallVector<int, 4> RepeatedMask;
10722 if (is128BitLaneRepeatedShuffleMask(MVT::v8i32, Mask, RepeatedMask)) {
10723 assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
10724 if (isSingleInputShuffleMask(Mask))
10725 return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32, V1,
10726 getV4X86ShuffleImm8ForMask(RepeatedMask, DAG));
10728 // Use dedicated unpack instructions for masks that match their pattern.
10729 if (isShuffleEquivalent(Mask, 0, 8, 1, 9, 4, 12, 5, 13))
10730 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i32, V1, V2);
10731 if (isShuffleEquivalent(Mask, 2, 10, 3, 11, 6, 14, 7, 15))
10732 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i32, V1, V2);
10735 // If the shuffle patterns aren't repeated but it is a single input, directly
10736 // generate a cross-lane VPERMD instruction.
10737 if (isSingleInputShuffleMask(Mask)) {
10738 SDValue VPermMask[8];
10739 for (int i = 0; i < 8; ++i)
10740 VPermMask[i] = Mask[i] < 0 ? DAG.getUNDEF(MVT::i32)
10741 : DAG.getConstant(Mask[i], MVT::i32);
10742 return DAG.getNode(
10743 X86ISD::VPERMV, DL, MVT::v8i32,
10744 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i32, VPermMask), V1);
10747 // Try to use bit shift instructions.
10748 if (SDValue Shift = lowerVectorShuffleAsBitShift(
10749 DL, MVT::v8i32, V1, V2, Mask, DAG))
10752 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10753 // shuffle.
10754 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10755 DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
10758 // Otherwise fall back on generic blend lowering.
10759 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8i32, V1, V2,
10763 /// \brief Handle lowering of 16-lane 16-bit integer shuffles.
10765 /// This routine is only called when we have AVX2 and thus a reasonable
10766 /// instruction set for v16i16 shuffling.
10767 static SDValue lowerV16I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10768 const X86Subtarget *Subtarget,
10769 SelectionDAG &DAG) {
10771 assert(V1.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
10772 assert(V2.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
10773 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10774 ArrayRef<int> Mask = SVOp->getMask();
10775 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
10776 assert(Subtarget->hasAVX2() && "We can only lower v16i16 with AVX2!");
10778 // Whenever we can lower this as a zext, that instruction is strictly faster
10779 // than any alternative. It also allows us to fold memory operands into the
10780 // shuffle in many cases.
10781 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v16i16, V1, V2,
10782 Mask, Subtarget, DAG))
10785 // Check for being able to broadcast a single element.
10786 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v16i16, DL, V1,
10787 Mask, Subtarget, DAG))
10790 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v16i16, V1, V2, Mask,
10794 // Use dedicated unpack instructions for masks that match their pattern.
10795 if (isShuffleEquivalent(Mask,
10796 // First 128-bit lane:
10797 0, 16, 1, 17, 2, 18, 3, 19,
10798 // Second 128-bit lane:
10799 8, 24, 9, 25, 10, 26, 11, 27))
10800 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i16, V1, V2);
10801 if (isShuffleEquivalent(Mask,
10802 // First 128-bit lane:
10803 4, 20, 5, 21, 6, 22, 7, 23,
10804 // Second 128-bit lane:
10805 12, 28, 13, 29, 14, 30, 15, 31))
10806 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i16, V1, V2);
10808 if (isSingleInputShuffleMask(Mask)) {
10809 // There are no generalized cross-lane shuffle operations available on i16
10810 // element types.
10811 if (is128BitLaneCrossingShuffleMask(MVT::v16i16, Mask))
10812 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v16i16, V1, V2,
10815 SDValue PSHUFBMask[32];
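// Each i16 lane index M expands to the two byte selectors 2*M and 2*M+1 within
// its 128-bit lane; e.g. (illustration) Mask[3] == 5 yields PSHUFBMask[6] == 10
// and PSHUFBMask[7] == 11.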
10816 for (int i = 0; i < 16; ++i) {
10817 if (Mask[i] == -1) {
10818 PSHUFBMask[2 * i] = PSHUFBMask[2 * i + 1] = DAG.getUNDEF(MVT::i8);
10822 int M = i < 8 ? Mask[i] : Mask[i] - 8;
10823 assert(M >= 0 && M < 8 && "Invalid single-input mask!");
10824 PSHUFBMask[2 * i] = DAG.getConstant(2 * M, MVT::i8);
10825 PSHUFBMask[2 * i + 1] = DAG.getConstant(2 * M + 1, MVT::i8);
10827 return DAG.getNode(
10828 ISD::BITCAST, DL, MVT::v16i16,
10830 X86ISD::PSHUFB, DL, MVT::v32i8,
10831 DAG.getNode(ISD::BITCAST, DL, MVT::v32i8, V1),
10832 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, PSHUFBMask)));
10835 // Try to use bit shift instructions.
10836 if (SDValue Shift = lowerVectorShuffleAsBitShift(
10837 DL, MVT::v16i16, V1, V2, Mask, DAG))
10840 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10842 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10843 DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
10846 // Otherwise fall back on generic lowering.
10847 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v16i16, V1, V2, Mask, DAG);
10850 /// \brief Handle lowering of 32-lane 8-bit integer shuffles.
10852 /// This routine is only called when we have AVX2 and thus a reasonable
10853 /// instruction set for v32i8 shuffling.
10854 static SDValue lowerV32I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10855 const X86Subtarget *Subtarget,
10856 SelectionDAG &DAG) {
10858 assert(V1.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
10859 assert(V2.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
10860 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10861 ArrayRef<int> Mask = SVOp->getMask();
10862 assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
10863 assert(Subtarget->hasAVX2() && "We can only lower v32i8 with AVX2!");
10865 // Whenever we can lower this as a zext, that instruction is strictly faster
10866 // than any alternative. It also allows us to fold memory operands into the
10867 // shuffle in many cases.
10868 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v32i8, V1, V2,
10869 Mask, Subtarget, DAG))
10872 // Check for being able to broadcast a single element.
10873 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v32i8, DL, V1,
10874 Mask, Subtarget, DAG))
10877 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v32i8, V1, V2, Mask,
10881 // Use dedicated unpack instructions for masks that match their pattern.
10882 // Note that these are repeated 128-bit lane unpacks, not unpacks across all
10884 if (isShuffleEquivalent(
10886 // First 128-bit lane:
10887 0, 32, 1, 33, 2, 34, 3, 35, 4, 36, 5, 37, 6, 38, 7, 39,
10888 // Second 128-bit lane:
10889 16, 48, 17, 49, 18, 50, 19, 51, 20, 52, 21, 53, 22, 54, 23, 55))
10890 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v32i8, V1, V2);
10891 if (isShuffleEquivalent(
10893 // First 128-bit lane:
10894 8, 40, 9, 41, 10, 42, 11, 43, 12, 44, 13, 45, 14, 46, 15, 47,
10895 // Second 128-bit lane:
10896 24, 56, 25, 57, 26, 58, 27, 59, 28, 60, 29, 61, 30, 62, 31, 63))
10897 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v32i8, V1, V2);
10899 if (isSingleInputShuffleMask(Mask)) {
10900 // There are no generalized cross-lane shuffle operations available on i8
10902 if (is128BitLaneCrossingShuffleMask(MVT::v32i8, Mask))
10903 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v32i8, V1, V2,
10906 SDValue PSHUFBMask[32];
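// VPSHUFB shuffles bytes within each 128-bit lane independently, so indices
// into the high half are reduced by 16 to make them lane-relative; undef mask
// elements simply stay undef.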
10907 for (int i = 0; i < 32; ++i)
10910 ? DAG.getUNDEF(MVT::i8)
10911 : DAG.getConstant(Mask[i] < 16 ? Mask[i] : Mask[i] - 16, MVT::i8);
10913 return DAG.getNode(
10914 X86ISD::PSHUFB, DL, MVT::v32i8, V1,
10915 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, PSHUFBMask));
10918 // Try to use bit shift instructions.
10919 if (SDValue Shift = lowerVectorShuffleAsBitShift(
10920 DL, MVT::v32i8, V1, V2, Mask, DAG))
10923 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10925 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10926 DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
10929 // Otherwise fall back on generic lowering.
10930 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v32i8, V1, V2, Mask, DAG);
10933 /// \brief High-level routine to lower various 256-bit x86 vector shuffles.
10935 /// This routine either breaks down the specific type of a 256-bit x86 vector
10936 /// shuffle or splits it into two 128-bit shuffles and fuses the results back
10937 /// together based on the available instructions.
10938 static SDValue lower256BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10939 MVT VT, const X86Subtarget *Subtarget,
10940 SelectionDAG &DAG) {
10942 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10943 ArrayRef<int> Mask = SVOp->getMask();
10945 // There is a really nice hard cut-over between AVX1 and AVX2 that means we can
10946 // check for those subtargets here and avoid much of the subtarget querying in
10947 // the per-vector-type lowering routines. With AVX1 we have essentially *zero*
10948 // ability to manipulate a 256-bit vector with integer types. Since we'll use
10949 // floating point types there eventually, just immediately cast everything to
10950 // a float and operate entirely in that domain.
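// For example, an AVX1 v8i32 shuffle is bitcast to v8f32, shuffled in the
// floating-point domain below, and the result is bitcast back to v8i32.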
10951 if (VT.isInteger() && !Subtarget->hasAVX2()) {
10952 int ElementBits = VT.getScalarSizeInBits();
10953 if (ElementBits < 32)
10954 // No floating point type available, decompose into 128-bit vectors.
10955 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
10957 MVT FpVT = MVT::getVectorVT(MVT::getFloatingPointVT(ElementBits),
10958 VT.getVectorNumElements());
10959 V1 = DAG.getNode(ISD::BITCAST, DL, FpVT, V1);
10960 V2 = DAG.getNode(ISD::BITCAST, DL, FpVT, V2);
10961 return DAG.getNode(ISD::BITCAST, DL, VT,
10962 DAG.getVectorShuffle(FpVT, DL, V1, V2, Mask));
10965 switch (VT.SimpleTy) {
10967 return lowerV4F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
10969 return lowerV4I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
10971 return lowerV8F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
10973 return lowerV8I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
10975 return lowerV16I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
10977 return lowerV32I8VectorShuffle(Op, V1, V2, Subtarget, DAG);
10980 llvm_unreachable("Not a valid 256-bit x86 vector type!");
10984 /// \brief Handle lowering of 8-lane 64-bit floating point shuffles.
10985 static SDValue lowerV8F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10986 const X86Subtarget *Subtarget,
10987 SelectionDAG &DAG) {
10989 assert(V1.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
10990 assert(V2.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
10991 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10992 ArrayRef<int> Mask = SVOp->getMask();
10993 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
10995 // X86 has dedicated unpack instructions that can handle specific blend
10996 // operations: UNPCKH and UNPCKL.
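// For example, the v8f64 mask <0, 8, 2, 10, 4, 12, 6, 14> interleaves the
// even-index elements of V1 and V2 within each 128-bit lane, which is exactly
// the UNPCKL pattern for this type.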
10997 if (isShuffleEquivalent(Mask, 0, 8, 2, 10, 4, 12, 6, 14))
10998 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8f64, V1, V2);
10999 if (isShuffleEquivalent(Mask, 1, 9, 3, 11, 5, 13, 7, 15))
11000 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8f64, V1, V2);
11002 // FIXME: Implement direct support for this type!
11003 return splitAndLowerVectorShuffle(DL, MVT::v8f64, V1, V2, Mask, DAG);
11006 /// \brief Handle lowering of 16-lane 32-bit floating point shuffles.
11007 static SDValue lowerV16F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11008 const X86Subtarget *Subtarget,
11009 SelectionDAG &DAG) {
11011 assert(V1.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
11012 assert(V2.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
11013 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11014 ArrayRef<int> Mask = SVOp->getMask();
11015 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
11017 // Use dedicated unpack instructions for masks that match their pattern.
11018 if (isShuffleEquivalent(Mask,
11019 0, 16, 1, 17, 4, 20, 5, 21,
11020 8, 24, 9, 25, 12, 28, 13, 29))
11021 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16f32, V1, V2);
11022 if (isShuffleEquivalent(Mask,
11023 2, 18, 3, 19, 6, 22, 7, 23,
11024 10, 26, 11, 27, 14, 30, 15, 31))
11025 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16f32, V1, V2);
11027 // FIXME: Implement direct support for this type!
11028 return splitAndLowerVectorShuffle(DL, MVT::v16f32, V1, V2, Mask, DAG);
11031 /// \brief Handle lowering of 8-lane 64-bit integer shuffles.
11032 static SDValue lowerV8I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11033 const X86Subtarget *Subtarget,
11034 SelectionDAG &DAG) {
11036 assert(V1.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
11037 assert(V2.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
11038 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11039 ArrayRef<int> Mask = SVOp->getMask();
11040 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
11042 // X86 has dedicated unpack instructions that can handle specific blend
11043 // operations: UNPCKH and UNPCKL.
11044 if (isShuffleEquivalent(Mask, 0, 8, 2, 10, 4, 12, 6, 14))
11045 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i64, V1, V2);
11046 if (isShuffleEquivalent(Mask, 1, 9, 3, 11, 5, 13, 7, 15))
11047 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i64, V1, V2);
11049 // FIXME: Implement direct support for this type!
11050 return splitAndLowerVectorShuffle(DL, MVT::v8i64, V1, V2, Mask, DAG);
11053 /// \brief Handle lowering of 16-lane 32-bit integer shuffles.
11054 static SDValue lowerV16I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11055 const X86Subtarget *Subtarget,
11056 SelectionDAG &DAG) {
11058 assert(V1.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
11059 assert(V2.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
11060 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11061 ArrayRef<int> Mask = SVOp->getMask();
11062 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
11064 // Use dedicated unpack instructions for masks that match their pattern.
11065 if (isShuffleEquivalent(Mask,
11066 0, 16, 1, 17, 4, 20, 5, 21,
11067 8, 24, 9, 25, 12, 28, 13, 29))
11068 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i32, V1, V2);
11069 if (isShuffleEquivalent(Mask,
11070 2, 18, 3, 19, 6, 22, 7, 23,
11071 10, 26, 11, 27, 14, 30, 15, 31))
11072 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i32, V1, V2);
11074 // FIXME: Implement direct support for this type!
11075 return splitAndLowerVectorShuffle(DL, MVT::v16i32, V1, V2, Mask, DAG);
11078 /// \brief Handle lowering of 32-lane 16-bit integer shuffles.
11079 static SDValue lowerV32I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11080 const X86Subtarget *Subtarget,
11081 SelectionDAG &DAG) {
11083 assert(V1.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
11084 assert(V2.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
11085 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11086 ArrayRef<int> Mask = SVOp->getMask();
11087 assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
11088 assert(Subtarget->hasBWI() && "We can only lower v32i16 with AVX-512-BWI!");
11090 // FIXME: Implement direct support for this type!
11091 return splitAndLowerVectorShuffle(DL, MVT::v32i16, V1, V2, Mask, DAG);
11094 /// \brief Handle lowering of 64-lane 8-bit integer shuffles.
11095 static SDValue lowerV64I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11096 const X86Subtarget *Subtarget,
11097 SelectionDAG &DAG) {
11099 assert(V1.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
11100 assert(V2.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
11101 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11102 ArrayRef<int> Mask = SVOp->getMask();
11103 assert(Mask.size() == 64 && "Unexpected mask size for v64 shuffle!");
11104 assert(Subtarget->hasBWI() && "We can only lower v64i8 with AVX-512-BWI!");
11106 // FIXME: Implement direct support for this type!
11107 return splitAndLowerVectorShuffle(DL, MVT::v64i8, V1, V2, Mask, DAG);
11110 /// \brief High-level routine to lower various 512-bit x86 vector shuffles.
11112 /// This routine either breaks down the specific type of a 512-bit x86 vector
11113 /// shuffle or splits it into two 256-bit shuffles and fuses the results back
11114 /// together based on the available instructions.
11115 static SDValue lower512BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11116 MVT VT, const X86Subtarget *Subtarget,
11117 SelectionDAG &DAG) {
11119 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11120 ArrayRef<int> Mask = SVOp->getMask();
11121 assert(Subtarget->hasAVX512() &&
11122 "Cannot lower 512-bit vectors w/ basic ISA!");
11124 // Check for being able to broadcast a single element.
11125 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(VT.SimpleTy, DL, V1,
11126 Mask, Subtarget, DAG))
11129 // Dispatch to each element type for lowering. If we don't have support for
11130 // specific element type shuffles at 512 bits, immediately split them and
11131 // lower them. Each lowering routine of a given type is allowed to assume that
11132 // the requisite ISA extensions for that element type are available.
11133 switch (VT.SimpleTy) {
11135 return lowerV8F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
11137 return lowerV16F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
11139 return lowerV8I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
11141 return lowerV16I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
11143 if (Subtarget->hasBWI())
11144 return lowerV32I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
11147 if (Subtarget->hasBWI())
11148 return lowerV64I8VectorShuffle(Op, V1, V2, Subtarget, DAG);
11152 llvm_unreachable("Not a valid 512-bit x86 vector type!");
11155 // Otherwise fall back on splitting.
11156 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
11159 /// \brief Top-level lowering for x86 vector shuffles.
11161 /// This handles decomposition, canonicalization, and lowering of all x86
11162 /// vector shuffles. Most of the specific lowering strategies are encapsulated
11163 /// above in helper routines. The canonicalization attempts to widen shuffles
11164 /// to involve fewer lanes of wider elements, consolidate symmetric patterns
11165 /// s.t. only one of the two inputs needs to be tested, etc.
11166 static SDValue lowerVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
11167 SelectionDAG &DAG) {
11168 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11169 ArrayRef<int> Mask = SVOp->getMask();
11170 SDValue V1 = Op.getOperand(0);
11171 SDValue V2 = Op.getOperand(1);
11172 MVT VT = Op.getSimpleValueType();
11173 int NumElements = VT.getVectorNumElements();
11176 assert(VT.getSizeInBits() != 64 && "Can't lower MMX shuffles");
11178 bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
11179 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
11180 if (V1IsUndef && V2IsUndef)
11181 return DAG.getUNDEF(VT);
11183 // When we create a shuffle node we put the UNDEF node in the second operand,
11184 // but in some cases the first operand may be transformed to UNDEF.
11185 // In this case we should just commute the node.
11187 return DAG.getCommutedVectorShuffle(*SVOp);
11189 // Check for non-undef masks pointing at an undef vector and make the masks
11190 // undef as well. This makes it easier to match the shuffle based solely on
11194 if (M >= NumElements) {
11195 SmallVector<int, 8> NewMask(Mask.begin(), Mask.end());
11196 for (int &M : NewMask)
11197 if (M >= NumElements)
11199 return DAG.getVectorShuffle(VT, dl, V1, V2, NewMask);
11202 // Try to collapse shuffles into using a vector type with fewer elements but
11203 // wider element types. We cap this to not form integers or floating point
11204 // elements wider than 64 bits, but it might be interesting to form i128
11205 // integers to handle flipping the low and high halves of AVX 256-bit vectors.
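// For example, the v4i32 mask <0, 1, 6, 7> widens to the v2i64 mask <0, 3>.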
11206 SmallVector<int, 16> WidenedMask;
11207 if (VT.getScalarSizeInBits() < 64 &&
11208 canWidenShuffleElements(Mask, WidenedMask)) {
11209 MVT NewEltVT = VT.isFloatingPoint()
11210 ? MVT::getFloatingPointVT(VT.getScalarSizeInBits() * 2)
11211 : MVT::getIntegerVT(VT.getScalarSizeInBits() * 2);
11212 MVT NewVT = MVT::getVectorVT(NewEltVT, VT.getVectorNumElements() / 2);
11213 // Make sure that the new vector type is legal. For example, v2f64 isn't
11215 if (DAG.getTargetLoweringInfo().isTypeLegal(NewVT)) {
11216 V1 = DAG.getNode(ISD::BITCAST, dl, NewVT, V1);
11217 V2 = DAG.getNode(ISD::BITCAST, dl, NewVT, V2);
11218 return DAG.getNode(ISD::BITCAST, dl, VT,
11219 DAG.getVectorShuffle(NewVT, dl, V1, V2, WidenedMask));
11223 int NumV1Elements = 0, NumUndefElements = 0, NumV2Elements = 0;
11224 for (int M : SVOp->getMask())
11226 ++NumUndefElements;
11227 else if (M < NumElements)
11232 // Commute the shuffle as needed such that more elements come from V1 than
11233 // V2. This allows us to match the shuffle pattern strictly on how many
11234 // elements come from V1 without handling the symmetric cases.
11235 if (NumV2Elements > NumV1Elements)
11236 return DAG.getCommutedVectorShuffle(*SVOp);
11238 // When the number of V1 and V2 elements is the same, try to minimize the
11239 // number of uses of V2 in the low half of the vector. When that is tied,
11240 // ensure that the sum of indices for V1 is equal to or lower than the sum of
11241 // indices for V2. When those are equal, try to ensure that the number of odd
11242 // indices for V1 is lower than the number of odd indices for V2.
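// For example, the v4i32 mask <4, 5, 0, 1> takes two elements from each input
// but both low-half elements from V2, so it is commuted to <0, 1, 4, 5>.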
11243 if (NumV1Elements == NumV2Elements) {
11244 int LowV1Elements = 0, LowV2Elements = 0;
11245 for (int M : SVOp->getMask().slice(0, NumElements / 2))
11246 if (M >= NumElements)
11250 if (LowV2Elements > LowV1Elements) {
11251 return DAG.getCommutedVectorShuffle(*SVOp);
11252 } else if (LowV2Elements == LowV1Elements) {
11253 int SumV1Indices = 0, SumV2Indices = 0;
11254 for (int i = 0, Size = SVOp->getMask().size(); i < Size; ++i)
11255 if (SVOp->getMask()[i] >= NumElements)
11257 else if (SVOp->getMask()[i] >= 0)
11259 if (SumV2Indices < SumV1Indices) {
11260 return DAG.getCommutedVectorShuffle(*SVOp);
11261 } else if (SumV2Indices == SumV1Indices) {
11262 int NumV1OddIndices = 0, NumV2OddIndices = 0;
11263 for (int i = 0, Size = SVOp->getMask().size(); i < Size; ++i)
11264 if (SVOp->getMask()[i] >= NumElements)
11265 NumV2OddIndices += i % 2;
11266 else if (SVOp->getMask()[i] >= 0)
11267 NumV1OddIndices += i % 2;
11268 if (NumV2OddIndices < NumV1OddIndices)
11269 return DAG.getCommutedVectorShuffle(*SVOp);
11274 // For each vector width, delegate to a specialized lowering routine.
11275 if (VT.getSizeInBits() == 128)
11276 return lower128BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);
11278 if (VT.getSizeInBits() == 256)
11279 return lower256BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);
11281 // Lower 512-bit vectors with the dedicated AVX-512 routine; most cases there
11282 // still split the vector. FIXME: Complete AVX-512 support!
11283 if (VT.getSizeInBits() == 512)
11284 return lower512BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);
11286 llvm_unreachable("Unimplemented!");
11290 //===----------------------------------------------------------------------===//
11291 // Legacy vector shuffle lowering
11293 // This code is the legacy code handling vector shuffles until the above
11294 // replaces its functionality and performance.
11295 //===----------------------------------------------------------------------===//
11297 static bool isBlendMask(ArrayRef<int> MaskVals, MVT VT, bool hasSSE41,
11298 bool hasInt256, unsigned *MaskOut = nullptr) {
11299 MVT EltVT = VT.getVectorElementType();
11301 // There is no blend with immediate in AVX-512.
11302 if (VT.is512BitVector())
11305 if (!hasSSE41 || EltVT == MVT::i8)
11307 if (!hasInt256 && VT == MVT::v16i16)
11310 unsigned MaskValue = 0;
11311 unsigned NumElems = VT.getVectorNumElements();
11312 // There are 2 lanes if (NumElems > 8), and 1 lane otherwise.
11313 unsigned NumLanes = (NumElems - 1) / 8 + 1;
11314 unsigned NumElemsInLane = NumElems / NumLanes;
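// Bit i of MaskValue is set when element i of each 128-bit lane should come
// from V2; the same bits describe both lanes, which is why a v16i16 blend
// must use the same per-lane pattern.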
11316 // Blend for v16i16 should be symmetric for both lanes.
11317 for (unsigned i = 0; i < NumElemsInLane; ++i) {
11319 int SndLaneEltIdx = (NumLanes == 2) ? MaskVals[i + NumElemsInLane] : -1;
11320 int EltIdx = MaskVals[i];
11322 if ((EltIdx < 0 || EltIdx == (int)i) &&
11323 (SndLaneEltIdx < 0 || SndLaneEltIdx == (int)(i + NumElemsInLane)))
11326 if (((unsigned)EltIdx == (i + NumElems)) &&
11327 (SndLaneEltIdx < 0 ||
11328 (unsigned)SndLaneEltIdx == i + NumElems + NumElemsInLane))
11329 MaskValue |= (1 << i);
11335 *MaskOut = MaskValue;
11339 // Try to lower a shuffle node into a simple blend instruction.
11340 // This function assumes isBlendMask returns true for this
11341 // ShuffleVectorSDNode.
11342 static SDValue LowerVECTOR_SHUFFLEtoBlend(ShuffleVectorSDNode *SVOp,
11343 unsigned MaskValue,
11344 const X86Subtarget *Subtarget,
11345 SelectionDAG &DAG) {
11346 MVT VT = SVOp->getSimpleValueType(0);
11347 MVT EltVT = VT.getVectorElementType();
11348 assert(isBlendMask(SVOp->getMask(), VT, Subtarget->hasSSE41(),
11349 Subtarget->hasInt256()) &&
11350 "Trying to lower a VECTOR_SHUFFLE to a Blend but "
11351 "with the wrong mask");
11352 SDValue V1 = SVOp->getOperand(0);
11353 SDValue V2 = SVOp->getOperand(1);
11355 unsigned NumElems = VT.getVectorNumElements();
11357 // Convert i32 vectors to floating point if AVX2 is not available.
11358 // AVX2 introduced VPBLENDD instruction for 128 and 256-bit vectors.
11360 if (EltVT == MVT::i64 || (EltVT == MVT::i32 && !Subtarget->hasInt256())) {
11361 BlendVT = MVT::getVectorVT(MVT::getFloatingPointVT(EltVT.getSizeInBits()),
11363 V1 = DAG.getNode(ISD::BITCAST, dl, BlendVT, V1);
11364 V2 = DAG.getNode(ISD::BITCAST, dl, BlendVT, V2);
11367 SDValue Ret = DAG.getNode(X86ISD::BLENDI, dl, BlendVT, V1, V2,
11368 DAG.getConstant(MaskValue, MVT::i32));
11369 return DAG.getNode(ISD::BITCAST, dl, VT, Ret);
11372 /// In vector type \p VT, return true if the element at index \p InputIdx
11373 /// falls on a different 128-bit lane than \p OutputIdx.
11374 static bool ShuffleCrosses128bitLane(MVT VT, unsigned InputIdx,
11375 unsigned OutputIdx) {
11376 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
11377 return InputIdx * EltSize / 128 != OutputIdx * EltSize / 128;
11380 /// Generate a PSHUFB if possible. Selects elements from \p V1 according to
11381 /// \p MaskVals. MaskVals[OutputIdx] = InputIdx specifies that we want to
11382 /// shuffle the element at InputIdx in V1 to OutputIdx in the result. If \p
11383 /// MaskVals refers to elements outside of \p V1 or is undef (-1), insert a
11385 static SDValue getPSHUFB(ArrayRef<int> MaskVals, SDValue V1, SDLoc &dl,
11386 SelectionDAG &DAG) {
11387 MVT VT = V1.getSimpleValueType();
11388 assert(VT.is128BitVector() || VT.is256BitVector());
11390 MVT EltVT = VT.getVectorElementType();
11391 unsigned EltSizeInBytes = EltVT.getSizeInBits() / 8;
11392 unsigned NumElts = VT.getVectorNumElements();
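// Each vector element expands to EltSizeInBytes consecutive byte selectors in
// the PSHUFB mask; a selector of 0x80 zeroes the corresponding result byte.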
11394 SmallVector<SDValue, 32> PshufbMask;
11395 for (unsigned OutputIdx = 0; OutputIdx < NumElts; ++OutputIdx) {
11396 int InputIdx = MaskVals[OutputIdx];
11397 unsigned InputByteIdx;
11399 if (InputIdx < 0 || NumElts <= (unsigned)InputIdx)
11400 InputByteIdx = 0x80;
11402 // Cross lane is not allowed.
11403 if (ShuffleCrosses128bitLane(VT, InputIdx, OutputIdx))
11405 InputByteIdx = InputIdx * EltSizeInBytes;
11406 // Index is a byte offset within the 128-bit lane.
11407 InputByteIdx &= 0xf;
11410 for (unsigned j = 0; j < EltSizeInBytes; ++j) {
11411 PshufbMask.push_back(DAG.getConstant(InputByteIdx, MVT::i8));
11412 if (InputByteIdx != 0x80)
11417 MVT ShufVT = MVT::getVectorVT(MVT::i8, PshufbMask.size());
11419 V1 = DAG.getNode(ISD::BITCAST, dl, ShufVT, V1);
11420 return DAG.getNode(X86ISD::PSHUFB, dl, ShufVT, V1,
11421 DAG.getNode(ISD::BUILD_VECTOR, dl, ShufVT, PshufbMask));
11424 // v8i16 shuffles - Prefer shuffles in the following order:
11425 // 1. [all] pshuflw, pshufhw, optional move
11426 // 2. [ssse3] 1 x pshufb
11427 // 3. [ssse3] 2 x pshufb + 1 x por
11428 // 4. [all] mov + pshuflw + pshufhw + N x (pextrw + pinsrw)
11430 LowerVECTOR_SHUFFLEv8i16(SDValue Op, const X86Subtarget *Subtarget,
11431 SelectionDAG &DAG) {
11432 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11433 SDValue V1 = SVOp->getOperand(0);
11434 SDValue V2 = SVOp->getOperand(1);
11436 SmallVector<int, 8> MaskVals;
11438 // Determine if more than 1 of the words in each of the low and high quadwords
11439 // of the result come from the same quadword of one of the two inputs. Undef
11440 // mask values count as coming from any quadword, for better codegen.
11442 // LoQuad[i]/HiQuad[i] count how many words of the low/high half of the result
11443 // come from the ith input quad. For i, 0 and 1 refer to V1; 2 and 3 refer to V2.
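// For example, a mask element of 5 selects word 5, which lives in quad 1
// (words 4-7 of V1).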
11444 unsigned LoQuad[] = { 0, 0, 0, 0 };
11445 unsigned HiQuad[] = { 0, 0, 0, 0 };
11446 // Indices of quads used.
11447 std::bitset<4> InputQuads;
11448 for (unsigned i = 0; i < 8; ++i) {
11449 unsigned *Quad = i < 4 ? LoQuad : HiQuad;
11450 int EltIdx = SVOp->getMaskElt(i);
11451 MaskVals.push_back(EltIdx);
11459 ++Quad[EltIdx / 4];
11460 InputQuads.set(EltIdx / 4);
11463 int BestLoQuad = -1;
11464 unsigned MaxQuad = 1;
11465 for (unsigned i = 0; i < 4; ++i) {
11466 if (LoQuad[i] > MaxQuad) {
11468 MaxQuad = LoQuad[i];
11472 int BestHiQuad = -1;
11474 for (unsigned i = 0; i < 4; ++i) {
11475 if (HiQuad[i] > MaxQuad) {
11477 MaxQuad = HiQuad[i];
11481 // For SSSE3, if all 8 words of the result come from only 1 quadword of each
11482 // of the two input vectors, shuffle them into one input vector so only a
11483 // single pshufb instruction is necessary. If there are more than 2 input
11484 // quads, disable the next transformation since it does not help SSSE3.
11485 bool V1Used = InputQuads[0] || InputQuads[1];
11486 bool V2Used = InputQuads[2] || InputQuads[3];
11487 if (Subtarget->hasSSSE3()) {
11488 if (InputQuads.count() == 2 && V1Used && V2Used) {
11489 BestLoQuad = InputQuads[0] ? 0 : 1;
11490 BestHiQuad = InputQuads[2] ? 2 : 3;
11492 if (InputQuads.count() > 2) {
11498 // If BestLoQuad or BestHiQuad are set, shuffle the quads together and update
11499 // the shuffle mask. If a quad is scored as -1, that means that it contains
11500 // words from all 4 input quadwords.
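// For example, with BestLoQuad = 2 and BestHiQuad = 0, the v2i64 shuffle below
// produces the low quadword of V2 followed by the low quadword of V1 as the
// new v8i16 source.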
11502 if (BestLoQuad >= 0 || BestHiQuad >= 0) {
11504 BestLoQuad < 0 ? 0 : BestLoQuad,
11505 BestHiQuad < 0 ? 1 : BestHiQuad
11507 NewV = DAG.getVectorShuffle(MVT::v2i64, dl,
11508 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1),
11509 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2), &MaskV[0]);
11510 NewV = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, NewV);
11512 // Rewrite the MaskVals and assign NewV to V1 if NewV now contains all the
11513 // source words for the shuffle, to aid later transformations.
11514 bool AllWordsInNewV = true;
11515 bool InOrder[2] = { true, true };
11516 for (unsigned i = 0; i != 8; ++i) {
11517 int idx = MaskVals[i];
11519 InOrder[i/4] = false;
11520 if (idx < 0 || (idx/4) == BestLoQuad || (idx/4) == BestHiQuad)
11522 AllWordsInNewV = false;
11526 bool pshuflw = AllWordsInNewV, pshufhw = AllWordsInNewV;
11527 if (AllWordsInNewV) {
11528 for (int i = 0; i != 8; ++i) {
11529 int idx = MaskVals[i];
11532 idx = MaskVals[i] = (idx / 4) == BestLoQuad ? (idx & 3) : (idx & 3) + 4;
11533 if ((idx != i) && idx < 4)
11535 if ((idx != i) && idx > 3)
11544 // If we've eliminated the use of V2, and the new mask is a pshuflw or
11545 // pshufhw, that's as cheap as it gets. Return the new shuffle.
11546 if ((pshufhw && InOrder[0]) || (pshuflw && InOrder[1])) {
11547 unsigned Opc = pshufhw ? X86ISD::PSHUFHW : X86ISD::PSHUFLW;
11548 unsigned TargetMask = 0;
11549 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV,
11550 DAG.getUNDEF(MVT::v8i16), &MaskVals[0]);
11551 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode());
11552 TargetMask = pshufhw ? getShufflePSHUFHWImmediate(SVOp):
11553 getShufflePSHUFLWImmediate(SVOp);
11554 V1 = NewV.getOperand(0);
11555 return getTargetShuffleNode(Opc, dl, MVT::v8i16, V1, TargetMask, DAG);
11559 // Promote splats to a larger type which usually leads to more efficient code.
11560 // FIXME: Is this true if pshufb is available?
11561 if (SVOp->isSplat())
11562 return PromoteSplat(SVOp, DAG);
11564 // If we have SSSE3, and all words of the result are from 1 input vector,
11565 // case 2 is generated, otherwise case 3 is generated. If no SSSE3
11566 // is present, fall back to case 4.
11567 if (Subtarget->hasSSSE3()) {
11568 SmallVector<SDValue,16> pshufbMask;
11570 // If we have elements from both input vectors, set the high bit of the
11571 // shuffle mask element to zero out elements that come from V2 in the V1
11572 // mask, and elements that come from V1 in the V2 mask, so that the two
11573 // results can be OR'd together.
11574 bool TwoInputs = V1Used && V2Used;
11575 V1 = getPSHUFB(MaskVals, V1, dl, DAG);
11577 return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
11579 // Calculate the shuffle mask for the second input, shuffle it, and
11580 // OR it with the first shuffled input.
11581 CommuteVectorShuffleMask(MaskVals, 8);
11582 V2 = getPSHUFB(MaskVals, V2, dl, DAG);
11583 V1 = DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2);
11584 return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
11587 // If BestLoQuad >= 0, generate a pshuflw to put the low elements in order,
11588 // and update MaskVals with new element order.
11589 std::bitset<8> InOrder;
11590 if (BestLoQuad >= 0) {
11591 int MaskV[] = { -1, -1, -1, -1, 4, 5, 6, 7 };
11592 for (int i = 0; i != 4; ++i) {
11593 int idx = MaskVals[i];
11596 } else if ((idx / 4) == BestLoQuad) {
11597 MaskV[i] = idx & 3;
11601 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16),
11604 if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSE2()) {
11605 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode());
11606 NewV = getTargetShuffleNode(X86ISD::PSHUFLW, dl, MVT::v8i16,
11607 NewV.getOperand(0),
11608 getShufflePSHUFLWImmediate(SVOp), DAG);
11612 // If BestHiQuad >= 0, generate a pshufhw to put the high elements in order,
11613 // and update MaskVals with the new element order.
11614 if (BestHiQuad >= 0) {
11615 int MaskV[] = { 0, 1, 2, 3, -1, -1, -1, -1 };
11616 for (unsigned i = 4; i != 8; ++i) {
11617 int idx = MaskVals[i];
11620 } else if ((idx / 4) == BestHiQuad) {
11621 MaskV[i] = (idx & 3) + 4;
11625 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16),
11628 if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSE2()) {
11629 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode());
11630 NewV = getTargetShuffleNode(X86ISD::PSHUFHW, dl, MVT::v8i16,
11631 NewV.getOperand(0),
11632 getShufflePSHUFHWImmediate(SVOp), DAG);
11636 // In case BestHiQuad & BestLoQuad were both -1, which means each quadword has a word
11637 // from each of the four input quadwords, calculate the InOrder bitvector now
11638 // before falling through to the insert/extract cleanup.
11639 if (BestLoQuad == -1 && BestHiQuad == -1) {
11641 for (int i = 0; i != 8; ++i)
11642 if (MaskVals[i] < 0 || MaskVals[i] == i)
11646 // The other elements are put in the right place using pextrw and pinsrw.
11647 for (unsigned i = 0; i != 8; ++i) {
11650 int EltIdx = MaskVals[i];
11653 SDValue ExtOp = (EltIdx < 8) ?
11654 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V1,
11655 DAG.getIntPtrConstant(EltIdx)) :
11656 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V2,
11657 DAG.getIntPtrConstant(EltIdx - 8));
11658 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, ExtOp,
11659 DAG.getIntPtrConstant(i));
11664 /// \brief v16i16 shuffles
11666 /// FIXME: We only support generation of a single pshufb currently. We can
11667 /// generalize the other applicable cases from LowerVECTOR_SHUFFLEv8i16 as
11668 /// well (e.g. 2 x pshufb + 1 x por).
11670 LowerVECTOR_SHUFFLEv16i16(SDValue Op, SelectionDAG &DAG) {
11671 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11672 SDValue V1 = SVOp->getOperand(0);
11673 SDValue V2 = SVOp->getOperand(1);
11676 if (V2.getOpcode() != ISD::UNDEF)
11679 SmallVector<int, 16> MaskVals(SVOp->getMask().begin(), SVOp->getMask().end());
11680 return getPSHUFB(MaskVals, V1, dl, DAG);
11683 // v16i8 shuffles - Prefer shuffles in the following order:
11684 // 1. [ssse3] 1 x pshufb
11685 // 2. [ssse3] 2 x pshufb + 1 x por
11686 // 3. [all] v8i16 shuffle + N x pextrw + rotate + pinsrw
11687 static SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp,
11688 const X86Subtarget* Subtarget,
11689 SelectionDAG &DAG) {
11690 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11691 SDValue V1 = SVOp->getOperand(0);
11692 SDValue V2 = SVOp->getOperand(1);
11694 ArrayRef<int> MaskVals = SVOp->getMask();
11696 // Promote splats to a larger type which usually leads to more efficient code.
11697 // FIXME: Is this true if pshufb is available?
11698 if (SVOp->isSplat())
11699 return PromoteSplat(SVOp, DAG);
11701 // If we have SSSE3, case 1 is generated when all result bytes come from
11702 // one of the inputs. Otherwise, case 2 is generated. If no SSSE3 is
11703 // present, fall back to case 3.
11705 // If SSSE3, use 1 pshufb instruction per vector with elements in the result.
11706 if (Subtarget->hasSSSE3()) {
11707 SmallVector<SDValue,16> pshufbMask;
11709 // If all result elements are from one input vector, then only translate
11710 // undef mask values to 0x80 (zero out result) in the pshufb mask.
11712 // Otherwise, we have elements from both input vectors, and must zero out
11713 // elements that come from V2 in the first mask, and V1 in the second mask
11714 // so that we can OR them together.
11715 for (unsigned i = 0; i != 16; ++i) {
11716 int EltIdx = MaskVals[i];
11717 if (EltIdx < 0 || EltIdx >= 16)
11719 pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8));
11721 V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1,
11722 DAG.getNode(ISD::BUILD_VECTOR, dl,
11723 MVT::v16i8, pshufbMask));
11725 // As PSHUFB will zero elements with negative indices, it's safe to ignore
11726 // the 2nd operand if it's undefined or zero.
11727 if (V2.getOpcode() == ISD::UNDEF ||
11728 ISD::isBuildVectorAllZeros(V2.getNode()))
11731 // Calculate the shuffle mask for the second input, shuffle it, and
11732 // OR it with the first shuffled input.
11733 pshufbMask.clear();
11734 for (unsigned i = 0; i != 16; ++i) {
11735 int EltIdx = MaskVals[i];
11736 EltIdx = (EltIdx < 16) ? 0x80 : EltIdx - 16;
11737 pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8));
11739 V2 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V2,
11740 DAG.getNode(ISD::BUILD_VECTOR, dl,
11741 MVT::v16i8, pshufbMask));
11742 return DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2);
11745 // No SSSE3 - Calculate in-place words and then fix all out-of-place words
11746 // with 0-16 extracts & inserts. Worst case is 16 bytes out of order from
11747 // the 16 different words that comprise the two doublequadword input vectors.
11748 V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
11749 V2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2);
11751 for (int i = 0; i != 8; ++i) {
11752 int Elt0 = MaskVals[i*2];
11753 int Elt1 = MaskVals[i*2+1];
11755 // This word of the result is all undef, skip it.
11756 if (Elt0 < 0 && Elt1 < 0)
11759 // This word of the result is already in the correct place, skip it.
11760 if ((Elt0 == i*2) && (Elt1 == i*2+1))
11763 SDValue Elt0Src = Elt0 < 16 ? V1 : V2;
11764 SDValue Elt1Src = Elt1 < 16 ? V1 : V2;
11767 // If Elt0 and Elt1 are defined, are consecutive, and can be loaded
11768 // together using a single extract, extract the word and insert it.
11769 if ((Elt0 >= 0) && ((Elt0 + 1) == Elt1) && ((Elt0 & 1) == 0)) {
11770 InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src,
11771 DAG.getIntPtrConstant(Elt1 / 2));
11772 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt,
11773 DAG.getIntPtrConstant(i));
11777 // If Elt1 is defined, extract it from the appropriate source. If the
11778 // source byte is not also odd, shift the extracted word left 8 bits;
11779 // otherwise clear the bottom 8 bits if we need to do an or.
11781 InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src,
11782 DAG.getIntPtrConstant(Elt1 / 2));
11783 if ((Elt1 & 1) == 0)
11784 InsElt = DAG.getNode(ISD::SHL, dl, MVT::i16, InsElt,
11786 TLI.getShiftAmountTy(InsElt.getValueType())));
11787 else if (Elt0 >= 0)
11788 InsElt = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt,
11789 DAG.getConstant(0xFF00, MVT::i16));
11791 // If Elt0 is defined, extract it from the appropriate source. If the
11792 // source byte is not also even, shift the extracted word right 8 bits. If
11793 // Elt1 was also defined, OR the extracted values together before
11794 // inserting them in the result.
11796 SDValue InsElt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16,
11797 Elt0Src, DAG.getIntPtrConstant(Elt0 / 2));
11798 if ((Elt0 & 1) != 0)
11799 InsElt0 = DAG.getNode(ISD::SRL, dl, MVT::i16, InsElt0,
11801 TLI.getShiftAmountTy(InsElt0.getValueType())));
11802 else if (Elt1 >= 0)
11803 InsElt0 = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt0,
11804 DAG.getConstant(0x00FF, MVT::i16));
11805 InsElt = Elt1 >= 0 ? DAG.getNode(ISD::OR, dl, MVT::i16, InsElt, InsElt0)
11808 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt,
11809 DAG.getIntPtrConstant(i));
11811 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, NewV);
11814 // v32i8 shuffles - Translate to VPSHUFB if possible.
11816 SDValue LowerVECTOR_SHUFFLEv32i8(ShuffleVectorSDNode *SVOp,
11817 const X86Subtarget *Subtarget,
11818 SelectionDAG &DAG) {
11819 MVT VT = SVOp->getSimpleValueType(0);
11820 SDValue V1 = SVOp->getOperand(0);
11821 SDValue V2 = SVOp->getOperand(1);
11823 SmallVector<int, 32> MaskVals(SVOp->getMask().begin(), SVOp->getMask().end());
11825 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
11826 bool V1IsAllZero = ISD::isBuildVectorAllZeros(V1.getNode());
11827 bool V2IsAllZero = ISD::isBuildVectorAllZeros(V2.getNode());
11829 // VPSHUFB may be generated if
11830 // (1) one of the input vectors is undefined or zeroinitializer.
11831 // The mask value 0x80 puts 0 in the corresponding slot of the vector.
11832 // And (2) the mask indexes don't cross the 128-bit lane.
11833 if (VT != MVT::v32i8 || !Subtarget->hasInt256() ||
11834 (!V2IsUndef && !V2IsAllZero && !V1IsAllZero))
11837 if (V1IsAllZero && !V2IsAllZero) {
11838 CommuteVectorShuffleMask(MaskVals, 32);
11841 return getPSHUFB(MaskVals, V1, dl, DAG);
11844 /// RewriteAsNarrowerShuffle - Try rewriting v8i16 and v16i8 shuffles as 4 wide
11845 /// ones, or rewriting v4i32 / v4f32 as 2 wide ones if possible. This can be
11846 /// done when every pair / quad of shuffle mask elements points to elements in
11847 /// the right sequence. e.g.
11848 /// vector_shuffle X, Y, <2, 3, | 10, 11, | 0, 1, | 14, 15>
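/// becomes the v4i32 shuffle <1, 5, 0, 7>.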
11850 SDValue RewriteAsNarrowerShuffle(ShuffleVectorSDNode *SVOp,
11851 SelectionDAG &DAG) {
11852 MVT VT = SVOp->getSimpleValueType(0);
11854 unsigned NumElems = VT.getVectorNumElements();
11857 switch (VT.SimpleTy) {
11858 default: llvm_unreachable("Unexpected!");
11861 return SDValue(SVOp, 0);
11862 case MVT::v4f32: NewVT = MVT::v2f64; Scale = 2; break;
11863 case MVT::v4i32: NewVT = MVT::v2i64; Scale = 2; break;
11864 case MVT::v8i16: NewVT = MVT::v4i32; Scale = 2; break;
11865 case MVT::v16i8: NewVT = MVT::v4i32; Scale = 4; break;
11866 case MVT::v16i16: NewVT = MVT::v8i32; Scale = 2; break;
11867 case MVT::v32i8: NewVT = MVT::v8i32; Scale = 4; break;
11870 SmallVector<int, 8> MaskVec;
11871 for (unsigned i = 0; i != NumElems; i += Scale) {
11873 for (unsigned j = 0; j != Scale; ++j) {
11874 int EltIdx = SVOp->getMaskElt(i+j);
11878 StartIdx = (EltIdx / Scale);
11879 if (EltIdx != (int)(StartIdx*Scale + j))
11882 MaskVec.push_back(StartIdx);
11885 SDValue V1 = DAG.getNode(ISD::BITCAST, dl, NewVT, SVOp->getOperand(0));
11886 SDValue V2 = DAG.getNode(ISD::BITCAST, dl, NewVT, SVOp->getOperand(1));
11887 return DAG.getVectorShuffle(NewVT, dl, V1, V2, &MaskVec[0]);
11890 /// getVZextMovL - Return a zero-extending vector move low node.
11892 static SDValue getVZextMovL(MVT VT, MVT OpVT,
11893 SDValue SrcOp, SelectionDAG &DAG,
11894 const X86Subtarget *Subtarget, SDLoc dl) {
11895 if (VT == MVT::v2f64 || VT == MVT::v4f32) {
11896 LoadSDNode *LD = nullptr;
11897 if (!isScalarLoadToVector(SrcOp.getNode(), &LD))
11898 LD = dyn_cast<LoadSDNode>(SrcOp);
11900 // movssrr and movsdrr do not clear top bits. Try to use movd, movq
11902 MVT ExtVT = (OpVT == MVT::v2f64) ? MVT::i64 : MVT::i32;
11903 if ((ExtVT != MVT::i64 || Subtarget->is64Bit()) &&
11904 SrcOp.getOpcode() == ISD::SCALAR_TO_VECTOR &&
11905 SrcOp.getOperand(0).getOpcode() == ISD::BITCAST &&
11906 SrcOp.getOperand(0).getOperand(0).getValueType() == ExtVT) {
11908 OpVT = (OpVT == MVT::v2f64) ? MVT::v2i64 : MVT::v4i32;
11909 return DAG.getNode(ISD::BITCAST, dl, VT,
11910 DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT,
11911 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
11913 SrcOp.getOperand(0)
11919 return DAG.getNode(ISD::BITCAST, dl, VT,
11920 DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT,
11921 DAG.getNode(ISD::BITCAST, dl,
11925 /// LowerVECTOR_SHUFFLE_256 - Handle all 256-bit wide vector shuffles
11926 /// which could not be matched by any known target specific shuffle
11928 LowerVECTOR_SHUFFLE_256(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
11930 SDValue NewOp = Compact8x32ShuffleNode(SVOp, DAG);
11931 if (NewOp.getNode())
11934 MVT VT = SVOp->getSimpleValueType(0);
11936 unsigned NumElems = VT.getVectorNumElements();
11937 unsigned NumLaneElems = NumElems / 2;
11940 MVT EltVT = VT.getVectorElementType();
11941 MVT NVT = MVT::getVectorVT(EltVT, NumLaneElems);
11944 SmallVector<int, 16> Mask;
11945 for (unsigned l = 0; l < 2; ++l) {
11946 // Build a shuffle mask for the output, discovering on the fly which
11947 // input vectors to use as shuffle operands (recorded in InputUsed).
11948 // If building a suitable shuffle vector proves too hard, then bail
11949 // out with UseBuildVector set.
11950 bool UseBuildVector = false;
11951 int InputUsed[2] = { -1, -1 }; // Not yet discovered.
11952 unsigned LaneStart = l * NumLaneElems;
11953 for (unsigned i = 0; i != NumLaneElems; ++i) {
11954 // The mask element. This indexes into the input.
11955 int Idx = SVOp->getMaskElt(i+LaneStart);
11957 // the mask element does not index into any input vector.
11958 Mask.push_back(-1);
11962 // The input vector this mask element indexes into.
11963 int Input = Idx / NumLaneElems;
11965 // Turn the index into an offset from the start of the input vector.
11966 Idx -= Input * NumLaneElems;
11968 // Find or create a shuffle vector operand to hold this input.
11970 for (OpNo = 0; OpNo < array_lengthof(InputUsed); ++OpNo) {
11971 if (InputUsed[OpNo] == Input)
11972 // This input vector is already an operand.
11974 if (InputUsed[OpNo] < 0) {
11975 // Create a new operand for this input vector.
11976 InputUsed[OpNo] = Input;
11981 if (OpNo >= array_lengthof(InputUsed)) {
11982 // More than two input vectors used! Give up on trying to create a
11983 // shuffle vector. Insert all elements into a BUILD_VECTOR instead.
11984 UseBuildVector = true;
11988 // Add the mask index for the new shuffle vector.
11989 Mask.push_back(Idx + OpNo * NumLaneElems);
11992 if (UseBuildVector) {
11993 SmallVector<SDValue, 16> SVOps;
11994 for (unsigned i = 0; i != NumLaneElems; ++i) {
11995 // The mask element. This indexes into the input.
11996 int Idx = SVOp->getMaskElt(i+LaneStart);
11998 SVOps.push_back(DAG.getUNDEF(EltVT));
12002 // The input vector this mask element indexes into.
12003 int Input = Idx / NumElems;
12005 // Turn the index into an offset from the start of the input vector.
12006 Idx -= Input * NumElems;
12008 // Extract the vector element by hand.
12009 SVOps.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
12010 SVOp->getOperand(Input),
12011 DAG.getIntPtrConstant(Idx)));
12014 // Construct the output using a BUILD_VECTOR.
12015 Output[l] = DAG.getNode(ISD::BUILD_VECTOR, dl, NVT, SVOps);
12016 } else if (InputUsed[0] < 0) {
12017 // No input vectors were used! The result is undefined.
12018 Output[l] = DAG.getUNDEF(NVT);
12020 SDValue Op0 = Extract128BitVector(SVOp->getOperand(InputUsed[0] / 2),
12021 (InputUsed[0] % 2) * NumLaneElems,
12023 // If only one input was used, use an undefined vector for the other.
12024 SDValue Op1 = (InputUsed[1] < 0) ? DAG.getUNDEF(NVT) :
12025 Extract128BitVector(SVOp->getOperand(InputUsed[1] / 2),
12026 (InputUsed[1] % 2) * NumLaneElems, DAG, dl);
12027 // At least one input vector was used. Create a new shuffle vector.
12028 Output[l] = DAG.getVectorShuffle(NVT, dl, Op0, Op1, &Mask[0]);
12034 // Concatenate the two half results back together.
12035 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Output[0], Output[1]);
12038 /// LowerVECTOR_SHUFFLE_128v4 - Handle all 128-bit wide vectors with
12039 /// 4 elements, and match them with several different shuffle types.
12041 LowerVECTOR_SHUFFLE_128v4(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
12042 SDValue V1 = SVOp->getOperand(0);
12043 SDValue V2 = SVOp->getOperand(1);
12045 MVT VT = SVOp->getSimpleValueType(0);
12047 assert(VT.is128BitVector() && "Unsupported vector size");
12049 std::pair<int, int> Locs[4];
12050 int Mask1[] = { -1, -1, -1, -1 };
12051 SmallVector<int, 8> PermMask(SVOp->getMask().begin(), SVOp->getMask().end());
12053 unsigned NumHi = 0;
12054 unsigned NumLo = 0;
12055 for (unsigned i = 0; i != 4; ++i) {
12056 int Idx = PermMask[i];
12058 Locs[i] = std::make_pair(-1, -1);
12060 assert(Idx < 8 && "Invalid VECTOR_SHUFFLE index!");
12062 Locs[i] = std::make_pair(0, NumLo);
12063 Mask1[NumLo] = Idx;
12066 Locs[i] = std::make_pair(1, NumHi);
12068 Mask1[2+NumHi] = Idx;
12074 if (NumLo <= 2 && NumHi <= 2) {
12075 // No more than two elements come from either vector. This can be
12076 // implemented with two shuffles. The first shuffle gathers the elements.
12077 // The second shuffle, which takes the first shuffle as both of its
12078 // vector operands, puts the elements into the right order.
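// For example, for the mask <0, 4, 1, 5> the first shuffle produces
// <X0, X1, Y0, Y1> and the second shuffle reorders it to <X0, Y0, X1, Y1>.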
12079 V1 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);
12081 int Mask2[] = { -1, -1, -1, -1 };
12083 for (unsigned i = 0; i != 4; ++i)
12084 if (Locs[i].first != -1) {
12085 unsigned Idx = (i < 2) ? 0 : 4;
12086 Idx += Locs[i].first * 2 + Locs[i].second;
12090 return DAG.getVectorShuffle(VT, dl, V1, V1, &Mask2[0]);
12093 if (NumLo == 3 || NumHi == 3) {
12094 // Otherwise, we must have three elements from one vector, call it X, and
12095 // one element from the other, call it Y. First, use a shufps to build an
12096 // intermediate vector with the one element from Y and the element from X
12097 // that will be in the same half in the final destination (the indexes don't
12098 // matter). Then, use a shufps to build the final vector, taking the half
12099 // containing the element from Y from the intermediate, and the other half
12102 // Normalize it so the 3 elements come from V1.
12103 CommuteVectorShuffleMask(PermMask, 4);
12107 // Find the element from V2.
12109 for (HiIndex = 0; HiIndex < 3; ++HiIndex) {
12110 int Val = PermMask[HiIndex];
12117 Mask1[0] = PermMask[HiIndex];
12119 Mask1[2] = PermMask[HiIndex^1];
12121 V2 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);
12123 if (HiIndex >= 2) {
12124 Mask1[0] = PermMask[0];
12125 Mask1[1] = PermMask[1];
12126 Mask1[2] = HiIndex & 1 ? 6 : 4;
12127 Mask1[3] = HiIndex & 1 ? 4 : 6;
12128 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);
12131 Mask1[0] = HiIndex & 1 ? 2 : 0;
12132 Mask1[1] = HiIndex & 1 ? 0 : 2;
12133 Mask1[2] = PermMask[2];
12134 Mask1[3] = PermMask[3];
12139 return DAG.getVectorShuffle(VT, dl, V2, V1, &Mask1[0]);
12142 // Break it into (shuffle shuffle_hi, shuffle_lo).
12143 int LoMask[] = { -1, -1, -1, -1 };
12144 int HiMask[] = { -1, -1, -1, -1 };
12146 int *MaskPtr = LoMask;
12147 unsigned MaskIdx = 0;
12148 unsigned LoIdx = 0;
12149 unsigned HiIdx = 2;
12150 for (unsigned i = 0; i != 4; ++i) {
12157 int Idx = PermMask[i];
12159 Locs[i] = std::make_pair(-1, -1);
12160 } else if (Idx < 4) {
12161 Locs[i] = std::make_pair(MaskIdx, LoIdx);
12162 MaskPtr[LoIdx] = Idx;
12165 Locs[i] = std::make_pair(MaskIdx, HiIdx);
12166 MaskPtr[HiIdx] = Idx;
12171 SDValue LoShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &LoMask[0]);
12172 SDValue HiShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &HiMask[0]);
12173 int MaskOps[] = { -1, -1, -1, -1 };
12174 for (unsigned i = 0; i != 4; ++i)
12175 if (Locs[i].first != -1)
12176 MaskOps[i] = Locs[i].first * 4 + Locs[i].second;
12177 return DAG.getVectorShuffle(VT, dl, LoShuffle, HiShuffle, &MaskOps[0]);
12180 static bool MayFoldVectorLoad(SDValue V) {
12181 while (V.hasOneUse() && V.getOpcode() == ISD::BITCAST)
12182 V = V.getOperand(0);
12184 if (V.hasOneUse() && V.getOpcode() == ISD::SCALAR_TO_VECTOR)
12185 V = V.getOperand(0);
12186 if (V.hasOneUse() && V.getOpcode() == ISD::BUILD_VECTOR &&
12187 V.getNumOperands() == 2 && V.getOperand(1).getOpcode() == ISD::UNDEF)
12188 // BUILD_VECTOR (load), undef
12189 V = V.getOperand(0);
12191 return MayFoldLoad(V);
12195 SDValue getMOVDDup(SDValue &Op, SDLoc &dl, SDValue V1, SelectionDAG &DAG) {
12196 MVT VT = Op.getSimpleValueType();
12198 // Canonicalize to v2f64.
12199 V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1);
12200 return DAG.getNode(ISD::BITCAST, dl, VT,
12201 getTargetShuffleNode(X86ISD::MOVDDUP, dl, MVT::v2f64,
12206 SDValue getMOVLowToHigh(SDValue &Op, SDLoc &dl, SelectionDAG &DAG,
12208 SDValue V1 = Op.getOperand(0);
12209 SDValue V2 = Op.getOperand(1);
12210 MVT VT = Op.getSimpleValueType();
12212 assert(VT != MVT::v2i64 && "unsupported shuffle type");
12214 if (HasSSE2 && VT == MVT::v2f64)
12215 return getTargetShuffleNode(X86ISD::MOVLHPD, dl, VT, V1, V2, DAG);
12217 // v4f32 or v4i32: canonicalized to v4f32 (which is legal for SSE1)
12218 return DAG.getNode(ISD::BITCAST, dl, VT,
12219 getTargetShuffleNode(X86ISD::MOVLHPS, dl, MVT::v4f32,
12220 DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V1),
12221 DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V2), DAG));
12225 SDValue getMOVHighToLow(SDValue &Op, SDLoc &dl, SelectionDAG &DAG) {
12226 SDValue V1 = Op.getOperand(0);
12227 SDValue V2 = Op.getOperand(1);
12228 MVT VT = Op.getSimpleValueType();
12230 assert((VT == MVT::v4i32 || VT == MVT::v4f32) &&
12231 "unsupported shuffle type");
12233 if (V2.getOpcode() == ISD::UNDEF)
12237 return getTargetShuffleNode(X86ISD::MOVHLPS, dl, VT, V1, V2, DAG);
12241 SDValue getMOVLP(SDValue &Op, SDLoc &dl, SelectionDAG &DAG, bool HasSSE2) {
12242 SDValue V1 = Op.getOperand(0);
12243 SDValue V2 = Op.getOperand(1);
12244 MVT VT = Op.getSimpleValueType();
12245 unsigned NumElems = VT.getVectorNumElements();
12247 // Use MOVLPS and MOVLPD in case V1 or V2 are loads. During isel, the second
12248 // operand of these instructions is only memory, so check if there's a
12249 // potential load folding here, otherwise use SHUFPS or MOVSD to match the
12251 bool CanFoldLoad = false;
12253 // Trivial case, when V2 comes from a load.
12254 if (MayFoldVectorLoad(V2))
12255 CanFoldLoad = true;
12257 // When V1 is a load, it can be folded later into a store in isel, example:
12258 // (store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)), addr:$src1)
12260 // (MOVLPSmr addr:$src1, VR128:$src2)
12261 // So, recognize this potential and also use MOVLPS or MOVLPD
12262 else if (MayFoldVectorLoad(V1) && MayFoldIntoStore(Op))
12263 CanFoldLoad = true;
12265 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
12267 if (HasSSE2 && NumElems == 2)
12268 return getTargetShuffleNode(X86ISD::MOVLPD, dl, VT, V1, V2, DAG);
12271 // If we don't care about the second element, proceed to use movss.
12272 if (SVOp->getMaskElt(1) != -1)
12273 return getTargetShuffleNode(X86ISD::MOVLPS, dl, VT, V1, V2, DAG);
12276 // movl and movlp will both match v2i64, but v2i64 is never matched by
12277 // movl earlier because we make it strict to avoid messing with the movlp load
12278 // folding logic (see the code above getMOVLP call). Match it here then,
12279 // this is horrible, but will stay like this until we move all shuffle
12280 // matching to x86 specific nodes. Note that for the 1st condition all
12281 // types are matched with movsd.
12283 // FIXME: isMOVLMask should be checked and matched before getMOVLP,
12284 // as to remove this logic from here, as much as possible
12285 if (NumElems == 2 || !isMOVLMask(SVOp->getMask(), VT))
12286 return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG);
12287 return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG);
12290 assert(VT != MVT::v4i32 && "unsupported shuffle type");
12292 // Invert the operand order and use SHUFPS to match it.
12293 return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V2, V1,
12294 getShuffleSHUFImmediate(SVOp), DAG);
12297 static SDValue NarrowVectorLoadToElement(LoadSDNode *Load, unsigned Index,
12298 SelectionDAG &DAG) {
12300 MVT VT = Load->getSimpleValueType(0);
12301 MVT EVT = VT.getVectorElementType();
12302 SDValue Addr = Load->getOperand(1);
12303 SDValue NewAddr = DAG.getNode(
12304 ISD::ADD, dl, Addr.getSimpleValueType(), Addr,
12305 DAG.getConstant(Index * EVT.getStoreSize(), Addr.getSimpleValueType()));
12308 DAG.getLoad(EVT, dl, Load->getChain(), NewAddr,
12309 DAG.getMachineFunction().getMachineMemOperand(
12310 Load->getMemOperand(), 0, EVT.getStoreSize()));
12314 // It is only safe to call this function if isINSERTPSMask is true for
12315 // this shufflevector mask.
12316 static SDValue getINSERTPS(ShuffleVectorSDNode *SVOp, SDLoc &dl,
12317 SelectionDAG &DAG) {
12318 // Generate an insertps instruction when inserting an f32 from memory onto a
12319 // v4f32 or when copying a member from one v4f32 to another.
12320 // We also use it for transferring i32 from one register to another,
12321 // since it simply copies the same bits.
12322 // If we're transferring an i32 from memory to a specific element in a
12323 // register, we output a generic DAG that will match the PINSRD
12325 MVT VT = SVOp->getSimpleValueType(0);
12326 MVT EVT = VT.getVectorElementType();
12327 SDValue V1 = SVOp->getOperand(0);
12328 SDValue V2 = SVOp->getOperand(1);
12329 auto Mask = SVOp->getMask();
12330 assert((VT == MVT::v4f32 || VT == MVT::v4i32) &&
12331 "unsupported vector type for insertps/pinsrd");
12333 auto FromV1Predicate = [](const int &i) { return i < 4 && i > -1; };
12334 auto FromV2Predicate = [](const int &i) { return i >= 4; };
12335 int FromV1 = std::count_if(Mask.begin(), Mask.end(), FromV1Predicate);
12339 unsigned DestIndex;
12343 DestIndex = std::find_if(Mask.begin(), Mask.end(), FromV1Predicate) -
12346 // If we have 1 element from each vector, we have to check if we're
12347 // changing V1's element's place. If so, we're done. Otherwise, we
12348 // should assume we're changing V2's element's place and behave
12350 int FromV2 = std::count_if(Mask.begin(), Mask.end(), FromV2Predicate);
12351 assert(DestIndex <= INT32_MAX && "truncated destination index");
12352 if (FromV1 == FromV2 &&
12353 static_cast<int>(DestIndex) == Mask[DestIndex] % 4) {
12357 std::find_if(Mask.begin(), Mask.end(), FromV2Predicate) - Mask.begin();
12360 assert(std::count_if(Mask.begin(), Mask.end(), FromV2Predicate) == 1 &&
12361 "More than one element from V1 and from V2, or no elements from one "
12362 "of the vectors. This case should not have returned true from "
12367 std::find_if(Mask.begin(), Mask.end(), FromV2Predicate) - Mask.begin();
12370 // Get an index into the source vector in the range [0,4) (the mask is
12371 // in the range [0,8) because it can address V1 and V2)
12372 unsigned SrcIndex = Mask[DestIndex] % 4;
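// The INSERTPS immediate encodes the source element in bits [7:6], the
// destination element in bits [5:4], and a zero mask in bits [3:0], hence the
// (DestIndex << 4) and (SrcIndex << 6) terms used below.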
12373 if (MayFoldLoad(From)) {
12374 // Trivial case, when From comes from a load and is only used by the
12375 // shuffle. Make it use insertps from the vector that we need from that
12378 NarrowVectorLoadToElement(cast<LoadSDNode>(From), SrcIndex, DAG);
12379 if (!NewLoad.getNode())
12382 if (EVT == MVT::f32) {
12383 // Create this as a scalar to vector to match the instruction pattern.
12384 SDValue LoadScalarToVector =
12385 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, NewLoad);
12386 SDValue InsertpsMask = DAG.getIntPtrConstant(DestIndex << 4);
12387 return DAG.getNode(X86ISD::INSERTPS, dl, VT, To, LoadScalarToVector,
12389 } else { // EVT == MVT::i32
12390 // If we're getting an i32 from memory, use an INSERT_VECTOR_ELT
12391 // instruction, to match the PINSRD instruction, which loads an i32 to a
12392 // certain vector element.
12393 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, To, NewLoad,
12394 DAG.getConstant(DestIndex, MVT::i32));
12398 // Vector-element-to-vector
12399 SDValue InsertpsMask = DAG.getIntPtrConstant(DestIndex << 4 | SrcIndex << 6);
12400 return DAG.getNode(X86ISD::INSERTPS, dl, VT, To, From, InsertpsMask);
12403 // Reduce a vector shuffle to zext.
12404 static SDValue LowerVectorIntExtend(SDValue Op, const X86Subtarget *Subtarget,
12405 SelectionDAG &DAG) {
12406 // PMOVZX is only available from SSE41.
12407 if (!Subtarget->hasSSE41())
12410 MVT VT = Op.getSimpleValueType();
12412 // Only AVX2 supports 256-bit vector integer extension.
12413 if (!Subtarget->hasInt256() && VT.is256BitVector())
12416 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
12418 SDValue V1 = Op.getOperand(0);
12419 SDValue V2 = Op.getOperand(1);
12420 unsigned NumElems = VT.getVectorNumElements();
12422 // Extension is a unary operation, so V2 must be undef, and the element type
12423 // of the source vector must be smaller than i64.
12424 if (V2.getOpcode() != ISD::UNDEF || !VT.isInteger() ||
12425 VT.getVectorElementType() == MVT::i64)
12428 // Find the expansion ratio, e.g. expanding from i8 to i32 has a ratio of 4.
12429 unsigned Shift = 1; // Start from 2, i.e. 1 << 1.
12430 while ((1U << Shift) < NumElems) {
12431 if (SVOp->getMaskElt(1U << Shift) == 1)
12434 // The maximal ratio is 8, i.e. from i8 to i64.
12439 // Check the shuffle mask.
12440 unsigned Mask = (1U << Shift) - 1;
12441 for (unsigned i = 0; i != NumElems; ++i) {
12442 int EltIdx = SVOp->getMaskElt(i);
12443 if ((i & Mask) != 0 && EltIdx != -1)
12445 if ((i & Mask) == 0 && (unsigned)EltIdx != (i >> Shift))
12449 unsigned NBits = VT.getVectorElementType().getSizeInBits() << Shift;
12450 MVT NeVT = MVT::getIntegerVT(NBits);
12451 MVT NVT = MVT::getVectorVT(NeVT, NumElems >> Shift);
12453 if (!DAG.getTargetLoweringInfo().isTypeLegal(NVT))
12456 return DAG.getNode(ISD::BITCAST, DL, VT,
12457 DAG.getNode(X86ISD::VZEXT, DL, NVT, V1));
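// Worked example for the match above (illustrative): a v8i16 shuffle of V1
// with undef and mask <0,-1,1,-1,2,-1,3,-1> passes the checks with
// Shift == 1, so NVT becomes v4i32 and the shuffle is rewritten as
//   bitcast v8i16 (X86ISD::VZEXT V1 to v4i32)
// which the selector can then match as a single pmovzxwd.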
12460 static SDValue NormalizeVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
12461 SelectionDAG &DAG) {
12462 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
12463 MVT VT = Op.getSimpleValueType();
12465 SDValue V1 = Op.getOperand(0);
12466 SDValue V2 = Op.getOperand(1);
12468 if (isZeroShuffle(SVOp))
12469 return getZeroVector(VT, Subtarget, DAG, dl);
12471 // Handle splat operations
12472 if (SVOp->isSplat()) {
12473 // Use vbroadcast whenever the splat comes from a foldable load
12474 SDValue Broadcast = LowerVectorBroadcast(Op, Subtarget, DAG);
12475 if (Broadcast.getNode())
12479 // Check integer expanding shuffles.
12480 SDValue NewOp = LowerVectorIntExtend(Op, Subtarget, DAG);
12481 if (NewOp.getNode())
12484 // If the shuffle can be profitably rewritten as a narrower shuffle, then
12486 if (VT == MVT::v8i16 || VT == MVT::v16i8 || VT == MVT::v16i16 ||
12487 VT == MVT::v32i8) {
12488 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG);
12489 if (NewOp.getNode())
12490 return DAG.getNode(ISD::BITCAST, dl, VT, NewOp);
12491 } else if (VT.is128BitVector() && Subtarget->hasSSE2()) {
12492 // FIXME: Figure out a cleaner way to do this.
12493 if (ISD::isBuildVectorAllZeros(V2.getNode())) {
12494 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG);
12495 if (NewOp.getNode()) {
12496 MVT NewVT = NewOp.getSimpleValueType();
12497 if (isCommutedMOVLMask(cast<ShuffleVectorSDNode>(NewOp)->getMask(),
12498 NewVT, true, false))
12499 return getVZextMovL(VT, NewVT, NewOp.getOperand(0), DAG, Subtarget,
12502 } else if (ISD::isBuildVectorAllZeros(V1.getNode())) {
12503 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG);
12504 if (NewOp.getNode()) {
12505 MVT NewVT = NewOp.getSimpleValueType();
12506 if (isMOVLMask(cast<ShuffleVectorSDNode>(NewOp)->getMask(), NewVT))
12507 return getVZextMovL(VT, NewVT, NewOp.getOperand(1), DAG, Subtarget,
12516 X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
12517 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
12518 SDValue V1 = Op.getOperand(0);
12519 SDValue V2 = Op.getOperand(1);
12520 MVT VT = Op.getSimpleValueType();
12522 unsigned NumElems = VT.getVectorNumElements();
12523 bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
12524 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
12525 bool V1IsSplat = false;
12526 bool V2IsSplat = false;
12527 bool HasSSE2 = Subtarget->hasSSE2();
12528 bool HasFp256 = Subtarget->hasFp256();
12529 bool HasInt256 = Subtarget->hasInt256();
12530 MachineFunction &MF = DAG.getMachineFunction();
12531 bool OptForSize = MF.getFunction()->getAttributes().
12532 hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
12534 // Check if we should use the experimental vector shuffle lowering. If so,
12535 // delegate completely to that code path.
12536 if (ExperimentalVectorShuffleLowering)
12537 return lowerVectorShuffle(Op, Subtarget, DAG);
12539 assert(VT.getSizeInBits() != 64 && "Can't lower MMX shuffles");
12541 if (V1IsUndef && V2IsUndef)
12542 return DAG.getUNDEF(VT);
12544 // When we create a shuffle node we put the UNDEF node to second operand,
12545 // but in some cases the first operand may be transformed to UNDEF.
12546 // In this case we should just commute the node.
12548 return DAG.getCommutedVectorShuffle(*SVOp);
12550 // Vector shuffle lowering takes 3 steps:
12552 // 1) Normalize the input vectors. Here splats, zeroed vectors, profitable
12553 // narrowing and commutation of operands should be handled.
12554 // 2) Matching of shuffles with known shuffle masks to x86 target specific shuffle nodes.
12556 // 3) Rewriting of unmatched masks into new generic shuffle operations,
12557 // so the shuffle can be broken into other shuffles and the legalizer can
12558 // try the lowering again.
12560 // The general idea is that no vector_shuffle operation should be left to
12561 // be matched during isel; all of them must be converted to a target specific node.
12564 // Normalize the input vectors. Here splats, zeroed vectors, profitable
12565 // narrowing and commutation of operands should be handled. The actual code
12566 // doesn't include all of those, work in progress...
12567 SDValue NewOp = NormalizeVectorShuffle(Op, Subtarget, DAG);
12568 if (NewOp.getNode())
12571 SmallVector<int, 8> M(SVOp->getMask().begin(), SVOp->getMask().end());
12573 // NOTE: isPSHUFDMask can also match both masks below (unpckl_undef and
12574 // unpckh_undef). Only use pshufd if speed is more important than size.
12575 if (OptForSize && isUNPCKL_v_undef_Mask(M, VT, HasInt256))
12576 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
12577 if (OptForSize && isUNPCKH_v_undef_Mask(M, VT, HasInt256))
12578 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG);
12580 if (isMOVDDUPMask(M, VT) && Subtarget->hasSSE3() &&
12581 V2IsUndef && MayFoldVectorLoad(V1))
12582 return getMOVDDup(Op, dl, V1, DAG);
12584 if (isMOVHLPS_v_undef_Mask(M, VT))
12585 return getMOVHighToLow(Op, dl, DAG);
12587 // Used to match splats.
12588 if (HasSSE2 && isUNPCKHMask(M, VT, HasInt256) && V2IsUndef &&
12589 (VT == MVT::v2f64 || VT == MVT::v2i64))
12590 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG);
12592 if (isPSHUFDMask(M, VT)) {
12593 // The actual implementation will match the mask in the if above, and then
12594 // during isel it can match several different instructions, not only pshufd
12595 // as its name suggests. Sad but true; emulate that behavior for now...
12596 if (isMOVDDUPMask(M, VT) && ((VT == MVT::v4f32 || VT == MVT::v2i64)))
12597 return getTargetShuffleNode(X86ISD::MOVLHPS, dl, VT, V1, V1, DAG);
12599 unsigned TargetMask = getShuffleSHUFImmediate(SVOp);
12601 if (HasSSE2 && (VT == MVT::v4f32 || VT == MVT::v4i32))
12602 return getTargetShuffleNode(X86ISD::PSHUFD, dl, VT, V1, TargetMask, DAG);
12604 if (HasFp256 && (VT == MVT::v4f32 || VT == MVT::v2f64))
12605 return getTargetShuffleNode(X86ISD::VPERMILPI, dl, VT, V1, TargetMask,
12608 return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V1, V1,
12612 if (isPALIGNRMask(M, VT, Subtarget))
12613 return getTargetShuffleNode(X86ISD::PALIGNR, dl, VT, V1, V2,
12614 getShufflePALIGNRImmediate(SVOp),
12617 if (isVALIGNMask(M, VT, Subtarget))
12618 return getTargetShuffleNode(X86ISD::VALIGN, dl, VT, V1, V2,
12619 getShuffleVALIGNImmediate(SVOp),
12622 // Check if this can be converted into a logical shift.
12623 bool isLeft = false;
12624 unsigned ShAmt = 0;
12626 bool isShift = HasSSE2 && isVectorShift(SVOp, DAG, isLeft, ShVal, ShAmt);
12627 if (isShift && ShVal.hasOneUse()) {
12628 // If the shifted value has multiple uses, it may be cheaper to use
12629 // v_set0 + movlhps or movhlps, etc.
12630 MVT EltVT = VT.getVectorElementType();
12631 ShAmt *= EltVT.getSizeInBits();
12632 return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl);
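// Illustrative example (assuming the usual mask conventions): a v4i32
// shuffle of V1 with an all-zeros V2 and mask <4,0,1,2> is a whole-vector
// shift by one element with zero fill. ShAmt is scaled to 1 * 32 bits and
// getVShift emits the corresponding byte-granular shift (pslldq $4) on the
// value bitcast to v2i64.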
12635 if (isMOVLMask(M, VT)) {
12636 if (ISD::isBuildVectorAllZeros(V1.getNode()))
12637 return getVZextMovL(VT, VT, V2, DAG, Subtarget, dl);
12638 if (!isMOVLPMask(M, VT)) {
12639 if (HasSSE2 && (VT == MVT::v2i64 || VT == MVT::v2f64))
12640 return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG);
12642 if (VT == MVT::v4i32 || VT == MVT::v4f32)
12643 return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG);
12647 // FIXME: fold these into legal mask.
12648 if (isMOVLHPSMask(M, VT) && !isUNPCKLMask(M, VT, HasInt256))
12649 return getMOVLowToHigh(Op, dl, DAG, HasSSE2);
12651 if (isMOVHLPSMask(M, VT))
12652 return getMOVHighToLow(Op, dl, DAG);
12654 if (V2IsUndef && isMOVSHDUPMask(M, VT, Subtarget))
12655 return getTargetShuffleNode(X86ISD::MOVSHDUP, dl, VT, V1, DAG);
12657 if (V2IsUndef && isMOVSLDUPMask(M, VT, Subtarget))
12658 return getTargetShuffleNode(X86ISD::MOVSLDUP, dl, VT, V1, DAG);
12660 if (isMOVLPMask(M, VT))
12661 return getMOVLP(Op, dl, DAG, HasSSE2);
12663 if (ShouldXformToMOVHLPS(M, VT) ||
12664 ShouldXformToMOVLP(V1.getNode(), V2.getNode(), M, VT))
12665 return DAG.getCommutedVectorShuffle(*SVOp);
12668 // No better options. Use a vshldq / vsrldq.
12669 MVT EltVT = VT.getVectorElementType();
12670 ShAmt *= EltVT.getSizeInBits();
12671 return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl);
12674 bool Commuted = false;
12675 // FIXME: This should also accept a bitcast of a splat? Be careful, not
12676 // 1,1,1,1 -> v8i16 though.
12677 BitVector UndefElements;
12678 if (auto *BVOp = dyn_cast<BuildVectorSDNode>(V1.getNode()))
12679 if (BVOp->getConstantSplatNode(&UndefElements) && UndefElements.none())
12681 if (auto *BVOp = dyn_cast<BuildVectorSDNode>(V2.getNode()))
12682 if (BVOp->getConstantSplatNode(&UndefElements) && UndefElements.none())
12685 // Canonicalize the splat or undef, if present, to be on the RHS.
12686 if (!V2IsUndef && V1IsSplat && !V2IsSplat) {
12687 CommuteVectorShuffleMask(M, NumElems);
12689 std::swap(V1IsSplat, V2IsSplat);
12693 if (isCommutedMOVLMask(M, VT, V2IsSplat, V2IsUndef)) {
12694 // Shuffling low element of v1 into undef, just return v1.
12697 // If V2 is a splat, the mask may be malformed such as <4,3,3,3>, which
12698 // the instruction selector will not match, so get a canonical MOVL with
12699 // swapped operands to undo the commute.
12700 return getMOVL(DAG, dl, VT, V2, V1);
12703 if (isUNPCKLMask(M, VT, HasInt256))
12704 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);
12706 if (isUNPCKHMask(M, VT, HasInt256))
12707 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);
12710 // Normalize the mask so all entries that point to V2 point to its first
12711 // element, then try to match unpck{h|l} again. If there is a match, return a
12712 // new vector_shuffle with the corrected mask.
12713 SmallVector<int, 8> NewMask(M.begin(), M.end());
12714 NormalizeMask(NewMask, NumElems);
12715 if (isUNPCKLMask(NewMask, VT, HasInt256, true))
12716 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);
12717 if (isUNPCKHMask(NewMask, VT, HasInt256, true))
12718 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);
12722 // Commute it back and try unpck* again.
12723 // FIXME: this seems wrong.
12724 CommuteVectorShuffleMask(M, NumElems);
12726 std::swap(V1IsSplat, V2IsSplat);
12728 if (isUNPCKLMask(M, VT, HasInt256))
12729 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);
12731 if (isUNPCKHMask(M, VT, HasInt256))
12732 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);
12735 // Normalize the node to match x86 shuffle ops if needed
12736 if (!V2IsUndef && (isSHUFPMask(M, VT, /* Commuted */ true)))
12737 return DAG.getCommutedVectorShuffle(*SVOp);
12739 // The checks below are all present in isShuffleMaskLegal, but they are
12740 // inlined here right now to enable us to directly emit target specific
12741 // nodes, and remove one by one until they don't return Op anymore.
12743 if (ShuffleVectorSDNode::isSplatMask(&M[0], VT) &&
12744 SVOp->getSplatIndex() == 0 && V2IsUndef) {
12745 if (VT == MVT::v2f64 || VT == MVT::v2i64)
12746 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
12749 if (isPSHUFHWMask(M, VT, HasInt256))
12750 return getTargetShuffleNode(X86ISD::PSHUFHW, dl, VT, V1,
12751 getShufflePSHUFHWImmediate(SVOp),
12754 if (isPSHUFLWMask(M, VT, HasInt256))
12755 return getTargetShuffleNode(X86ISD::PSHUFLW, dl, VT, V1,
12756 getShufflePSHUFLWImmediate(SVOp),
12759 unsigned MaskValue;
12760 if (isBlendMask(M, VT, Subtarget->hasSSE41(), Subtarget->hasInt256(),
12762 return LowerVECTOR_SHUFFLEtoBlend(SVOp, MaskValue, Subtarget, DAG);
12764 if (isSHUFPMask(M, VT))
12765 return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V1, V2,
12766 getShuffleSHUFImmediate(SVOp), DAG);
12768 if (isUNPCKL_v_undef_Mask(M, VT, HasInt256))
12769 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
12770 if (isUNPCKH_v_undef_Mask(M, VT, HasInt256))
12771 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG);
12773 //===--------------------------------------------------------------------===//
12774 // Generate target specific nodes for 128 or 256-bit shuffles only
12775 // supported in the AVX instruction set.
12778 // Handle VMOVDDUPY permutations
12779 if (V2IsUndef && isMOVDDUPYMask(M, VT, HasFp256))
12780 return getTargetShuffleNode(X86ISD::MOVDDUP, dl, VT, V1, DAG);
12782 // Handle VPERMILPS/D* permutations
12783 if (isVPERMILPMask(M, VT)) {
12784 if ((HasInt256 && VT == MVT::v8i32) || VT == MVT::v16i32)
12785 return getTargetShuffleNode(X86ISD::PSHUFD, dl, VT, V1,
12786 getShuffleSHUFImmediate(SVOp), DAG);
12787 return getTargetShuffleNode(X86ISD::VPERMILPI, dl, VT, V1,
12788 getShuffleSHUFImmediate(SVOp), DAG);
12792 if (VT.is512BitVector() && isINSERT64x4Mask(M, VT, &Idx))
12793 return Insert256BitVector(V1, Extract256BitVector(V2, 0, DAG, dl),
12794 Idx*(NumElems/2), DAG, dl);
12796 // Handle VPERM2F128/VPERM2I128 permutations
12797 if (isVPERM2X128Mask(M, VT, HasFp256))
12798 return getTargetShuffleNode(X86ISD::VPERM2X128, dl, VT, V1,
12799 V2, getShuffleVPERM2X128Immediate(SVOp), DAG);
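// Illustrative example (assuming the usual vperm2f128 immediate encoding):
// each nibble of the immediate selects a 128-bit lane, with 0/1 the low and
// high lanes of V1 and 2/3 the low and high lanes of V2. A v4i64 shuffle
// with mask <2,3,4,5>, i.e. "high half of V1 : low half of V2", therefore
// uses the immediate 0x21.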
12801 if (Subtarget->hasSSE41() && isINSERTPSMask(M, VT))
12802 return getINSERTPS(SVOp, dl, DAG);
12805 if (V2IsUndef && HasInt256 && isPermImmMask(M, VT, Imm8))
12806 return getTargetShuffleNode(X86ISD::VPERMI, dl, VT, V1, Imm8, DAG);
12808 if ((V2IsUndef && HasInt256 && VT.is256BitVector() && NumElems == 8) ||
12809 VT.is512BitVector()) {
12810 MVT MaskEltVT = MVT::getIntegerVT(VT.getVectorElementType().getSizeInBits());
12811 MVT MaskVectorVT = MVT::getVectorVT(MaskEltVT, NumElems);
12812 SmallVector<SDValue, 16> permclMask;
12813 for (unsigned i = 0; i != NumElems; ++i) {
12814 permclMask.push_back(DAG.getConstant((M[i]>=0) ? M[i] : 0, MaskEltVT));
12817 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVectorVT, permclMask);
12819 // Bitcast is for VPERMPS since mask is v8i32 but node takes v8f32
12820 return DAG.getNode(X86ISD::VPERMV, dl, VT,
12821 DAG.getNode(ISD::BITCAST, dl, VT, Mask), V1);
12822 return DAG.getNode(X86ISD::VPERMV3, dl, VT, V1,
12823 DAG.getNode(ISD::BITCAST, dl, VT, Mask), V2);
12826 //===--------------------------------------------------------------------===//
12827 // Since no target specific shuffle was selected for this generic one,
12828 // lower it into other known shuffles. FIXME: this isn't true yet, but
12829 // this is the plan.
12832 // Handle v8i16 specifically since SSE can do byte extraction and insertion.
12833 if (VT == MVT::v8i16) {
12834 SDValue NewOp = LowerVECTOR_SHUFFLEv8i16(Op, Subtarget, DAG);
12835 if (NewOp.getNode())
12839 if (VT == MVT::v16i16 && Subtarget->hasInt256()) {
12840 SDValue NewOp = LowerVECTOR_SHUFFLEv16i16(Op, DAG);
12841 if (NewOp.getNode())
12845 if (VT == MVT::v16i8) {
12846 SDValue NewOp = LowerVECTOR_SHUFFLEv16i8(SVOp, Subtarget, DAG);
12847 if (NewOp.getNode())
12851 if (VT == MVT::v32i8) {
12852 SDValue NewOp = LowerVECTOR_SHUFFLEv32i8(SVOp, Subtarget, DAG);
12853 if (NewOp.getNode())
12857 // Handle all 128-bit wide vectors with 4 elements, and match them with
12858 // several different shuffle types.
12859 if (NumElems == 4 && VT.is128BitVector())
12860 return LowerVECTOR_SHUFFLE_128v4(SVOp, DAG);
12862 // Handle general 256-bit shuffles
12863 if (VT.is256BitVector())
12864 return LowerVECTOR_SHUFFLE_256(SVOp, DAG);
12869 // This function assumes its argument is a BUILD_VECTOR of constants or
12870 // undef SDNodes, i.e. ISD::isBuildVectorOfConstantSDNodes(BuildVector) is true.
12872 static bool BUILD_VECTORtoBlendMask(BuildVectorSDNode *BuildVector,
12873 unsigned &MaskValue) {
12875 unsigned NumElems = BuildVector->getNumOperands();
12876 // There are 2 lanes if (NumElems > 8), and 1 lane otherwise.
12877 unsigned NumLanes = (NumElems - 1) / 8 + 1;
12878 unsigned NumElemsInLane = NumElems / NumLanes;
12880 // Blend for v16i16 should be symmetric for both lanes.
12881 for (unsigned i = 0; i < NumElemsInLane; ++i) {
12882 SDValue EltCond = BuildVector->getOperand(i);
12883 SDValue SndLaneEltCond =
12884 (NumLanes == 2) ? BuildVector->getOperand(i + NumElemsInLane) : EltCond;
12886 int Lane1Cond = -1, Lane2Cond = -1;
12887 if (isa<ConstantSDNode>(EltCond))
12888 Lane1Cond = !isZero(EltCond);
12889 if (isa<ConstantSDNode>(SndLaneEltCond))
12890 Lane2Cond = !isZero(SndLaneEltCond);
12892 if (Lane1Cond == Lane2Cond || Lane2Cond < 0)
12893 // Lane1Cond != 0, means we want the first argument.
12894 // Lane1Cond == 0, means we want the second argument.
12895 // The encoding of this argument is 0 for the first argument, 1
12896 // for the second. Therefore, invert the condition.
12897 MaskValue |= !Lane1Cond << i;
12898 else if (Lane1Cond < 0)
12899 MaskValue |= !Lane2Cond << i;
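// Worked example (illustrative): for a v4i32 condition vector
// <-1, 0, 0, -1> the loop sees Lane1Cond = 1, 0, 0, 1. Since the blend
// immediate encodes 0 for the first operand and 1 for the second, the bits
// are the inverted conditions, giving MaskValue = 0b0110 = 6.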
12906 /// \brief Try to lower a VSELECT instruction to an immediate-controlled blend operation.
12908 static SDValue lowerVSELECTtoBLENDI(SDValue Op, const X86Subtarget *Subtarget,
12909 SelectionDAG &DAG) {
12910 SDValue Cond = Op.getOperand(0);
12911 SDValue LHS = Op.getOperand(1);
12912 SDValue RHS = Op.getOperand(2);
12914 MVT VT = Op.getSimpleValueType();
12915 MVT EltVT = VT.getVectorElementType();
12916 unsigned NumElems = VT.getVectorNumElements();
12918 // There is no blend with immediate in AVX-512.
12919 if (VT.is512BitVector())
12922 if (!Subtarget->hasSSE41() || EltVT == MVT::i8)
12924 if (!Subtarget->hasInt256() && VT == MVT::v16i16)
12927 if (!ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
12930 // Check the mask for BLEND and build the value.
12931 unsigned MaskValue = 0;
12932 if (!BUILD_VECTORtoBlendMask(cast<BuildVectorSDNode>(Cond), MaskValue))
12935 // Convert i32 vectors to floating point if the target is not AVX2.
12936 // AVX2 introduced VPBLENDD instruction for 128 and 256-bit vectors.
12938 if (EltVT == MVT::i64 || (EltVT == MVT::i32 && !Subtarget->hasInt256())) {
12939 BlendVT = MVT::getVectorVT(MVT::getFloatingPointVT(EltVT.getSizeInBits()),
12941 LHS = DAG.getNode(ISD::BITCAST, dl, VT, LHS);
12942 RHS = DAG.getNode(ISD::BITCAST, dl, VT, RHS);
12945 SDValue Ret = DAG.getNode(X86ISD::BLENDI, dl, BlendVT, LHS, RHS,
12946 DAG.getConstant(MaskValue, MVT::i32));
12947 return DAG.getNode(ISD::BITCAST, dl, VT, Ret);
12950 SDValue X86TargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG) const {
12951 // A vselect where all conditions and data are constants can be optimized into
12952 // a single vector load by SelectionDAGLegalize::ExpandBUILD_VECTOR().
12953 if (ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(0).getNode()) &&
12954 ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(1).getNode()) &&
12955 ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(2).getNode()))
12958 SDValue BlendOp = lowerVSELECTtoBLENDI(Op, Subtarget, DAG);
12959 if (BlendOp.getNode())
12962 // Some types for vselect were previously set to Expand, not Legal or
12963 // Custom. Return an empty SDValue so we fall-through to Expand, after
12964 // the Custom lowering phase.
12965 MVT VT = Op.getSimpleValueType();
12966 switch (VT.SimpleTy) {
12971 if (Subtarget->hasBWI() && Subtarget->hasVLX())
12976 // We couldn't create a "Blend with immediate" node.
12977 // This node should still be legal, but we'll have to emit a blendv* instruction.
12982 static SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) {
12983 MVT VT = Op.getSimpleValueType();
12986 if (!Op.getOperand(0).getSimpleValueType().is128BitVector())
12989 if (VT.getSizeInBits() == 8) {
12990 SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, MVT::i32,
12991 Op.getOperand(0), Op.getOperand(1));
12992 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract,
12993 DAG.getValueType(VT));
12994 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
12997 if (VT.getSizeInBits() == 16) {
12998 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
12999 // If Idx is 0, it's cheaper to do a move instead of a pextrw.
13001 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
13002 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
13003 DAG.getNode(ISD::BITCAST, dl,
13006 Op.getOperand(1)));
13007 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, MVT::i32,
13008 Op.getOperand(0), Op.getOperand(1));
13009 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract,
13010 DAG.getValueType(VT));
13011 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
13014 if (VT == MVT::f32) {
13015 // EXTRACTPS outputs to a GPR32 register which will require a movd to copy
13016 // the result back to FR32 register. It's only worth matching if the
13017 // result has a single use which is a store or a bitcast to i32. And in
13018 // the case of a store, it's not worth it if the index is a constant 0,
13019 // because a MOVSSmr can be used instead, which is smaller and faster.
13020 if (!Op.hasOneUse())
13022 SDNode *User = *Op.getNode()->use_begin();
13023 if ((User->getOpcode() != ISD::STORE ||
13024 (isa<ConstantSDNode>(Op.getOperand(1)) &&
13025 cast<ConstantSDNode>(Op.getOperand(1))->isNullValue())) &&
13026 (User->getOpcode() != ISD::BITCAST ||
13027 User->getValueType(0) != MVT::i32))
13029 SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
13030 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32,
13033 return DAG.getNode(ISD::BITCAST, dl, MVT::f32, Extract);
13036 if (VT == MVT::i32 || VT == MVT::i64) {
13037 // ExtractPS/pextrq works with constant index.
13038 if (isa<ConstantSDNode>(Op.getOperand(1)))
13044 /// Extract one bit from mask vector, like v16i1 or v8i1.
13045 /// AVX-512 feature.
13047 X86TargetLowering::ExtractBitFromMaskVector(SDValue Op, SelectionDAG &DAG) const {
13048 SDValue Vec = Op.getOperand(0);
13050 MVT VecVT = Vec.getSimpleValueType();
13051 SDValue Idx = Op.getOperand(1);
13052 MVT EltVT = Op.getSimpleValueType();
13054 assert((EltVT == MVT::i1) && "Unexpected operands in ExtractBitFromMaskVector");
13055 assert((VecVT.getVectorNumElements() <= 16 || Subtarget->hasBWI()) &&
13056 "Unexpected vector type in ExtractBitFromMaskVector");
13058 // A variable index can't be handled in mask registers;
13059 // extend the vector to VR512.
13060 if (!isa<ConstantSDNode>(Idx)) {
13061 MVT ExtVT = (VecVT == MVT::v8i1 ? MVT::v8i64 : MVT::v16i32);
13062 SDValue Ext = DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVT, Vec);
13063 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
13064 ExtVT.getVectorElementType(), Ext, Idx);
13065 return DAG.getNode(ISD::TRUNCATE, dl, EltVT, Elt);
13068 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13069 const TargetRegisterClass* rc = getRegClassFor(VecVT);
13070 if (!Subtarget->hasDQI() && (VecVT.getVectorNumElements() <= 8))
13071 rc = getRegClassFor(MVT::v16i1);
13072 unsigned MaxSift = rc->getSize()*8 - 1;
13073 Vec = DAG.getNode(X86ISD::VSHLI, dl, VecVT, Vec,
13074 DAG.getConstant(MaxSift - IdxVal, MVT::i8));
13075 Vec = DAG.getNode(X86ISD::VSRLI, dl, VecVT, Vec,
13076 DAG.getConstant(MaxSift, MVT::i8));
13077 return DAG.getNode(X86ISD::VEXTRACT, dl, MVT::i1, Vec,
13078 DAG.getIntPtrConstant(0));
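// Illustrative sketch of the shift pair above: for a v16i1 mask held in a
// 16-bit register class (MaxSift == 15), extracting bit 5 first shifts
// left by 10 so every higher bit is discarded, then shifts right by 15 so
// the requested bit lands in position 0, where VEXTRACT reads it as an i1.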
13082 X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
13083 SelectionDAG &DAG) const {
13085 SDValue Vec = Op.getOperand(0);
13086 MVT VecVT = Vec.getSimpleValueType();
13087 SDValue Idx = Op.getOperand(1);
13089 if (Op.getSimpleValueType() == MVT::i1)
13090 return ExtractBitFromMaskVector(Op, DAG);
13092 if (!isa<ConstantSDNode>(Idx)) {
13093 if (VecVT.is512BitVector() ||
13094 (VecVT.is256BitVector() && Subtarget->hasInt256() &&
13095 VecVT.getVectorElementType().getSizeInBits() == 32)) {
13098 MVT::getIntegerVT(VecVT.getVectorElementType().getSizeInBits());
13099 MVT MaskVT = MVT::getVectorVT(MaskEltVT, VecVT.getSizeInBits() /
13100 MaskEltVT.getSizeInBits());
13102 Idx = DAG.getZExtOrTrunc(Idx, dl, MaskEltVT);
13103 SDValue Mask = DAG.getNode(X86ISD::VINSERT, dl, MaskVT,
13104 getZeroVector(MaskVT, Subtarget, DAG, dl),
13105 Idx, DAG.getConstant(0, getPointerTy()));
13106 SDValue Perm = DAG.getNode(X86ISD::VPERMV, dl, VecVT, Mask, Vec);
13107 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(),
13108 Perm, DAG.getConstant(0, getPointerTy()));
13113 // If this is a 256-bit vector result, first extract the 128-bit vector and
13114 // then extract the element from the 128-bit vector.
13115 if (VecVT.is256BitVector() || VecVT.is512BitVector()) {
13117 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13118 // Get the 128-bit vector.
13119 Vec = Extract128BitVector(Vec, IdxVal, DAG, dl);
13120 MVT EltVT = VecVT.getVectorElementType();
13122 unsigned ElemsPerChunk = 128 / EltVT.getSizeInBits();
13124 //if (IdxVal >= NumElems/2)
13125 // IdxVal -= NumElems/2;
13126 IdxVal -= (IdxVal/ElemsPerChunk)*ElemsPerChunk;
13127 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
13128 DAG.getConstant(IdxVal, MVT::i32));
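// Worked example (illustrative): extracting element 9 of a v16i16 first
// takes the upper 128-bit half (elements 8..15) via Extract128BitVector;
// with ElemsPerChunk == 8 the index is then reduced to 9 - 8 == 1 within
// that v8i16 half.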
13131 assert(VecVT.is128BitVector() && "Unexpected vector length");
13133 if (Subtarget->hasSSE41()) {
13134 SDValue Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG);
13139 MVT VT = Op.getSimpleValueType();
13140 // TODO: handle v16i8.
13141 if (VT.getSizeInBits() == 16) {
13142 SDValue Vec = Op.getOperand(0);
13143 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
13145 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
13146 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
13147 DAG.getNode(ISD::BITCAST, dl,
13149 Op.getOperand(1)));
13150 // Transform it so it matches pextrw, which produces a 32-bit result.
13151 MVT EltVT = MVT::i32;
13152 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, EltVT,
13153 Op.getOperand(0), Op.getOperand(1));
13154 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, EltVT, Extract,
13155 DAG.getValueType(VT));
13156 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
13159 if (VT.getSizeInBits() == 32) {
13160 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
13164 // SHUFPS the element to the lowest double word, then movss.
13165 int Mask[4] = { static_cast<int>(Idx), -1, -1, -1 };
13166 MVT VVT = Op.getOperand(0).getSimpleValueType();
13167 SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0),
13168 DAG.getUNDEF(VVT), Mask);
13169 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
13170 DAG.getIntPtrConstant(0));
13173 if (VT.getSizeInBits() == 64) {
13174 // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b
13175 // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught
13176 // to match extract_elt for f64.
13177 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
13181 // UNPCKHPD the element to the lowest double word, then movsd.
13182 // Note if the lower 64 bits of the result of the UNPCKHPD is then stored
13183 // to a f64mem, the whole operation is folded into a single MOVHPDmr.
13184 int Mask[2] = { 1, -1 };
13185 MVT VVT = Op.getOperand(0).getSimpleValueType();
13186 SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0),
13187 DAG.getUNDEF(VVT), Mask);
13188 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
13189 DAG.getIntPtrConstant(0));
13195 /// Insert one bit to mask vector, like v16i1 or v8i1.
13196 /// AVX-512 feature.
13198 X86TargetLowering::InsertBitToMaskVector(SDValue Op, SelectionDAG &DAG) const {
13200 SDValue Vec = Op.getOperand(0);
13201 SDValue Elt = Op.getOperand(1);
13202 SDValue Idx = Op.getOperand(2);
13203 MVT VecVT = Vec.getSimpleValueType();
13205 if (!isa<ConstantSDNode>(Idx)) {
13206 // Non constant index. Extend source and destination,
13207 // insert element and then truncate the result.
13208 MVT ExtVecVT = (VecVT == MVT::v8i1 ? MVT::v8i64 : MVT::v16i32);
13209 MVT ExtEltVT = (VecVT == MVT::v8i1 ? MVT::i64 : MVT::i32);
13210 SDValue ExtOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ExtVecVT,
13211 DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVecVT, Vec),
13212 DAG.getNode(ISD::ZERO_EXTEND, dl, ExtEltVT, Elt), Idx);
13213 return DAG.getNode(ISD::TRUNCATE, dl, VecVT, ExtOp);
13216 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13217 SDValue EltInVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Elt);
13218 if (Vec.getOpcode() == ISD::UNDEF)
13219 return DAG.getNode(X86ISD::VSHLI, dl, VecVT, EltInVec,
13220 DAG.getConstant(IdxVal, MVT::i8));
13221 const TargetRegisterClass* rc = getRegClassFor(VecVT);
13222 unsigned MaxSift = rc->getSize()*8 - 1;
13223 EltInVec = DAG.getNode(X86ISD::VSHLI, dl, VecVT, EltInVec,
13224 DAG.getConstant(MaxSift, MVT::i8));
13225 EltInVec = DAG.getNode(X86ISD::VSRLI, dl, VecVT, EltInVec,
13226 DAG.getConstant(MaxSift - IdxVal, MVT::i8));
13227 return DAG.getNode(ISD::OR, dl, VecVT, Vec, EltInVec);
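// Illustrative sketch of the sequence above: the scalar bit starts in
// element 0 of EltInVec, is shifted left by MaxSift to clear everything
// else, shifted right by MaxSift - IdxVal so the single live bit sits at
// position IdxVal, and is finally ORed into the existing mask vector.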
13230 SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
13231 SelectionDAG &DAG) const {
13232 MVT VT = Op.getSimpleValueType();
13233 MVT EltVT = VT.getVectorElementType();
13235 if (EltVT == MVT::i1)
13236 return InsertBitToMaskVector(Op, DAG);
13239 SDValue N0 = Op.getOperand(0);
13240 SDValue N1 = Op.getOperand(1);
13241 SDValue N2 = Op.getOperand(2);
13242 if (!isa<ConstantSDNode>(N2))
13244 auto *N2C = cast<ConstantSDNode>(N2);
13245 unsigned IdxVal = N2C->getZExtValue();
13247 // If the vector is wider than 128 bits, extract the 128-bit subvector, insert
13248 // into that, and then insert the subvector back into the result.
13249 if (VT.is256BitVector() || VT.is512BitVector()) {
13250 // Get the desired 128-bit vector half.
13251 SDValue V = Extract128BitVector(N0, IdxVal, DAG, dl);
13253 // Insert the element into the desired half.
13254 unsigned NumEltsIn128 = 128 / EltVT.getSizeInBits();
13255 unsigned IdxIn128 = IdxVal - (IdxVal / NumEltsIn128) * NumEltsIn128;
13257 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, V.getValueType(), V, N1,
13258 DAG.getConstant(IdxIn128, MVT::i32));
13260 // Insert the changed part back to the 256-bit vector
13261 return Insert128BitVector(N0, V, IdxVal, DAG, dl);
13263 assert(VT.is128BitVector() && "Only 128-bit vector types should be left!");
13265 if (Subtarget->hasSSE41()) {
13266 if (EltVT.getSizeInBits() == 8 || EltVT.getSizeInBits() == 16) {
13268 if (VT == MVT::v8i16) {
13269 Opc = X86ISD::PINSRW;
13271 assert(VT == MVT::v16i8);
13272 Opc = X86ISD::PINSRB;
13275 // Transform it so it matches pinsr{b,w}, which expects a GR32 as its second argument.
13277 if (N1.getValueType() != MVT::i32)
13278 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
13279 if (N2.getValueType() != MVT::i32)
13280 N2 = DAG.getIntPtrConstant(IdxVal);
13281 return DAG.getNode(Opc, dl, VT, N0, N1, N2);
13284 if (EltVT == MVT::f32) {
13285 // Bits [7:6] of the constant are the source select. This will always be
13286 // zero here. The DAG Combiner may combine an extract_elt index into these
13288 // bits. For example, (insert (extract X, 3), 2) could be matched by placing
13290 // the '3' into bits [7:6] of the X86ISD::INSERTPS immediate.
13291 // Bits [5:4] of the constant are the destination select. This is the
13292 // value of the incoming immediate.
13293 // Bits [3:0] of the constant are the zero mask. The DAG Combiner may
13294 // combine either bitwise AND or insert of float 0.0 to set these bits.
13295 N2 = DAG.getIntPtrConstant(IdxVal << 4);
13296 // Create this as a scalar to vector.
13297 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
13298 return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1, N2);
13301 if (EltVT == MVT::i32 || EltVT == MVT::i64) {
13302 // PINSR* works with constant index.
13307 if (EltVT == MVT::i8)
13310 if (EltVT.getSizeInBits() == 16) {
13311 // Transform it so it matches pinsrw, which expects a 16-bit value in a GR32
13312 // as its second argument.
13313 if (N1.getValueType() != MVT::i32)
13314 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
13315 if (N2.getValueType() != MVT::i32)
13316 N2 = DAG.getIntPtrConstant(IdxVal);
13317 return DAG.getNode(X86ISD::PINSRW, dl, VT, N0, N1, N2);
13322 static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) {
13324 MVT OpVT = Op.getSimpleValueType();
13326 // If this is a 256-bit vector result, first insert into a 128-bit
13327 // vector and then insert into the 256-bit vector.
13328 if (!OpVT.is128BitVector()) {
13329 // Insert into a 128-bit vector.
13330 unsigned SizeFactor = OpVT.getSizeInBits()/128;
13331 MVT VT128 = MVT::getVectorVT(OpVT.getVectorElementType(),
13332 OpVT.getVectorNumElements() / SizeFactor);
13334 Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT128, Op.getOperand(0));
13336 // Insert the 128-bit vector.
13337 return Insert128BitVector(DAG.getUNDEF(OpVT), Op, 0, DAG, dl);
13340 if (OpVT == MVT::v1i64 &&
13341 Op.getOperand(0).getValueType() == MVT::i64)
13342 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i64, Op.getOperand(0));
13344 SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0));
13345 assert(OpVT.is128BitVector() && "Expected an SSE type!");
13346 return DAG.getNode(ISD::BITCAST, dl, OpVT,
13347 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,AnyExt));
13350 // Lower a node with an EXTRACT_SUBVECTOR opcode. This may result in
13351 // a simple subregister reference or explicit instructions to grab
13352 // upper bits of a vector.
13353 static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
13354 SelectionDAG &DAG) {
13356 SDValue In = Op.getOperand(0);
13357 SDValue Idx = Op.getOperand(1);
13358 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13359 MVT ResVT = Op.getSimpleValueType();
13360 MVT InVT = In.getSimpleValueType();
13362 if (Subtarget->hasFp256()) {
13363 if (ResVT.is128BitVector() &&
13364 (InVT.is256BitVector() || InVT.is512BitVector()) &&
13365 isa<ConstantSDNode>(Idx)) {
13366 return Extract128BitVector(In, IdxVal, DAG, dl);
13368 if (ResVT.is256BitVector() && InVT.is512BitVector() &&
13369 isa<ConstantSDNode>(Idx)) {
13370 return Extract256BitVector(In, IdxVal, DAG, dl);
13376 // Lower a node with an INSERT_SUBVECTOR opcode. This may result in a
13377 // simple superregister reference or explicit instructions to insert
13378 // the upper bits of a vector.
13379 static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
13380 SelectionDAG &DAG) {
13381 if (!Subtarget->hasAVX())
13385 SDValue Vec = Op.getOperand(0);
13386 SDValue SubVec = Op.getOperand(1);
13387 SDValue Idx = Op.getOperand(2);
13389 if (!isa<ConstantSDNode>(Idx))
13392 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13393 MVT OpVT = Op.getSimpleValueType();
13394 MVT SubVecVT = SubVec.getSimpleValueType();
13396 // Fold two 16-byte subvector loads into one 32-byte load:
13397 // (insert_subvector (insert_subvector undef, (load addr), 0),
13398 // (load addr + 16), Elts/2)
13400 if ((IdxVal == OpVT.getVectorNumElements() / 2) &&
13401 Vec.getOpcode() == ISD::INSERT_SUBVECTOR &&
13402 OpVT.is256BitVector() && SubVecVT.is128BitVector() &&
13403 !Subtarget->isUnalignedMem32Slow()) {
13404 SDValue SubVec2 = Vec.getOperand(1);
13405 if (auto *Idx2 = dyn_cast<ConstantSDNode>(Vec.getOperand(2))) {
13406 if (Idx2->getZExtValue() == 0) {
13407 SDValue Ops[] = { SubVec2, SubVec };
13408 SDValue LD = EltsFromConsecutiveLoads(OpVT, Ops, dl, DAG, false);
13415 if ((OpVT.is256BitVector() || OpVT.is512BitVector()) &&
13416 SubVecVT.is128BitVector())
13417 return Insert128BitVector(Vec, SubVec, IdxVal, DAG, dl);
13419 if (OpVT.is512BitVector() && SubVecVT.is256BitVector())
13420 return Insert256BitVector(Vec, SubVec, IdxVal, DAG, dl);
13425 // ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
13426 // their target counterpart wrapped in the X86ISD::Wrapper node. Suppose N is
13427 // one of the above mentioned nodes. It has to be wrapped because otherwise
13428 // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
13429 // be used to form addressing modes. These wrapped nodes will be selected into MOV32ri.
13432 X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
13433 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
13435 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
13436 // global base reg.
13437 unsigned char OpFlag = 0;
13438 unsigned WrapperKind = X86ISD::Wrapper;
13439 CodeModel::Model M = DAG.getTarget().getCodeModel();
13441 if (Subtarget->isPICStyleRIPRel() &&
13442 (M == CodeModel::Small || M == CodeModel::Kernel))
13443 WrapperKind = X86ISD::WrapperRIP;
13444 else if (Subtarget->isPICStyleGOT())
13445 OpFlag = X86II::MO_GOTOFF;
13446 else if (Subtarget->isPICStyleStubPIC())
13447 OpFlag = X86II::MO_PIC_BASE_OFFSET;
13449 SDValue Result = DAG.getTargetConstantPool(CP->getConstVal(), getPointerTy(),
13450 CP->getAlignment(),
13451 CP->getOffset(), OpFlag);
13453 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
13454 // With PIC, the address is actually $g + Offset.
13456 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
13457 DAG.getNode(X86ISD::GlobalBaseReg,
13458 SDLoc(), getPointerTy()),
13465 SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
13466 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
13468 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
13469 // global base reg.
13470 unsigned char OpFlag = 0;
13471 unsigned WrapperKind = X86ISD::Wrapper;
13472 CodeModel::Model M = DAG.getTarget().getCodeModel();
13474 if (Subtarget->isPICStyleRIPRel() &&
13475 (M == CodeModel::Small || M == CodeModel::Kernel))
13476 WrapperKind = X86ISD::WrapperRIP;
13477 else if (Subtarget->isPICStyleGOT())
13478 OpFlag = X86II::MO_GOTOFF;
13479 else if (Subtarget->isPICStyleStubPIC())
13480 OpFlag = X86II::MO_PIC_BASE_OFFSET;
13482 SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy(),
13485 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
13487 // With PIC, the address is actually $g + Offset.
13489 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
13490 DAG.getNode(X86ISD::GlobalBaseReg,
13491 SDLoc(), getPointerTy()),
13498 X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const {
13499 const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();
13501 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
13502 // global base reg.
13503 unsigned char OpFlag = 0;
13504 unsigned WrapperKind = X86ISD::Wrapper;
13505 CodeModel::Model M = DAG.getTarget().getCodeModel();
13507 if (Subtarget->isPICStyleRIPRel() &&
13508 (M == CodeModel::Small || M == CodeModel::Kernel)) {
13509 if (Subtarget->isTargetDarwin() || Subtarget->isTargetELF())
13510 OpFlag = X86II::MO_GOTPCREL;
13511 WrapperKind = X86ISD::WrapperRIP;
13512 } else if (Subtarget->isPICStyleGOT()) {
13513 OpFlag = X86II::MO_GOT;
13514 } else if (Subtarget->isPICStyleStubPIC()) {
13515 OpFlag = X86II::MO_DARWIN_NONLAZY_PIC_BASE;
13516 } else if (Subtarget->isPICStyleStubNoDynamic()) {
13517 OpFlag = X86II::MO_DARWIN_NONLAZY;
13520 SDValue Result = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlag);
13523 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
13525 // With PIC, the address is actually $g + Offset.
13526 if (DAG.getTarget().getRelocationModel() == Reloc::PIC_ &&
13527 !Subtarget->is64Bit()) {
13528 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
13529 DAG.getNode(X86ISD::GlobalBaseReg,
13530 SDLoc(), getPointerTy()),
13534 // For symbols that require a load from a stub to get the address, emit the load.
13536 if (isGlobalStubReference(OpFlag))
13537 Result = DAG.getLoad(getPointerTy(), DL, DAG.getEntryNode(), Result,
13538 MachinePointerInfo::getGOT(), false, false, false, 0);
13544 X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
13545 // Create the TargetBlockAddress node.
13546 unsigned char OpFlags =
13547 Subtarget->ClassifyBlockAddressReference();
13548 CodeModel::Model M = DAG.getTarget().getCodeModel();
13549 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
13550 int64_t Offset = cast<BlockAddressSDNode>(Op)->getOffset();
13552 SDValue Result = DAG.getTargetBlockAddress(BA, getPointerTy(), Offset,
13555 if (Subtarget->isPICStyleRIPRel() &&
13556 (M == CodeModel::Small || M == CodeModel::Kernel))
13557 Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result);
13559 Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result);
13561 // With PIC, the address is actually $g + Offset.
13562 if (isGlobalRelativeToPICBase(OpFlags)) {
13563 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(),
13564 DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()),
13572 X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV, SDLoc dl,
13573 int64_t Offset, SelectionDAG &DAG) const {
13574 // Create the TargetGlobalAddress node, folding in the constant
13575 // offset if it is legal.
13576 unsigned char OpFlags =
13577 Subtarget->ClassifyGlobalReference(GV, DAG.getTarget());
13578 CodeModel::Model M = DAG.getTarget().getCodeModel();
13580 if (OpFlags == X86II::MO_NO_FLAG &&
13581 X86::isOffsetSuitableForCodeModel(Offset, M)) {
13582 // A direct static reference to a global.
13583 Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), Offset);
13586 Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 0, OpFlags);
13589 if (Subtarget->isPICStyleRIPRel() &&
13590 (M == CodeModel::Small || M == CodeModel::Kernel))
13591 Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result);
13593 Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result);
13595 // With PIC, the address is actually $g + Offset.
13596 if (isGlobalRelativeToPICBase(OpFlags)) {
13597 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(),
13598 DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()),
13602 // For globals that require a load from a stub to get the address, emit the load.
13604 if (isGlobalStubReference(OpFlags))
13605 Result = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Result,
13606 MachinePointerInfo::getGOT(), false, false, false, 0);
13608 // If there was a non-zero offset that we didn't fold, create an explicit
13609 // addition for it.
13611 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(), Result,
13612 DAG.getConstant(Offset, getPointerTy()));
13618 X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
13619 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
13620 int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
13621 return LowerGlobalAddress(GV, SDLoc(Op), Offset, DAG);
13625 GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA,
13626 SDValue *InFlag, const EVT PtrVT, unsigned ReturnReg,
13627 unsigned char OperandFlags, bool LocalDynamic = false) {
13628 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
13629 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
13631 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
13632 GA->getValueType(0),
13636 X86ISD::NodeType CallType = LocalDynamic ? X86ISD::TLSBASEADDR
13640 SDValue Ops[] = { Chain, TGA, *InFlag };
13641 Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
13643 SDValue Ops[] = { Chain, TGA };
13644 Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
13647 // TLSADDR will be codegen'ed as call. Inform MFI that function has calls.
13648 MFI->setAdjustsStack(true);
13649 MFI->setHasCalls(true);
13651 SDValue Flag = Chain.getValue(1);
13652 return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Flag);
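// For reference (illustrative, standard ELF TLS code sequences): in the
// general-dynamic model this ends up materializing
//   leaq  x@tlsgd(%rip), %rdi ; call __tls_get_addr@plt      (64-bit)
//   leal  x@tlsgd(,%ebx,1), %eax ; call ___tls_get_addr@plt  (32-bit)
// with the variable's address returned in RAX/EAX, which is what the
// CopyFromReg above reads back.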
13655 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit
13657 LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
13660 SDLoc dl(GA); // ? function entry point might be better
13661 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
13662 DAG.getNode(X86ISD::GlobalBaseReg,
13663 SDLoc(), PtrVT), InFlag);
13664 InFlag = Chain.getValue(1);
13666 return GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, X86II::MO_TLSGD);
13669 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit
13671 LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG,
13673 return GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT,
13674 X86::RAX, X86II::MO_TLSGD);
13677 static SDValue LowerToTLSLocalDynamicModel(GlobalAddressSDNode *GA,
13683 // Get the start address of the TLS block for this module.
13684 X86MachineFunctionInfo* MFI = DAG.getMachineFunction()
13685 .getInfo<X86MachineFunctionInfo>();
13686 MFI->incNumLocalDynamicTLSAccesses();
13690 Base = GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT, X86::RAX,
13691 X86II::MO_TLSLD, /*LocalDynamic=*/true);
13694 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
13695 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), InFlag);
13696 InFlag = Chain.getValue(1);
13697 Base = GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX,
13698 X86II::MO_TLSLDM, /*LocalDynamic=*/true);
13701 // Note: the CleanupLocalDynamicTLSPass will remove redundant computations of the TLS base address.
13705 unsigned char OperandFlags = X86II::MO_DTPOFF;
13706 unsigned WrapperKind = X86ISD::Wrapper;
13707 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
13708 GA->getValueType(0),
13709 GA->getOffset(), OperandFlags);
13710 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
13712 // Add x@dtpoff with the base.
13713 return DAG.getNode(ISD::ADD, dl, PtrVT, Offset, Base);
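// Illustrative upshot of the local-dynamic model: a single __tls_get_addr
// call computes this module's TLS base, and each variable is then reached
// by adding its link-time constant x@dtpoff, e.g. on x86-64:
//   call __tls_get_addr          ; module TLS base in %rax
//   leaq x@dtpoff(%rax), %rcx    ; address of x
// so repeated accesses can share one call (see the pass note above).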
13716 // Lower ISD::GlobalTLSAddress using the "initial exec" or "local exec" model.
13717 static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
13718 const EVT PtrVT, TLSModel::Model model,
13719 bool is64Bit, bool isPIC) {
13722 // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit).
13723 Value *Ptr = Constant::getNullValue(Type::getInt8PtrTy(*DAG.getContext(),
13724 is64Bit ? 257 : 256));
13726 SDValue ThreadPointer =
13727 DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), DAG.getIntPtrConstant(0),
13728 MachinePointerInfo(Ptr), false, false, false, 0);
13730 unsigned char OperandFlags = 0;
13731 // Most TLS accesses are not RIP relative, even on x86-64. One exception is initial-exec.
13733 unsigned WrapperKind = X86ISD::Wrapper;
13734 if (model == TLSModel::LocalExec) {
13735 OperandFlags = is64Bit ? X86II::MO_TPOFF : X86II::MO_NTPOFF;
13736 } else if (model == TLSModel::InitialExec) {
13738 OperandFlags = X86II::MO_GOTTPOFF;
13739 WrapperKind = X86ISD::WrapperRIP;
13741 OperandFlags = isPIC ? X86II::MO_GOTNTPOFF : X86II::MO_INDNTPOFF;
13744 llvm_unreachable("Unexpected model");
13747 // emit "addl x@ntpoff,%eax" (local exec)
13748 // or "addl x@indntpoff,%eax" (initial exec)
13749 // or "addl x@gotntpoff(%ebx) ,%eax" (initial exec, 32-bit pic)
13751 DAG.getTargetGlobalAddress(GA->getGlobal(), dl, GA->getValueType(0),
13752 GA->getOffset(), OperandFlags);
13753 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
13755 if (model == TLSModel::InitialExec) {
13756 if (isPIC && !is64Bit) {
13757 Offset = DAG.getNode(ISD::ADD, dl, PtrVT,
13758 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT),
13762 Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset,
13763 MachinePointerInfo::getGOT(), false, false, false, 0);
13766 // The address of the thread local variable is the add of the thread
13767 // pointer with the offset of the variable.
13768 return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
13772 X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
13774 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
13775 const GlobalValue *GV = GA->getGlobal();
13777 if (Subtarget->isTargetELF()) {
13778 TLSModel::Model model = DAG.getTarget().getTLSModel(GV);
13781 case TLSModel::GeneralDynamic:
13782 if (Subtarget->is64Bit())
13783 return LowerToTLSGeneralDynamicModel64(GA, DAG, getPointerTy());
13784 return LowerToTLSGeneralDynamicModel32(GA, DAG, getPointerTy());
13785 case TLSModel::LocalDynamic:
13786 return LowerToTLSLocalDynamicModel(GA, DAG, getPointerTy(),
13787 Subtarget->is64Bit());
13788 case TLSModel::InitialExec:
13789 case TLSModel::LocalExec:
13790 return LowerToTLSExecModel(
13791 GA, DAG, getPointerTy(), model, Subtarget->is64Bit(),
13792 DAG.getTarget().getRelocationModel() == Reloc::PIC_);
13794 llvm_unreachable("Unknown TLS model.");
13797 if (Subtarget->isTargetDarwin()) {
13798 // Darwin only has one model of TLS. Lower to that.
13799 unsigned char OpFlag = 0;
13800 unsigned WrapperKind = Subtarget->isPICStyleRIPRel() ?
13801 X86ISD::WrapperRIP : X86ISD::Wrapper;
13803 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
13804 // global base reg.
13805 bool PIC32 = (DAG.getTarget().getRelocationModel() == Reloc::PIC_) &&
13806 !Subtarget->is64Bit();
13808 OpFlag = X86II::MO_TLVP_PIC_BASE;
13810 OpFlag = X86II::MO_TLVP;
13812 SDValue Result = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
13813 GA->getValueType(0),
13814 GA->getOffset(), OpFlag);
13815 SDValue Offset = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
13817 // With PIC32, the address is actually $g + Offset.
13819 Offset = DAG.getNode(ISD::ADD, DL, getPointerTy(),
13820 DAG.getNode(X86ISD::GlobalBaseReg,
13821 SDLoc(), getPointerTy()),
13824 // Lowering the machine isd will make sure everything is in the right place.
13826 SDValue Chain = DAG.getEntryNode();
13827 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
13828 SDValue Args[] = { Chain, Offset };
13829 Chain = DAG.getNode(X86ISD::TLSCALL, DL, NodeTys, Args);
13831 // TLSCALL will be codegen'ed as call. Inform MFI that function has calls.
13832 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
13833 MFI->setAdjustsStack(true);
13835 // And our return value (tls address) is in the standard call return value register.
13837 unsigned Reg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
13838 return DAG.getCopyFromReg(Chain, DL, Reg, getPointerTy(),
13839 Chain.getValue(1));
13842 if (Subtarget->isTargetKnownWindowsMSVC() ||
13843 Subtarget->isTargetWindowsGNU()) {
13844 // Just use the implicit TLS architecture
13845 // Need to generate something similar to:
13846 // mov rdx, qword [gs:abs 58H]; Load pointer to ThreadLocalStorage
13848 // mov ecx, dword [rel _tls_index]: Load index (from C runtime)
13849 // mov rcx, qword [rdx+rcx*8]
13850 // mov eax, .tls$:tlsvar
13851 // [rax+rcx] contains the address
13852 // Windows 64bit: gs:0x58
13853 // Windows 32bit: fs:__tls_array
13856 SDValue Chain = DAG.getEntryNode();
13858 // Get the Thread Pointer, which is %fs:__tls_array (32-bit) or
13859 // %gs:0x58 (64-bit). On MinGW, __tls_array is not available, so directly
13860 // use its literal value of 0x2C.
13861 Value *Ptr = Constant::getNullValue(Subtarget->is64Bit()
13862 ? Type::getInt8PtrTy(*DAG.getContext(),
13864 : Type::getInt32PtrTy(*DAG.getContext(),
13868 Subtarget->is64Bit()
13869 ? DAG.getIntPtrConstant(0x58)
13870 : (Subtarget->isTargetWindowsGNU()
13871 ? DAG.getIntPtrConstant(0x2C)
13872 : DAG.getExternalSymbol("_tls_array", getPointerTy()));
13874 SDValue ThreadPointer =
13875 DAG.getLoad(getPointerTy(), dl, Chain, TlsArray,
13876 MachinePointerInfo(Ptr), false, false, false, 0);
13878 // Load the _tls_index variable
13879 SDValue IDX = DAG.getExternalSymbol("_tls_index", getPointerTy());
13880 if (Subtarget->is64Bit())
13881 IDX = DAG.getExtLoad(ISD::ZEXTLOAD, dl, getPointerTy(), Chain,
13882 IDX, MachinePointerInfo(), MVT::i32,
13883 false, false, false, 0);
13885 IDX = DAG.getLoad(getPointerTy(), dl, Chain, IDX, MachinePointerInfo(),
13886 false, false, false, 0);
13888 SDValue Scale = DAG.getConstant(Log2_64_Ceil(TD->getPointerSize()),
13890 IDX = DAG.getNode(ISD::SHL, dl, getPointerTy(), IDX, Scale);
13892 SDValue res = DAG.getNode(ISD::ADD, dl, getPointerTy(), ThreadPointer, IDX);
13893 res = DAG.getLoad(getPointerTy(), dl, Chain, res, MachinePointerInfo(),
13894 false, false, false, 0);
13896 // Get the offset of start of .tls section
13897 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
13898 GA->getValueType(0),
13899 GA->getOffset(), X86II::MO_SECREL);
13900 SDValue Offset = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), TGA);
13902 // The address of the thread local variable is the add of the thread
13903 // pointer with the offset of the variable.
13904 return DAG.getNode(ISD::ADD, dl, getPointerTy(), res, Offset);
13907 llvm_unreachable("TLS not implemented for this target.");
13910 /// LowerShiftParts - Lower SRA_PARTS and friends, which return two i32 values
13911 /// and take a 2 x i32 value to shift plus a shift amount.
13912 static SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) {
13913 assert(Op.getNumOperands() == 3 && "Not a double-shift!");
13914 MVT VT = Op.getSimpleValueType();
13915 unsigned VTBits = VT.getSizeInBits();
13917 bool isSRA = Op.getOpcode() == ISD::SRA_PARTS;
13918 SDValue ShOpLo = Op.getOperand(0);
13919 SDValue ShOpHi = Op.getOperand(1);
13920 SDValue ShAmt = Op.getOperand(2);
13921 // X86ISD::SHLD and X86ISD::SHRD have defined overflow behavior but the
13922 // generic ISD nodes don't. Insert an AND to be safe; it's optimized away during isel.
13924 SDValue SafeShAmt = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
13925 DAG.getConstant(VTBits - 1, MVT::i8));
13926 SDValue Tmp1 = isSRA ? DAG.getNode(ISD::SRA, dl, VT, ShOpHi,
13927 DAG.getConstant(VTBits - 1, MVT::i8))
13928 : DAG.getConstant(0, VT);
13930 SDValue Tmp2, Tmp3;
13931 if (Op.getOpcode() == ISD::SHL_PARTS) {
13932 Tmp2 = DAG.getNode(X86ISD::SHLD, dl, VT, ShOpHi, ShOpLo, ShAmt);
13933 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, SafeShAmt);
13935 Tmp2 = DAG.getNode(X86ISD::SHRD, dl, VT, ShOpLo, ShOpHi, ShAmt);
13936 Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, dl, VT, ShOpHi, SafeShAmt);
13939 // If the shift amount is larger than or equal to the width of a part, we can't
13940 // rely on the results of shld/shrd. Insert a test and select the appropriate
13941 // values for large shift amounts.
13942 SDValue AndNode = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
13943 DAG.getConstant(VTBits, MVT::i8));
13944 SDValue Cond = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
13945 AndNode, DAG.getConstant(0, MVT::i8));
13948 SDValue CC = DAG.getConstant(X86::COND_NE, MVT::i8);
13949 SDValue Ops0[4] = { Tmp2, Tmp3, CC, Cond };
13950 SDValue Ops1[4] = { Tmp3, Tmp1, CC, Cond };
13952 if (Op.getOpcode() == ISD::SHL_PARTS) {
13953 Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0);
13954 Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1);
13956 Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0);
13957 Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1);
13960 SDValue Ops[2] = { Lo, Hi };
13961 return DAG.getMergeValues(Ops, dl);
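// Worked example (illustrative): for an i32 SHL_PARTS with ShAmt == 40,
// Tmp3 = lo << (40 & 31) == lo << 8, and ShAmt & 32 is non-zero, so the
// CMOVs select Hi = Tmp3 and Lo = Tmp1 == 0, which is exactly the result
// of shifting the combined 64-bit value left by 40. For ShAmt < 32 the
// test fails and the shld/shl pair (Tmp2/Tmp3) is used directly.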
13964 SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op,
13965 SelectionDAG &DAG) const {
13966 MVT SrcVT = Op.getOperand(0).getSimpleValueType();
13969 if (SrcVT.isVector()) {
13970 if (SrcVT.getVectorElementType() == MVT::i1) {
13971 MVT IntegerVT = MVT::getVectorVT(MVT::i32, SrcVT.getVectorNumElements());
13972 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(),
13973 DAG.getNode(ISD::SIGN_EXTEND, dl, IntegerVT,
13974 Op.getOperand(0)));
13979 assert(SrcVT <= MVT::i64 && SrcVT >= MVT::i16 &&
13980 "Unknown SINT_TO_FP to lower!");
13982 // These are really Legal; return the operand so the caller accepts it as Legal.
13984 if (SrcVT == MVT::i32 && isScalarFPTypeInSSEReg(Op.getValueType()))
13986 if (SrcVT == MVT::i64 && isScalarFPTypeInSSEReg(Op.getValueType()) &&
13987 Subtarget->is64Bit()) {
13991 unsigned Size = SrcVT.getSizeInBits()/8;
13992 MachineFunction &MF = DAG.getMachineFunction();
13993 int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size, false);
13994 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
13995 SDValue Chain = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
13997 MachinePointerInfo::getFixedStack(SSFI),
13999 return BuildFILD(Op, SrcVT, Chain, StackSlot, DAG);
14002 SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain,
14004 SelectionDAG &DAG) const {
14008 bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType());
14010 Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Glue);
14012 Tys = DAG.getVTList(Op.getValueType(), MVT::Other);
14014 unsigned ByteSize = SrcVT.getSizeInBits()/8;
14016 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(StackSlot);
14017 MachineMemOperand *MMO;
14019 int SSFI = FI->getIndex();
14021 DAG.getMachineFunction()
14022 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14023 MachineMemOperand::MOLoad, ByteSize, ByteSize);
14025 MMO = cast<LoadSDNode>(StackSlot)->getMemOperand();
14026 StackSlot = StackSlot.getOperand(1);
14028 SDValue Ops[] = { Chain, StackSlot, DAG.getValueType(SrcVT) };
14029 SDValue Result = DAG.getMemIntrinsicNode(useSSE ? X86ISD::FILD_FLAG :
14031 Tys, Ops, SrcVT, MMO);
14034 Chain = Result.getValue(1);
14035 SDValue InFlag = Result.getValue(2);
14037 // FIXME: Currently the FST is flagged to the FILD_FLAG. This
14038 // shouldn't be necessary except that RFP cannot be live across
14039 // multiple blocks. When stackifier is fixed, they can be uncoupled.
14040 MachineFunction &MF = DAG.getMachineFunction();
14041 unsigned SSFISize = Op.getValueType().getSizeInBits()/8;
14042 int SSFI = MF.getFrameInfo()->CreateStackObject(SSFISize, SSFISize, false);
14043 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
14044 Tys = DAG.getVTList(MVT::Other);
14046 Chain, Result, StackSlot, DAG.getValueType(Op.getValueType()), InFlag
14048 MachineMemOperand *MMO =
14049 DAG.getMachineFunction()
14050 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14051 MachineMemOperand::MOStore, SSFISize, SSFISize);
14053 Chain = DAG.getMemIntrinsicNode(X86ISD::FST, DL, Tys,
14054 Ops, Op.getValueType(), MMO);
14055 Result = DAG.getLoad(Op.getValueType(), DL, Chain, StackSlot,
14056 MachinePointerInfo::getFixedStack(SSFI),
14057 false, false, false, 0);
14063 // LowerUINT_TO_FP_i64 - 64-bit unsigned integer to double expansion.
14064 SDValue X86TargetLowering::LowerUINT_TO_FP_i64(SDValue Op,
14065 SelectionDAG &DAG) const {
  // This algorithm is not obvious. Here is what we're trying to output:
14069 punpckldq (c0), %xmm0 // c0: (uint4){ 0x43300000U, 0x45300000U, 0U, 0U }
14070 subpd (c1), %xmm0 // c1: (double2){ 0x1.0p52, 0x1.0p52 * 0x1.0p32 }
14072 haddpd %xmm0, %xmm0
14074 pshufd $0x4e, %xmm0, %xmm1
14080 LLVMContext *Context = DAG.getContext();
14082 // Build some magic constants.
14083 static const uint32_t CV0[] = { 0x43300000, 0x45300000, 0, 0 };
14084 Constant *C0 = ConstantDataVector::get(*Context, CV0);
14085 SDValue CPIdx0 = DAG.getConstantPool(C0, getPointerTy(), 16);
14087 SmallVector<Constant*,2> CV1;
14089 ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble,
14090 APInt(64, 0x4330000000000000ULL))));
14092 ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble,
14093 APInt(64, 0x4530000000000000ULL))));
14094 Constant *C1 = ConstantVector::get(CV1);
14095 SDValue CPIdx1 = DAG.getConstantPool(C1, getPointerTy(), 16);
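  // Why these constants work (the bit patterns are plain IEEE-754 facts):
  // 0x4330000000000000 is the double 2^52 and 0x4530000000000000 is 2^84.
  // After the unpack below, one double has the bit pattern 0x43300000:<lo32>,
  // i.e. the value 2^52 + lo, and the other has 0x45300000:<hi32>, i.e.
  // 2^84 + hi * 2^32. Subtracting {2^52, 2^84} therefore leaves
  // {lo, hi * 2^32} exactly, and the horizontal add reconstructs
  // hi * 2^32 + lo.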
14097 // Load the 64-bit value into an XMM register.
14098 SDValue XR1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
14100 SDValue CLod0 = DAG.getLoad(MVT::v4i32, dl, DAG.getEntryNode(), CPIdx0,
14101 MachinePointerInfo::getConstantPool(),
14102 false, false, false, 16);
14103 SDValue Unpck1 = getUnpackl(DAG, dl, MVT::v4i32,
14104 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, XR1),
14107 SDValue CLod1 = DAG.getLoad(MVT::v2f64, dl, CLod0.getValue(1), CPIdx1,
14108 MachinePointerInfo::getConstantPool(),
14109 false, false, false, 16);
14110 SDValue XR2F = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Unpck1);
14111 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, XR2F, CLod1);
14114 if (Subtarget->hasSSE3()) {
14115 // FIXME: The 'haddpd' instruction may be slower than 'movhlps + addsd'.
14116 Result = DAG.getNode(X86ISD::FHADD, dl, MVT::v2f64, Sub, Sub);
14118 SDValue S2F = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Sub);
14119 SDValue Shuffle = getTargetShuffleNode(X86ISD::PSHUFD, dl, MVT::v4i32,
14121 Result = DAG.getNode(ISD::FADD, dl, MVT::v2f64,
14122 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Shuffle),
14126 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Result,
14127 DAG.getIntPtrConstant(0));
14130 // LowerUINT_TO_FP_i32 - 32-bit unsigned integer to float expansion.
14131 SDValue X86TargetLowering::LowerUINT_TO_FP_i32(SDValue Op,
14132 SelectionDAG &DAG) const {
14134 // FP constant to bias correct the final result.
14135 SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL),
14138 // Load the 32-bit value into an XMM register.
14139 SDValue Load = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,
14142 // Zero out the upper parts of the register.
14143 Load = getShuffleVectorZeroOrUndef(Load, 0, true, Subtarget, DAG);
14145 Load = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
14146 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Load),
14147 DAG.getIntPtrConstant(0));
14149 // Or the load with the bias.
14150 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64,
14151 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
14152 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
14153 MVT::v2f64, Load)),
14154 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
14155 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
14156 MVT::v2f64, Bias)));
14157 Or = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
14158 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Or),
14159 DAG.getIntPtrConstant(0));
14161 // Subtract the bias.
14162 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Or, Bias);
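  // This completes the classic 2^52 trick: the OR above produced a double
  // with bit pattern 0x43300000:<u32>, whose value is 2^52 + x, so
  // (2^52 + x) - 2^52 is exactly (double)x for any 32-bit unsigned x.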
14164 // Handle final rounding.
14165 EVT DestVT = Op.getValueType();
14167 if (DestVT.bitsLT(MVT::f64))
14168 return DAG.getNode(ISD::FP_ROUND, dl, DestVT, Sub,
14169 DAG.getIntPtrConstant(0));
14170 if (DestVT.bitsGT(MVT::f64))
14171 return DAG.getNode(ISD::FP_EXTEND, dl, DestVT, Sub);
  // Otherwise DestVT is f64 and no final rounding is needed.
  return Sub;
14177 static SDValue lowerUINT_TO_FP_vXi32(SDValue Op, SelectionDAG &DAG,
14178 const X86Subtarget &Subtarget) {
14179 // The algorithm is the following:
14180 // #ifdef __SSE4_1__
14181 // uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
14182 // uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
14183 // (uint4) 0x53000000, 0xaa);
14185 // uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
14186 // uint4 hi = (v >> 16) | (uint4) 0x53000000;
14188 // float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
14189 // return (float4) lo + fhi;
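  //
  // A sketch of why this works, using IEEE-754 single-precision facts:
  // 0x4b000000 is the bit pattern of 2^23 and 0x53000000 is 2^39, so lo viewed
  // as a float equals 2^23 + (v & 0xffff) and hi equals 2^39 + (v >> 16) * 2^16.
  // Subtracting (0x1.0p39f + 0x1.0p23f) from hi and adding lo therefore yields
  // (v >> 16) * 2^16 + (v & 0xffff) == v, with one rounding at the final add.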
14192 SDValue V = Op->getOperand(0);
14193 EVT VecIntVT = V.getValueType();
14194 bool Is128 = VecIntVT == MVT::v4i32;
14195 EVT VecFloatVT = Is128 ? MVT::v4f32 : MVT::v8f32;
  // If we convert to something other than the supported type, e.g., to v4f64,
  // abort early.
  if (VecFloatVT != Op->getValueType(0))
    return SDValue();
14201 unsigned NumElts = VecIntVT.getVectorNumElements();
14202 assert((VecIntVT == MVT::v4i32 || VecIntVT == MVT::v8i32) &&
14203 "Unsupported custom type");
14204 assert(NumElts <= 8 && "The size of the constant array must be fixed");
  // In the #ifdef/#else code, we have in common:
14207 // - The vector of constants:
14213 // Create the splat vector for 0x4b000000.
14214 SDValue CstLow = DAG.getConstant(0x4b000000, MVT::i32);
14215 SDValue CstLowArray[] = {CstLow, CstLow, CstLow, CstLow,
14216 CstLow, CstLow, CstLow, CstLow};
14217 SDValue VecCstLow = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
14218 makeArrayRef(&CstLowArray[0], NumElts));
14219 // Create the splat vector for 0x53000000.
14220 SDValue CstHigh = DAG.getConstant(0x53000000, MVT::i32);
14221 SDValue CstHighArray[] = {CstHigh, CstHigh, CstHigh, CstHigh,
14222 CstHigh, CstHigh, CstHigh, CstHigh};
14223 SDValue VecCstHigh = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
14224 makeArrayRef(&CstHighArray[0], NumElts));
14226 // Create the right shift.
14227 SDValue CstShift = DAG.getConstant(16, MVT::i32);
14228 SDValue CstShiftArray[] = {CstShift, CstShift, CstShift, CstShift,
14229 CstShift, CstShift, CstShift, CstShift};
14230 SDValue VecCstShift = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
14231 makeArrayRef(&CstShiftArray[0], NumElts));
14232 SDValue HighShift = DAG.getNode(ISD::SRL, DL, VecIntVT, V, VecCstShift);
14235 if (Subtarget.hasSSE41()) {
14236 EVT VecI16VT = Is128 ? MVT::v8i16 : MVT::v16i16;
14237 // uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
14238 SDValue VecCstLowBitcast =
14239 DAG.getNode(ISD::BITCAST, DL, VecI16VT, VecCstLow);
14240 SDValue VecBitcast = DAG.getNode(ISD::BITCAST, DL, VecI16VT, V);
    // Low will be bitcast right away, so do not bother bitcasting back to its
    // original type.
14243 Low = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecBitcast,
14244 VecCstLowBitcast, DAG.getConstant(0xaa, MVT::i32));
14245 // uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
14246 // (uint4) 0x53000000, 0xaa);
14247 SDValue VecCstHighBitcast =
14248 DAG.getNode(ISD::BITCAST, DL, VecI16VT, VecCstHigh);
14249 SDValue VecShiftBitcast =
14250 DAG.getNode(ISD::BITCAST, DL, VecI16VT, HighShift);
14251 // High will be bitcasted right away, so do not bother bitcasting back to
14252 // its original type.
14253 High = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecShiftBitcast,
14254 VecCstHighBitcast, DAG.getConstant(0xaa, MVT::i32));
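    // In both blends the 0xaa immediate (0b10101010) selects the odd 16-bit
    // lanes (the high half of each 32-bit element) from the constant vector
    // and keeps the even lanes from the source, computing (x & 0xffff) | Cst
    // in one instruction instead of the AND/OR pair of the non-SSE4.1 path.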
14256 SDValue CstMask = DAG.getConstant(0xffff, MVT::i32);
14257 SDValue VecCstMask = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT, CstMask,
14258 CstMask, CstMask, CstMask);
14259 // uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
14260 SDValue LowAnd = DAG.getNode(ISD::AND, DL, VecIntVT, V, VecCstMask);
14261 Low = DAG.getNode(ISD::OR, DL, VecIntVT, LowAnd, VecCstLow);
14263 // uint4 hi = (v >> 16) | (uint4) 0x53000000;
14264 High = DAG.getNode(ISD::OR, DL, VecIntVT, HighShift, VecCstHigh);
14267 // Create the vector constant for -(0x1.0p39f + 0x1.0p23f).
14268 SDValue CstFAdd = DAG.getConstantFP(
14269 APFloat(APFloat::IEEEsingle, APInt(32, 0xD3000080)), MVT::f32);
14270 SDValue CstFAddArray[] = {CstFAdd, CstFAdd, CstFAdd, CstFAdd,
14271 CstFAdd, CstFAdd, CstFAdd, CstFAdd};
14272 SDValue VecCstFAdd = DAG.getNode(ISD::BUILD_VECTOR, DL, VecFloatVT,
14273 makeArrayRef(&CstFAddArray[0], NumElts));
14275 // float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
14276 SDValue HighBitcast = DAG.getNode(ISD::BITCAST, DL, VecFloatVT, High);
14278 DAG.getNode(ISD::FADD, DL, VecFloatVT, HighBitcast, VecCstFAdd);
14279 // return (float4) lo + fhi;
14280 SDValue LowBitcast = DAG.getNode(ISD::BITCAST, DL, VecFloatVT, Low);
14281 return DAG.getNode(ISD::FADD, DL, VecFloatVT, LowBitcast, FHigh);
14284 SDValue X86TargetLowering::lowerUINT_TO_FP_vec(SDValue Op,
14285 SelectionDAG &DAG) const {
14286 SDValue N0 = Op.getOperand(0);
14287 MVT SVT = N0.getSimpleValueType();
14290 switch (SVT.SimpleTy) {
14292 llvm_unreachable("Custom UINT_TO_FP is not supported!");
14297 MVT NVT = MVT::getVectorVT(MVT::i32, SVT.getVectorNumElements());
14298 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(),
14299 DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, N0));
14303 return lowerUINT_TO_FP_vXi32(Op, DAG, *Subtarget);
14305 llvm_unreachable(nullptr);
14308 SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
14309 SelectionDAG &DAG) const {
14310 SDValue N0 = Op.getOperand(0);
14313 if (Op.getValueType().isVector())
14314 return lowerUINT_TO_FP_vec(Op, DAG);
14316 // Since UINT_TO_FP is legal (it's marked custom), dag combiner won't
14317 // optimize it to a SINT_TO_FP when the sign bit is known zero. Perform
14318 // the optimization here.
14319 if (DAG.SignBitIsZero(N0))
14320 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(), N0);
14322 MVT SrcVT = N0.getSimpleValueType();
14323 MVT DstVT = Op.getSimpleValueType();
14324 if (SrcVT == MVT::i64 && DstVT == MVT::f64 && X86ScalarSSEf64)
14325 return LowerUINT_TO_FP_i64(Op, DAG);
14326 if (SrcVT == MVT::i32 && X86ScalarSSEf64)
14327 return LowerUINT_TO_FP_i32(Op, DAG);
14328 if (Subtarget->is64Bit() && SrcVT == MVT::i64 && DstVT == MVT::f32)
14331 // Make a 64-bit buffer, and use it to build an FILD.
14332 SDValue StackSlot = DAG.CreateStackTemporary(MVT::i64);
14333 if (SrcVT == MVT::i32) {
14334 SDValue WordOff = DAG.getConstant(4, getPointerTy());
14335 SDValue OffsetSlot = DAG.getNode(ISD::ADD, dl,
14336 getPointerTy(), StackSlot, WordOff);
14337 SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
14338 StackSlot, MachinePointerInfo(),
14340 SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, MVT::i32),
14341 OffsetSlot, MachinePointerInfo(),
14343 SDValue Fild = BuildFILD(Op, MVT::i64, Store2, StackSlot, DAG);
14347 assert(SrcVT == MVT::i64 && "Unexpected type in UINT_TO_FP");
14348 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
14349 StackSlot, MachinePointerInfo(),
14351 // For i64 source, we need to add the appropriate power of 2 if the input
14352 // was negative. This is the same as the optimization in
  // DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP, and for it to be safe here,
14354 // we must be careful to do the computation in x87 extended precision, not
14355 // in SSE. (The generic code can't know it's OK to do this, or how to.)
14356 int SSFI = cast<FrameIndexSDNode>(StackSlot)->getIndex();
14357 MachineMemOperand *MMO =
14358 DAG.getMachineFunction()
14359 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14360 MachineMemOperand::MOLoad, 8, 8);
14362 SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
14363 SDValue Ops[] = { Store, StackSlot, DAG.getValueType(MVT::i64) };
14364 SDValue Fild = DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, Ops,
14367 APInt FF(32, 0x5F800000ULL);
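  // 0x5F800000 is the IEEE-754 single-precision bit pattern of 2^64. The FILD
  // above treats the 64-bit buffer as signed, so if the original value had its
  // sign bit set the loaded result is off by exactly 2^64; the fudge factor is
  // selected below only in that case to correct it.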
14369 // Check whether the sign bit is set.
14370 SDValue SignSet = DAG.getSetCC(dl,
14371 getSetCCResultType(*DAG.getContext(), MVT::i64),
14372 Op.getOperand(0), DAG.getConstant(0, MVT::i64),
14375 // Build a 64 bit pair (0, FF) in the constant pool, with FF in the lo bits.
14376 SDValue FudgePtr = DAG.getConstantPool(
14377 ConstantInt::get(*DAG.getContext(), FF.zext(64)),
14380 // Get a pointer to FF if the sign bit was set, or to 0 otherwise.
14381 SDValue Zero = DAG.getIntPtrConstant(0);
14382 SDValue Four = DAG.getIntPtrConstant(4);
14383 SDValue Offset = DAG.getNode(ISD::SELECT, dl, Zero.getValueType(), SignSet,
14385 FudgePtr = DAG.getNode(ISD::ADD, dl, getPointerTy(), FudgePtr, Offset);
14387 // Load the value out, extending it from f32 to f80.
14388 // FIXME: Avoid the extend by constructing the right constant pool?
14389 SDValue Fudge = DAG.getExtLoad(ISD::EXTLOAD, dl, MVT::f80, DAG.getEntryNode(),
14390 FudgePtr, MachinePointerInfo::getConstantPool(),
14391 MVT::f32, false, false, false, 4);
14392 // Extend everything to 80 bits to force it to be done on x87.
14393 SDValue Add = DAG.getNode(ISD::FADD, dl, MVT::f80, Fild, Fudge);
14394 return DAG.getNode(ISD::FP_ROUND, dl, DstVT, Add, DAG.getIntPtrConstant(0));
14397 std::pair<SDValue,SDValue>
14398 X86TargetLowering:: FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
14399 bool IsSigned, bool IsReplace) const {
14402 EVT DstTy = Op.getValueType();
14404 if (!IsSigned && !isIntegerTypeFTOL(DstTy)) {
14405 assert(DstTy == MVT::i32 && "Unexpected FP_TO_UINT");
14409 assert(DstTy.getSimpleVT() <= MVT::i64 &&
14410 DstTy.getSimpleVT() >= MVT::i16 &&
14411 "Unknown FP_TO_INT to lower!");
14413 // These are really Legal.
14414 if (DstTy == MVT::i32 &&
14415 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
14416 return std::make_pair(SDValue(), SDValue());
14417 if (Subtarget->is64Bit() &&
14418 DstTy == MVT::i64 &&
14419 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
14420 return std::make_pair(SDValue(), SDValue());
14422 // We lower FP->int64 either into FISTP64 followed by a load from a temporary
14423 // stack slot, or into the FTOL runtime function.
14424 MachineFunction &MF = DAG.getMachineFunction();
14425 unsigned MemSize = DstTy.getSizeInBits()/8;
14426 int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false);
14427 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
14430 if (!IsSigned && isIntegerTypeFTOL(DstTy))
14431 Opc = X86ISD::WIN_FTOL;
14433 switch (DstTy.getSimpleVT().SimpleTy) {
14434 default: llvm_unreachable("Invalid FP_TO_SINT to lower!");
14435 case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break;
14436 case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break;
14437 case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break;
14440 SDValue Chain = DAG.getEntryNode();
14441 SDValue Value = Op.getOperand(0);
14442 EVT TheVT = Op.getOperand(0).getValueType();
14443 // FIXME This causes a redundant load/store if the SSE-class value is already
14444 // in memory, such as if it is on the callstack.
14445 if (isScalarFPTypeInSSEReg(TheVT)) {
14446 assert(DstTy == MVT::i64 && "Invalid FP_TO_SINT to lower!");
14447 Chain = DAG.getStore(Chain, DL, Value, StackSlot,
14448 MachinePointerInfo::getFixedStack(SSFI),
14450 SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other);
14452 Chain, StackSlot, DAG.getValueType(TheVT)
14455 MachineMemOperand *MMO =
14456 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14457 MachineMemOperand::MOLoad, MemSize, MemSize);
14458 Value = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, DstTy, MMO);
14459 Chain = Value.getValue(1);
14460 SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false);
14461 StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
14464 MachineMemOperand *MMO =
14465 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14466 MachineMemOperand::MOStore, MemSize, MemSize);
14468 if (Opc != X86ISD::WIN_FTOL) {
14469 // Build the FP_TO_INT*_IN_MEM
14470 SDValue Ops[] = { Chain, Value, StackSlot };
14471 SDValue FIST = DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::Other),
14473 return std::make_pair(FIST, StackSlot);
14475 SDValue ftol = DAG.getNode(X86ISD::WIN_FTOL, DL,
14476 DAG.getVTList(MVT::Other, MVT::Glue),
14478 SDValue eax = DAG.getCopyFromReg(ftol, DL, X86::EAX,
14479 MVT::i32, ftol.getValue(1));
14480 SDValue edx = DAG.getCopyFromReg(eax.getValue(1), DL, X86::EDX,
14481 MVT::i32, eax.getValue(2));
14482 SDValue Ops[] = { eax, edx };
14483 SDValue pair = IsReplace
14484 ? DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops)
14485 : DAG.getMergeValues(Ops, DL);
14486 return std::make_pair(pair, SDValue());
14490 static SDValue LowerAVXExtend(SDValue Op, SelectionDAG &DAG,
14491 const X86Subtarget *Subtarget) {
14492 MVT VT = Op->getSimpleValueType(0);
14493 SDValue In = Op->getOperand(0);
14494 MVT InVT = In.getSimpleValueType();
14497 // Optimize vectors in AVX mode:
14500 // Use vpunpcklwd for 4 lower elements v8i16 -> v4i32.
14501 // Use vpunpckhwd for 4 upper elements v8i16 -> v4i32.
14502 // Concat upper and lower parts.
14505 // Use vpunpckldq for 4 lower elements v4i32 -> v2i64.
14506 // Use vpunpckhdq for 4 upper elements v4i32 -> v2i64.
14507 // Concat upper and lower parts.
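  //
  // A zero-extension sketch for v8i16 -> v8i32: unpacking the source with a
  // zero vector interleaves each element with 0, so
  //   unpcklwd(<a0,...,a7>, <0,...,0>) = <a0,0,a1,0,a2,0,a3,0>
  // which, reinterpreted as v4i32, is <zext a0, ..., zext a3> on the
  // little-endian lane layout. ANY_EXTEND uses an undef vector instead.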
14510 if (((VT != MVT::v16i16) || (InVT != MVT::v16i8)) &&
14511 ((VT != MVT::v8i32) || (InVT != MVT::v8i16)) &&
14512 ((VT != MVT::v4i64) || (InVT != MVT::v4i32)))
14515 if (Subtarget->hasInt256())
14516 return DAG.getNode(X86ISD::VZEXT, dl, VT, In);
14518 SDValue ZeroVec = getZeroVector(InVT, Subtarget, DAG, dl);
14519 SDValue Undef = DAG.getUNDEF(InVT);
14520 bool NeedZero = Op.getOpcode() == ISD::ZERO_EXTEND;
14521 SDValue OpLo = getUnpackl(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);
14522 SDValue OpHi = getUnpackh(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);
14524 MVT HVT = MVT::getVectorVT(VT.getVectorElementType(),
14525 VT.getVectorNumElements()/2);
14527 OpLo = DAG.getNode(ISD::BITCAST, dl, HVT, OpLo);
14528 OpHi = DAG.getNode(ISD::BITCAST, dl, HVT, OpHi);
14530 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
14533 static SDValue LowerZERO_EXTEND_AVX512(SDValue Op,
14534 SelectionDAG &DAG) {
14535 MVT VT = Op->getSimpleValueType(0);
14536 SDValue In = Op->getOperand(0);
14537 MVT InVT = In.getSimpleValueType();
14539 unsigned int NumElts = VT.getVectorNumElements();
14540 if (NumElts != 8 && NumElts != 16)
14543 if (VT.is512BitVector() && InVT.getVectorElementType() != MVT::i1)
14544 return DAG.getNode(X86ISD::VZEXT, DL, VT, In);
14546 EVT ExtVT = (NumElts == 8)? MVT::v8i64 : MVT::v16i32;
14547 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  // Now we only have the mask-extension case left.
14549 assert(InVT.getVectorElementType() == MVT::i1);
14550 SDValue Cst = DAG.getTargetConstant(1, ExtVT.getScalarType());
14551 const Constant *C = (dyn_cast<ConstantSDNode>(Cst))->getConstantIntValue();
14552 SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy());
14553 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
14554 SDValue Ld = DAG.getLoad(Cst.getValueType(), DL, DAG.getEntryNode(), CP,
14555 MachinePointerInfo::getConstantPool(),
14556 false, false, false, Alignment);
14558 SDValue Brcst = DAG.getNode(X86ISD::VBROADCASTM, DL, ExtVT, In, Ld);
14559 if (VT.is512BitVector())
14561 return DAG.getNode(X86ISD::VTRUNC, DL, VT, Brcst);
14564 static SDValue LowerANY_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
14565 SelectionDAG &DAG) {
14566 if (Subtarget->hasFp256()) {
14567 SDValue Res = LowerAVXExtend(Op, DAG, Subtarget);
14575 static SDValue LowerZERO_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
14576 SelectionDAG &DAG) {
14578 MVT VT = Op.getSimpleValueType();
14579 SDValue In = Op.getOperand(0);
14580 MVT SVT = In.getSimpleValueType();
14582 if (VT.is512BitVector() || SVT.getVectorElementType() == MVT::i1)
14583 return LowerZERO_EXTEND_AVX512(Op, DAG);
14585 if (Subtarget->hasFp256()) {
14586 SDValue Res = LowerAVXExtend(Op, DAG, Subtarget);
14591 assert(!VT.is256BitVector() || !SVT.is128BitVector() ||
14592 VT.getVectorNumElements() != SVT.getVectorNumElements());
14596 SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
14598 MVT VT = Op.getSimpleValueType();
14599 SDValue In = Op.getOperand(0);
14600 MVT InVT = In.getSimpleValueType();
14602 if (VT == MVT::i1) {
14603 assert((InVT.isInteger() && (InVT.getSizeInBits() <= 64)) &&
14604 "Invalid scalar TRUNCATE operation");
14605 if (InVT.getSizeInBits() >= 32)
14607 In = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, In);
14608 return DAG.getNode(ISD::TRUNCATE, DL, VT, In);
14610 assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
14611 "Invalid TRUNCATE operation");
14613 if (InVT.is512BitVector() || VT.getVectorElementType() == MVT::i1) {
14614 if (VT.getVectorElementType().getSizeInBits() >=8)
14615 return DAG.getNode(X86ISD::VTRUNC, DL, VT, In);
14617 assert(VT.getVectorElementType() == MVT::i1 && "Unexpected vector type");
14618 unsigned NumElts = InVT.getVectorNumElements();
14619 assert ((NumElts == 8 || NumElts == 16) && "Unexpected vector type");
14620 if (InVT.getSizeInBits() < 512) {
14621 MVT ExtVT = (NumElts == 16)? MVT::v16i32 : MVT::v8i64;
14622 In = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, In);
14626 SDValue Cst = DAG.getTargetConstant(1, InVT.getVectorElementType());
14627 const Constant *C = (dyn_cast<ConstantSDNode>(Cst))->getConstantIntValue();
14628 SDValue CP = DAG.getConstantPool(C, getPointerTy());
14629 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
14630 SDValue Ld = DAG.getLoad(Cst.getValueType(), DL, DAG.getEntryNode(), CP,
14631 MachinePointerInfo::getConstantPool(),
14632 false, false, false, Alignment);
14633 SDValue OneV = DAG.getNode(X86ISD::VBROADCAST, DL, InVT, Ld);
14634 SDValue And = DAG.getNode(ISD::AND, DL, InVT, OneV, In);
14635 return DAG.getNode(X86ISD::TESTM, DL, VT, And, And);
14638 if ((VT == MVT::v4i32) && (InVT == MVT::v4i64)) {
14639 // On AVX2, v4i64 -> v4i32 becomes VPERMD.
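    // Viewed as v8i32, each v4i64 lane is a (low, high) dword pair; the
    // {0, 2, 4, 6} mask below keeps only the low dword of every lane (the
    // truncated value on little-endian) and leaves the upper half undef
    // before the low 128 bits are extracted.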
14640 if (Subtarget->hasInt256()) {
14641 static const int ShufMask[] = {0, 2, 4, 6, -1, -1, -1, -1};
14642 In = DAG.getNode(ISD::BITCAST, DL, MVT::v8i32, In);
14643 In = DAG.getVectorShuffle(MVT::v8i32, DL, In, DAG.getUNDEF(MVT::v8i32),
14645 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, In,
14646 DAG.getIntPtrConstant(0));
14649 SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
14650 DAG.getIntPtrConstant(0));
14651 SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
14652 DAG.getIntPtrConstant(2));
14653 OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpLo);
14654 OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpHi);
14655 static const int ShufMask[] = {0, 2, 4, 6};
14656 return DAG.getVectorShuffle(VT, DL, OpLo, OpHi, ShufMask);
14659 if ((VT == MVT::v8i16) && (InVT == MVT::v8i32)) {
    // On AVX2, v8i32 -> v8i16 becomes PSHUFB.
14661 if (Subtarget->hasInt256()) {
14662 In = DAG.getNode(ISD::BITCAST, DL, MVT::v32i8, In);
14664 SmallVector<SDValue,32> pshufbMask;
14665 for (unsigned i = 0; i < 2; ++i) {
14666 pshufbMask.push_back(DAG.getConstant(0x0, MVT::i8));
14667 pshufbMask.push_back(DAG.getConstant(0x1, MVT::i8));
14668 pshufbMask.push_back(DAG.getConstant(0x4, MVT::i8));
14669 pshufbMask.push_back(DAG.getConstant(0x5, MVT::i8));
14670 pshufbMask.push_back(DAG.getConstant(0x8, MVT::i8));
14671 pshufbMask.push_back(DAG.getConstant(0x9, MVT::i8));
14672 pshufbMask.push_back(DAG.getConstant(0xc, MVT::i8));
14673 pshufbMask.push_back(DAG.getConstant(0xd, MVT::i8));
14674 for (unsigned j = 0; j < 8; ++j)
14675 pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8));
14677 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, pshufbMask);
14678 In = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v32i8, In, BV);
14679 In = DAG.getNode(ISD::BITCAST, DL, MVT::v4i64, In);
14681 static const int ShufMask[] = {0, 2, -1, -1};
14682 In = DAG.getVectorShuffle(MVT::v4i64, DL, In, DAG.getUNDEF(MVT::v4i64),
14684 In = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
14685 DAG.getIntPtrConstant(0));
14686 return DAG.getNode(ISD::BITCAST, DL, VT, In);
14689 SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
14690 DAG.getIntPtrConstant(0));
14692 SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
14693 DAG.getIntPtrConstant(4));
14695 OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, OpLo);
14696 OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, OpHi);
14698 // The PSHUFB mask:
14699 static const int ShufMask1[] = {0, 1, 4, 5, 8, 9, 12, 13,
14700 -1, -1, -1, -1, -1, -1, -1, -1};
14702 SDValue Undef = DAG.getUNDEF(MVT::v16i8);
14703 OpLo = DAG.getVectorShuffle(MVT::v16i8, DL, OpLo, Undef, ShufMask1);
14704 OpHi = DAG.getVectorShuffle(MVT::v16i8, DL, OpHi, Undef, ShufMask1);
14706 OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpLo);
14707 OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpHi);
14709 // The MOVLHPS Mask:
14710 static const int ShufMask2[] = {0, 1, 4, 5};
14711 SDValue res = DAG.getVectorShuffle(MVT::v4i32, DL, OpLo, OpHi, ShufMask2);
14712 return DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, res);
14715 // Handle truncation of V256 to V128 using shuffles.
14716 if (!VT.is128BitVector() || !InVT.is256BitVector())
14719 assert(Subtarget->hasFp256() && "256-bit vector without AVX!");
14721 unsigned NumElems = VT.getVectorNumElements();
14722 MVT NVT = MVT::getVectorVT(VT.getVectorElementType(), NumElems * 2);
14724 SmallVector<int, 16> MaskVec(NumElems * 2, -1);
14725 // Prepare truncation shuffle mask
14726 for (unsigned i = 0; i != NumElems; ++i)
14727 MaskVec[i] = i * 2;
14728 SDValue V = DAG.getVectorShuffle(NVT, DL,
14729 DAG.getNode(ISD::BITCAST, DL, NVT, In),
14730 DAG.getUNDEF(NVT), &MaskVec[0]);
14731 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V,
14732 DAG.getIntPtrConstant(0));
14735 SDValue X86TargetLowering::LowerFP_TO_SINT(SDValue Op,
14736 SelectionDAG &DAG) const {
14737 assert(!Op.getSimpleValueType().isVector());
14739 std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG,
14740 /*IsSigned=*/ true, /*IsReplace=*/ false);
14741 SDValue FIST = Vals.first, StackSlot = Vals.second;
14742 // If FP_TO_INTHelper failed, the node is actually supposed to be Legal.
14743 if (!FIST.getNode()) return Op;
14745 if (StackSlot.getNode())
14746 // Load the result.
14747 return DAG.getLoad(Op.getValueType(), SDLoc(Op),
14748 FIST, StackSlot, MachinePointerInfo(),
14749 false, false, false, 0);
14751 // The node is the result.
14755 SDValue X86TargetLowering::LowerFP_TO_UINT(SDValue Op,
14756 SelectionDAG &DAG) const {
14757 std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG,
14758 /*IsSigned=*/ false, /*IsReplace=*/ false);
14759 SDValue FIST = Vals.first, StackSlot = Vals.second;
14760 assert(FIST.getNode() && "Unexpected failure");
14762 if (StackSlot.getNode())
14763 // Load the result.
14764 return DAG.getLoad(Op.getValueType(), SDLoc(Op),
14765 FIST, StackSlot, MachinePointerInfo(),
14766 false, false, false, 0);
14768 // The node is the result.
14772 static SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) {
14774 MVT VT = Op.getSimpleValueType();
14775 SDValue In = Op.getOperand(0);
14776 MVT SVT = In.getSimpleValueType();
14778 assert(SVT == MVT::v2f32 && "Only customize MVT::v2f32 type legalization!");
14780 return DAG.getNode(X86ISD::VFPEXT, DL, VT,
14781 DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4f32,
14782 In, DAG.getUNDEF(SVT)));
14785 /// The only differences between FABS and FNEG are the mask and the logic op.
14786 /// FNEG also has a folding opportunity for FNEG(FABS(x)).
14787 static SDValue LowerFABSorFNEG(SDValue Op, SelectionDAG &DAG) {
14788 assert((Op.getOpcode() == ISD::FABS || Op.getOpcode() == ISD::FNEG) &&
14789 "Wrong opcode for lowering FABS or FNEG.");
14791 bool IsFABS = (Op.getOpcode() == ISD::FABS);
14793 // If this is a FABS and it has an FNEG user, bail out to fold the combination
14794 // into an FNABS. We'll lower the FABS after that if it is still in use.
14796 for (SDNode *User : Op->uses())
14797 if (User->getOpcode() == ISD::FNEG)
14800 SDValue Op0 = Op.getOperand(0);
14801 bool IsFNABS = !IsFABS && (Op0.getOpcode() == ISD::FABS);
14804 MVT VT = Op.getSimpleValueType();
14805 // Assume scalar op for initialization; update for vector if needed.
14806 // Note that there are no scalar bitwise logical SSE/AVX instructions, so we
14807 // generate a 16-byte vector constant and logic op even for the scalar case.
14808 // Using a 16-byte mask allows folding the load of the mask with
14809 // the logic op, so it can save (~4 bytes) on code size.
14811 unsigned NumElts = VT == MVT::f64 ? 2 : 4;
14812 // FIXME: Use function attribute "OptimizeForSize" and/or CodeGenOpt::Level to
14813 // decide if we should generate a 16-byte constant mask when we only need 4 or
14814 // 8 bytes for the scalar case.
14815 if (VT.isVector()) {
14816 EltVT = VT.getVectorElementType();
14817 NumElts = VT.getVectorNumElements();
14820 unsigned EltBits = EltVT.getSizeInBits();
14821 LLVMContext *Context = DAG.getContext();
14822 // For FABS, mask is 0x7f...; for FNEG, mask is 0x80...
14824 IsFABS ? APInt::getSignedMaxValue(EltBits) : APInt::getSignBit(EltBits);
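  // With that mask, the three cases reduce to one bitwise op on the sign bit:
  //   fabs(x)  = x & 0x7f...f   (clear the sign bit)
  //   fneg(x)  = x ^ 0x80...0   (flip the sign bit)
  //   fnabs(x) = x | 0x80...0   (set the sign bit; this folds fneg(fabs(x)))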
14825 Constant *C = ConstantInt::get(*Context, MaskElt);
14826 C = ConstantVector::getSplat(NumElts, C);
14827 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
14828 SDValue CPIdx = DAG.getConstantPool(C, TLI.getPointerTy());
14829 unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
14830 SDValue Mask = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
14831 MachinePointerInfo::getConstantPool(),
14832 false, false, false, Alignment);
14834 if (VT.isVector()) {
14835 // For a vector, cast operands to a vector type, perform the logic op,
14836 // and cast the result back to the original value type.
14837 MVT VecVT = MVT::getVectorVT(MVT::i64, VT.getSizeInBits() / 64);
14838 SDValue MaskCasted = DAG.getNode(ISD::BITCAST, dl, VecVT, Mask);
14839 SDValue Operand = IsFNABS ?
14840 DAG.getNode(ISD::BITCAST, dl, VecVT, Op0.getOperand(0)) :
14841 DAG.getNode(ISD::BITCAST, dl, VecVT, Op0);
14842 unsigned BitOp = IsFABS ? ISD::AND : IsFNABS ? ISD::OR : ISD::XOR;
14843 return DAG.getNode(ISD::BITCAST, dl, VT,
14844 DAG.getNode(BitOp, dl, VecVT, Operand, MaskCasted));
14847 // If not vector, then scalar.
14848 unsigned BitOp = IsFABS ? X86ISD::FAND : IsFNABS ? X86ISD::FOR : X86ISD::FXOR;
14849 SDValue Operand = IsFNABS ? Op0.getOperand(0) : Op0;
14850 return DAG.getNode(BitOp, dl, VT, Operand, Mask);
14853 static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
14854 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
14855 LLVMContext *Context = DAG.getContext();
14856 SDValue Op0 = Op.getOperand(0);
14857 SDValue Op1 = Op.getOperand(1);
14859 MVT VT = Op.getSimpleValueType();
14860 MVT SrcVT = Op1.getSimpleValueType();
14862 // If second operand is smaller, extend it first.
14863 if (SrcVT.bitsLT(VT)) {
14864 Op1 = DAG.getNode(ISD::FP_EXTEND, dl, VT, Op1);
14867 // And if it is bigger, shrink it first.
14868 if (SrcVT.bitsGT(VT)) {
14869 Op1 = DAG.getNode(ISD::FP_ROUND, dl, VT, Op1, DAG.getIntPtrConstant(1));
14873 // At this point the operands and the result should have the same
14874 // type, and that won't be f80 since that is not custom lowered.
14876 const fltSemantics &Sem =
14877 VT == MVT::f64 ? APFloat::IEEEdouble : APFloat::IEEEsingle;
14878 const unsigned SizeInBits = VT.getSizeInBits();
14880 SmallVector<Constant *, 4> CV(
14881 VT == MVT::f64 ? 2 : 4,
14882 ConstantFP::get(*Context, APFloat(Sem, APInt(SizeInBits, 0))));
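  // The lowering below is the usual bit-level recipe, sketched as:
  //   copysign(mag, sgn) = (mag & ~SignMask) | (sgn & SignMask)
  // where SignMask has only the topmost (sign) bit of the type set.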
14884 // First, clear all bits but the sign bit from the second operand (sign).
14885 CV[0] = ConstantFP::get(*Context,
14886 APFloat(Sem, APInt::getHighBitsSet(SizeInBits, 1)));
14887 Constant *C = ConstantVector::get(CV);
14888 SDValue CPIdx = DAG.getConstantPool(C, TLI.getPointerTy(), 16);
14889 SDValue Mask1 = DAG.getLoad(SrcVT, dl, DAG.getEntryNode(), CPIdx,
14890 MachinePointerInfo::getConstantPool(),
14891 false, false, false, 16);
14892 SDValue SignBit = DAG.getNode(X86ISD::FAND, dl, SrcVT, Op1, Mask1);
14894 // Next, clear the sign bit from the first operand (magnitude).
14895 // If it's a constant, we can clear it here.
14896 if (ConstantFPSDNode *Op0CN = dyn_cast<ConstantFPSDNode>(Op0)) {
14897 APFloat APF = Op0CN->getValueAPF();
14898 // If the magnitude is a positive zero, the sign bit alone is enough.
    if (APF.isPosZero())
      return SignBit;
    APF.clearSign();
14902 CV[0] = ConstantFP::get(*Context, APF);
14904 CV[0] = ConstantFP::get(
14906 APFloat(Sem, APInt::getLowBitsSet(SizeInBits, SizeInBits - 1)));
14908 C = ConstantVector::get(CV);
14909 CPIdx = DAG.getConstantPool(C, TLI.getPointerTy(), 16);
14910 SDValue Val = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
14911 MachinePointerInfo::getConstantPool(),
14912 false, false, false, 16);
14913 // If the magnitude operand wasn't a constant, we need to AND out the sign.
14914 if (!isa<ConstantFPSDNode>(Op0))
14915 Val = DAG.getNode(X86ISD::FAND, dl, VT, Op0, Val);
14917 // OR the magnitude value with the sign bit.
14918 return DAG.getNode(X86ISD::FOR, dl, VT, Val, SignBit);
14921 static SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) {
14922 SDValue N0 = Op.getOperand(0);
14924 MVT VT = Op.getSimpleValueType();
14926 // Lower ISD::FGETSIGN to (AND (X86ISD::FGETSIGNx86 ...) 1).
14927 SDValue xFGETSIGN = DAG.getNode(X86ISD::FGETSIGNx86, dl, VT, N0,
14928 DAG.getConstant(1, VT));
14929 return DAG.getNode(ISD::AND, dl, VT, xFGETSIGN, DAG.getConstant(1, VT));
14932 // Check whether an OR'd tree is PTEST-able.
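// Roughly, the shape being matched is (a sketch, with %v a 128/256-bit
// vector):
//   (or (extractelt %v, 0), (or (extractelt %v, 1), ...)) compared against 0
// where every element of %v is covered; this can be selected as a single
// PTEST of %v against itself instead of scalarized ORs plus a TEST.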
14933 static SDValue LowerVectorAllZeroTest(SDValue Op, const X86Subtarget *Subtarget,
14934 SelectionDAG &DAG) {
14935 assert(Op.getOpcode() == ISD::OR && "Only check OR'd tree.");
14937 if (!Subtarget->hasSSE41())
14940 if (!Op->hasOneUse())
14943 SDNode *N = Op.getNode();
14946 SmallVector<SDValue, 8> Opnds;
14947 DenseMap<SDValue, unsigned> VecInMap;
14948 SmallVector<SDValue, 8> VecIns;
14949 EVT VT = MVT::Other;
  // Recognize a special case where a vector is cast into a wide integer to
  // test all 0s.
14953 Opnds.push_back(N->getOperand(0));
14954 Opnds.push_back(N->getOperand(1));
14956 for (unsigned Slot = 0, e = Opnds.size(); Slot < e; ++Slot) {
14957 SmallVectorImpl<SDValue>::const_iterator I = Opnds.begin() + Slot;
14958 // BFS traverse all OR'd operands.
14959 if (I->getOpcode() == ISD::OR) {
14960 Opnds.push_back(I->getOperand(0));
14961 Opnds.push_back(I->getOperand(1));
14962 // Re-evaluate the number of nodes to be traversed.
14963 e += 2; // 2 more nodes (LHS and RHS) are pushed.
    // Quit if this is not an EXTRACT_VECTOR_ELT.
14968 if (I->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
    // Quit if the index is not a constant.
14972 SDValue Idx = I->getOperand(1);
14973 if (!isa<ConstantSDNode>(Idx))
14976 SDValue ExtractedFromVec = I->getOperand(0);
14977 DenseMap<SDValue, unsigned>::iterator M = VecInMap.find(ExtractedFromVec);
14978 if (M == VecInMap.end()) {
14979 VT = ExtractedFromVec.getValueType();
14980 // Quit if not 128/256-bit vector.
14981 if (!VT.is128BitVector() && !VT.is256BitVector())
14983 // Quit if not the same type.
14984 if (VecInMap.begin() != VecInMap.end() &&
14985 VT != VecInMap.begin()->first.getValueType())
14987 M = VecInMap.insert(std::make_pair(ExtractedFromVec, 0)).first;
14988 VecIns.push_back(ExtractedFromVec);
14990 M->second |= 1U << cast<ConstantSDNode>(Idx)->getZExtValue();
14993 assert((VT.is128BitVector() || VT.is256BitVector()) &&
14994 "Not extracted from 128-/256-bit vector.");
14996 unsigned FullMask = (1U << VT.getVectorNumElements()) - 1U;
14998 for (DenseMap<SDValue, unsigned>::const_iterator
14999 I = VecInMap.begin(), E = VecInMap.end(); I != E; ++I) {
15000 // Quit if not all elements are used.
15001 if (I->second != FullMask)
15005 EVT TestVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64;
15007 // Cast all vectors into TestVT for PTEST.
15008 for (unsigned i = 0, e = VecIns.size(); i < e; ++i)
15009 VecIns[i] = DAG.getNode(ISD::BITCAST, DL, TestVT, VecIns[i]);
  // If more than one full vector is evaluated, OR them together before PTEST.
15012 for (unsigned Slot = 0, e = VecIns.size(); e - Slot > 1; Slot += 2, e += 1) {
15013 // Each iteration will OR 2 nodes and append the result until there is only
15014 // 1 node left, i.e. the final OR'd value of all vectors.
15015 SDValue LHS = VecIns[Slot];
15016 SDValue RHS = VecIns[Slot + 1];
15017 VecIns.push_back(DAG.getNode(ISD::OR, DL, TestVT, LHS, RHS));
15020 return DAG.getNode(X86ISD::PTEST, DL, MVT::i32,
15021 VecIns.back(), VecIns.back());
15024 /// \brief return true if \c Op has a use that doesn't just read flags.
15025 static bool hasNonFlagsUse(SDValue Op) {
15026 for (SDNode::use_iterator UI = Op->use_begin(), UE = Op->use_end(); UI != UE;
15028 SDNode *User = *UI;
15029 unsigned UOpNo = UI.getOperandNo();
15030 if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) {
      // Look past the truncate.
15032 UOpNo = User->use_begin().getOperandNo();
15033 User = *User->use_begin();
15036 if (User->getOpcode() != ISD::BRCOND && User->getOpcode() != ISD::SETCC &&
15037 !(User->getOpcode() == ISD::SELECT && UOpNo == 0))
/// Emit nodes that will be selected as "test Op0,Op0", or something
/// equivalent.
15045 SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, SDLoc dl,
15046 SelectionDAG &DAG) const {
15047 if (Op.getValueType() == MVT::i1)
15048 // KORTEST instruction should be selected
15049 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
15050 DAG.getConstant(0, Op.getValueType()));
15052 // CF and OF aren't always set the way we want. Determine which
15053 // of these we need.
15054 bool NeedCF = false;
15055 bool NeedOF = false;
15058 case X86::COND_A: case X86::COND_AE:
15059 case X86::COND_B: case X86::COND_BE:
15062 case X86::COND_G: case X86::COND_GE:
15063 case X86::COND_L: case X86::COND_LE:
15064 case X86::COND_O: case X86::COND_NO: {
    // Check if we really need to set the Overflow flag. If NoSignedWrap is
    // present, that is not actually needed.
15068 switch (Op->getOpcode()) {
15073 const BinaryWithFlagsSDNode *BinNode =
15074 cast<BinaryWithFlagsSDNode>(Op.getNode());
15075 if (BinNode->hasNoSignedWrap())
15085 // See if we can use the EFLAGS value from the operand instead of
15086 // doing a separate TEST. TEST always sets OF and CF to 0, so unless
15087 // we prove that the arithmetic won't overflow, we can't use OF or CF.
15088 if (Op.getResNo() != 0 || NeedOF || NeedCF) {
15089 // Emit a CMP with 0, which is the TEST pattern.
15090 //if (Op.getValueType() == MVT::i1)
15091 // return DAG.getNode(X86ISD::CMP, dl, MVT::i1, Op,
15092 // DAG.getConstant(0, MVT::i1));
15093 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
15094 DAG.getConstant(0, Op.getValueType()));
15096 unsigned Opcode = 0;
15097 unsigned NumOperands = 0;
15099 // Truncate operations may prevent the merge of the SETCC instruction
15100 // and the arithmetic instruction before it. Attempt to truncate the operands
15101 // of the arithmetic instruction and use a reduced bit-width instruction.
15102 bool NeedTruncation = false;
15103 SDValue ArithOp = Op;
15104 if (Op->getOpcode() == ISD::TRUNCATE && Op->hasOneUse()) {
15105 SDValue Arith = Op->getOperand(0);
15106 // Both the trunc and the arithmetic op need to have one user each.
15107 if (Arith->hasOneUse())
15108 switch (Arith.getOpcode()) {
15115 NeedTruncation = true;
15121 // NOTICE: In the code below we use ArithOp to hold the arithmetic operation
15122 // which may be the result of a CAST. We use the variable 'Op', which is the
15123 // non-casted variable when we check for possible users.
15124 switch (ArithOp.getOpcode()) {
15126 // Due to an isel shortcoming, be conservative if this add is likely to be
15127 // selected as part of a load-modify-store instruction. When the root node
15128 // in a match is a store, isel doesn't know how to remap non-chain non-flag
15129 // uses of other nodes in the match, such as the ADD in this case. This
15130 // leads to the ADD being left around and reselected, with the result being
    // two adds in the output. Alas, even if none of our users are stores, that
    // doesn't prove we're O.K. Ergo, if we have any parents that aren't
    // CopyToReg or SETCC, eschew INC/DEC. A better fix seems to require
    // climbing the DAG back to the root, and it doesn't seem to be worth the
    // effort.
15136 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
15137 UE = Op.getNode()->use_end(); UI != UE; ++UI)
15138 if (UI->getOpcode() != ISD::CopyToReg &&
15139 UI->getOpcode() != ISD::SETCC &&
15140 UI->getOpcode() != ISD::STORE)
15143 if (ConstantSDNode *C =
15144 dyn_cast<ConstantSDNode>(ArithOp.getNode()->getOperand(1))) {
15145 // An add of one will be selected as an INC.
15146 if (C->getAPIntValue() == 1 && !Subtarget->slowIncDec()) {
15147 Opcode = X86ISD::INC;
15152 // An add of negative one (subtract of one) will be selected as a DEC.
15153 if (C->getAPIntValue().isAllOnesValue() && !Subtarget->slowIncDec()) {
15154 Opcode = X86ISD::DEC;
15160 // Otherwise use a regular EFLAGS-setting add.
15161 Opcode = X86ISD::ADD;
15166 // If we have a constant logical shift that's only used in a comparison
15167 // against zero turn it into an equivalent AND. This allows turning it into
15168 // a TEST instruction later.
15169 if ((X86CC == X86::COND_E || X86CC == X86::COND_NE) && Op->hasOneUse() &&
15170 isa<ConstantSDNode>(Op->getOperand(1)) && !hasNonFlagsUse(Op)) {
15171 EVT VT = Op.getValueType();
15172 unsigned BitWidth = VT.getSizeInBits();
15173 unsigned ShAmt = Op->getConstantOperandVal(1);
15174 if (ShAmt >= BitWidth) // Avoid undefined shifts.
15176 APInt Mask = ArithOp.getOpcode() == ISD::SRL
15177 ? APInt::getHighBitsSet(BitWidth, BitWidth - ShAmt)
15178 : APInt::getLowBitsSet(BitWidth, BitWidth - ShAmt);
15179 if (!Mask.isSignedIntN(32)) // Avoid large immediates.
15181 SDValue New = DAG.getNode(ISD::AND, dl, VT, Op->getOperand(0),
15182 DAG.getConstant(Mask, VT));
15183 DAG.ReplaceAllUsesWith(Op, New);
    // If the primary result of the 'and' isn't used, don't bother using
    // X86ISD::AND, because a TEST instruction will be better.
15191 if (!hasNonFlagsUse(Op))
15197 // Due to the ISEL shortcoming noted above, be conservative if this op is
15198 // likely to be selected as part of a load-modify-store instruction.
15199 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
15200 UE = Op.getNode()->use_end(); UI != UE; ++UI)
15201 if (UI->getOpcode() == ISD::STORE)
15204 // Otherwise use a regular EFLAGS-setting instruction.
15205 switch (ArithOp.getOpcode()) {
15206 default: llvm_unreachable("unexpected operator!");
15207 case ISD::SUB: Opcode = X86ISD::SUB; break;
15208 case ISD::XOR: Opcode = X86ISD::XOR; break;
15209 case ISD::AND: Opcode = X86ISD::AND; break;
15211 if (!NeedTruncation && (X86CC == X86::COND_E || X86CC == X86::COND_NE)) {
15212 SDValue EFLAGS = LowerVectorAllZeroTest(Op, Subtarget, DAG);
15213 if (EFLAGS.getNode())
15216 Opcode = X86ISD::OR;
15230 return SDValue(Op.getNode(), 1);
  // If we found that truncation is beneficial, perform the truncation and
  // update its users.
15238 if (NeedTruncation) {
15239 EVT VT = Op.getValueType();
15240 SDValue WideVal = Op->getOperand(0);
15241 EVT WideVT = WideVal.getValueType();
15242 unsigned ConvertedOp = 0;
15243 // Use a target machine opcode to prevent further DAGCombine
15244 // optimizations that may separate the arithmetic operations
15245 // from the setcc node.
15246 switch (WideVal.getOpcode()) {
15248 case ISD::ADD: ConvertedOp = X86ISD::ADD; break;
15249 case ISD::SUB: ConvertedOp = X86ISD::SUB; break;
15250 case ISD::AND: ConvertedOp = X86ISD::AND; break;
15251 case ISD::OR: ConvertedOp = X86ISD::OR; break;
15252 case ISD::XOR: ConvertedOp = X86ISD::XOR; break;
15256 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
15257 if (TLI.isOperationLegal(WideVal.getOpcode(), WideVT)) {
15258 SDValue V0 = DAG.getNode(ISD::TRUNCATE, dl, VT, WideVal.getOperand(0));
15259 SDValue V1 = DAG.getNode(ISD::TRUNCATE, dl, VT, WideVal.getOperand(1));
15260 Op = DAG.getNode(ConvertedOp, dl, VT, V0, V1);
15266 // Emit a CMP with 0, which is the TEST pattern.
15267 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
15268 DAG.getConstant(0, Op.getValueType()));
15270 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
15271 SmallVector<SDValue, 4> Ops;
15272 for (unsigned i = 0; i != NumOperands; ++i)
15273 Ops.push_back(Op.getOperand(i));
15275 SDValue New = DAG.getNode(Opcode, dl, VTs, Ops);
15276 DAG.ReplaceAllUsesWith(Op, New);
15277 return SDValue(New.getNode(), 1);
/// Emit nodes that will be selected as "cmp Op0,Op1", or something
/// equivalent.
15282 SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
15283 SDLoc dl, SelectionDAG &DAG) const {
15284 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op1)) {
15285 if (C->getAPIntValue() == 0)
15286 return EmitTest(Op0, X86CC, dl, DAG);
15288 if (Op0.getValueType() == MVT::i1)
15289 llvm_unreachable("Unexpected comparison operation for MVT::i1 operands");
15292 if ((Op0.getValueType() == MVT::i8 || Op0.getValueType() == MVT::i16 ||
15293 Op0.getValueType() == MVT::i32 || Op0.getValueType() == MVT::i64)) {
    // Do the comparison at i32 if it's smaller, except in the Atom case.
15295 // This avoids subregister aliasing issues. Keep the smaller reference
15296 // if we're optimizing for size, however, as that'll allow better folding
15297 // of memory operations.
15298 if (Op0.getValueType() != MVT::i32 && Op0.getValueType() != MVT::i64 &&
15299 !DAG.getMachineFunction().getFunction()->getAttributes().hasAttribute(
15300 AttributeSet::FunctionIndex, Attribute::MinSize) &&
15301 !Subtarget->isAtom()) {
15302 unsigned ExtendOp =
15303 isX86CCUnsigned(X86CC) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
15304 Op0 = DAG.getNode(ExtendOp, dl, MVT::i32, Op0);
15305 Op1 = DAG.getNode(ExtendOp, dl, MVT::i32, Op1);
15307 // Use SUB instead of CMP to enable CSE between SUB and CMP.
15308 SDVTList VTs = DAG.getVTList(Op0.getValueType(), MVT::i32);
15309 SDValue Sub = DAG.getNode(X86ISD::SUB, dl, VTs,
15311 return SDValue(Sub.getNode(), 1);
15313 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op0, Op1);
15316 /// Convert a comparison if required by the subtarget.
15317 SDValue X86TargetLowering::ConvertCmpIfNecessary(SDValue Cmp,
15318 SelectionDAG &DAG) const {
15319 // If the subtarget does not support the FUCOMI instruction, floating-point
15320 // comparisons have to be converted.
15321 if (Subtarget->hasCMov() ||
15322 Cmp.getOpcode() != X86ISD::CMP ||
15323 !Cmp.getOperand(0).getValueType().isFloatingPoint() ||
15324 !Cmp.getOperand(1).getValueType().isFloatingPoint())
15327 // The instruction selector will select an FUCOM instruction instead of
15328 // FUCOMI, which writes the comparison result to FPSW instead of EFLAGS. Hence
15329 // build an SDNode sequence that transfers the result from FPSW into EFLAGS:
15330 // (X86sahf (trunc (srl (X86fp_stsw (trunc (X86cmp ...)), 8))))
15332 SDValue TruncFPSW = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, Cmp);
15333 SDValue FNStSW = DAG.getNode(X86ISD::FNSTSW16r, dl, MVT::i16, TruncFPSW);
15334 SDValue Srl = DAG.getNode(ISD::SRL, dl, MVT::i16, FNStSW,
15335 DAG.getConstant(8, MVT::i8));
15336 SDValue TruncSrl = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Srl);
15337 return DAG.getNode(X86ISD::SAHF, dl, MVT::i32, TruncSrl);
15340 /// The minimum architected relative accuracy is 2^-12. We need one
15341 /// Newton-Raphson step to have a good float result (24 bits of precision).
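/// The refinement itself is the standard Newton-Raphson iteration for
/// 1/sqrt(a), sketched as:
///   x1 = x0 * (1.5 - 0.5 * a * x0 * x0)
/// which roughly doubles the number of correct bits per step, taking the
/// ~12-bit RSQRTSS/RSQRTPS estimate to full single-precision accuracy.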
15342 SDValue X86TargetLowering::getRsqrtEstimate(SDValue Op,
15343 DAGCombinerInfo &DCI,
15344 unsigned &RefinementSteps,
15345 bool &UseOneConstNR) const {
15346 // FIXME: We should use instruction latency models to calculate the cost of
15347 // each potential sequence, but this is very hard to do reliably because
15348 // at least Intel's Core* chips have variable timing based on the number of
15349 // significant digits in the divisor and/or sqrt operand.
15350 if (!Subtarget->useSqrtEst())
15353 EVT VT = Op.getValueType();
15355 // SSE1 has rsqrtss and rsqrtps.
15356 // TODO: Add support for AVX512 (v16f32).
15357 // It is likely not profitable to do this for f64 because a double-precision
15358 // rsqrt estimate with refinement on x86 prior to FMA requires at least 16
15359 // instructions: convert to single, rsqrtss, convert back to double, refine
15360 // (3 steps = at least 13 insts). If an 'rsqrtsd' variant was added to the ISA
15361 // along with FMA, this could be a throughput win.
15362 if ((Subtarget->hasSSE1() && (VT == MVT::f32 || VT == MVT::v4f32)) ||
15363 (Subtarget->hasAVX() && VT == MVT::v8f32)) {
15364 RefinementSteps = 1;
15365 UseOneConstNR = false;
15366 return DCI.DAG.getNode(X86ISD::FRSQRT, SDLoc(Op), VT, Op);
15371 /// The minimum architected relative accuracy is 2^-12. We need one
15372 /// Newton-Raphson step to have a good float result (24 bits of precision).
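/// The refinement itself is the standard Newton-Raphson iteration for 1/a,
/// sketched as:
///   x1 = x0 * (2.0 - a * x0)
/// so each step roughly doubles the accuracy of the ~12-bit RCPSS/RCPPS
/// estimate.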
15373 SDValue X86TargetLowering::getRecipEstimate(SDValue Op,
15374 DAGCombinerInfo &DCI,
15375 unsigned &RefinementSteps) const {
15376 // FIXME: We should use instruction latency models to calculate the cost of
15377 // each potential sequence, but this is very hard to do reliably because
15378 // at least Intel's Core* chips have variable timing based on the number of
15379 // significant digits in the divisor.
15380 if (!Subtarget->useReciprocalEst())
15383 EVT VT = Op.getValueType();
15385 // SSE1 has rcpss and rcpps. AVX adds a 256-bit variant for rcpps.
15386 // TODO: Add support for AVX512 (v16f32).
15387 // It is likely not profitable to do this for f64 because a double-precision
15388 // reciprocal estimate with refinement on x86 prior to FMA requires
15389 // 15 instructions: convert to single, rcpss, convert back to double, refine
15390 // (3 steps = 12 insts). If an 'rcpsd' variant was added to the ISA
15391 // along with FMA, this could be a throughput win.
15392 if ((Subtarget->hasSSE1() && (VT == MVT::f32 || VT == MVT::v4f32)) ||
15393 (Subtarget->hasAVX() && VT == MVT::v8f32)) {
15394 RefinementSteps = ReciprocalEstimateRefinementSteps;
15395 return DCI.DAG.getNode(X86ISD::FRCP, SDLoc(Op), VT, Op);
15400 static bool isAllOnes(SDValue V) {
15401 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V);
15402 return C && C->isAllOnesValue();
15405 /// LowerToBT - Result of 'and' is compared against zero. Turn it into a BT node
15406 /// if it's possible.
15407 SDValue X86TargetLowering::LowerToBT(SDValue And, ISD::CondCode CC,
15408 SDLoc dl, SelectionDAG &DAG) const {
15409 SDValue Op0 = And.getOperand(0);
15410 SDValue Op1 = And.getOperand(1);
15411 if (Op0.getOpcode() == ISD::TRUNCATE)
15412 Op0 = Op0.getOperand(0);
15413 if (Op1.getOpcode() == ISD::TRUNCATE)
15414 Op1 = Op1.getOperand(0);
15417 if (Op1.getOpcode() == ISD::SHL)
15418 std::swap(Op0, Op1);
15419 if (Op0.getOpcode() == ISD::SHL) {
15420 if (ConstantSDNode *And00C = dyn_cast<ConstantSDNode>(Op0.getOperand(0)))
15421 if (And00C->getZExtValue() == 1) {
        // If we looked past a truncate, check that it's only truncating away
        // sign bits.
15424 unsigned BitWidth = Op0.getValueSizeInBits();
15425 unsigned AndBitWidth = And.getValueSizeInBits();
15426 if (BitWidth > AndBitWidth) {
15428 DAG.computeKnownBits(Op0, Zeros, Ones);
15429 if (Zeros.countLeadingOnes() < BitWidth - AndBitWidth)
15433 RHS = Op0.getOperand(1);
15435 } else if (Op1.getOpcode() == ISD::Constant) {
15436 ConstantSDNode *AndRHS = cast<ConstantSDNode>(Op1);
15437 uint64_t AndRHSVal = AndRHS->getZExtValue();
15438 SDValue AndLHS = Op0;
15440 if (AndRHSVal == 1 && AndLHS.getOpcode() == ISD::SRL) {
15441 LHS = AndLHS.getOperand(0);
15442 RHS = AndLHS.getOperand(1);
15445 // Use BT if the immediate can't be encoded in a TEST instruction.
15446 if (!isUInt<32>(AndRHSVal) && isPowerOf2_64(AndRHSVal)) {
15448 RHS = DAG.getConstant(Log2_64_Ceil(AndRHSVal), LHS.getValueType());
15452 if (LHS.getNode()) {
15453 // If LHS is i8, promote it to i32 with any_extend. There is no i8 BT
15454 // instruction. Since the shift amount is in-range-or-undefined, we know
15455 // that doing a bittest on the i32 value is ok. We extend to i32 because
15456 // the encoding for the i16 version is larger than the i32 version.
15457 // Also promote i16 to i32 for performance / code size reason.
15458 if (LHS.getValueType() == MVT::i8 ||
15459 LHS.getValueType() == MVT::i16)
15460 LHS = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, LHS);
15462 // If the operand types disagree, extend the shift amount to match. Since
15463 // BT ignores high bits (like shifts) we can use anyextend.
15464 if (LHS.getValueType() != RHS.getValueType())
15465 RHS = DAG.getNode(ISD::ANY_EXTEND, dl, LHS.getValueType(), RHS);
15467 SDValue BT = DAG.getNode(X86ISD::BT, dl, MVT::i32, LHS, RHS);
15468 X86::CondCode Cond = CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B;
15469 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
15470 DAG.getConstant(Cond, MVT::i8), BT);
/// \brief Turns an ISD::CondCode into a value suitable for an SSE
/// floating-point compare.
15478 static int translateX86FSETCC(ISD::CondCode SetCCOpcode, SDValue &Op0,
15483 // SSE Condition code mapping:
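  //   0 - EQ
  //   1 - LT
  //   2 - LE
  //   3 - UNORD
  //   4 - NEQ
  //   5 - NLT
  //   6 - NLE
  //   7 - ORD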
15492 switch (SetCCOpcode) {
15493 default: llvm_unreachable("Unexpected SETCC condition");
15495 case ISD::SETEQ: SSECC = 0; break;
15497 case ISD::SETGT: Swap = true; // Fallthrough
15499 case ISD::SETOLT: SSECC = 1; break;
15501 case ISD::SETGE: Swap = true; // Fallthrough
15503 case ISD::SETOLE: SSECC = 2; break;
15504 case ISD::SETUO: SSECC = 3; break;
15506 case ISD::SETNE: SSECC = 4; break;
15507 case ISD::SETULE: Swap = true; // Fallthrough
15508 case ISD::SETUGE: SSECC = 5; break;
15509 case ISD::SETULT: Swap = true; // Fallthrough
15510 case ISD::SETUGT: SSECC = 6; break;
15511 case ISD::SETO: SSECC = 7; break;
15513 case ISD::SETONE: SSECC = 8; break;
15516 std::swap(Op0, Op1);
// Lower256IntVSETCC - Break a 256-bit integer VSETCC into two new 128-bit
// ones, and then concatenate the result back.
15523 static SDValue Lower256IntVSETCC(SDValue Op, SelectionDAG &DAG) {
15524 MVT VT = Op.getSimpleValueType();
15526 assert(VT.is256BitVector() && Op.getOpcode() == ISD::SETCC &&
15527 "Unsupported value type for operation");
15529 unsigned NumElems = VT.getVectorNumElements();
15531 SDValue CC = Op.getOperand(2);
15533 // Extract the LHS vectors
15534 SDValue LHS = Op.getOperand(0);
15535 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
15536 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);
15538 // Extract the RHS vectors
15539 SDValue RHS = Op.getOperand(1);
15540 SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, dl);
15541 SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, dl);
15543 // Issue the operation on the smaller types and concatenate the result back
15544 MVT EltVT = VT.getVectorElementType();
15545 MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
15546 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
15547 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1, CC),
15548 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2, CC));
15551 static SDValue LowerIntVSETCC_AVX512(SDValue Op, SelectionDAG &DAG,
15552 const X86Subtarget *Subtarget) {
15553 SDValue Op0 = Op.getOperand(0);
15554 SDValue Op1 = Op.getOperand(1);
15555 SDValue CC = Op.getOperand(2);
15556 MVT VT = Op.getSimpleValueType();
15559 assert(Op0.getValueType().getVectorElementType().getSizeInBits() >= 8 &&
15560 Op.getValueType().getScalarType() == MVT::i1 &&
15561 "Cannot set masked compare for this operation");
15563 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
15565 bool Unsigned = false;
15568 switch (SetCCOpcode) {
15569 default: llvm_unreachable("Unexpected SETCC condition");
15570 case ISD::SETNE: SSECC = 4; break;
15571 case ISD::SETEQ: Opc = X86ISD::PCMPEQM; break;
15572 case ISD::SETUGT: SSECC = 6; Unsigned = true; break;
15573 case ISD::SETLT: Swap = true; //fall-through
15574 case ISD::SETGT: Opc = X86ISD::PCMPGTM; break;
15575 case ISD::SETULT: SSECC = 1; Unsigned = true; break;
15576 case ISD::SETUGE: SSECC = 5; Unsigned = true; break; //NLT
15577 case ISD::SETGE: Swap = true; SSECC = 2; break; // LE + swap
15578 case ISD::SETULE: Unsigned = true; //fall-through
15579 case ISD::SETLE: SSECC = 2; break;
15583 std::swap(Op0, Op1);
15585 return DAG.getNode(Opc, dl, VT, Op0, Op1);
15586 Opc = Unsigned ? X86ISD::CMPMU: X86ISD::CMPM;
15587 return DAG.getNode(Opc, dl, VT, Op0, Op1,
15588 DAG.getConstant(SSECC, MVT::i8));
15591 /// \brief Try to turn a VSETULT into a VSETULE by modifying its second
15592 /// operand \p Op1. If the transformation is not trivial (for example because
15593 /// \p Op1 is not a constant vector), return an empty value.
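/// For example, (x u< <4, 4, 4, 4>) can be rewritten as (x u<= <3, 3, 3, 3>);
/// the rewrite is rejected if any constant element is zero, since subtracting
/// one from it would wrap.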
15594 static SDValue ChangeVSETULTtoVSETULE(SDLoc dl, SDValue Op1, SelectionDAG &DAG)
15596 BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op1.getNode());
15600 MVT VT = Op1.getSimpleValueType();
15601 MVT EltVT = VT.getVectorElementType();
15602 unsigned n = VT.getVectorNumElements();
15603 SmallVector<SDValue, 8> ULTOp1;
15605 for (unsigned i = 0; i < n; ++i) {
15606 ConstantSDNode *Elt = dyn_cast<ConstantSDNode>(BV->getOperand(i));
15607 if (!Elt || Elt->isOpaque() || Elt->getValueType(0) != EltVT)
15610 // Avoid underflow.
15611 APInt Val = Elt->getAPIntValue();
15615 ULTOp1.push_back(DAG.getConstant(Val - 1, EltVT));
15618 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, ULTOp1);
15621 static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget,
15622 SelectionDAG &DAG) {
15623 SDValue Op0 = Op.getOperand(0);
15624 SDValue Op1 = Op.getOperand(1);
15625 SDValue CC = Op.getOperand(2);
15626 MVT VT = Op.getSimpleValueType();
15627 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
15628 bool isFP = Op.getOperand(1).getSimpleValueType().isFloatingPoint();
15633 MVT EltVT = Op0.getSimpleValueType().getVectorElementType();
15634 assert(EltVT == MVT::f32 || EltVT == MVT::f64);
15637 unsigned SSECC = translateX86FSETCC(SetCCOpcode, Op0, Op1);
15638 unsigned Opc = X86ISD::CMPP;
15639 if (Subtarget->hasAVX512() && VT.getVectorElementType() == MVT::i1) {
15640 assert(VT.getVectorNumElements() <= 16);
15641 Opc = X86ISD::CMPM;
15643 // In the two special cases we can't handle, emit two comparisons.
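// SETUEQ is "unordered or equal": CMPP(UNORD = 3) OR'ed with CMPP(EQ = 0).
// SETONE is "ordered and not equal": CMPP(ORD = 7) AND'ed with CMPP(NEQ = 4).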
15646 unsigned CombineOpc;
15647 if (SetCCOpcode == ISD::SETUEQ) {
15648 CC0 = 3; CC1 = 0; CombineOpc = ISD::OR;
15650 assert(SetCCOpcode == ISD::SETONE);
15651 CC0 = 7; CC1 = 4; CombineOpc = ISD::AND;
15654 SDValue Cmp0 = DAG.getNode(Opc, dl, VT, Op0, Op1,
15655 DAG.getConstant(CC0, MVT::i8));
15656 SDValue Cmp1 = DAG.getNode(Opc, dl, VT, Op0, Op1,
15657 DAG.getConstant(CC1, MVT::i8));
15658 return DAG.getNode(CombineOpc, dl, VT, Cmp0, Cmp1);
15660 // Handle all other FP comparisons here.
15661 return DAG.getNode(Opc, dl, VT, Op0, Op1,
15662 DAG.getConstant(SSECC, MVT::i8));
15665 // Break 256-bit integer vector compare into smaller ones.
15666 if (VT.is256BitVector() && !Subtarget->hasInt256())
15667 return Lower256IntVSETCC(Op, DAG);
15669 bool MaskResult = (VT.getVectorElementType() == MVT::i1);
15670 EVT OpVT = Op1.getValueType();
15671 if (Subtarget->hasAVX512()) {
15672 if (Op1.getValueType().is512BitVector() ||
15673 (Subtarget->hasBWI() && Subtarget->hasVLX()) ||
15674 (MaskResult && OpVT.getVectorElementType().getSizeInBits() >= 32))
15675 return LowerIntVSETCC_AVX512(Op, DAG, Subtarget);
15677 // In the AVX-512 architecture setcc returns a mask with i1 elements,
15678 // but there is no compare instruction for i8 and i16 elements in KNL.
15679 // We are not dealing with 512-bit operands in this case, since those
15680 // types are illegal.
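// For example, a v16i8 compare that must produce a v16i1 mask on KNL is
// emitted as a v16i8 SETCC (lowered further down to PCMPGT/PCMPEQ) followed
// by a TRUNCATE of the 0/-1 lanes down to v16i1.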
15682 (OpVT.getVectorElementType().getSizeInBits() < 32 &&
15683 OpVT.getVectorElementType().getSizeInBits() >= 8))
15684 return DAG.getNode(ISD::TRUNCATE, dl, VT,
15685 DAG.getNode(ISD::SETCC, dl, OpVT, Op0, Op1, CC));
15688 // We are handling one of the integer comparisons here. Since SSE only has
15689 // GT and EQ comparisons for integers, swapping operands and multiple
15690 // operations may be required for some comparisons.
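// For example, SETLE becomes PCMPGT with the result inverted (a <= b is
// !(a > b)), and SETULT becomes a swap plus PCMPGT on sign-flipped operands.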
15692 bool Swap = false, Invert = false, FlipSigns = false, MinMax = false;
15693 bool Subus = false;
15695 switch (SetCCOpcode) {
15696 default: llvm_unreachable("Unexpected SETCC condition");
15697 case ISD::SETNE: Invert = true;
15698 case ISD::SETEQ: Opc = X86ISD::PCMPEQ; break;
15699 case ISD::SETLT: Swap = true;
15700 case ISD::SETGT: Opc = X86ISD::PCMPGT; break;
15701 case ISD::SETGE: Swap = true;
15702 case ISD::SETLE: Opc = X86ISD::PCMPGT;
15703 Invert = true; break;
15704 case ISD::SETULT: Swap = true;
15705 case ISD::SETUGT: Opc = X86ISD::PCMPGT;
15706 FlipSigns = true; break;
15707 case ISD::SETUGE: Swap = true;
15708 case ISD::SETULE: Opc = X86ISD::PCMPGT;
15709 FlipSigns = true; Invert = true; break;
15712 // Special case: Use min/max operations for SETULE/SETUGE
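// x u<= y is equivalent to umin(x, y) == x, and x u>= y to umax(x, y) == x;
// the PCMPEQ against Op0 is emitted near the end of this function.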
15713 MVT VET = VT.getVectorElementType();
15715 (Subtarget->hasSSE41() && (VET >= MVT::i8 && VET <= MVT::i32))
15716 || (Subtarget->hasSSE2() && (VET == MVT::i8));
15719 switch (SetCCOpcode) {
15721 case ISD::SETULE: Opc = X86ISD::UMIN; MinMax = true; break;
15722 case ISD::SETUGE: Opc = X86ISD::UMAX; MinMax = true; break;
15725 if (MinMax) { Swap = false; Invert = false; FlipSigns = false; }
15728 bool hasSubus = Subtarget->hasSSE2() && (VET == MVT::i8 || VET == MVT::i16);
15729 if (!MinMax && hasSubus) {
15730 // As another special case, use PSUBUS[BW] when it's profitable. E.g. for
15732 // t = psubus Op0, Op1
15733 // pcmpeq t, <0..0>
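// This works because the unsigned saturating subtract yields zero exactly
// when Op0 u<= Op1, so comparing the PSUBUS result against zero answers the
// original question without flipping sign bits.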
15734 switch (SetCCOpcode) {
15736 case ISD::SETULT: {
15737 // If the comparison is against a constant we can turn this into a
15738 // setule. With psubus, setule does not require a swap. This is
15739 // beneficial because the constant register is no longer clobbered as
15740 // the destination, so it can be hoisted out of a loop. Only do this
15741 // pre-AVX, since the three-operand AVX compares are not destructive.
15742 if (Subtarget->hasAVX())
15744 SDValue ULEOp1 = ChangeVSETULTtoVSETULE(dl, Op1, DAG);
15745 if (ULEOp1.getNode()) {
15747 Subus = true; Invert = false; Swap = false;
15751 // Psubus is better than flip-sign because it requires no inversion.
15752 case ISD::SETUGE: Subus = true; Invert = false; Swap = true; break;
15753 case ISD::SETULE: Subus = true; Invert = false; Swap = false; break;
15757 Opc = X86ISD::SUBUS;
15763 std::swap(Op0, Op1);
15765 // Check that the operation in question is available (most are plain SSE2,
15766 // but PCMPGTQ and PCMPEQQ have different requirements).
15767 if (VT == MVT::v2i64) {
15768 if (Opc == X86ISD::PCMPGT && !Subtarget->hasSSE42()) {
15769 assert(Subtarget->hasSSE2() && "Don't know how to lower!");
15771 // First cast everything to the right type.
15772 Op0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op0);
15773 Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op1);
15775 // Since SSE has no unsigned integer comparisons, we need to flip the sign
15776 // bits of the inputs before performing those operations. The lower
15777 // compare is always unsigned.
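// The usual trick: x u> y holds iff (x ^ 0x80000000) s> (y ^ 0x80000000),
// so XORing the sign bit into a 32-bit lane makes the signed PCMPGTD behave
// as an unsigned compare for that lane.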
15780 SB = DAG.getConstant(0x80000000U, MVT::v4i32);
15782 SDValue Sign = DAG.getConstant(0x80000000U, MVT::i32);
15783 SDValue Zero = DAG.getConstant(0x00000000U, MVT::i32);
15784 SB = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
15785 Sign, Zero, Sign, Zero);
15787 Op0 = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Op0, SB);
15788 Op1 = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Op1, SB);
15790 // Emulate PCMPGTQ with (hi1 > hi2) | ((hi1 == hi2) & (lo1 > lo2))
15791 SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
15792 SDValue EQ = DAG.getNode(X86ISD::PCMPEQ, dl, MVT::v4i32, Op0, Op1);
15794 // Create masks for only the low parts/high parts of the 64 bit integers.
15795 static const int MaskHi[] = { 1, 1, 3, 3 };
15796 static const int MaskLo[] = { 0, 0, 2, 2 };
15797 SDValue EQHi = DAG.getVectorShuffle(MVT::v4i32, dl, EQ, EQ, MaskHi);
15798 SDValue GTLo = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskLo);
15799 SDValue GTHi = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);
15801 SDValue Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, EQHi, GTLo);
15802 Result = DAG.getNode(ISD::OR, dl, MVT::v4i32, Result, GTHi);
15805 Result = DAG.getNOT(dl, Result, MVT::v4i32);
15807 return DAG.getNode(ISD::BITCAST, dl, VT, Result);
15810 if (Opc == X86ISD::PCMPEQ && !Subtarget->hasSSE41()) {
15811 // If pcmpeqq is missing but pcmpeqd is available synthesize pcmpeqq with
15812 // pcmpeqd + pshufd + pand.
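// A 64-bit lane compares equal exactly when both of its 32-bit halves do,
// hence the PSHUFD that swaps the halves and the PAND of the two results.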
15813 assert(Subtarget->hasSSE2() && !FlipSigns && "Don't know how to lower!");
15815 // First cast everything to the right type.
15816 Op0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op0);
15817 Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op1);
15820 SDValue Result = DAG.getNode(Opc, dl, MVT::v4i32, Op0, Op1);
15822 // Make sure the lower and upper halves are both all-ones.
15823 static const int Mask[] = { 1, 0, 3, 2 };
15824 SDValue Shuf = DAG.getVectorShuffle(MVT::v4i32, dl, Result, Result, Mask);
15825 Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, Result, Shuf);
15828 Result = DAG.getNOT(dl, Result, MVT::v4i32);
15830 return DAG.getNode(ISD::BITCAST, dl, VT, Result);
15834 // Since SSE has no unsigned integer comparisons, we need to flip the sign
15835 // bits of the inputs before performing those operations.
15837 EVT EltVT = VT.getVectorElementType();
15838 SDValue SB = DAG.getConstant(APInt::getSignBit(EltVT.getSizeInBits()), VT);
15839 Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SB);
15840 Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SB);
15843 SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
15845 // If the logical-not of the result is required, perform that now.
15847 Result = DAG.getNOT(dl, Result, VT);
15850 Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Op0, Result);
15853 Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Result,
15854 getZeroVector(VT, Subtarget, DAG, dl));
15859 SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
15861 MVT VT = Op.getSimpleValueType();
15863 if (VT.isVector()) return LowerVSETCC(Op, Subtarget, DAG);
15865 assert(((!Subtarget->hasAVX512() && VT == MVT::i8) || (VT == MVT::i1))
15866 && "SetCC type must be 8-bit or 1-bit integer");
15867 SDValue Op0 = Op.getOperand(0);
15868 SDValue Op1 = Op.getOperand(1);
15870 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
15872 // Optimize to BT if possible.
15873 // Lower (X & (1 << N)) == 0 to BT(X, N).
15874 // Lower ((X >>u N) & 1) != 0 to BT(X, N).
15875 // Lower ((X >>s N) & 1) != 0 to BT(X, N).
15876 if (Op0.getOpcode() == ISD::AND && Op0.hasOneUse() &&
15877 Op1.getOpcode() == ISD::Constant &&
15878 cast<ConstantSDNode>(Op1)->isNullValue() &&
15879 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
15880 SDValue NewSetCC = LowerToBT(Op0, CC, dl, DAG);
15881 if (NewSetCC.getNode()) {
15883 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewSetCC);
15888 // Look for X == 0, X == 1, X != 0, or X != 1. We can simplify some forms of these.
15890 if (Op1.getOpcode() == ISD::Constant &&
15891 (cast<ConstantSDNode>(Op1)->getZExtValue() == 1 ||
15892 cast<ConstantSDNode>(Op1)->isNullValue()) &&
15893 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
15895 // If the input is a setcc, then reuse the input setcc or use a new one with
15896 // the inverted condition.
15897 if (Op0.getOpcode() == X86ISD::SETCC) {
15898 X86::CondCode CCode = (X86::CondCode)Op0.getConstantOperandVal(0);
15899 bool Invert = (CC == ISD::SETNE) ^
15900 cast<ConstantSDNode>(Op1)->isNullValue();
15904 CCode = X86::GetOppositeBranchCondition(CCode);
15905 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
15906 DAG.getConstant(CCode, MVT::i8),
15907 Op0.getOperand(1));
15909 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, SetCC);
15913 if ((Op0.getValueType() == MVT::i1) && (Op1.getOpcode() == ISD::Constant) &&
15914 (cast<ConstantSDNode>(Op1)->getZExtValue() == 1) &&
15915 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
15917 ISD::CondCode NewCC = ISD::getSetCCInverse(CC, true);
15918 return DAG.getSetCC(dl, VT, Op0, DAG.getConstant(0, MVT::i1), NewCC);
15921 bool isFP = Op1.getSimpleValueType().isFloatingPoint();
15922 unsigned X86CC = TranslateX86CC(CC, isFP, Op0, Op1, DAG);
15923 if (X86CC == X86::COND_INVALID)
15926 SDValue EFLAGS = EmitCmp(Op0, Op1, X86CC, dl, DAG);
15927 EFLAGS = ConvertCmpIfNecessary(EFLAGS, DAG);
15928 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
15929 DAG.getConstant(X86CC, MVT::i8), EFLAGS);
15931 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, SetCC);
15935 // isX86LogicalCmp - Return true if opcode is an X86 logical comparison.
15936 static bool isX86LogicalCmp(SDValue Op) {
15937 unsigned Opc = Op.getNode()->getOpcode();
15938 if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI ||
15939 Opc == X86ISD::SAHF)
15941 if (Op.getResNo() == 1 &&
15942 (Opc == X86ISD::ADD ||
15943 Opc == X86ISD::SUB ||
15944 Opc == X86ISD::ADC ||
15945 Opc == X86ISD::SBB ||
15946 Opc == X86ISD::SMUL ||
15947 Opc == X86ISD::UMUL ||
15948 Opc == X86ISD::INC ||
15949 Opc == X86ISD::DEC ||
15950 Opc == X86ISD::OR ||
15951 Opc == X86ISD::XOR ||
15952 Opc == X86ISD::AND))
15955 if (Op.getResNo() == 2 && Opc == X86ISD::UMUL)
15961 static bool isTruncWithZeroHighBitsInput(SDValue V, SelectionDAG &DAG) {
15962 if (V.getOpcode() != ISD::TRUNCATE)
15965 SDValue VOp0 = V.getOperand(0);
15966 unsigned InBits = VOp0.getValueSizeInBits();
15967 unsigned Bits = V.getValueSizeInBits();
15968 return DAG.MaskedValueIsZero(VOp0, APInt::getHighBitsSet(InBits,InBits-Bits));
15971 SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
15972 bool addTest = true;
15973 SDValue Cond = Op.getOperand(0);
15974 SDValue Op1 = Op.getOperand(1);
15975 SDValue Op2 = Op.getOperand(2);
15977 EVT VT = Op1.getValueType();
15980 // Lower fp selects into a CMP/AND/ANDN/OR sequence when the necessary SSE ops
15981 // are available. Otherwise fp cmovs get lowered into a less efficient branch
15982 // sequence later on.
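// On the pre-AVX-512 path the nodes built below compute, in effect,
//   mask   = fsetcc(CondOp0, CondOp1, SSECC)   ; all-ones or all-zeros
//   result = (mask & Op1) | (~mask & Op2)
// i.e. a branchless blend of the two select operands.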
15983 if (Cond.getOpcode() == ISD::SETCC &&
15984 ((Subtarget->hasSSE2() && (VT == MVT::f32 || VT == MVT::f64)) ||
15985 (Subtarget->hasSSE1() && VT == MVT::f32)) &&
15986 VT == Cond.getOperand(0).getValueType() && Cond->hasOneUse()) {
15987 SDValue CondOp0 = Cond.getOperand(0), CondOp1 = Cond.getOperand(1);
15988 int SSECC = translateX86FSETCC(
15989 cast<CondCodeSDNode>(Cond.getOperand(2))->get(), CondOp0, CondOp1);
15992 if (Subtarget->hasAVX512()) {
15993 SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, MVT::i1, CondOp0, CondOp1,
15994 DAG.getConstant(SSECC, MVT::i8));
15995 return DAG.getNode(X86ISD::SELECT, DL, VT, Cmp, Op1, Op2);
15997 SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, VT, CondOp0, CondOp1,
15998 DAG.getConstant(SSECC, MVT::i8));
15999 SDValue AndN = DAG.getNode(X86ISD::FANDN, DL, VT, Cmp, Op2);
16000 SDValue And = DAG.getNode(X86ISD::FAND, DL, VT, Cmp, Op1);
16001 return DAG.getNode(X86ISD::FOR, DL, VT, AndN, And);
16005 if (Cond.getOpcode() == ISD::SETCC) {
16006 SDValue NewCond = LowerSETCC(Cond, DAG);
16007 if (NewCond.getNode())
16011 // (select (x == 0), -1, y) -> (sign_bit (x - 1)) | y
16012 // (select (x == 0), y, -1) -> ~(sign_bit (x - 1)) | y
16013 // (select (x != 0), y, -1) -> (sign_bit (x - 1)) | y
16014 // (select (x != 0), -1, y) -> ~(sign_bit (x - 1)) | y
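// These all hinge on CMP(x, 1) setting the carry flag exactly when x == 0
// (x u< 1), so SETCC_CARRY materializes 0 or -1 without a branch, and a
// final OR merges in y.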
16015 if (Cond.getOpcode() == X86ISD::SETCC &&
16016 Cond.getOperand(1).getOpcode() == X86ISD::CMP &&
16017 isZero(Cond.getOperand(1).getOperand(1))) {
16018 SDValue Cmp = Cond.getOperand(1);
16020 unsigned CondCode =cast<ConstantSDNode>(Cond.getOperand(0))->getZExtValue();
16022 if ((isAllOnes(Op1) || isAllOnes(Op2)) &&
16023 (CondCode == X86::COND_E || CondCode == X86::COND_NE)) {
16024 SDValue Y = isAllOnes(Op2) ? Op1 : Op2;
16026 SDValue CmpOp0 = Cmp.getOperand(0);
16027 // Apply further optimizations for special cases
16028 // (select (x != 0), -1, 0) -> neg & sbb
16029 // (select (x == 0), 0, -1) -> neg & sbb
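// NEG sets the carry flag exactly when its operand is non-zero, so SUB(0, x)
// followed by SETCC_CARRY yields -1 for x != 0 and 0 for x == 0 with no
// compare against a constant.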
16030 if (ConstantSDNode *YC = dyn_cast<ConstantSDNode>(Y))
16031 if (YC->isNullValue() &&
16032 (isAllOnes(Op1) == (CondCode == X86::COND_NE))) {
16033 SDVTList VTs = DAG.getVTList(CmpOp0.getValueType(), MVT::i32);
16034 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, VTs,
16035 DAG.getConstant(0, CmpOp0.getValueType()),
16037 SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
16038 DAG.getConstant(X86::COND_B, MVT::i8),
16039 SDValue(Neg.getNode(), 1));
16043 Cmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32,
16044 CmpOp0, DAG.getConstant(1, CmpOp0.getValueType()));
16045 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
16047 SDValue Res = // Res = 0 or -1.
16048 DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
16049 DAG.getConstant(X86::COND_B, MVT::i8), Cmp);
16051 if (isAllOnes(Op1) != (CondCode == X86::COND_E))
16052 Res = DAG.getNOT(DL, Res, Res.getValueType());
16054 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(Op2);
16055 if (!N2C || !N2C->isNullValue())
16056 Res = DAG.getNode(ISD::OR, DL, Res.getValueType(), Res, Y);
16061 // Look past (and (setcc_carry (cmp ...)), 1).
16062 if (Cond.getOpcode() == ISD::AND &&
16063 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
16064 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
16065 if (C && C->getAPIntValue() == 1)
16066 Cond = Cond.getOperand(0);
16069 // If the condition flag is set by an X86ISD::CMP, then use it as the condition
16070 // setting operand in place of the X86ISD::SETCC.
16071 unsigned CondOpcode = Cond.getOpcode();
16072 if (CondOpcode == X86ISD::SETCC ||
16073 CondOpcode == X86ISD::SETCC_CARRY) {
16074 CC = Cond.getOperand(0);
16076 SDValue Cmp = Cond.getOperand(1);
16077 unsigned Opc = Cmp.getOpcode();
16078 MVT VT = Op.getSimpleValueType();
16080 bool IllegalFPCMov = false;
16081 if (VT.isFloatingPoint() && !VT.isVector() &&
16082 !isScalarFPTypeInSSEReg(VT)) // FPStack?
16083 IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSExtValue());
16085 if ((isX86LogicalCmp(Cmp) && !IllegalFPCMov) ||
16086 Opc == X86ISD::BT) { // FIXME
16090 } else if (CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
16091 CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
16092 ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) &&
16093 Cond.getOperand(0).getValueType() != MVT::i8)) {
16094 SDValue LHS = Cond.getOperand(0);
16095 SDValue RHS = Cond.getOperand(1);
16096 unsigned X86Opcode;
16099 switch (CondOpcode) {
16100 case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break;
16101 case ISD::SADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break;
16102 case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break;
16103 case ISD::SSUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break;
16104 case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break;
16105 case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break;
16106 default: llvm_unreachable("unexpected overflowing operator");
16108 if (CondOpcode == ISD::UMULO)
16109 VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(),
16112 VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
16114 SDValue X86Op = DAG.getNode(X86Opcode, DL, VTs, LHS, RHS);
16116 if (CondOpcode == ISD::UMULO)
16117 Cond = X86Op.getValue(2);
16119 Cond = X86Op.getValue(1);
16121 CC = DAG.getConstant(X86Cond, MVT::i8);
16126 // Look past the truncate if the high bits are known zero.
16127 if (isTruncWithZeroHighBitsInput(Cond, DAG))
16128 Cond = Cond.getOperand(0);
16130 // We know the result of AND is compared against zero. Try to match it to BT.
16132 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
16133 SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, DL, DAG);
16134 if (NewSetCC.getNode()) {
16135 CC = NewSetCC.getOperand(0);
16136 Cond = NewSetCC.getOperand(1);
16143 CC = DAG.getConstant(X86::COND_NE, MVT::i8);
16144 Cond = EmitTest(Cond, X86::COND_NE, DL, DAG);
16147 // a < b ? -1 : 0 -> RES = setcc_carry
16148 // a < b ? 0 : -1 -> RES = ~setcc_carry
16149 // a >= b ? -1 : 0 -> RES = ~setcc_carry
16150 // a >= b ? 0 : -1 -> RES = setcc_carry
16151 if (Cond.getOpcode() == X86ISD::SUB) {
16152 Cond = ConvertCmpIfNecessary(Cond, DAG);
16153 unsigned CondCode = cast<ConstantSDNode>(CC)->getZExtValue();
16155 if ((CondCode == X86::COND_AE || CondCode == X86::COND_B) &&
16156 (isAllOnes(Op1) || isAllOnes(Op2)) && (isZero(Op1) || isZero(Op2))) {
16157 SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
16158 DAG.getConstant(X86::COND_B, MVT::i8), Cond);
16159 if (isAllOnes(Op1) != (CondCode == X86::COND_B))
16160 return DAG.getNOT(DL, Res, Res.getValueType());
16165 // X86 doesn't have an i8 cmov. If both operands are the result of a truncate,
16166 // widen the cmov and push the truncate through. This avoids introducing a new
16167 // branch during isel and doesn't add any extensions.
16168 if (Op.getValueType() == MVT::i8 &&
16169 Op1.getOpcode() == ISD::TRUNCATE && Op2.getOpcode() == ISD::TRUNCATE) {
16170 SDValue T1 = Op1.getOperand(0), T2 = Op2.getOperand(0);
16171 if (T1.getValueType() == T2.getValueType() &&
16172 // Blacklist CopyFromReg to avoid partial register stalls.
16173 T1.getOpcode() != ISD::CopyFromReg && T2.getOpcode()!=ISD::CopyFromReg){
16174 SDVTList VTs = DAG.getVTList(T1.getValueType(), MVT::Glue);
16175 SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, VTs, T2, T1, CC, Cond);
16176 return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
16180 // X86ISD::CMOV means set the result (which is operand 1) to the RHS if
16181 // condition is true.
16182 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
16183 SDValue Ops[] = { Op2, Op1, CC, Cond };
16184 return DAG.getNode(X86ISD::CMOV, DL, VTs, Ops);
16187 static SDValue LowerSIGN_EXTEND_AVX512(SDValue Op, const X86Subtarget *Subtarget,
16188 SelectionDAG &DAG) {
16189 MVT VT = Op->getSimpleValueType(0);
16190 SDValue In = Op->getOperand(0);
16191 MVT InVT = In.getSimpleValueType();
16192 MVT VTElt = VT.getVectorElementType();
16193 MVT InVTElt = InVT.getVectorElementType();
16197 if ((InVTElt == MVT::i1) &&
16198 (((Subtarget->hasBWI() && Subtarget->hasVLX() &&
16199 VT.getSizeInBits() <= 256 && VTElt.getSizeInBits() <= 16)) ||
16201 ((Subtarget->hasBWI() && VT.is512BitVector() &&
16202 VTElt.getSizeInBits() <= 16)) ||
16204 ((Subtarget->hasDQI() && Subtarget->hasVLX() &&
16205 VT.getSizeInBits() <= 256 && VTElt.getSizeInBits() >= 32)) ||
16207 ((Subtarget->hasDQI() && VT.is512BitVector() &&
16208 VTElt.getSizeInBits() >= 32))))
16209 return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
16211 unsigned int NumElts = VT.getVectorNumElements();
16213 if (NumElts != 8 && NumElts != 16)
16216 if (VT.is512BitVector() && InVT.getVectorElementType() != MVT::i1) {
16217 if (In.getOpcode() == X86ISD::VSEXT || In.getOpcode() == X86ISD::VZEXT)
16218 return DAG.getNode(In.getOpcode(), dl, VT, In.getOperand(0));
16219 return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
16222 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
16223 assert (InVT.getVectorElementType() == MVT::i1 && "Unexpected vector type");
16225 MVT ExtVT = (NumElts == 8) ? MVT::v8i64 : MVT::v16i32;
16226 Constant *C = ConstantInt::get(*DAG.getContext(),
16227 APInt::getAllOnesValue(ExtVT.getScalarType().getSizeInBits()));
16229 SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy());
16230 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
16231 SDValue Ld = DAG.getLoad(ExtVT.getScalarType(), dl, DAG.getEntryNode(), CP,
16232 MachinePointerInfo::getConstantPool(),
16233 false, false, false, Alignment);
16234 SDValue Brcst = DAG.getNode(X86ISD::VBROADCASTM, dl, ExtVT, In, Ld);
16235 if (VT.is512BitVector())
16237 return DAG.getNode(X86ISD::VTRUNC, dl, VT, Brcst);
16240 static SDValue LowerSIGN_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
16241 SelectionDAG &DAG) {
16242 MVT VT = Op->getSimpleValueType(0);
16243 SDValue In = Op->getOperand(0);
16244 MVT InVT = In.getSimpleValueType();
16247 if (VT.is512BitVector() || InVT.getVectorElementType() == MVT::i1)
16248 return LowerSIGN_EXTEND_AVX512(Op, Subtarget, DAG);
16250 if ((VT != MVT::v4i64 || InVT != MVT::v4i32) &&
16251 (VT != MVT::v8i32 || InVT != MVT::v8i16) &&
16252 (VT != MVT::v16i16 || InVT != MVT::v16i8))
16255 if (Subtarget->hasInt256())
16256 return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
16258 // Optimize vectors in AVX mode:
16259 // sign extend v8i16 to v8i32 and v4i32 to v4i64.
16262 // Divide the input vector into two parts,
16263 // e.g. for v4i32 the shuffle masks will be { 0, 1, -1, -1 } and { 2, 3, -1, -1 },
16264 // use the vpmovsx instruction to extend each half (v4i32 -> v2i64, v8i16 -> v4i32),
16265 // and concatenate the halves back into the original VT.
16267 unsigned NumElems = InVT.getVectorNumElements();
16268 SDValue Undef = DAG.getUNDEF(InVT);
16270 SmallVector<int,8> ShufMask1(NumElems, -1);
16271 for (unsigned i = 0; i != NumElems/2; ++i)
16274 SDValue OpLo = DAG.getVectorShuffle(InVT, dl, In, Undef, &ShufMask1[0]);
16276 SmallVector<int,8> ShufMask2(NumElems, -1);
16277 for (unsigned i = 0; i != NumElems/2; ++i)
16278 ShufMask2[i] = i + NumElems/2;
16280 SDValue OpHi = DAG.getVectorShuffle(InVT, dl, In, Undef, &ShufMask2[0]);
16282 MVT HalfVT = MVT::getVectorVT(VT.getScalarType(),
16283 VT.getVectorNumElements()/2);
16285 OpLo = DAG.getNode(X86ISD::VSEXT, dl, HalfVT, OpLo);
16286 OpHi = DAG.getNode(X86ISD::VSEXT, dl, HalfVT, OpHi);
16288 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
16291 // Lower vector extended loads using a shuffle. If SSSE3 is not available we
16292 // may emit an illegal shuffle but the expansion is still better than scalar
16293 // code. We generate X86ISD::VSEXT for SEXTLOADs if it's available, otherwise
16294 // we'll emit a shuffle and an arithmetic shift.
16295 // TODO: It is possible to support ZExt by zeroing the undef values during
16296 // the shuffle phase or after the shuffle.
16297 static SDValue LowerExtendedLoad(SDValue Op, const X86Subtarget *Subtarget,
16298 SelectionDAG &DAG) {
16299 MVT RegVT = Op.getSimpleValueType();
16300 assert(RegVT.isVector() && "We only custom lower vector sext loads.");
16301 assert(RegVT.isInteger() &&
16302 "We only custom lower integer vector sext loads.");
16304 // Nothing useful we can do without SSE2 shuffles.
16305 assert(Subtarget->hasSSE2() && "We only custom lower sext loads with SSE2.");
16307 LoadSDNode *Ld = cast<LoadSDNode>(Op.getNode());
16309 EVT MemVT = Ld->getMemoryVT();
16310 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
16311 unsigned RegSz = RegVT.getSizeInBits();
16313 ISD::LoadExtType Ext = Ld->getExtensionType();
16315 assert((Ext == ISD::EXTLOAD || Ext == ISD::SEXTLOAD)
16316 && "Only anyext and sext are currently implemented.");
16317 assert(MemVT != RegVT && "Cannot extend to the same type");
16318 assert(MemVT.isVector() && "Must load a vector from memory");
16320 unsigned NumElems = RegVT.getVectorNumElements();
16321 unsigned MemSz = MemVT.getSizeInBits();
16322 assert(RegSz > MemSz && "Register size must be greater than the mem size");
16324 if (Ext == ISD::SEXTLOAD && RegSz == 256 && !Subtarget->hasInt256()) {
16325 // The only way in which we have a legal 256-bit vector result but not the
16326 // integer 256-bit operations needed to directly lower a sextload is if we
16327 // have AVX1 but not AVX2. In that case, we can always emit a sextload to
16328 // a 128-bit vector and a normal sign_extend to 256-bits that should get
16329 // correctly legalized. We do this late to allow the canonical form of
16330 // sextload to persist throughout the rest of the DAG combiner -- it wants
16331 // to fold together any extensions it can, and so will fuse a sign_extend
16332 // of an sextload into a sextload targeting a wider value.
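// Roughly, on AVX1 a sextload producing v8i32 from a v8i16 in memory becomes
// a plain v8i16 load followed by a SIGN_EXTEND; that extend is then itself
// lowered (see LowerSIGN_EXTEND above) by splitting into two 128-bit halves.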
16334 if (MemSz == 128) {
16335 // Just switch this to a normal load.
16336 assert(TLI.isTypeLegal(MemVT) && "If the memory type is a 128-bit type, "
16337 "it must be a legal 128-bit vector "
16339 Load = DAG.getLoad(MemVT, dl, Ld->getChain(), Ld->getBasePtr(),
16340 Ld->getPointerInfo(), Ld->isVolatile(), Ld->isNonTemporal(),
16341 Ld->isInvariant(), Ld->getAlignment());
16343 assert(MemSz < 128 &&
16344 "Can't extend a type wider than 128 bits to a 256 bit vector!");
16345 // Do an sext load to a 128-bit vector type. We want to use the same
16346 // number of elements, but elements half as wide. This will end up being
16347 // recursively lowered by this routine, but will succeed as we definitely
16348 // have all the necessary features if we're using AVX1.
16350 EVT::getIntegerVT(*DAG.getContext(), RegVT.getScalarSizeInBits() / 2);
16351 EVT HalfVecVT = EVT::getVectorVT(*DAG.getContext(), HalfEltVT, NumElems);
16353 DAG.getExtLoad(Ext, dl, HalfVecVT, Ld->getChain(), Ld->getBasePtr(),
16354 Ld->getPointerInfo(), MemVT, Ld->isVolatile(),
16355 Ld->isNonTemporal(), Ld->isInvariant(),
16356 Ld->getAlignment());
16359 // Replace chain users with the new chain.
16360 assert(Load->getNumValues() == 2 && "Loads must carry a chain!");
16361 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), Load.getValue(1));
16363 // Finally, do a normal sign-extend to the desired register.
16364 return DAG.getSExtOrTrunc(Load, dl, RegVT);
16367 // All sizes must be a power of two.
16368 assert(isPowerOf2_32(RegSz * MemSz * NumElems) &&
16369 "Non-power-of-two elements are not custom lowered!");
16371 // Attempt to load the original value using scalar loads.
16372 // Find the largest scalar type that divides the total loaded size.
16373 MVT SclrLoadTy = MVT::i8;
16374 for (MVT Tp : MVT::integer_valuetypes()) {
16375 if (TLI.isTypeLegal(Tp) && ((MemSz % Tp.getSizeInBits()) == 0)) {
16380 // On 32-bit systems i64 is not legal, so we can't use 64-bit integer loads; try bitcasting to f64.
16381 if (TLI.isTypeLegal(MVT::f64) && SclrLoadTy.getSizeInBits() < 64 &&
16383 SclrLoadTy = MVT::f64;
16385 // Calculate the number of scalar loads that we need to perform
16386 // in order to load our vector from memory.
16387 unsigned NumLoads = MemSz / SclrLoadTy.getSizeInBits();
16389 assert((Ext != ISD::SEXTLOAD || NumLoads == 1) &&
16390 "Can only lower sext loads with a single scalar load!");
16392 unsigned loadRegZize = RegSz;
16393 if (Ext == ISD::SEXTLOAD && RegSz == 256)
16396 // Represent our vector as a sequence of elements which are the
16397 // largest scalar that we can load.
16398 EVT LoadUnitVecVT = EVT::getVectorVT(
16399 *DAG.getContext(), SclrLoadTy, loadRegZize / SclrLoadTy.getSizeInBits());
16401 // Represent the data using the same element type that is stored in
16402 // memory. In practice, we "widen" MemVT.
16404 EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
16405 loadRegZize / MemVT.getScalarType().getSizeInBits());
16407 assert(WideVecVT.getSizeInBits() == LoadUnitVecVT.getSizeInBits() &&
16408 "Invalid vector type");
16410 // We can't shuffle using an illegal type.
16411 assert(TLI.isTypeLegal(WideVecVT) &&
16412 "We only lower types that form legal widened vector types");
16414 SmallVector<SDValue, 8> Chains;
16415 SDValue Ptr = Ld->getBasePtr();
16416 SDValue Increment =
16417 DAG.getConstant(SclrLoadTy.getSizeInBits() / 8, TLI.getPointerTy());
16418 SDValue Res = DAG.getUNDEF(LoadUnitVecVT);
16420 for (unsigned i = 0; i < NumLoads; ++i) {
16421 // Perform a single load.
16422 SDValue ScalarLoad =
16423 DAG.getLoad(SclrLoadTy, dl, Ld->getChain(), Ptr, Ld->getPointerInfo(),
16424 Ld->isVolatile(), Ld->isNonTemporal(), Ld->isInvariant(),
16425 Ld->getAlignment());
16426 Chains.push_back(ScalarLoad.getValue(1));
16427 // Create the first element using SCALAR_TO_VECTOR in order to avoid
16428 // another round of DAG combining.
16430 Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LoadUnitVecVT, ScalarLoad);
16432 Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, LoadUnitVecVT, Res,
16433 ScalarLoad, DAG.getIntPtrConstant(i));
16435 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
16438 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
16440 // Bitcast the loaded value to a vector of the original element type, in
16441 // the size of the target vector type.
16442 SDValue SlicedVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Res);
16443 unsigned SizeRatio = RegSz / MemSz;
16445 if (Ext == ISD::SEXTLOAD) {
16446 // If we have SSE4.1, we can directly emit a VSEXT node.
16447 if (Subtarget->hasSSE41()) {
16448 SDValue Sext = DAG.getNode(X86ISD::VSEXT, dl, RegVT, SlicedVec);
16449 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
16453 // Otherwise we'll shuffle the small elements in the high bits of the
16454 // larger type and perform an arithmetic shift. If the shift is not legal
16455 // it's better to scalarize.
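// Sketch for a v4i32 result loaded from four i8 values (illustrative only):
// the bytes are shuffled into the top byte of each 32-bit lane and then
// shifted right arithmetically by 24, which replicates the sign bit across
// the rest of the lane.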
16456 assert(TLI.isOperationLegalOrCustom(ISD::SRA, RegVT) &&
16457 "We can't implement a sext load without an arithmetic right shift!");
16459 // Redistribute the loaded elements into the different locations.
16460 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
16461 for (unsigned i = 0; i != NumElems; ++i)
16462 ShuffleVec[i * SizeRatio + SizeRatio - 1] = i;
16464 SDValue Shuff = DAG.getVectorShuffle(
16465 WideVecVT, dl, SlicedVec, DAG.getUNDEF(WideVecVT), &ShuffleVec[0]);
16467 Shuff = DAG.getNode(ISD::BITCAST, dl, RegVT, Shuff);
16469 // Build the arithmetic shift.
16470 unsigned Amt = RegVT.getVectorElementType().getSizeInBits() -
16471 MemVT.getVectorElementType().getSizeInBits();
16473 DAG.getNode(ISD::SRA, dl, RegVT, Shuff, DAG.getConstant(Amt, RegVT));
16475 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
16479 // Redistribute the loaded elements into the different locations.
16480 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
16481 for (unsigned i = 0; i != NumElems; ++i)
16482 ShuffleVec[i * SizeRatio] = i;
16484 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, SlicedVec,
16485 DAG.getUNDEF(WideVecVT), &ShuffleVec[0]);
16487 // Bitcast to the requested type.
16488 Shuff = DAG.getNode(ISD::BITCAST, dl, RegVT, Shuff);
16489 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
16493 // isAndOrOfSetCCs - Return true if node is an ISD::AND or
16494 // ISD::OR of two X86ISD::SETCC nodes, each of which has no other use apart
16495 // from the AND / OR.
16496 static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) {
16497 Opc = Op.getOpcode();
16498 if (Opc != ISD::OR && Opc != ISD::AND)
16500 return (Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
16501 Op.getOperand(0).hasOneUse() &&
16502 Op.getOperand(1).getOpcode() == X86ISD::SETCC &&
16503 Op.getOperand(1).hasOneUse());
16506 // isXor1OfSetCC - Return true if node is an ISD::XOR of an X86ISD::SETCC
16507 // and 1, where the SETCC node has a single use.
16508 static bool isXor1OfSetCC(SDValue Op) {
16509 if (Op.getOpcode() != ISD::XOR)
16511 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
16512 if (N1C && N1C->getAPIntValue() == 1) {
16513 return Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
16514 Op.getOperand(0).hasOneUse();
16519 SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
16520 bool addTest = true;
16521 SDValue Chain = Op.getOperand(0);
16522 SDValue Cond = Op.getOperand(1);
16523 SDValue Dest = Op.getOperand(2);
16526 bool Inverted = false;
16528 if (Cond.getOpcode() == ISD::SETCC) {
16529 // Check for setcc([su]{add,sub,mul}o == 0).
16530 if (cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETEQ &&
16531 isa<ConstantSDNode>(Cond.getOperand(1)) &&
16532 cast<ConstantSDNode>(Cond.getOperand(1))->isNullValue() &&
16533 Cond.getOperand(0).getResNo() == 1 &&
16534 (Cond.getOperand(0).getOpcode() == ISD::SADDO ||
16535 Cond.getOperand(0).getOpcode() == ISD::UADDO ||
16536 Cond.getOperand(0).getOpcode() == ISD::SSUBO ||
16537 Cond.getOperand(0).getOpcode() == ISD::USUBO ||
16538 Cond.getOperand(0).getOpcode() == ISD::SMULO ||
16539 Cond.getOperand(0).getOpcode() == ISD::UMULO)) {
16541 Cond = Cond.getOperand(0);
16543 SDValue NewCond = LowerSETCC(Cond, DAG);
16544 if (NewCond.getNode())
16549 // FIXME: LowerXALUO doesn't handle these!!
16550 else if (Cond.getOpcode() == X86ISD::ADD ||
16551 Cond.getOpcode() == X86ISD::SUB ||
16552 Cond.getOpcode() == X86ISD::SMUL ||
16553 Cond.getOpcode() == X86ISD::UMUL)
16554 Cond = LowerXALUO(Cond, DAG);
16557 // Look past (and (setcc_carry (cmp ...)), 1).
16558 if (Cond.getOpcode() == ISD::AND &&
16559 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
16560 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
16561 if (C && C->getAPIntValue() == 1)
16562 Cond = Cond.getOperand(0);
16565 // If the condition flag is set by an X86ISD::CMP, then use it as the condition
16566 // setting operand in place of the X86ISD::SETCC.
16567 unsigned CondOpcode = Cond.getOpcode();
16568 if (CondOpcode == X86ISD::SETCC ||
16569 CondOpcode == X86ISD::SETCC_CARRY) {
16570 CC = Cond.getOperand(0);
16572 SDValue Cmp = Cond.getOperand(1);
16573 unsigned Opc = Cmp.getOpcode();
16574 // FIXME: WHY THE SPECIAL CASING OF LogicalCmp??
16575 if (isX86LogicalCmp(Cmp) || Opc == X86ISD::BT) {
16579 switch (cast<ConstantSDNode>(CC)->getZExtValue()) {
16583 // These can only come from an arithmetic instruction with overflow,
16584 // e.g. SADDO, UADDO.
16585 Cond = Cond.getNode()->getOperand(1);
16591 CondOpcode = Cond.getOpcode();
16592 if (CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
16593 CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
16594 ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) &&
16595 Cond.getOperand(0).getValueType() != MVT::i8)) {
16596 SDValue LHS = Cond.getOperand(0);
16597 SDValue RHS = Cond.getOperand(1);
16598 unsigned X86Opcode;
16601 // Keep this in sync with LowerXALUO, otherwise we might create redundant
16602 // instructions that can't be removed afterwards (i.e. X86ISD::ADD and X86ISD::SUB).
16604 switch (CondOpcode) {
16605 case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break;
16607 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
16609 X86Opcode = X86ISD::INC; X86Cond = X86::COND_O;
16612 X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break;
16613 case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break;
16615 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
16617 X86Opcode = X86ISD::DEC; X86Cond = X86::COND_O;
16620 X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break;
16621 case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break;
16622 case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break;
16623 default: llvm_unreachable("unexpected overflowing operator");
16626 X86Cond = X86::GetOppositeBranchCondition((X86::CondCode)X86Cond);
16627 if (CondOpcode == ISD::UMULO)
16628 VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(),
16631 VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
16633 SDValue X86Op = DAG.getNode(X86Opcode, dl, VTs, LHS, RHS);
16635 if (CondOpcode == ISD::UMULO)
16636 Cond = X86Op.getValue(2);
16638 Cond = X86Op.getValue(1);
16640 CC = DAG.getConstant(X86Cond, MVT::i8);
16644 if (Cond.hasOneUse() && isAndOrOfSetCCs(Cond, CondOpc)) {
16645 SDValue Cmp = Cond.getOperand(0).getOperand(1);
16646 if (CondOpc == ISD::OR) {
16647 // Also, recognize the pattern generated by an FCMP_UNE. We can emit
16648 // two branches instead of an explicit OR instruction with a separate test.
16650 if (Cmp == Cond.getOperand(1).getOperand(1) &&
16651 isX86LogicalCmp(Cmp)) {
16652 CC = Cond.getOperand(0).getOperand(0);
16653 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16654 Chain, Dest, CC, Cmp);
16655 CC = Cond.getOperand(1).getOperand(0);
16659 } else { // ISD::AND
16660 // Also, recognize the pattern generated by an FCMP_OEQ. We can emit
16661 // two branches instead of an explicit AND instruction with a
16662 // separate test. However, we only do this if this block doesn't
16663 // have a fall-through edge, because this requires an explicit
16664 // jmp when the condition is false.
16665 if (Cmp == Cond.getOperand(1).getOperand(1) &&
16666 isX86LogicalCmp(Cmp) &&
16667 Op.getNode()->hasOneUse()) {
16668 X86::CondCode CCode =
16669 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
16670 CCode = X86::GetOppositeBranchCondition(CCode);
16671 CC = DAG.getConstant(CCode, MVT::i8);
16672 SDNode *User = *Op.getNode()->use_begin();
16673 // Look for an unconditional branch following this conditional branch.
16674 // We need this because we need to reverse the successors in order
16675 // to implement FCMP_OEQ.
16676 if (User->getOpcode() == ISD::BR) {
16677 SDValue FalseBB = User->getOperand(1);
16679 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
16680 assert(NewBR == User);
16684 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16685 Chain, Dest, CC, Cmp);
16686 X86::CondCode CCode =
16687 (X86::CondCode)Cond.getOperand(1).getConstantOperandVal(0);
16688 CCode = X86::GetOppositeBranchCondition(CCode);
16689 CC = DAG.getConstant(CCode, MVT::i8);
16695 } else if (Cond.hasOneUse() && isXor1OfSetCC(Cond)) {
16696 // Recognize xorb (setcc), 1 patterns; the xor inverts the condition.
16697 // These are normally transformed by the DAG combiner, except when the
16698 // condition is set by an arithmetic-with-overflow node.
16699 X86::CondCode CCode =
16700 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
16701 CCode = X86::GetOppositeBranchCondition(CCode);
16702 CC = DAG.getConstant(CCode, MVT::i8);
16703 Cond = Cond.getOperand(0).getOperand(1);
16705 } else if (Cond.getOpcode() == ISD::SETCC &&
16706 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETOEQ) {
16707 // For FCMP_OEQ, we can emit
16708 // two branches instead of an explicit AND instruction with a
16709 // separate test. However, we only do this if this block doesn't
16710 // have a fall-through edge, because this requires an explicit
16711 // jmp when the condition is false.
16712 if (Op.getNode()->hasOneUse()) {
16713 SDNode *User = *Op.getNode()->use_begin();
16714 // Look for an unconditional branch following this conditional branch.
16715 // We need this because we need to reverse the successors in order
16716 // to implement FCMP_OEQ.
16717 if (User->getOpcode() == ISD::BR) {
16718 SDValue FalseBB = User->getOperand(1);
16720 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
16721 assert(NewBR == User);
16725 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
16726 Cond.getOperand(0), Cond.getOperand(1));
16727 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
16728 CC = DAG.getConstant(X86::COND_NE, MVT::i8);
16729 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16730 Chain, Dest, CC, Cmp);
16731 CC = DAG.getConstant(X86::COND_P, MVT::i8);
16736 } else if (Cond.getOpcode() == ISD::SETCC &&
16737 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETUNE) {
16738 // For FCMP_UNE, we can emit
16739 // two branches instead of an explicit AND instruction with a
16740 // separate test. However, we only do this if this block doesn't
16741 // have a fall-through edge, because this requires an explicit
16742 // jmp when the condition is false.
16743 if (Op.getNode()->hasOneUse()) {
16744 SDNode *User = *Op.getNode()->use_begin();
16745 // Look for an unconditional branch following this conditional branch.
16746 // We need this because we need to reverse the successors in order
16747 // to implement FCMP_UNE.
16748 if (User->getOpcode() == ISD::BR) {
16749 SDValue FalseBB = User->getOperand(1);
16751 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
16752 assert(NewBR == User);
16755 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
16756 Cond.getOperand(0), Cond.getOperand(1));
16757 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
16758 CC = DAG.getConstant(X86::COND_NE, MVT::i8);
16759 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16760 Chain, Dest, CC, Cmp);
16761 CC = DAG.getConstant(X86::COND_NP, MVT::i8);
16771 // Look past the truncate if the high bits are known zero.
16772 if (isTruncWithZeroHighBitsInput(Cond, DAG))
16773 Cond = Cond.getOperand(0);
16775 // We know the result of AND is compared against zero. Try to match it to BT.
16777 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
16778 SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, dl, DAG);
16779 if (NewSetCC.getNode()) {
16780 CC = NewSetCC.getOperand(0);
16781 Cond = NewSetCC.getOperand(1);
16788 X86::CondCode X86Cond = Inverted ? X86::COND_E : X86::COND_NE;
16789 CC = DAG.getConstant(X86Cond, MVT::i8);
16790 Cond = EmitTest(Cond, X86Cond, dl, DAG);
16792 Cond = ConvertCmpIfNecessary(Cond, DAG);
16793 return DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16794 Chain, Dest, CC, Cond);
16797 // Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets.
16798 // Calls to _alloca are needed to probe the stack when allocating more than 4k
16799 // bytes in one go. Touching the stack at 4K increments is necessary to ensure
16800 // that the guard pages used by the OS virtual memory manager are allocated in
16801 // correct sequence.
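// For example, a single 16K adjustment of the stack pointer could jump past
// the guard page entirely; the probing loop in _alloca/_chkstk touches each
// 4K page in turn so the OS can grow the stack one page at a time.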
16803 X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
16804 SelectionDAG &DAG) const {
16805 MachineFunction &MF = DAG.getMachineFunction();
16806 bool SplitStack = MF.shouldSplitStack();
16807 bool Lower = (Subtarget->isOSWindows() && !Subtarget->isTargetMachO()) ||
16812 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
16813 SDNode* Node = Op.getNode();
16815 unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();
16816 assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and"
16817 " not tell us which reg is the stack pointer!");
16818 EVT VT = Node->getValueType(0);
16819 SDValue Tmp1 = SDValue(Node, 0);
16820 SDValue Tmp2 = SDValue(Node, 1);
16821 SDValue Tmp3 = Node->getOperand(2);
16822 SDValue Chain = Tmp1.getOperand(0);
16824 // Chain the dynamic stack allocation so that it doesn't modify the stack
16825 // pointer when other instructions are using the stack.
16826 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(0, true),
16829 SDValue Size = Tmp2.getOperand(1);
16830 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
16831 Chain = SP.getValue(1);
16832 unsigned Align = cast<ConstantSDNode>(Tmp3)->getZExtValue();
16833 const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
16834 unsigned StackAlign = TFI.getStackAlignment();
16835 Tmp1 = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
16836 if (Align > StackAlign)
16837 Tmp1 = DAG.getNode(ISD::AND, dl, VT, Tmp1,
16838 DAG.getConstant(-(uint64_t)Align, VT));
16839 Chain = DAG.getCopyToReg(Chain, dl, SPReg, Tmp1); // Output chain
16841 Tmp2 = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, true),
16842 DAG.getIntPtrConstant(0, true), SDValue(),
16845 SDValue Ops[2] = { Tmp1, Tmp2 };
16846 return DAG.getMergeValues(Ops, dl);
16850 SDValue Chain = Op.getOperand(0);
16851 SDValue Size = Op.getOperand(1);
16852 unsigned Align = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
16853 EVT VT = Op.getNode()->getValueType(0);
16855 bool Is64Bit = Subtarget->is64Bit();
16856 EVT SPTy = getPointerTy();
16859 MachineRegisterInfo &MRI = MF.getRegInfo();
16862 // The 64-bit implementation of segmented stacks needs to clobber both r10
16863 // and r11. This makes it impossible to use it along with nested parameters.
16864 const Function *F = MF.getFunction();
16866 for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
16868 if (I->hasNestAttr())
16869 report_fatal_error("Cannot use segmented stacks with functions that "
16870 "have nested arguments.");
16873 const TargetRegisterClass *AddrRegClass =
16874 getRegClassFor(getPointerTy());
16875 unsigned Vreg = MRI.createVirtualRegister(AddrRegClass);
16876 Chain = DAG.getCopyToReg(Chain, dl, Vreg, Size);
16877 SDValue Value = DAG.getNode(X86ISD::SEG_ALLOCA, dl, SPTy, Chain,
16878 DAG.getRegister(Vreg, SPTy));
16879 SDValue Ops1[2] = { Value, Chain };
16880 return DAG.getMergeValues(Ops1, dl);
16883 const unsigned Reg = (Subtarget->isTarget64BitLP64() ? X86::RAX : X86::EAX);
16885 Chain = DAG.getCopyToReg(Chain, dl, Reg, Size, Flag);
16886 Flag = Chain.getValue(1);
16887 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
16889 Chain = DAG.getNode(X86ISD::WIN_ALLOCA, dl, NodeTys, Chain, Flag);
16891 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
16892 unsigned SPReg = RegInfo->getStackRegister();
16893 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, SPTy);
16894 Chain = SP.getValue(1);
16897 SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0),
16898 DAG.getConstant(-(uint64_t)Align, VT));
16899 Chain = DAG.getCopyToReg(Chain, dl, SPReg, SP);
16902 SDValue Ops1[2] = { SP, Chain };
16903 return DAG.getMergeValues(Ops1, dl);
16907 SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
16908 MachineFunction &MF = DAG.getMachineFunction();
16909 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
16911 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
16914 if (!Subtarget->is64Bit() || Subtarget->isTargetWin64()) {
16915 // vastart just stores the address of the VarArgsFrameIndex slot into the
16916 // memory location argument.
16917 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
16919 return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
16920 MachinePointerInfo(SV), false, false, 0);
16924 // The va_list layout stored below is: gp_offset (0 - 6 * 8),
16925 // fp_offset (48 - 48 + 8 * 16), overflow_arg_area (a pointer to the
16926 // parameters passed in memory), and reg_save_area.
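// For reference (SysV x86-64 ABI), the stores below follow the
// __va_list_tag layout, at byte offsets 0, 4, 8 and 16:
//   struct __va_list_tag {
//     unsigned gp_offset;        // bytes 0-3
//     unsigned fp_offset;        // bytes 4-7
//     void *overflow_arg_area;   // bytes 8-15
//     void *reg_save_area;       // bytes 16-23
//   };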
16928 SmallVector<SDValue, 8> MemOps;
16929 SDValue FIN = Op.getOperand(1);
16931 SDValue Store = DAG.getStore(Op.getOperand(0), DL,
16932 DAG.getConstant(FuncInfo->getVarArgsGPOffset(),
16934 FIN, MachinePointerInfo(SV), false, false, 0);
16935 MemOps.push_back(Store);
16938 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
16939 FIN, DAG.getIntPtrConstant(4));
16940 Store = DAG.getStore(Op.getOperand(0), DL,
16941 DAG.getConstant(FuncInfo->getVarArgsFPOffset(),
16943 FIN, MachinePointerInfo(SV, 4), false, false, 0);
16944 MemOps.push_back(Store);
16946 // Store ptr to overflow_arg_area
16947 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
16948 FIN, DAG.getIntPtrConstant(4));
16949 SDValue OVFIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
16951 Store = DAG.getStore(Op.getOperand(0), DL, OVFIN, FIN,
16952 MachinePointerInfo(SV, 8),
16954 MemOps.push_back(Store);
16956 // Store ptr to reg_save_area.
16957 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
16958 FIN, DAG.getIntPtrConstant(8));
16959 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
16961 Store = DAG.getStore(Op.getOperand(0), DL, RSFIN, FIN,
16962 MachinePointerInfo(SV, 16), false, false, 0);
16963 MemOps.push_back(Store);
16964 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
16967 SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
16968 assert(Subtarget->is64Bit() &&
16969 "LowerVAARG only handles 64-bit va_arg!");
16970 assert((Subtarget->isTargetLinux() ||
16971 Subtarget->isTargetDarwin()) &&
16972 "Unhandled target in LowerVAARG");
16973 assert(Op.getNode()->getNumOperands() == 4);
16974 SDValue Chain = Op.getOperand(0);
16975 SDValue SrcPtr = Op.getOperand(1);
16976 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
16977 unsigned Align = Op.getConstantOperandVal(3);
16980 EVT ArgVT = Op.getNode()->getValueType(0);
16981 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
16982 uint32_t ArgSize = getDataLayout()->getTypeAllocSize(ArgTy);
16985 // Decide which area this value should be read from.
16986 // TODO: Implement the AMD64 ABI in its entirety. This simple
16987 // selection mechanism works only for the basic types.
16988 if (ArgVT == MVT::f80) {
16989 llvm_unreachable("va_arg for f80 not yet implemented");
16990 } else if (ArgVT.isFloatingPoint() && ArgSize <= 16 /*bytes*/) {
16991 ArgMode = 2; // Argument passed in XMM register. Use fp_offset.
16992 } else if (ArgVT.isInteger() && ArgSize <= 32 /*bytes*/) {
16993 ArgMode = 1; // Argument passed in GPR64 register(s). Use gp_offset.
16995 llvm_unreachable("Unhandled argument type in LowerVAARG");
16998 if (ArgMode == 2) {
16999 // Sanity Check: Make sure using fp_offset makes sense.
17000 assert(!DAG.getTarget().Options.UseSoftFloat &&
17001 !(DAG.getMachineFunction()
17002 .getFunction()->getAttributes()
17003 .hasAttribute(AttributeSet::FunctionIndex,
17004 Attribute::NoImplicitFloat)) &&
17005 Subtarget->hasSSE1());
17008 // Insert VAARG_64 node into the DAG
17009 // VAARG_64 returns two values: Variable Argument Address, Chain
17010 SmallVector<SDValue, 11> InstOps;
17011 InstOps.push_back(Chain);
17012 InstOps.push_back(SrcPtr);
17013 InstOps.push_back(DAG.getConstant(ArgSize, MVT::i32));
17014 InstOps.push_back(DAG.getConstant(ArgMode, MVT::i8));
17015 InstOps.push_back(DAG.getConstant(Align, MVT::i32));
17016 SDVTList VTs = DAG.getVTList(getPointerTy(), MVT::Other);
17017 SDValue VAARG = DAG.getMemIntrinsicNode(X86ISD::VAARG_64, dl,
17018 VTs, InstOps, MVT::i64,
17019 MachinePointerInfo(SV),
17021 /*Volatile=*/false,
17023 /*WriteMem=*/true);
17024 Chain = VAARG.getValue(1);
17026 // Load the next argument and return it
17027 return DAG.getLoad(ArgVT, dl,
17030 MachinePointerInfo(),
17031 false, false, false, 0);
17034 static SDValue LowerVACOPY(SDValue Op, const X86Subtarget *Subtarget,
17035 SelectionDAG &DAG) {
17036 // X86-64 va_list is a struct { i32, i32, i8*, i8* }.
17037 assert(Subtarget->is64Bit() && "This code only handles 64-bit va_copy!");
17038 SDValue Chain = Op.getOperand(0);
17039 SDValue DstPtr = Op.getOperand(1);
17040 SDValue SrcPtr = Op.getOperand(2);
17041 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
17042 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
17045 return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr,
17046 DAG.getIntPtrConstant(24), 8, /*isVolatile*/false,
17048 MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
17051 // getTargetVShiftByConstNode - Handle vector element shifts where the shift
17052 // amount is a constant. Takes immediate version of shift as input.
17053 static SDValue getTargetVShiftByConstNode(unsigned Opc, SDLoc dl, MVT VT,
17054 SDValue SrcOp, uint64_t ShiftAmt,
17055 SelectionDAG &DAG) {
17056 MVT ElementType = VT.getVectorElementType();
17058 // Fold this packed shift into its first operand if ShiftAmt is 0.
17062 // Check for ShiftAmt >= element width
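// Logical shifts by at least the element width produce zero, while an
// arithmetic shift saturates to (width - 1), which smears the sign bit across
// the whole element; both cases are handled here rather than emitting an
// out-of-range immediate.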
17063 if (ShiftAmt >= ElementType.getSizeInBits()) {
17064 if (Opc == X86ISD::VSRAI)
17065 ShiftAmt = ElementType.getSizeInBits() - 1;
17067 return DAG.getConstant(0, VT);
17070 assert((Opc == X86ISD::VSHLI || Opc == X86ISD::VSRLI || Opc == X86ISD::VSRAI)
17071 && "Unknown target vector shift-by-constant node");
17073 // Fold this packed vector shift into a build vector if SrcOp is a
17074 // vector of Constants or UNDEFs, and SrcOp valuetype is the same as VT.
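// For example, VSHLI <i32 1, 2, 3, 4>, 2 folds to the build vector
// <i32 4, 8, 12, 16>; undef lanes are passed through unchanged.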
17075 if (VT == SrcOp.getSimpleValueType() &&
17076 ISD::isBuildVectorOfConstantSDNodes(SrcOp.getNode())) {
17077 SmallVector<SDValue, 8> Elts;
17078 unsigned NumElts = SrcOp->getNumOperands();
17079 ConstantSDNode *ND;
17082 default: llvm_unreachable(nullptr);
17083 case X86ISD::VSHLI:
17084 for (unsigned i=0; i!=NumElts; ++i) {
17085 SDValue CurrentOp = SrcOp->getOperand(i);
17086 if (CurrentOp->getOpcode() == ISD::UNDEF) {
17087 Elts.push_back(CurrentOp);
17090 ND = cast<ConstantSDNode>(CurrentOp);
17091 const APInt &C = ND->getAPIntValue();
17092 Elts.push_back(DAG.getConstant(C.shl(ShiftAmt), ElementType));
17095 case X86ISD::VSRLI:
17096 for (unsigned i=0; i!=NumElts; ++i) {
17097 SDValue CurrentOp = SrcOp->getOperand(i);
17098 if (CurrentOp->getOpcode() == ISD::UNDEF) {
17099 Elts.push_back(CurrentOp);
17102 ND = cast<ConstantSDNode>(CurrentOp);
17103 const APInt &C = ND->getAPIntValue();
17104 Elts.push_back(DAG.getConstant(C.lshr(ShiftAmt), ElementType));
17107 case X86ISD::VSRAI:
17108 for (unsigned i=0; i!=NumElts; ++i) {
17109 SDValue CurrentOp = SrcOp->getOperand(i);
17110 if (CurrentOp->getOpcode() == ISD::UNDEF) {
17111 Elts.push_back(CurrentOp);
17114 ND = cast<ConstantSDNode>(CurrentOp);
17115 const APInt &C = ND->getAPIntValue();
17116 Elts.push_back(DAG.getConstant(C.ashr(ShiftAmt), ElementType));
17121 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Elts);
17124 return DAG.getNode(Opc, dl, VT, SrcOp, DAG.getConstant(ShiftAmt, MVT::i8));
17127 // getTargetVShiftNode - Handle vector element shifts where the shift amount
17128 // may or may not be a constant. Takes the immediate version of the shift as input.
17129 static SDValue getTargetVShiftNode(unsigned Opc, SDLoc dl, MVT VT,
17130 SDValue SrcOp, SDValue ShAmt,
17131 SelectionDAG &DAG) {
17132 MVT SVT = ShAmt.getSimpleValueType();
17133 assert((SVT == MVT::i32 || SVT == MVT::i64) && "Unexpected value type!");
17135 // Catch shift-by-constant.
17136 if (ConstantSDNode *CShAmt = dyn_cast<ConstantSDNode>(ShAmt))
17137 return getTargetVShiftByConstNode(Opc, dl, VT, SrcOp,
17138 CShAmt->getZExtValue(), DAG);
17140 // Change opcode to non-immediate version
17142 default: llvm_unreachable("Unknown target vector shift node");
17143 case X86ISD::VSHLI: Opc = X86ISD::VSHL; break;
17144 case X86ISD::VSRLI: Opc = X86ISD::VSRL; break;
17145 case X86ISD::VSRAI: Opc = X86ISD::VSRA; break;
17148 const X86Subtarget &Subtarget =
17149 static_cast<const X86Subtarget &>(DAG.getSubtarget());
17150 if (Subtarget.hasSSE41() && ShAmt.getOpcode() == ISD::ZERO_EXTEND &&
17151 ShAmt.getOperand(0).getSimpleValueType() == MVT::i16) {
17152 // Let the shuffle legalizer expand this shift amount node.
17153 SDValue Op0 = ShAmt.getOperand(0);
17154 Op0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(Op0), MVT::v8i16, Op0);
17155 ShAmt = getShuffleVectorZeroOrUndef(Op0, 0, true, &Subtarget, DAG);
17157 // Need to build a vector containing the shift amount.
17158 // SSE/AVX packed shifts only use the lower 64 bits of the shift count.
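// Illustratively, for an i32 amount c the operand becomes the v4i32
// build_vector <c, 0, undef, undef>, and for an i64 amount the v2i64
// build_vector <c, undef>; the explicit zero in lane 1 keeps the upper half
// of the 64-bit count clear, and the remaining lanes are never read.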
17159 SmallVector<SDValue, 4> ShOps;
17160 ShOps.push_back(ShAmt);
17161 if (SVT == MVT::i32) {
17162 ShOps.push_back(DAG.getConstant(0, SVT));
17163 ShOps.push_back(DAG.getUNDEF(SVT));
17165 ShOps.push_back(DAG.getUNDEF(SVT));
17167 MVT BVT = SVT == MVT::i32 ? MVT::v4i32 : MVT::v2i64;
17168 ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, BVT, ShOps);
17171 // The return type has to be a 128-bit type with the same element
17172 // type as the input type.
17173 MVT EltVT = VT.getVectorElementType();
17174 EVT ShVT = MVT::getVectorVT(EltVT, 128/EltVT.getSizeInBits());
17176 ShAmt = DAG.getNode(ISD::BITCAST, dl, ShVT, ShAmt);
17177 return DAG.getNode(Opc, dl, VT, SrcOp, ShAmt);
17180 /// \brief Return (and \p Op, \p Mask) for compare instructions or
17181 /// (vselect \p Mask, \p Op, \p PreservedSrc) for others along with the
17182 /// necessary casting for \p Mask when lowering masking intrinsics.
17183 static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
17184 SDValue PreservedSrc,
17185 const X86Subtarget *Subtarget,
17186 SelectionDAG &DAG) {
17187 EVT VT = Op.getValueType();
17188 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(),
17189 MVT::i1, VT.getVectorNumElements());
17190 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17191 Mask.getValueType().getSizeInBits());
17194 assert(MaskVT.isSimple() && "invalid mask type");
17196 if (isAllOnes(Mask))
17199 // In the case where MaskVT is v2i1 or v4i1, the low 2 or 4 elements
17200 // are extracted by EXTRACT_SUBVECTOR.
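// Illustrative example: an i8 mask driving a v4i1 MaskVT is first bitcast to
// v8i1 (BitcastVT) and the low 4 lanes are then taken by the
// EXTRACT_SUBVECTOR at index 0 below.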
17201 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
17202 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
17203 DAG.getIntPtrConstant(0));
17205 switch (Op.getOpcode()) {
17207 case X86ISD::PCMPEQM:
17208 case X86ISD::PCMPGTM:
17210 case X86ISD::CMPMU:
17211 return DAG.getNode(ISD::AND, dl, VT, Op, VMask);
17213 if (PreservedSrc.getOpcode() == ISD::UNDEF)
17214 PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
17215 return DAG.getNode(ISD::VSELECT, dl, VT, VMask, Op, PreservedSrc);
17218 /// \brief Creates an SDNode for a predicated scalar operation.
17219 /// \returns (X86vselect \p Mask, \p Op, \p PreservedSrc).
17220 /// The mask comes in as MVT::i8 and it should be truncated
17221 /// to MVT::i1 while lowering masking intrinsics.
17222 /// The main difference between ScalarMaskingNode and VectorMaskingNode is the
17223 /// use of "X86select" instead of "vselect": we simply cannot create a "vselect"
17224 /// node for a scalar instruction.
17225 static SDValue getScalarMaskingNode(SDValue Op, SDValue Mask,
17226 SDValue PreservedSrc,
17227 const X86Subtarget *Subtarget,
17228 SelectionDAG &DAG) {
17229 if (isAllOnes(Mask))
17232 EVT VT = Op.getValueType();
17234 // The mask should be of type MVT::i1
17235 SDValue IMask = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Mask);
17237 if (PreservedSrc.getOpcode() == ISD::UNDEF)
17238 PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
17239 return DAG.getNode(X86ISD::SELECT, dl, VT, IMask, Op, PreservedSrc);
17242 static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
17243 SelectionDAG &DAG) {
17245 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
17246 EVT VT = Op.getValueType();
17247 const IntrinsicData* IntrData = getIntrinsicWithoutChain(IntNo);
17249 switch(IntrData->Type) {
17250 case INTR_TYPE_1OP:
17251 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1));
17252 case INTR_TYPE_2OP:
17253 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1),
17255 case INTR_TYPE_3OP:
17256 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1),
17257 Op.getOperand(2), Op.getOperand(3));
17258 case INTR_TYPE_1OP_MASK_RM: {
17259 SDValue Src = Op.getOperand(1);
17260 SDValue Src0 = Op.getOperand(2);
17261 SDValue Mask = Op.getOperand(3);
17262 SDValue RoundingMode = Op.getOperand(4);
17263 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src,
17265 Mask, Src0, Subtarget, DAG);
17267 case INTR_TYPE_SCALAR_MASK_RM: {
17268 SDValue Src1 = Op.getOperand(1);
17269 SDValue Src2 = Op.getOperand(2);
17270 SDValue Src0 = Op.getOperand(3);
17271 SDValue Mask = Op.getOperand(4);
17272 SDValue RoundingMode = Op.getOperand(5);
17273 return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2,
17275 Mask, Src0, Subtarget, DAG);
17277 case INTR_TYPE_2OP_MASK: {
17278 SDValue Mask = Op.getOperand(4);
17279 SDValue PassThru = Op.getOperand(3);
17280 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
17281 if (IntrWithRoundingModeOpcode != 0) {
17282 unsigned Round = cast<ConstantSDNode>(Op.getOperand(5))->getZExtValue();
17283 if (Round != X86::STATIC_ROUNDING::CUR_DIRECTION) {
17284 return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
17285 dl, Op.getValueType(),
17286 Op.getOperand(1), Op.getOperand(2),
17287 Op.getOperand(3), Op.getOperand(5)),
17288 Mask, PassThru, Subtarget, DAG);
17291 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT,
17294 Mask, PassThru, Subtarget, DAG);
17296 case FMA_OP_MASK: {
17297 SDValue Src1 = Op.getOperand(1);
17298 SDValue Src2 = Op.getOperand(2);
17299 SDValue Src3 = Op.getOperand(3);
17300 SDValue Mask = Op.getOperand(4);
17301 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
17302 if (IntrWithRoundingModeOpcode != 0) {
17303 SDValue Rnd = Op.getOperand(5);
17304 if (cast<ConstantSDNode>(Rnd)->getZExtValue() !=
17305 X86::STATIC_ROUNDING::CUR_DIRECTION)
17306 return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
17307 dl, Op.getValueType(),
17308 Src1, Src2, Src3, Rnd),
17309 Mask, Src1, Subtarget, DAG);
17311 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0,
17312 dl, Op.getValueType(),
17314 Mask, Src1, Subtarget, DAG);
17317 case CMP_MASK_CC: {
17318 // Comparison intrinsics with masks.
17319 // Example of transformation:
17320 // (i8 (int_x86_avx512_mask_pcmpeq_q_128
17321 // (v2i64 %a), (v2i64 %b), (i8 %mask))) ->
17323 // (v8i1 (insert_subvector undef,
17324 // (v2i1 (and (PCMPEQM %a, %b),
17325 // (extract_subvector
17326 // (v8i1 (bitcast %mask)), 0))), 0))))
17327 EVT VT = Op.getOperand(1).getValueType();
17328 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17329 VT.getVectorNumElements());
17330 SDValue Mask = Op.getOperand((IntrData->Type == CMP_MASK_CC) ? 4 : 3);
17331 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17332 Mask.getValueType().getSizeInBits());
17334 if (IntrData->Type == CMP_MASK_CC) {
17335 Cmp = DAG.getNode(IntrData->Opc0, dl, MaskVT, Op.getOperand(1),
17336 Op.getOperand(2), Op.getOperand(3));
17338 assert(IntrData->Type == CMP_MASK && "Unexpected intrinsic type!");
17339 Cmp = DAG.getNode(IntrData->Opc0, dl, MaskVT, Op.getOperand(1),
17342 SDValue CmpMask = getVectorMaskingNode(Cmp, Mask,
17343 DAG.getTargetConstant(0, MaskVT),
17345 SDValue Res = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, BitcastVT,
17346 DAG.getUNDEF(BitcastVT), CmpMask,
17347 DAG.getIntPtrConstant(0));
17348 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
17350 case COMI: { // Comparison intrinsics
17351 ISD::CondCode CC = (ISD::CondCode)IntrData->Opc1;
17352 SDValue LHS = Op.getOperand(1);
17353 SDValue RHS = Op.getOperand(2);
17354 unsigned X86CC = TranslateX86CC(CC, true, LHS, RHS, DAG);
17355 assert(X86CC != X86::COND_INVALID && "Unexpected illegal condition!");
17356 SDValue Cond = DAG.getNode(IntrData->Opc0, dl, MVT::i32, LHS, RHS);
17357 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
17358 DAG.getConstant(X86CC, MVT::i8), Cond);
17359 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
17362 return getTargetVShiftNode(IntrData->Opc0, dl, Op.getSimpleValueType(),
17363 Op.getOperand(1), Op.getOperand(2), DAG);
17365 return getVectorMaskingNode(getTargetVShiftNode(IntrData->Opc0, dl,
17366 Op.getSimpleValueType(),
17368 Op.getOperand(2), DAG),
17369 Op.getOperand(4), Op.getOperand(3), Subtarget,
17371 case COMPRESS_EXPAND_IN_REG: {
17372 SDValue Mask = Op.getOperand(3);
17373 SDValue DataToCompress = Op.getOperand(1);
17374 SDValue PassThru = Op.getOperand(2);
17375 if (isAllOnes(Mask)) // return data as is
17376 return Op.getOperand(1);
17377 EVT VT = Op.getValueType();
17378 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17379 VT.getVectorNumElements());
17380 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17381 Mask.getValueType().getSizeInBits());
17383 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
17384 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
17385 DAG.getIntPtrConstant(0));
17387 return DAG.getNode(IntrData->Opc0, dl, VT, VMask, DataToCompress,
17391 SDValue Mask = Op.getOperand(3);
17392 EVT VT = Op.getValueType();
17393 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17394 VT.getVectorNumElements());
17395 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17396 Mask.getValueType().getSizeInBits());
17398 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
17399 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
17400 DAG.getIntPtrConstant(0));
17401 return DAG.getNode(IntrData->Opc0, dl, VT, VMask, Op.getOperand(1),
17410 default: return SDValue(); // Don't custom lower most intrinsics.
17412 case Intrinsic::x86_avx512_mask_valign_q_512:
17413 case Intrinsic::x86_avx512_mask_valign_d_512:
17414 // Vector source operands are swapped.
17415 return getVectorMaskingNode(DAG.getNode(X86ISD::VALIGN, dl,
17416 Op.getValueType(), Op.getOperand(2),
17419 Op.getOperand(5), Op.getOperand(4),
17422 // ptest and testp intrinsics. The intrinsics these come from are designed to
17423 // return an integer value, not just to set flags, so lower them to the ptest
17424 // or testp pattern followed by a setcc for the result.
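// For reference (Intel SDM semantics, summarized here for clarity): PTEST sets
// ZF when (LHS & RHS) == 0 and CF when (~LHS & RHS) == 0, and TESTP applies
// the same test to just the per-lane sign bits. That is why *testz maps to
// COND_E (ZF=1), *testc to COND_B (CF=1), and *testnzc to COND_A
// (ZF=0 and CF=0) in the switch below.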
17425 case Intrinsic::x86_sse41_ptestz:
17426 case Intrinsic::x86_sse41_ptestc:
17427 case Intrinsic::x86_sse41_ptestnzc:
17428 case Intrinsic::x86_avx_ptestz_256:
17429 case Intrinsic::x86_avx_ptestc_256:
17430 case Intrinsic::x86_avx_ptestnzc_256:
17431 case Intrinsic::x86_avx_vtestz_ps:
17432 case Intrinsic::x86_avx_vtestc_ps:
17433 case Intrinsic::x86_avx_vtestnzc_ps:
17434 case Intrinsic::x86_avx_vtestz_pd:
17435 case Intrinsic::x86_avx_vtestc_pd:
17436 case Intrinsic::x86_avx_vtestnzc_pd:
17437 case Intrinsic::x86_avx_vtestz_ps_256:
17438 case Intrinsic::x86_avx_vtestc_ps_256:
17439 case Intrinsic::x86_avx_vtestnzc_ps_256:
17440 case Intrinsic::x86_avx_vtestz_pd_256:
17441 case Intrinsic::x86_avx_vtestc_pd_256:
17442 case Intrinsic::x86_avx_vtestnzc_pd_256: {
17443 bool IsTestPacked = false;
17446 default: llvm_unreachable("Bad fallthrough in Intrinsic lowering.");
17447 case Intrinsic::x86_avx_vtestz_ps:
17448 case Intrinsic::x86_avx_vtestz_pd:
17449 case Intrinsic::x86_avx_vtestz_ps_256:
17450 case Intrinsic::x86_avx_vtestz_pd_256:
17451 IsTestPacked = true; // Fallthrough
17452 case Intrinsic::x86_sse41_ptestz:
17453 case Intrinsic::x86_avx_ptestz_256:
17455 X86CC = X86::COND_E;
17457 case Intrinsic::x86_avx_vtestc_ps:
17458 case Intrinsic::x86_avx_vtestc_pd:
17459 case Intrinsic::x86_avx_vtestc_ps_256:
17460 case Intrinsic::x86_avx_vtestc_pd_256:
17461 IsTestPacked = true; // Fallthrough
17462 case Intrinsic::x86_sse41_ptestc:
17463 case Intrinsic::x86_avx_ptestc_256:
17465 X86CC = X86::COND_B;
17467 case Intrinsic::x86_avx_vtestnzc_ps:
17468 case Intrinsic::x86_avx_vtestnzc_pd:
17469 case Intrinsic::x86_avx_vtestnzc_ps_256:
17470 case Intrinsic::x86_avx_vtestnzc_pd_256:
17471 IsTestPacked = true; // Fallthrough
17472 case Intrinsic::x86_sse41_ptestnzc:
17473 case Intrinsic::x86_avx_ptestnzc_256:
17475 X86CC = X86::COND_A;
17479 SDValue LHS = Op.getOperand(1);
17480 SDValue RHS = Op.getOperand(2);
17481 unsigned TestOpc = IsTestPacked ? X86ISD::TESTP : X86ISD::PTEST;
17482 SDValue Test = DAG.getNode(TestOpc, dl, MVT::i32, LHS, RHS);
17483 SDValue CC = DAG.getConstant(X86CC, MVT::i8);
17484 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, CC, Test);
17485 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
17487 case Intrinsic::x86_avx512_kortestz_w:
17488 case Intrinsic::x86_avx512_kortestc_w: {
17489 unsigned X86CC = (IntNo == Intrinsic::x86_avx512_kortestz_w) ? X86::COND_E : X86::COND_B;
17490 SDValue LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1, Op.getOperand(1));
17491 SDValue RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1, Op.getOperand(2));
17492 SDValue CC = DAG.getConstant(X86CC, MVT::i8);
17493 SDValue Test = DAG.getNode(X86ISD::KORTEST, dl, MVT::i32, LHS, RHS);
17494 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i1, CC, Test);
17495 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
17498 case Intrinsic::x86_sse42_pcmpistria128:
17499 case Intrinsic::x86_sse42_pcmpestria128:
17500 case Intrinsic::x86_sse42_pcmpistric128:
17501 case Intrinsic::x86_sse42_pcmpestric128:
17502 case Intrinsic::x86_sse42_pcmpistrio128:
17503 case Intrinsic::x86_sse42_pcmpestrio128:
17504 case Intrinsic::x86_sse42_pcmpistris128:
17505 case Intrinsic::x86_sse42_pcmpestris128:
17506 case Intrinsic::x86_sse42_pcmpistriz128:
17507 case Intrinsic::x86_sse42_pcmpestriz128: {
17511 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
17512 case Intrinsic::x86_sse42_pcmpistria128:
17513 Opcode = X86ISD::PCMPISTRI;
17514 X86CC = X86::COND_A;
17516 case Intrinsic::x86_sse42_pcmpestria128:
17517 Opcode = X86ISD::PCMPESTRI;
17518 X86CC = X86::COND_A;
17520 case Intrinsic::x86_sse42_pcmpistric128:
17521 Opcode = X86ISD::PCMPISTRI;
17522 X86CC = X86::COND_B;
17524 case Intrinsic::x86_sse42_pcmpestric128:
17525 Opcode = X86ISD::PCMPESTRI;
17526 X86CC = X86::COND_B;
17528 case Intrinsic::x86_sse42_pcmpistrio128:
17529 Opcode = X86ISD::PCMPISTRI;
17530 X86CC = X86::COND_O;
17532 case Intrinsic::x86_sse42_pcmpestrio128:
17533 Opcode = X86ISD::PCMPESTRI;
17534 X86CC = X86::COND_O;
17536 case Intrinsic::x86_sse42_pcmpistris128:
17537 Opcode = X86ISD::PCMPISTRI;
17538 X86CC = X86::COND_S;
17540 case Intrinsic::x86_sse42_pcmpestris128:
17541 Opcode = X86ISD::PCMPESTRI;
17542 X86CC = X86::COND_S;
17544 case Intrinsic::x86_sse42_pcmpistriz128:
17545 Opcode = X86ISD::PCMPISTRI;
17546 X86CC = X86::COND_E;
17548 case Intrinsic::x86_sse42_pcmpestriz128:
17549 Opcode = X86ISD::PCMPESTRI;
17550 X86CC = X86::COND_E;
17553 SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
17554 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
17555 SDValue PCMP = DAG.getNode(Opcode, dl, VTs, NewOps);
17556 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
17557 DAG.getConstant(X86CC, MVT::i8),
17558 SDValue(PCMP.getNode(), 1));
17559 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
17562 case Intrinsic::x86_sse42_pcmpistri128:
17563 case Intrinsic::x86_sse42_pcmpestri128: {
17565 if (IntNo == Intrinsic::x86_sse42_pcmpistri128)
17566 Opcode = X86ISD::PCMPISTRI;
17568 Opcode = X86ISD::PCMPESTRI;
17570 SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
17571 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
17572 return DAG.getNode(Opcode, dl, VTs, NewOps);
17577 static SDValue getGatherNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
17578 SDValue Src, SDValue Mask, SDValue Base,
17579 SDValue Index, SDValue ScaleOp, SDValue Chain,
17580 const X86Subtarget * Subtarget) {
17582 ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp);
17583 assert(C && "Invalid scale type");
17584 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8);
17585 EVT MaskVT = MVT::getVectorVT(MVT::i1,
17586 Index.getSimpleValueType().getVectorNumElements());
17588 ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
17590 MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), MaskVT);
17592 MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask);
17593 SDVTList VTs = DAG.getVTList(Op.getValueType(), MaskVT, MVT::Other);
17594 SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
17595 SDValue Segment = DAG.getRegister(0, MVT::i32);
17596 if (Src.getOpcode() == ISD::UNDEF)
17597 Src = getZeroVector(Op.getValueType(), Subtarget, DAG, dl);
17598 SDValue Ops[] = {Src, MaskInReg, Base, Scale, Index, Disp, Segment, Chain};
17599 SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
17600 SDValue RetOps[] = { SDValue(Res, 0), SDValue(Res, 2) };
17601 return DAG.getMergeValues(RetOps, dl);
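// Note on the operand encoding used by the gather/scatter nodes above and
// below (illustrative, following the usual x86 VSIB memory-operand form):
// each active lane i accesses memory at Base + Index[i] * Scale + Disp, with
// Disp forced to 0 and the Segment operand left as register 0 (unused).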
17604 static SDValue getScatterNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
17605 SDValue Src, SDValue Mask, SDValue Base,
17606 SDValue Index, SDValue ScaleOp, SDValue Chain) {
17608 ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp);
17609 assert(C && "Invalid scale type");
17610 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8);
17611 SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
17612 SDValue Segment = DAG.getRegister(0, MVT::i32);
17613 EVT MaskVT = MVT::getVectorVT(MVT::i1,
17614 Index.getSimpleValueType().getVectorNumElements());
17616 ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
17618 MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), MaskVT);
17620 MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask);
17621 SDVTList VTs = DAG.getVTList(MaskVT, MVT::Other);
17622 SDValue Ops[] = {Base, Scale, Index, Disp, Segment, MaskInReg, Src, Chain};
17623 SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
17624 return SDValue(Res, 1);
17627 static SDValue getPrefetchNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
17628 SDValue Mask, SDValue Base, SDValue Index,
17629 SDValue ScaleOp, SDValue Chain) {
17631 ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp);
17632 assert(C && "Invalid scale type");
17633 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8);
17634 SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
17635 SDValue Segment = DAG.getRegister(0, MVT::i32);
17637 MVT::getVectorVT(MVT::i1, Index.getSimpleValueType().getVectorNumElements());
17639 ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
17641 MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), MaskVT);
17643 MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask);
17644 //SDVTList VTs = DAG.getVTList(MVT::Other);
17645 SDValue Ops[] = {MaskInReg, Base, Scale, Index, Disp, Segment, Chain};
17646 SDNode *Res = DAG.getMachineNode(Opc, dl, MVT::Other, Ops);
17647 return SDValue(Res, 0);
17650 // getReadPerformanceCounter - Handles the lowering of builtin intrinsics that
17651 // read performance monitor counters (x86_rdpmc).
17652 static void getReadPerformanceCounter(SDNode *N, SDLoc DL,
17653 SelectionDAG &DAG, const X86Subtarget *Subtarget,
17654 SmallVectorImpl<SDValue> &Results) {
17655 assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
17656 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
17659 // The ECX register is used to select the index of the performance counter
17661 SDValue Chain = DAG.getCopyToReg(N->getOperand(0), DL, X86::ECX,
17663 SDValue rd = DAG.getNode(X86ISD::RDPMC_DAG, DL, Tys, Chain);
17665 // Reads the content of a 64-bit performance counter and returns it in the
17666 // registers EDX:EAX.
17667 if (Subtarget->is64Bit()) {
17668 LO = DAG.getCopyFromReg(rd, DL, X86::RAX, MVT::i64, rd.getValue(1));
17669 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
17672 LO = DAG.getCopyFromReg(rd, DL, X86::EAX, MVT::i32, rd.getValue(1));
17673 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
17676 Chain = HI.getValue(1);
17678 if (Subtarget->is64Bit()) {
17679 // The EAX register is loaded with the low-order 32 bits. The EDX register
17680 // is loaded with the supported high-order bits of the counter.
17681 SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
17682 DAG.getConstant(32, MVT::i8));
17683 Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
17684 Results.push_back(Chain);
17688 // Use a buildpair to merge the two 32-bit values into a 64-bit one.
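// Illustratively, the merged value is ((uint64_t)HI << 32) | LO, i.e. EDX:EAX
// viewed as one 64-bit counter value.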
17689 SDValue Ops[] = { LO, HI };
17690 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
17691 Results.push_back(Pair);
17692 Results.push_back(Chain);
17695 // getReadTimeStampCounter - Handles the lowering of builtin intrinsics that
17696 // read the time stamp counter (x86_rdtsc and x86_rdtscp). This function is
17697 // also used to custom lower READCYCLECOUNTER nodes.
17698 static void getReadTimeStampCounter(SDNode *N, SDLoc DL, unsigned Opcode,
17699 SelectionDAG &DAG, const X86Subtarget *Subtarget,
17700 SmallVectorImpl<SDValue> &Results) {
17701 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
17702 SDValue rd = DAG.getNode(Opcode, DL, Tys, N->getOperand(0));
17705 // The processor's time-stamp counter (a 64-bit MSR) is stored into the
17706 // EDX:EAX registers. EDX is loaded with the high-order 32 bits of the MSR
17707 // and the EAX register is loaded with the low-order 32 bits.
17708 if (Subtarget->is64Bit()) {
17709 LO = DAG.getCopyFromReg(rd, DL, X86::RAX, MVT::i64, rd.getValue(1));
17710 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
17713 LO = DAG.getCopyFromReg(rd, DL, X86::EAX, MVT::i32, rd.getValue(1));
17714 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
17717 SDValue Chain = HI.getValue(1);
17719 if (Opcode == X86ISD::RDTSCP_DAG) {
17720 assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
17722 // The RDTSCP instruction loads the IA32_TSC_AUX MSR (address C000_0103H) into
17723 // the ECX register. Add 'ecx' explicitly to the chain.
17724 SDValue ecx = DAG.getCopyFromReg(Chain, DL, X86::ECX, MVT::i32,
17726 // Explicitly store the content of ECX at the location passed as input
17727 // to the 'rdtscp' intrinsic.
17728 Chain = DAG.getStore(ecx.getValue(1), DL, ecx, N->getOperand(2),
17729 MachinePointerInfo(), false, false, 0);
17732 if (Subtarget->is64Bit()) {
17733 // The EDX register is loaded with the high-order 32 bits of the MSR, and
17734 // the EAX register is loaded with the low-order 32 bits.
17735 SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
17736 DAG.getConstant(32, MVT::i8));
17737 Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
17738 Results.push_back(Chain);
17742 // Use a buildpair to merge the two 32-bit values into a 64-bit one.
17743 SDValue Ops[] = { LO, HI };
17744 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
17745 Results.push_back(Pair);
17746 Results.push_back(Chain);
17749 static SDValue LowerREADCYCLECOUNTER(SDValue Op, const X86Subtarget *Subtarget,
17750 SelectionDAG &DAG) {
17751 SmallVector<SDValue, 2> Results;
17753 getReadTimeStampCounter(Op.getNode(), DL, X86ISD::RDTSC_DAG, DAG, Subtarget,
17755 return DAG.getMergeValues(Results, DL);
17759 static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
17760 SelectionDAG &DAG) {
17761 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
17763 const IntrinsicData* IntrData = getIntrinsicWithChain(IntNo);
17768 switch(IntrData->Type) {
17770 llvm_unreachable("Unknown Intrinsic Type");
17774 // Emit the node with the right value type.
17775 SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Glue, MVT::Other);
17776 SDValue Result = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
17778 // If the value returned by RDRAND/RDSEED was valid (CF=1), return 1.
17779 // Otherwise return the value from Rand, which is always 0, cast to i32.
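// (For clarity: RDRAND/RDSEED set CF=1 when they deliver a valid random value
// and clear the destination with CF=0 on failure, so the CMOV below selects
// the constant 1 under COND_B and otherwise falls back to the zero-extended
// result, which is 0 in the failure case.)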
17780 SDValue Ops[] = { DAG.getZExtOrTrunc(Result, dl, Op->getValueType(1)),
17781 DAG.getConstant(1, Op->getValueType(1)),
17782 DAG.getConstant(X86::COND_B, MVT::i32),
17783 SDValue(Result.getNode(), 1) };
17784 SDValue isValid = DAG.getNode(X86ISD::CMOV, dl,
17785 DAG.getVTList(Op->getValueType(1), MVT::Glue),
17788 // Return { result, isValid, chain }.
17789 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result, isValid,
17790 SDValue(Result.getNode(), 2));
17793 //gather(v1, mask, index, base, scale);
17794 SDValue Chain = Op.getOperand(0);
17795 SDValue Src = Op.getOperand(2);
17796 SDValue Base = Op.getOperand(3);
17797 SDValue Index = Op.getOperand(4);
17798 SDValue Mask = Op.getOperand(5);
17799 SDValue Scale = Op.getOperand(6);
17800 return getGatherNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index, Scale, Chain,
17804 //scatter(base, mask, index, v1, scale);
17805 SDValue Chain = Op.getOperand(0);
17806 SDValue Base = Op.getOperand(2);
17807 SDValue Mask = Op.getOperand(3);
17808 SDValue Index = Op.getOperand(4);
17809 SDValue Src = Op.getOperand(5);
17810 SDValue Scale = Op.getOperand(6);
17811 return getScatterNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index, Scale, Chain);
17814 SDValue Hint = Op.getOperand(6);
17816 if (dyn_cast<ConstantSDNode> (Hint) == nullptr ||
17817 (HintVal = dyn_cast<ConstantSDNode> (Hint)->getZExtValue()) > 1)
17818 llvm_unreachable("Wrong prefetch hint in intrinsic: should be 0 or 1");
17819 unsigned Opcode = (HintVal ? IntrData->Opc1 : IntrData->Opc0);
17820 SDValue Chain = Op.getOperand(0);
17821 SDValue Mask = Op.getOperand(2);
17822 SDValue Index = Op.getOperand(3);
17823 SDValue Base = Op.getOperand(4);
17824 SDValue Scale = Op.getOperand(5);
17825 return getPrefetchNode(Opcode, Op, DAG, Mask, Base, Index, Scale, Chain);
17827 // Read Time Stamp Counter (RDTSC) and Processor ID (RDTSCP).
17829 SmallVector<SDValue, 2> Results;
17830 getReadTimeStampCounter(Op.getNode(), dl, IntrData->Opc0, DAG, Subtarget, Results);
17831 return DAG.getMergeValues(Results, dl);
17833 // Read Performance Monitoring Counters.
17835 SmallVector<SDValue, 2> Results;
17836 getReadPerformanceCounter(Op.getNode(), dl, DAG, Subtarget, Results);
17837 return DAG.getMergeValues(Results, dl);
17839 // XTEST intrinsics.
17841 SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
17842 SDValue InTrans = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
17843 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
17844 DAG.getConstant(X86::COND_NE, MVT::i8),
17846 SDValue Ret = DAG.getNode(ISD::ZERO_EXTEND, dl, Op->getValueType(0), SetCC);
17847 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(),
17848 Ret, SDValue(InTrans.getNode(), 1));
17852 SmallVector<SDValue, 2> Results;
17853 SDVTList CFVTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
17854 SDVTList VTs = DAG.getVTList(Op.getOperand(3)->getValueType(0), MVT::Other);
17855 SDValue GenCF = DAG.getNode(X86ISD::ADD, dl, CFVTs, Op.getOperand(2),
17856 DAG.getConstant(-1, MVT::i8));
17857 SDValue Res = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(3),
17858 Op.getOperand(4), GenCF.getValue(1));
17859 SDValue Store = DAG.getStore(Op.getOperand(0), dl, Res.getValue(0),
17860 Op.getOperand(5), MachinePointerInfo(),
17862 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
17863 DAG.getConstant(X86::COND_B, MVT::i8),
17865 Results.push_back(SetCC);
17866 Results.push_back(Store);
17867 return DAG.getMergeValues(Results, dl);
17869 case COMPRESS_TO_MEM: {
17871 SDValue Mask = Op.getOperand(4);
17872 SDValue DataToCompress = Op.getOperand(3);
17873 SDValue Addr = Op.getOperand(2);
17874 SDValue Chain = Op.getOperand(0);
17876 if (isAllOnes(Mask)) // return just a store
17877 return DAG.getStore(Chain, dl, DataToCompress, Addr,
17878 MachinePointerInfo(), false, false, 0);
17880 EVT VT = DataToCompress.getValueType();
17881 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17882 VT.getVectorNumElements());
17883 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17884 Mask.getValueType().getSizeInBits());
17885 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
17886 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
17887 DAG.getIntPtrConstant(0));
17889 SDValue Compressed = DAG.getNode(IntrData->Opc0, dl, VT, VMask,
17890 DataToCompress, DAG.getUNDEF(VT));
17891 return DAG.getStore(Chain, dl, Compressed, Addr,
17892 MachinePointerInfo(), false, false, 0);
17894 case EXPAND_FROM_MEM: {
17896 SDValue Mask = Op.getOperand(4);
17897 SDValue PathThru = Op.getOperand(3);
17898 SDValue Addr = Op.getOperand(2);
17899 SDValue Chain = Op.getOperand(0);
17900 EVT VT = Op.getValueType();
17902 if (isAllOnes(Mask)) // return just a load
17903 return DAG.getLoad(VT, dl, Chain, Addr, MachinePointerInfo(), false, false,
17905 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17906 VT.getVectorNumElements());
17907 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17908 Mask.getValueType().getSizeInBits());
17909 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
17910 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
17911 DAG.getIntPtrConstant(0));
17913 SDValue DataToExpand = DAG.getLoad(VT, dl, Chain, Addr, MachinePointerInfo(),
17914 false, false, false, 0);
17916 SmallVector<SDValue, 2> Results;
17917 Results.push_back(DAG.getNode(IntrData->Opc0, dl, VT, VMask, DataToExpand,
17919 Results.push_back(Chain);
17920 return DAG.getMergeValues(Results, dl);
17925 SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op,
17926 SelectionDAG &DAG) const {
17927 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
17928 MFI->setReturnAddressIsTaken(true);
17930 if (verifyReturnAddressArgumentIsConstant(Op, DAG))
17933 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
17935 EVT PtrVT = getPointerTy();
17938 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
17939 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
17940 SDValue Offset = DAG.getConstant(RegInfo->getSlotSize(), PtrVT);
17941 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
17942 DAG.getNode(ISD::ADD, dl, PtrVT,
17943 FrameAddr, Offset),
17944 MachinePointerInfo(), false, false, false, 0);
17947 // Just load the return address.
17948 SDValue RetAddrFI = getReturnAddressFrameIndex(DAG);
17949 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
17950 RetAddrFI, MachinePointerInfo(), false, false, false, 0);
17953 SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
17954 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
17955 MFI->setFrameAddressIsTaken(true);
17957 EVT VT = Op.getValueType();
17958 SDLoc dl(Op); // FIXME probably not meaningful
17959 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
17960 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
17961 unsigned FrameReg = RegInfo->getPtrSizedFrameRegister(
17962 DAG.getMachineFunction());
17963 assert(((FrameReg == X86::RBP && VT == MVT::i64) ||
17964 (FrameReg == X86::EBP && VT == MVT::i32)) &&
17965 "Invalid Frame Register!");
17966 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
17968 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
17969 MachinePointerInfo(),
17970 false, false, false, 0);
17974 // FIXME? Maybe this could be a TableGen attribute on some registers and
17975 // this table could be generated automatically from RegInfo.
17976 unsigned X86TargetLowering::getRegisterByName(const char* RegName,
17978 unsigned Reg = StringSwitch<unsigned>(RegName)
17979 .Case("esp", X86::ESP)
17980 .Case("rsp", X86::RSP)
17984 report_fatal_error("Invalid register name global variable");
17987 SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op,
17988 SelectionDAG &DAG) const {
17989 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
17990 return DAG.getIntPtrConstant(2 * RegInfo->getSlotSize());
17993 SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
17994 SDValue Chain = Op.getOperand(0);
17995 SDValue Offset = Op.getOperand(1);
17996 SDValue Handler = Op.getOperand(2);
17999 EVT PtrVT = getPointerTy();
18000 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
18001 unsigned FrameReg = RegInfo->getFrameRegister(DAG.getMachineFunction());
18002 assert(((FrameReg == X86::RBP && PtrVT == MVT::i64) ||
18003 (FrameReg == X86::EBP && PtrVT == MVT::i32)) &&
18004 "Invalid Frame Register!");
18005 SDValue Frame = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, PtrVT);
18006 unsigned StoreAddrReg = (PtrVT == MVT::i64) ? X86::RCX : X86::ECX;
18008 SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, Frame,
18009 DAG.getIntPtrConstant(RegInfo->getSlotSize()));
18010 StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, StoreAddr, Offset);
18011 Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo(),
18013 Chain = DAG.getCopyToReg(Chain, dl, StoreAddrReg, StoreAddr);
18015 return DAG.getNode(X86ISD::EH_RETURN, dl, MVT::Other, Chain,
18016 DAG.getRegister(StoreAddrReg, PtrVT));
18019 SDValue X86TargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
18020 SelectionDAG &DAG) const {
18022 return DAG.getNode(X86ISD::EH_SJLJ_SETJMP, DL,
18023 DAG.getVTList(MVT::i32, MVT::Other),
18024 Op.getOperand(0), Op.getOperand(1));
18027 SDValue X86TargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
18028 SelectionDAG &DAG) const {
18030 return DAG.getNode(X86ISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
18031 Op.getOperand(0), Op.getOperand(1));
18034 static SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) {
18035 return Op.getOperand(0);
18038 SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
18039 SelectionDAG &DAG) const {
18040 SDValue Root = Op.getOperand(0);
18041 SDValue Trmp = Op.getOperand(1); // trampoline
18042 SDValue FPtr = Op.getOperand(2); // nested function
18043 SDValue Nest = Op.getOperand(3); // 'nest' parameter value
18046 const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
18047 const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
18049 if (Subtarget->is64Bit()) {
18050 SDValue OutChains[6];
18052 // Large code-model.
18053 const unsigned char JMP64r = 0xFF; // 64-bit jmp through register opcode.
18054 const unsigned char MOV64ri = 0xB8; // X86::MOV64ri opcode.
18056 const unsigned char N86R10 = TRI->getEncodingValue(X86::R10) & 0x7;
18057 const unsigned char N86R11 = TRI->getEncodingValue(X86::R11) & 0x7;
18059 const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix
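// Illustrative decoding of the bytes stored below (offsets relative to Trmp):
//   +0:  49 BB imm64(FPtr)   movabsq $FPtr, %r11
//   +10: 49 BA imm64(Nest)   movabsq $Nest, %r10
//   +20: 49 FF E3            jmpq    *%r11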
18061 // Load the pointer to the nested function into R11.
18062 unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11
18063 SDValue Addr = Trmp;
18064 OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
18065 Addr, MachinePointerInfo(TrmpAddr),
18068 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18069 DAG.getConstant(2, MVT::i64));
18070 OutChains[1] = DAG.getStore(Root, dl, FPtr, Addr,
18071 MachinePointerInfo(TrmpAddr, 2),
18074 // Load the 'nest' parameter value into R10.
18075 // R10 is specified in X86CallingConv.td
18076 OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10
18077 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18078 DAG.getConstant(10, MVT::i64));
18079 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
18080 Addr, MachinePointerInfo(TrmpAddr, 10),
18083 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18084 DAG.getConstant(12, MVT::i64));
18085 OutChains[3] = DAG.getStore(Root, dl, Nest, Addr,
18086 MachinePointerInfo(TrmpAddr, 12),
18089 // Jump to the nested function.
18090 OpCode = (JMP64r << 8) | REX_WB; // jmpq *...
18091 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18092 DAG.getConstant(20, MVT::i64));
18093 OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
18094 Addr, MachinePointerInfo(TrmpAddr, 20),
18097 unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11
18098 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18099 DAG.getConstant(22, MVT::i64));
18100 OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, MVT::i8), Addr,
18101 MachinePointerInfo(TrmpAddr, 22),
18104 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
18106 const Function *Func =
18107 cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue());
18108 CallingConv::ID CC = Func->getCallingConv();
18113 llvm_unreachable("Unsupported calling convention");
18114 case CallingConv::C:
18115 case CallingConv::X86_StdCall: {
18116 // Pass 'nest' parameter in ECX.
18117 // Must be kept in sync with X86CallingConv.td
18118 NestReg = X86::ECX;
18120 // Check that ECX wasn't needed by an 'inreg' parameter.
18121 FunctionType *FTy = Func->getFunctionType();
18122 const AttributeSet &Attrs = Func->getAttributes();
18124 if (!Attrs.isEmpty() && !Func->isVarArg()) {
18125 unsigned InRegCount = 0;
18128 for (FunctionType::param_iterator I = FTy->param_begin(),
18129 E = FTy->param_end(); I != E; ++I, ++Idx)
18130 if (Attrs.hasAttribute(Idx, Attribute::InReg))
18131 // FIXME: should only count parameters that are lowered to integers.
18132 InRegCount += (TD->getTypeSizeInBits(*I) + 31) / 32;
18134 if (InRegCount > 2) {
18135 report_fatal_error("Nest register in use - reduce number of inreg"
18141 case CallingConv::X86_FastCall:
18142 case CallingConv::X86_ThisCall:
18143 case CallingConv::Fast:
18144 // Pass 'nest' parameter in EAX.
18145 // Must be kept in sync with X86CallingConv.td
18146 NestReg = X86::EAX;
18150 SDValue OutChains[4];
18151 SDValue Addr, Disp;
18153 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
18154 DAG.getConstant(10, MVT::i32));
18155 Disp = DAG.getNode(ISD::SUB, dl, MVT::i32, FPtr, Addr);
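// Disp is the rel32 operand of the final jmp emitted below; the 32-bit
// trampoline is, illustratively:
//   +0: B8+reg imm32(Nest)   movl $Nest, %ecx (or %eax)
//   +5: E9 rel32             jmp  FPtr
// and a rel32 jump is taken relative to the end of the 10-byte sequence,
// hence Disp = FPtr - (Trmp + 10).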
18157 // This is storing the opcode for MOV32ri.
18158 const unsigned char MOV32ri = 0xB8; // X86::MOV32ri's opcode byte.
18159 const unsigned char N86Reg = TRI->getEncodingValue(NestReg) & 0x7;
18160 OutChains[0] = DAG.getStore(Root, dl,
18161 DAG.getConstant(MOV32ri|N86Reg, MVT::i8),
18162 Trmp, MachinePointerInfo(TrmpAddr),
18165 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
18166 DAG.getConstant(1, MVT::i32));
18167 OutChains[1] = DAG.getStore(Root, dl, Nest, Addr,
18168 MachinePointerInfo(TrmpAddr, 1),
18171 const unsigned char JMP = 0xE9; // jmp <32bit dst> opcode.
18172 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
18173 DAG.getConstant(5, MVT::i32));
18174 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(JMP, MVT::i8), Addr,
18175 MachinePointerInfo(TrmpAddr, 5),
18178 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
18179 DAG.getConstant(6, MVT::i32));
18180 OutChains[3] = DAG.getStore(Root, dl, Disp, Addr,
18181 MachinePointerInfo(TrmpAddr, 6),
18184 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
18188 SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
18189 SelectionDAG &DAG) const {
18191 The rounding mode is in bits 11:10 of FPSR, and has the following
18193 00 Round to nearest
18198 FLT_ROUNDS, on the other hand, expects the following:
18205 To perform the conversion, we do:
18206 (((((FPSR & 0x800) >> 11) | ((FPSR & 0x400) >> 9)) + 1) & 3)
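 For reference, "FPSR" above denotes the x87 control word that FNSTCW saves
 below. The standard mappings are: RC 00 -> nearest (FLT_ROUNDS 1),
 RC 01 -> -inf (3), RC 10 -> +inf (2), RC 11 -> toward zero (0).
 Worked example for RC == 10 (round toward +inf): bit 11 is set and bit 10 is
 clear, so ((0x800 >> 11) | 0) == 1, adding 1 gives 2, and masking with 3
 leaves 2, the FLT_ROUNDS value for rounding toward +inf.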
18209 MachineFunction &MF = DAG.getMachineFunction();
18210 const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
18211 unsigned StackAlignment = TFI.getStackAlignment();
18212 MVT VT = Op.getSimpleValueType();
18215 // Save FP Control Word to stack slot
18216 int SSFI = MF.getFrameInfo()->CreateStackObject(2, StackAlignment, false);
18217 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
18219 MachineMemOperand *MMO =
18220 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
18221 MachineMemOperand::MOStore, 2, 2);
18223 SDValue Ops[] = { DAG.getEntryNode(), StackSlot };
18224 SDValue Chain = DAG.getMemIntrinsicNode(X86ISD::FNSTCW16m, DL,
18225 DAG.getVTList(MVT::Other),
18226 Ops, MVT::i16, MMO);
18228 // Load FP Control Word from stack slot
18229 SDValue CWD = DAG.getLoad(MVT::i16, DL, Chain, StackSlot,
18230 MachinePointerInfo(), false, false, false, 0);
18232 // Transform as necessary
18234 DAG.getNode(ISD::SRL, DL, MVT::i16,
18235 DAG.getNode(ISD::AND, DL, MVT::i16,
18236 CWD, DAG.getConstant(0x800, MVT::i16)),
18237 DAG.getConstant(11, MVT::i8));
18239 DAG.getNode(ISD::SRL, DL, MVT::i16,
18240 DAG.getNode(ISD::AND, DL, MVT::i16,
18241 CWD, DAG.getConstant(0x400, MVT::i16)),
18242 DAG.getConstant(9, MVT::i8));
18245 DAG.getNode(ISD::AND, DL, MVT::i16,
18246 DAG.getNode(ISD::ADD, DL, MVT::i16,
18247 DAG.getNode(ISD::OR, DL, MVT::i16, CWD1, CWD2),
18248 DAG.getConstant(1, MVT::i16)),
18249 DAG.getConstant(3, MVT::i16));
18251 return DAG.getNode((VT.getSizeInBits() < 16 ?
18252 ISD::TRUNCATE : ISD::ZERO_EXTEND), DL, VT, RetVal);
18255 static SDValue LowerCTLZ(SDValue Op, SelectionDAG &DAG) {
18256 MVT VT = Op.getSimpleValueType();
18258 unsigned NumBits = VT.getSizeInBits();
18261 Op = Op.getOperand(0);
18262 if (VT == MVT::i8) {
18263 // Zero extend to i32 since there is not an i8 bsr.
18265 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
18268 // Issue a bsr (scan bits in reverse) which also sets EFLAGS.
18269 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
18270 Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);
18272 // If src is zero (i.e. bsr sets ZF), returns NumBits.
18275 DAG.getConstant(NumBits+NumBits-1, OpVT),
18276 DAG.getConstant(X86::COND_E, MVT::i8),
18279 Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops);
18281 // Finally xor with NumBits-1.
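// Illustrative example: for an i32 input 0x00008000, BSR yields 15 (the index
// of the highest set bit) and 15 ^ 31 == 31 - 15 == 16 == ctlz(0x00008000);
// the XOR is equivalent to the subtraction because BSR never exceeds
// NumBits-1.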
18282 Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT));
18285 Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
18289 static SDValue LowerCTLZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) {
18290 MVT VT = Op.getSimpleValueType();
18292 unsigned NumBits = VT.getSizeInBits();
18295 Op = Op.getOperand(0);
18296 if (VT == MVT::i8) {
18297 // Zero extend to i32 since there is not an i8 bsr.
18299 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
18302 // Issue a bsr (scan bits in reverse).
18303 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
18304 Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);
18306 // And xor with NumBits-1.
18307 Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT));
18310 Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
18314 static SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) {
18315 MVT VT = Op.getSimpleValueType();
18316 unsigned NumBits = VT.getSizeInBits();
18318 Op = Op.getOperand(0);
18320 // Issue a bsf (scan bits forward) which also sets EFLAGS.
18321 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
18322 Op = DAG.getNode(X86ISD::BSF, dl, VTs, Op);
18324 // If src is zero (i.e. bsf sets ZF), returns NumBits.
18327 DAG.getConstant(NumBits, VT),
18328 DAG.getConstant(X86::COND_E, MVT::i8),
18331 return DAG.getNode(X86ISD::CMOV, dl, VT, Ops);
18334 // Lower256IntArith - Break a 256-bit integer operation into two new 128-bit
18335 // ones, and then concatenate the result back.
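// Illustrative example: a v8i32 add becomes
//   concat_vectors (add (extract_subvector LHS, 0), (extract_subvector RHS, 0)),
//                  (add (extract_subvector LHS, 4), (extract_subvector RHS, 4))
// where each half is a v4i32 operation the 128-bit ISA can handle directly.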
18336 static SDValue Lower256IntArith(SDValue Op, SelectionDAG &DAG) {
18337 MVT VT = Op.getSimpleValueType();
18339 assert(VT.is256BitVector() && VT.isInteger() &&
18340 "Unsupported value type for operation");
18342 unsigned NumElems = VT.getVectorNumElements();
18345 // Extract the LHS vectors
18346 SDValue LHS = Op.getOperand(0);
18347 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
18348 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);
18350 // Extract the RHS vectors
18351 SDValue RHS = Op.getOperand(1);
18352 SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, dl);
18353 SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, dl);
18355 MVT EltVT = VT.getVectorElementType();
18356 MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
18358 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
18359 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1),
18360 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2));
18363 static SDValue LowerADD(SDValue Op, SelectionDAG &DAG) {
18364 assert(Op.getSimpleValueType().is256BitVector() &&
18365 Op.getSimpleValueType().isInteger() &&
18366 "Only handle AVX 256-bit vector integer operation");
18367 return Lower256IntArith(Op, DAG);
18370 static SDValue LowerSUB(SDValue Op, SelectionDAG &DAG) {
18371 assert(Op.getSimpleValueType().is256BitVector() &&
18372 Op.getSimpleValueType().isInteger() &&
18373 "Only handle AVX 256-bit vector integer operation");
18374 return Lower256IntArith(Op, DAG);
18377 static SDValue LowerMUL(SDValue Op, const X86Subtarget *Subtarget,
18378 SelectionDAG &DAG) {
18380 MVT VT = Op.getSimpleValueType();
18382 // Decompose 256-bit ops into smaller 128-bit ops.
18383 if (VT.is256BitVector() && !Subtarget->hasInt256())
18384 return Lower256IntArith(Op, DAG);
18386 SDValue A = Op.getOperand(0);
18387 SDValue B = Op.getOperand(1);
18389 // Lower v4i32 mul as 2x shuffle, 2x pmuludq, 2x shuffle.
18390 if (VT == MVT::v4i32) {
18391 assert(Subtarget->hasSSE2() && !Subtarget->hasSSE41() &&
18392 "Should not custom lower when pmuldq is available!");
18394 // Extract the odd parts.
18395 static const int UnpackMask[] = { 1, -1, 3, -1 };
18396 SDValue Aodds = DAG.getVectorShuffle(VT, dl, A, A, UnpackMask);
18397 SDValue Bodds = DAG.getVectorShuffle(VT, dl, B, B, UnpackMask);
18399 // Multiply the even parts.
18400 SDValue Evens = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64, A, B);
18401 // Now multiply odd parts.
18402 SDValue Odds = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64, Aodds, Bodds);
18404 Evens = DAG.getNode(ISD::BITCAST, dl, VT, Evens);
18405 Odds = DAG.getNode(ISD::BITCAST, dl, VT, Odds);
18407 // Merge the two vectors back together with a shuffle. This expands into 2
18409 static const int ShufMask[] = { 0, 4, 2, 6 };
18410 return DAG.getVectorShuffle(VT, dl, Evens, Odds, ShufMask);
18413 assert((VT == MVT::v2i64 || VT == MVT::v4i64 || VT == MVT::v8i64) &&
18414 "Only know how to lower V2I64/V4I64/V8I64 multiply");
18416 // Ahi = psrlqi(a, 32);
18417 // Bhi = psrlqi(b, 32);
18419 // AloBlo = pmuludq(a, b);
18420 // AloBhi = pmuludq(a, Bhi);
18421 // AhiBlo = pmuludq(Ahi, b);
18423 // AloBhi = psllqi(AloBhi, 32);
18424 // AhiBlo = psllqi(AhiBlo, 32);
18425 // return AloBlo + AloBhi + AhiBlo;
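// Why this works (sketch): writing a = Alo + 2^32*Ahi and b = Blo + 2^32*Bhi,
// the product modulo 2^64 is
//   a*b = Alo*Blo + 2^32*(Alo*Bhi + Ahi*Blo)
// (the Ahi*Bhi term is multiplied by 2^64 and wraps to zero), and each of the
// three partial products is a 32x32->64 multiply that PMULUDQ performs on the
// even 32-bit lanes.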
18427 SDValue Ahi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, A, 32, DAG);
18428 SDValue Bhi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, B, 32, DAG);
18430 // Bit cast to 32-bit vectors for MULUDQ
18431 EVT MulVT = (VT == MVT::v2i64) ? MVT::v4i32 :
18432 (VT == MVT::v4i64) ? MVT::v8i32 : MVT::v16i32;
18433 A = DAG.getNode(ISD::BITCAST, dl, MulVT, A);
18434 B = DAG.getNode(ISD::BITCAST, dl, MulVT, B);
18435 Ahi = DAG.getNode(ISD::BITCAST, dl, MulVT, Ahi);
18436 Bhi = DAG.getNode(ISD::BITCAST, dl, MulVT, Bhi);
18438 SDValue AloBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, B);
18439 SDValue AloBhi = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, Bhi);
18440 SDValue AhiBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, Ahi, B);
18442 AloBhi = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, AloBhi, 32, DAG);
18443 AhiBlo = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, AhiBlo, 32, DAG);
18445 SDValue Res = DAG.getNode(ISD::ADD, dl, VT, AloBlo, AloBhi);
18446 return DAG.getNode(ISD::ADD, dl, VT, Res, AhiBlo);
18449 SDValue X86TargetLowering::LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) const {
18450 assert(Subtarget->isTargetWin64() && "Unexpected target");
18451 EVT VT = Op.getValueType();
18452 assert(VT.isInteger() && VT.getSizeInBits() == 128 &&
18453 "Unexpected return type for lowering");
18457 switch (Op->getOpcode()) {
18458 default: llvm_unreachable("Unexpected request for libcall!");
18459 case ISD::SDIV: isSigned = true; LC = RTLIB::SDIV_I128; break;
18460 case ISD::UDIV: isSigned = false; LC = RTLIB::UDIV_I128; break;
18461 case ISD::SREM: isSigned = true; LC = RTLIB::SREM_I128; break;
18462 case ISD::UREM: isSigned = false; LC = RTLIB::UREM_I128; break;
18463 case ISD::SDIVREM: isSigned = true; LC = RTLIB::SDIVREM_I128; break;
18464 case ISD::UDIVREM: isSigned = false; LC = RTLIB::UDIVREM_I128; break;
18468 SDValue InChain = DAG.getEntryNode();
18470 TargetLowering::ArgListTy Args;
18471 TargetLowering::ArgListEntry Entry;
18472 for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i) {
18473 EVT ArgVT = Op->getOperand(i).getValueType();
18474 assert(ArgVT.isInteger() && ArgVT.getSizeInBits() == 128 &&
18475 "Unexpected argument type for lowering");
18476 SDValue StackPtr = DAG.CreateStackTemporary(ArgVT, 16);
18477 Entry.Node = StackPtr;
18478 InChain = DAG.getStore(InChain, dl, Op->getOperand(i), StackPtr, MachinePointerInfo(),
18480 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
18481 Entry.Ty = PointerType::get(ArgTy,0);
18482 Entry.isSExt = false;
18483 Entry.isZExt = false;
18484 Args.push_back(Entry);
18487 SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
18490 TargetLowering::CallLoweringInfo CLI(DAG);
18491 CLI.setDebugLoc(dl).setChain(InChain)
18492 .setCallee(getLibcallCallingConv(LC),
18493 static_cast<EVT>(MVT::v2i64).getTypeForEVT(*DAG.getContext()),
18494 Callee, std::move(Args), 0)
18495 .setInRegister().setSExtResult(isSigned).setZExtResult(!isSigned);
18497 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
18498 return DAG.getNode(ISD::BITCAST, dl, VT, CallInfo.first);
18501 static SDValue LowerMUL_LOHI(SDValue Op, const X86Subtarget *Subtarget,
18502 SelectionDAG &DAG) {
18503 SDValue Op0 = Op.getOperand(0), Op1 = Op.getOperand(1);
18504 EVT VT = Op0.getValueType();
18507 assert((VT == MVT::v4i32 && Subtarget->hasSSE2()) ||
18508 (VT == MVT::v8i32 && Subtarget->hasInt256()));
18510 // PMULxD operations multiply each even-indexed value (starting at 0) of LHS
18511 // with the corresponding value of RHS and produce a widened result.
18512 // E.g., PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
18513 // => <2 x i64> <ae|cg>
18515 // In other words, to get all the results, we need to perform two PMULxD:
18516 // 1. one with the even values.
18517 // 2. one with the odd values.
18518 // To achieve #2, we need to place the odd values at an even position.
18520 // Place the odd value at an even position (basically, shift all values 1
18521 // step to the left):
18522 const int Mask[] = {1, -1, 3, -1, 5, -1, 7, -1};
18523 // <a|b|c|d> => <b|undef|d|undef>
18524 SDValue Odd0 = DAG.getVectorShuffle(VT, dl, Op0, Op0, Mask);
18525 // <e|f|g|h> => <f|undef|h|undef>
18526 SDValue Odd1 = DAG.getVectorShuffle(VT, dl, Op1, Op1, Mask);
18528 // Emit two multiplies, one for the lower 2 ints and one for the higher 2
18530 MVT MulVT = VT == MVT::v4i32 ? MVT::v2i64 : MVT::v4i64;
18531 bool IsSigned = Op->getOpcode() == ISD::SMUL_LOHI;
18533 (!IsSigned || !Subtarget->hasSSE41()) ? X86ISD::PMULUDQ : X86ISD::PMULDQ;
18534 // PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
18535 // => <2 x i64> <ae|cg>
18536 SDValue Mul1 = DAG.getNode(ISD::BITCAST, dl, VT,
18537 DAG.getNode(Opcode, dl, MulVT, Op0, Op1));
18538 // PMULUDQ <4 x i32> <b|undef|d|undef>, <4 x i32> <f|undef|h|undef>
18539 // => <2 x i64> <bf|dh>
18540 SDValue Mul2 = DAG.getNode(ISD::BITCAST, dl, VT,
18541 DAG.getNode(Opcode, dl, MulVT, Odd0, Odd1));
18543 // Shuffle it back into the right order.
18544 SDValue Highs, Lows;
18545 if (VT == MVT::v8i32) {
18546 const int HighMask[] = {1, 9, 3, 11, 5, 13, 7, 15};
18547 Highs = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, HighMask);
18548 const int LowMask[] = {0, 8, 2, 10, 4, 12, 6, 14};
18549 Lows = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, LowMask);
18551 const int HighMask[] = {1, 5, 3, 7};
18552 Highs = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, HighMask);
18553 const int LowMask[] = {0, 4, 2, 6};
18554 Lows = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, LowMask);
18557 // If we have a signed multiply but no PMULDQ, fix up the high parts of an
18558 // unsigned multiply.
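// The correction used here (sketch): for each 32-bit lane,
//   mulhs(a, b) == mulhu(a, b) - (a < 0 ? b : 0) - (b < 0 ? a : 0),
// and "(a >> 31) & b" with an arithmetic shift computes exactly the
// "(a < 0 ? b : 0)" term, so the unsigned high halves are fixed up by
// subtracting T1 + T2.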
18559 if (IsSigned && !Subtarget->hasSSE41()) {
18561 DAG.getConstant(31, DAG.getTargetLoweringInfo().getShiftAmountTy(VT));
18562 SDValue T1 = DAG.getNode(ISD::AND, dl, VT,
18563 DAG.getNode(ISD::SRA, dl, VT, Op0, ShAmt), Op1);
18564 SDValue T2 = DAG.getNode(ISD::AND, dl, VT,
18565 DAG.getNode(ISD::SRA, dl, VT, Op1, ShAmt), Op0);
18567 SDValue Fixup = DAG.getNode(ISD::ADD, dl, VT, T1, T2);
18568 Highs = DAG.getNode(ISD::SUB, dl, VT, Highs, Fixup);
18571 // The first result of MUL_LOHI is actually the low value, followed by the
18573 SDValue Ops[] = {Lows, Highs};
18574 return DAG.getMergeValues(Ops, dl);
18577 static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
18578 const X86Subtarget *Subtarget) {
18579 MVT VT = Op.getSimpleValueType();
18581 SDValue R = Op.getOperand(0);
18582 SDValue Amt = Op.getOperand(1);
18584 // Optimize shl/srl/sra with constant shift amount.
18585 if (auto *BVAmt = dyn_cast<BuildVectorSDNode>(Amt)) {
18586 if (auto *ShiftConst = BVAmt->getConstantSplatNode()) {
18587 uint64_t ShiftAmt = ShiftConst->getZExtValue();
18589 if (VT == MVT::v2i64 || VT == MVT::v4i32 || VT == MVT::v8i16 ||
18590 (Subtarget->hasInt256() &&
18591 (VT == MVT::v4i64 || VT == MVT::v8i32 || VT == MVT::v16i16)) ||
18592 (Subtarget->hasAVX512() &&
18593 (VT == MVT::v8i64 || VT == MVT::v16i32))) {
18594 if (Op.getOpcode() == ISD::SHL)
18595 return getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, R, ShiftAmt,
18597 if (Op.getOpcode() == ISD::SRL)
18598 return getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, R, ShiftAmt,
18600 if (Op.getOpcode() == ISD::SRA && VT != MVT::v2i64 && VT != MVT::v4i64)
18601 return getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, R, ShiftAmt,
18605 if (VT == MVT::v16i8) {
18606 if (Op.getOpcode() == ISD::SHL) {
18607 // Make a large shift.
18608 SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl,
18609 MVT::v8i16, R, ShiftAmt,
18611 SHL = DAG.getNode(ISD::BITCAST, dl, VT, SHL);
18612 // Zero out the rightmost bits.
18613 SmallVector<SDValue, 16> V(16,
18614 DAG.getConstant(uint8_t(-1U << ShiftAmt),
18616 return DAG.getNode(ISD::AND, dl, VT, SHL,
18617 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
18619 if (Op.getOpcode() == ISD::SRL) {
18620 // Make a large shift.
18621 SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, dl,
18622 MVT::v8i16, R, ShiftAmt,
18624 SRL = DAG.getNode(ISD::BITCAST, dl, VT, SRL);
18625 // Zero out the leftmost bits.
18626 SmallVector<SDValue, 16> V(16,
18627 DAG.getConstant(uint8_t(-1U) >> ShiftAmt,
18629 return DAG.getNode(ISD::AND, dl, VT, SRL,
18630 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
18632 if (Op.getOpcode() == ISD::SRA) {
18633 if (ShiftAmt == 7) {
18634 // R s>> 7 === R s< 0
18635 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
18636 return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
18639 // R s>> a === ((R u>> a) ^ m) - m
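// Sketch of why this works: after the logical shift the original sign bit
// sits at bit position 7-a, and m == 128 >> a has exactly that bit set;
// "(x ^ m) - m" sign-extends x from that bit position, turning the logical
// shift into an arithmetic one for each i8 lane.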
18640 SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
18641 SmallVector<SDValue, 16> V(16, DAG.getConstant(128 >> ShiftAmt,
18643 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V);
18644 Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
18645 Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
18648 llvm_unreachable("Unknown shift opcode.");
18651 if (Subtarget->hasInt256() && VT == MVT::v32i8) {
18652 if (Op.getOpcode() == ISD::SHL) {
18653 // Make a large shift.
18654 SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl,
18655 MVT::v16i16, R, ShiftAmt,
18657 SHL = DAG.getNode(ISD::BITCAST, dl, VT, SHL);
18658 // Zero out the rightmost bits.
18659 SmallVector<SDValue, 32> V(32,
18660 DAG.getConstant(uint8_t(-1U << ShiftAmt),
18662 return DAG.getNode(ISD::AND, dl, VT, SHL,
18663 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
18665 if (Op.getOpcode() == ISD::SRL) {
18666 // Make a large shift.
18667 SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, dl,
18668 MVT::v16i16, R, ShiftAmt,
18670 SRL = DAG.getNode(ISD::BITCAST, dl, VT, SRL);
18671 // Zero out the leftmost bits.
18672 SmallVector<SDValue, 32> V(32,
18673 DAG.getConstant(uint8_t(-1U) >> ShiftAmt,
18675 return DAG.getNode(ISD::AND, dl, VT, SRL,
18676 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
18678 if (Op.getOpcode() == ISD::SRA) {
18679 if (ShiftAmt == 7) {
18680 // R s>> 7 === R s< 0
18681 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
18682 return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
18685 // R s>> a === ((R u>> a) ^ m) - m
18686 SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
18687 SmallVector<SDValue, 32> V(32, DAG.getConstant(128 >> ShiftAmt, MVT::i8));
18689 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V);
18690 Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
18691 Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
return Res;
18694 llvm_unreachable("Unknown shift opcode.");
18699 // Special case in 32-bit mode, where i64 is expanded into high and low parts.
18700 if (!Subtarget->is64Bit() &&
18701 (VT == MVT::v2i64 || (Subtarget->hasInt256() && VT == MVT::v4i64)) &&
18702 Amt.getOpcode() == ISD::BITCAST &&
18703 Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
18704 Amt = Amt.getOperand(0);
18705 unsigned Ratio = Amt.getSimpleValueType().getVectorNumElements() /
18706 VT.getVectorNumElements();
18707 unsigned RatioInLog2 = Log2_32_Ceil(Ratio);
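// In this mode a v2i64/v4i64 splat amount appears as a bitcast of a wider
// build_vector of i32 pieces. Each piece contributes (1 << (6 - RatioInLog2))
// = 64/Ratio bits, so for Ratio == 2 the two i32 halves are packed as
// lo | (hi << 32) to recover the 64-bit shift amount of the first lane; the
// second loop below then checks that every lane uses that same amount.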
18708 uint64_t ShiftAmt = 0;
18709 for (unsigned i = 0; i != Ratio; ++i) {
18710 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Amt.getOperand(i));
18714 ShiftAmt |= C->getZExtValue() << (i * (1 << (6 - RatioInLog2)));
18716 // Check remaining shift amounts.
18717 for (unsigned i = Ratio; i != Amt.getNumOperands(); i += Ratio) {
18718 uint64_t ShAmt = 0;
18719 for (unsigned j = 0; j != Ratio; ++j) {
18720 ConstantSDNode *C =
18721 dyn_cast<ConstantSDNode>(Amt.getOperand(i + j));
18725 ShAmt |= C->getZExtValue() << (j * (1 << (6 - RatioInLog2)));
18727 if (ShAmt != ShiftAmt)
18730 switch (Op.getOpcode()) {
default:
18732 llvm_unreachable("Unknown shift opcode!");
case ISD::SHL:
18734 return getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, R, ShiftAmt, DAG);
case ISD::SRL:
18737 return getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, R, ShiftAmt, DAG);
case ISD::SRA:
18740 return getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, R, ShiftAmt, DAG);
18748 static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG,
18749 const X86Subtarget* Subtarget) {
18750 MVT VT = Op.getSimpleValueType();
18752 SDValue R = Op.getOperand(0);
18753 SDValue Amt = Op.getOperand(1);
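// This path handles shifts where every lane is shifted by the same (splatted)
// amount. The splat scalar is extracted and the shift is lowered to the
// X86ISD::VSHL/VSRL/VSRA forms, which take the count in the low 64 bits of an
// XMM register (or to the immediate forms when the splat is a constant).
// E.g. (shl <4 x i32> %x, (splat i32 %n)) only needs %n moved into a vector.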
18755 if ((VT == MVT::v2i64 && Op.getOpcode() != ISD::SRA) ||
18756 VT == MVT::v4i32 || VT == MVT::v8i16 ||
18757 (Subtarget->hasInt256() &&
18758 ((VT == MVT::v4i64 && Op.getOpcode() != ISD::SRA) ||
18759 VT == MVT::v8i32 || VT == MVT::v16i16)) ||
18760 (Subtarget->hasAVX512() && (VT == MVT::v8i64 || VT == MVT::v16i32))) {
18762 EVT EltVT = VT.getVectorElementType();
18764 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Amt)) {
18765 // Check if this build_vector node is doing a splat.
18766 // If so, then set BaseShAmt equal to the splat value.
18767 BaseShAmt = BV->getSplatValue();
18768 if (BaseShAmt && BaseShAmt.getOpcode() == ISD::UNDEF)
18769 BaseShAmt = SDValue();
18771 if (Amt.getOpcode() == ISD::EXTRACT_SUBVECTOR)
18772 Amt = Amt.getOperand(0);
18774 ShuffleVectorSDNode *SVN = dyn_cast<ShuffleVectorSDNode>(Amt);
18775 if (SVN && SVN->isSplat()) {
18776 unsigned SplatIdx = (unsigned)SVN->getSplatIndex();
18777 SDValue InVec = Amt.getOperand(0);
18778 if (InVec.getOpcode() == ISD::BUILD_VECTOR) {
18779 assert((SplatIdx < InVec.getValueType().getVectorNumElements()) &&
18780 "Unexpected shuffle index found!");
18781 BaseShAmt = InVec.getOperand(SplatIdx);
18782 } else if (InVec.getOpcode() == ISD::INSERT_VECTOR_ELT) {
18783 if (ConstantSDNode *C =
18784 dyn_cast<ConstantSDNode>(InVec.getOperand(2))) {
18785 if (C->getZExtValue() == SplatIdx)
18786 BaseShAmt = InVec.getOperand(1);
18791 // Avoid introducing an extract element from a shuffle.
18792 BaseShAmt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, InVec,
18793 DAG.getIntPtrConstant(SplatIdx));
18797 if (BaseShAmt.getNode()) {
18798 assert(EltVT.bitsLE(MVT::i64) && "Unexpected element type!");
18799 if (EltVT != MVT::i64 && EltVT.bitsGT(MVT::i32))
18800 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, BaseShAmt);
18801 else if (EltVT.bitsLT(MVT::i32))
18802 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, BaseShAmt);
18804 switch (Op.getOpcode()) {
18806 llvm_unreachable("Unknown shift opcode!");
18808 switch (VT.SimpleTy) {
18809 default: return SDValue();
18818 return getTargetVShiftNode(X86ISD::VSHLI, dl, VT, R, BaseShAmt, DAG);
18821 switch (VT.SimpleTy) {
18822 default: return SDValue();
18829 return getTargetVShiftNode(X86ISD::VSRAI, dl, VT, R, BaseShAmt, DAG);
18832 switch (VT.SimpleTy) {
18833 default: return SDValue();
18842 return getTargetVShiftNode(X86ISD::VSRLI, dl, VT, R, BaseShAmt, DAG);
18848 // Special case in 32-bit mode, where i64 is expanded into high and low parts.
18849 if (!Subtarget->is64Bit() &&
18850 (VT == MVT::v2i64 || (Subtarget->hasInt256() && VT == MVT::v4i64) ||
18851 (Subtarget->hasAVX512() && VT == MVT::v8i64)) &&
18852 Amt.getOpcode() == ISD::BITCAST &&
18853 Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
18854 Amt = Amt.getOperand(0);
18855 unsigned Ratio = Amt.getSimpleValueType().getVectorNumElements() /
18856 VT.getVectorNumElements();
18857 std::vector<SDValue> Vals(Ratio);
18858 for (unsigned i = 0; i != Ratio; ++i)
18859 Vals[i] = Amt.getOperand(i);
18860 for (unsigned i = Ratio; i != Amt.getNumOperands(); i += Ratio) {
18861 for (unsigned j = 0; j != Ratio; ++j)
18862 if (Vals[j] != Amt.getOperand(i + j))
18865 switch (Op.getOpcode()) {
default:
18867 llvm_unreachable("Unknown shift opcode!");
case ISD::SHL:
18869 return DAG.getNode(X86ISD::VSHL, dl, VT, R, Op.getOperand(1));
case ISD::SRL:
18871 return DAG.getNode(X86ISD::VSRL, dl, VT, R, Op.getOperand(1));
case ISD::SRA:
18873 return DAG.getNode(X86ISD::VSRA, dl, VT, R, Op.getOperand(1));
18880 static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
18881 SelectionDAG &DAG) {
18882 MVT VT = Op.getSimpleValueType();
18884 SDValue R = Op.getOperand(0);
18885 SDValue Amt = Op.getOperand(1);
18888 assert(VT.isVector() && "Custom lowering only for vector shifts!");
18889 assert(Subtarget->hasSSE2() && "Only custom lower when we have SSE2!");
18891 V = LowerScalarImmediateShift(Op, DAG, Subtarget);
18895 V = LowerScalarVariableShift(Op, DAG, Subtarget);
18899 if (Subtarget->hasAVX512() && (VT == MVT::v16i32 || VT == MVT::v8i64))
18901 // AVX2 has VPSLLV/VPSRAV/VPSRLV.
18902 if (Subtarget->hasInt256()) {
18903 if (Op.getOpcode() == ISD::SRL &&
18904 (VT == MVT::v2i64 || VT == MVT::v4i32 ||
18905 VT == MVT::v4i64 || VT == MVT::v8i32))
18907 if (Op.getOpcode() == ISD::SHL &&
18908 (VT == MVT::v2i64 || VT == MVT::v4i32 ||
18909 VT == MVT::v4i64 || VT == MVT::v8i32))
18911 if (Op.getOpcode() == ISD::SRA && (VT == MVT::v4i32 || VT == MVT::v8i32))
18915 // If possible, lower this packed shift into a vector multiply instead of
18916 // expanding it into a sequence of scalar shifts.
18917 // Do this only if the vector shift count is a constant build_vector.
18918 if (Op.getOpcode() == ISD::SHL &&
18919 (VT == MVT::v8i16 || VT == MVT::v4i32 ||
18920 (Subtarget->hasInt256() && VT == MVT::v16i16)) &&
18921 ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
18922 SmallVector<SDValue, 8> Elts;
18923 EVT SVT = VT.getScalarType();
18924 unsigned SVTBits = SVT.getSizeInBits();
18925 const APInt &One = APInt(SVTBits, 1);
18926 unsigned NumElems = VT.getVectorNumElements();
18928 for (unsigned i=0; i !=NumElems; ++i) {
18929 SDValue Op = Amt->getOperand(i);
18930 if (Op->getOpcode() == ISD::UNDEF) {
18931 Elts.push_back(Op);
18935 ConstantSDNode *ND = cast<ConstantSDNode>(Op);
18936 const APInt &C = APInt(SVTBits, ND->getAPIntValue().getZExtValue());
18937 uint64_t ShAmt = C.getZExtValue();
18938 if (ShAmt >= SVTBits) {
18939 Elts.push_back(DAG.getUNDEF(SVT));
18942 Elts.push_back(DAG.getConstant(One.shl(ShAmt), SVT));
18944 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Elts);
18945 return DAG.getNode(ISD::MUL, dl, VT, R, BV);
18948 // Lower SHL with variable shift amount.
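// For v4i32 a variable left shift is lowered as a multiply by 2^amt, where
// 2^amt is materialized with FP tricks: (amt << 23) + 0x3f800000 is the IEEE
// bit pattern of the float 2^amt (0x3f800000 is 1.0f), so a bitcast to f32
// followed by a float->int conversion yields 1 << amt per lane. For example,
// amt = 5 gives 0x42000000 = 32.0f, which converts to 32 = 1 << 5.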
18949 if (VT == MVT::v4i32 && Op->getOpcode() == ISD::SHL) {
18950 Op = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(23, VT));
18952 Op = DAG.getNode(ISD::ADD, dl, VT, Op, DAG.getConstant(0x3f800000U, VT));
18953 Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, Op);
18954 Op = DAG.getNode(ISD::FP_TO_SINT, dl, VT, Op);
18955 return DAG.getNode(ISD::MUL, dl, VT, Op, R);
18958 // If possible, lower this shift as a sequence of two shifts by
18959 // constant plus a MOVSS/MOVSD instead of scalarizing it.
18961 // (v4i32 (srl A, (build_vector < X, Y, Y, Y>)))
18963 // Could be rewritten as:
18964 // (v4i32 (MOVSS (srl A, <Y,Y,Y,Y>), (srl A, <X,X,X,X>)))
18966 // The advantage is that the two shifts from the example would be
18967 // lowered as X86ISD::VSRLI nodes. This would be cheaper than scalarizing
18968 // the vector shift into four scalar shifts plus four pairs of vector
// insert/extract operations.
18970 if ((VT == MVT::v8i16 || VT == MVT::v4i32) &&
18971 ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
18972 unsigned TargetOpcode = X86ISD::MOVSS;
18973 bool CanBeSimplified;
18974 // The splat value for the first packed shift (the 'X' from the example).
18975 SDValue Amt1 = Amt->getOperand(0);
18976 // The splat value for the second packed shift (the 'Y' from the example).
18977 SDValue Amt2 = (VT == MVT::v4i32) ? Amt->getOperand(1) :
18978 Amt->getOperand(2);
18980 // See if it is possible to replace this node with a sequence of
18981 // two shifts followed by a MOVSS/MOVSD
18982 if (VT == MVT::v4i32) {
18983 // Check if it is legal to use a MOVSS.
18984 CanBeSimplified = Amt2 == Amt->getOperand(2) &&
18985 Amt2 == Amt->getOperand(3);
18986 if (!CanBeSimplified) {
18987 // Otherwise, check if we can still simplify this node using a MOVSD.
18988 CanBeSimplified = Amt1 == Amt->getOperand(1) &&
18989 Amt->getOperand(2) == Amt->getOperand(3);
18990 TargetOpcode = X86ISD::MOVSD;
18991 Amt2 = Amt->getOperand(2);
18994 // Do similar checks for the case where the machine value type
// is MVT::v8i16.
18996 CanBeSimplified = Amt1 == Amt->getOperand(1);
18997 for (unsigned i=3; i != 8 && CanBeSimplified; ++i)
18998 CanBeSimplified = Amt2 == Amt->getOperand(i);
19000 if (!CanBeSimplified) {
19001 TargetOpcode = X86ISD::MOVSD;
19002 CanBeSimplified = true;
19003 Amt2 = Amt->getOperand(4);
19004 for (unsigned i=0; i != 4 && CanBeSimplified; ++i)
19005 CanBeSimplified = Amt1 == Amt->getOperand(i);
19006 for (unsigned j=4; j != 8 && CanBeSimplified; ++j)
19007 CanBeSimplified = Amt2 == Amt->getOperand(j);
19011 if (CanBeSimplified && isa<ConstantSDNode>(Amt1) &&
19012 isa<ConstantSDNode>(Amt2)) {
19013 // Replace this node with two shifts followed by a MOVSS/MOVSD.
19014 EVT CastVT = MVT::v4i32;
19016 SDValue Splat1 = DAG.getConstant(cast<ConstantSDNode>(Amt1)->getAPIntValue(), VT);
19017 SDValue Shift1 = DAG.getNode(Op->getOpcode(), dl, VT, R, Splat1);
19019 SDValue Splat2 = DAG.getConstant(cast<ConstantSDNode>(Amt2)->getAPIntValue(), VT);
19020 SDValue Shift2 = DAG.getNode(Op->getOpcode(), dl, VT, R, Splat2);
19021 if (TargetOpcode == X86ISD::MOVSD)
19022 CastVT = MVT::v2i64;
19023 SDValue BitCast1 = DAG.getNode(ISD::BITCAST, dl, CastVT, Shift1);
19024 SDValue BitCast2 = DAG.getNode(ISD::BITCAST, dl, CastVT, Shift2);
19025 SDValue Result = getTargetShuffleNode(TargetOpcode, dl, CastVT, BitCast2, BitCast1, DAG);
19027 return DAG.getNode(ISD::BITCAST, dl, VT, Result);
19031 if (VT == MVT::v16i8 && Op->getOpcode() == ISD::SHL) {
19032 assert(Subtarget->hasSSE2() && "Need SSE2 for pslli/pcmpeq.");
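// v16i8 variable left shift: pre-shift the amount left by 5 so that bit 2 of
// each byte's amount lands in the byte's sign bit, then perform three rounds
// that conditionally shift R by 4, 2 and 1. In each round the sign bit of the
// (doubled) amount is turned into a full byte mask with PCMPEQ and used by
// VSELECT to pick either the shifted or the unshifted value.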
19035 Op = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(5, VT));
19036 Op = DAG.getNode(ISD::BITCAST, dl, VT, Op);
19038 // Turn 'a' into a mask suitable for VSELECT
19039 SDValue VSelM = DAG.getConstant(0x80, VT);
19040 SDValue OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
19041 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);
19043 SDValue CM1 = DAG.getConstant(0x0f, VT);
19044 SDValue CM2 = DAG.getConstant(0x3f, VT);
19046 // r = VSELECT(r, psllw(r & (char16)15, 4), a);
19047 SDValue M = DAG.getNode(ISD::AND, dl, VT, R, CM1);
19048 M = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, MVT::v8i16, M, 4, DAG);
19049 M = DAG.getNode(ISD::BITCAST, dl, VT, M);
19050 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R);
19053 Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op);
19054 OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
19055 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);
19057 // r = VSELECT(r, psllw(r & (char16)63, 2), a);
19058 M = DAG.getNode(ISD::AND, dl, VT, R, CM2);
19059 M = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, MVT::v8i16, M, 2, DAG);
19060 M = DAG.getNode(ISD::BITCAST, dl, VT, M);
19061 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R);
19064 Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op);
19065 OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
19066 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);
19068 // return VSELECT(r, r+r, a);
19069 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel,
19070 DAG.getNode(ISD::ADD, dl, VT, R, R), R);
19074 // It's worth extending once and using the v8i32 shifts for 16-bit types, but
19075 // the extra overheads to get from v16i8 to v8i32 make the existing SSE
19076 // solution better.
19077 if (Subtarget->hasInt256() && VT == MVT::v8i16) {
19078 MVT NewVT = VT == MVT::v8i16 ? MVT::v8i32 : MVT::v16i16;
19080 unsigned ExtOpc = Op.getOpcode() == ISD::SRA ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
19081 R = DAG.getNode(ExtOpc, dl, NewVT, R);
19082 Amt = DAG.getNode(ISD::ANY_EXTEND, dl, NewVT, Amt);
19083 return DAG.getNode(ISD::TRUNCATE, dl, VT,
19084 DAG.getNode(Op.getOpcode(), dl, NewVT, R, Amt));
19087 // Decompose 256-bit shifts into smaller 128-bit shifts.
19088 if (VT.is256BitVector()) {
19089 unsigned NumElems = VT.getVectorNumElements();
19090 MVT EltVT = VT.getVectorElementType();
19091 EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
19093 // Extract the two vectors
19094 SDValue V1 = Extract128BitVector(R, 0, DAG, dl);
19095 SDValue V2 = Extract128BitVector(R, NumElems/2, DAG, dl);
19097 // Recreate the shift amount vectors
19098 SDValue Amt1, Amt2;
19099 if (Amt.getOpcode() == ISD::BUILD_VECTOR) {
19100 // Constant shift amount
19101 SmallVector<SDValue, 4> Amt1Csts;
19102 SmallVector<SDValue, 4> Amt2Csts;
19103 for (unsigned i = 0; i != NumElems/2; ++i)
19104 Amt1Csts.push_back(Amt->getOperand(i));
19105 for (unsigned i = NumElems/2; i != NumElems; ++i)
19106 Amt2Csts.push_back(Amt->getOperand(i));
19108 Amt1 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Amt1Csts);
19109 Amt2 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Amt2Csts);
19111 // Variable shift amount
19112 Amt1 = Extract128BitVector(Amt, 0, DAG, dl);
19113 Amt2 = Extract128BitVector(Amt, NumElems/2, DAG, dl);
19116 // Issue new vector shifts for the smaller types
19117 V1 = DAG.getNode(Op.getOpcode(), dl, NewVT, V1, Amt1);
19118 V2 = DAG.getNode(Op.getOpcode(), dl, NewVT, V2, Amt2);
19120 // Concatenate the result back
19121 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, V1, V2);
19127 static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
19128 // Lower the "add/sub/mul with overflow" instruction into a regular ins plus
19129 // a "setcc" instruction that checks the overflow flag. The "brcond" lowering
19130 // looks for this combo and may remove the "setcc" instruction if the "setcc"
19131 // has only one use.
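// For example, (i32, i1) = ISD::SADDO(a, b) becomes an X86ISD::ADD that also
// produces EFLAGS, followed by an X86ISD::SETCC with condition COND_O reading
// those flags; when the overflow bit only feeds a branch, the brcond lowering
// can fold the SETCC away and branch on the flags directly.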
19132 SDNode *N = Op.getNode();
19133 SDValue LHS = N->getOperand(0);
19134 SDValue RHS = N->getOperand(1);
19135 unsigned BaseOp = 0;
19138 switch (Op.getOpcode()) {
19139 default: llvm_unreachable("Unknown ovf instruction!");
19141 // An add of one will be selected as an INC. Note that INC doesn't
19142 // set CF, so we can't do this for UADDO.
19143 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
19145 BaseOp = X86ISD::INC;
19146 Cond = X86::COND_O;
19149 BaseOp = X86ISD::ADD;
19150 Cond = X86::COND_O;
19153 BaseOp = X86ISD::ADD;
19154 Cond = X86::COND_B;
19157 // A subtract of one will be selected as a DEC. Note that DEC doesn't
19158 // set CF, so we can't do this for USUBO.
19159 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
19161 BaseOp = X86ISD::DEC;
19162 Cond = X86::COND_O;
19165 BaseOp = X86ISD::SUB;
19166 Cond = X86::COND_O;
19169 BaseOp = X86ISD::SUB;
19170 Cond = X86::COND_B;
19173 BaseOp = N->getValueType(0) == MVT::i8 ? X86ISD::SMUL8 : X86ISD::SMUL;
19174 Cond = X86::COND_O;
19176 case ISD::UMULO: { // i64, i8 = umulo lhs, rhs --> i64, i64, i32 umul lhs,rhs
19177 if (N->getValueType(0) == MVT::i8) {
19178 BaseOp = X86ISD::UMUL8;
19179 Cond = X86::COND_O;
19182 SDVTList VTs = DAG.getVTList(N->getValueType(0), N->getValueType(0),
19184 SDValue Sum = DAG.getNode(X86ISD::UMUL, DL, VTs, LHS, RHS);
19187 SDValue SetCC = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
19188 DAG.getConstant(X86::COND_O, MVT::i32),
19189 SDValue(Sum.getNode(), 2));
19191 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
19195 // Also sets EFLAGS.
19196 SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::i32);
19197 SDValue Sum = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);
19200 SDValue SetCC = DAG.getNode(X86ISD::SETCC, DL, N->getValueType(1),
19201 DAG.getConstant(Cond, MVT::i32),
19202 SDValue(Sum.getNode(), 1));
19204 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
19207 // Sign extension of the low part of vector elements. This may be used either
19208 // when sign extend instructions are not available or if the vector element
19209 // sizes already match the sign-extended size. If the vector elements are in
19210 // their pre-extended size and sign extend instructions are available, that will
19211 // be handled by LowerSIGN_EXTEND.
19212 SDValue X86TargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
19213 SelectionDAG &DAG) const {
19215 EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
19216 MVT VT = Op.getSimpleValueType();
19218 if (!Subtarget->hasSSE2() || !VT.isVector())
19221 unsigned BitsDiff = VT.getScalarType().getSizeInBits() -
19222 ExtraVT.getScalarType().getSizeInBits();
19224 switch (VT.SimpleTy) {
19225 default: return SDValue();
19228 if (!Subtarget->hasFp256())
19230 if (!Subtarget->hasInt256()) {
19231 // needs to be split
19232 unsigned NumElems = VT.getVectorNumElements();
19234 // Extract the LHS vectors
19235 SDValue LHS = Op.getOperand(0);
19236 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
19237 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);
19239 MVT EltVT = VT.getVectorElementType();
19240 EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
19242 EVT ExtraEltVT = ExtraVT.getVectorElementType();
19243 unsigned ExtraNumElems = ExtraVT.getVectorNumElements();
19244 ExtraVT = EVT::getVectorVT(*DAG.getContext(), ExtraEltVT,
19246 SDValue Extra = DAG.getValueType(ExtraVT);
19248 LHS1 = DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, Extra);
19249 LHS2 = DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, Extra);
19251 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, LHS1, LHS2);
19256 SDValue Op0 = Op.getOperand(0);
19258 // This is a sign extension of some low part of vector elements without
19259 // changing the size of the vector elements themselves:
19260 // Shift-Left + Shift-Right-Algebraic.
19261 SDValue Shl = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Op0,
19263 return getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, Shl, BitsDiff,
19269 /// Returns true if the operand type is exactly twice the native width, and
19270 /// the corresponding cmpxchg8b or cmpxchg16b instruction is available.
19271 /// Used to know whether to use cmpxchg8/16b when expanding atomic operations
19272 /// (otherwise we leave them alone to become __sync_fetch_and_... calls).
19273 bool X86TargetLowering::needsCmpXchgNb(const Type *MemType) const {
19274 unsigned OpWidth = MemType->getPrimitiveSizeInBits();
19277 return !Subtarget->is64Bit(); // FIXME this should be Subtarget.hasCmpxchg8b
19278 else if (OpWidth == 128)
19279 return Subtarget->hasCmpxchg16b();
19284 bool X86TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
19285 return needsCmpXchgNb(SI->getValueOperand()->getType());
19288 // Note: this turns large loads into lock cmpxchg8b/16b.
19289 // FIXME: On 32 bits x86, fild/movq might be faster than lock cmpxchg8b.
19290 bool X86TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
19291 auto PTy = cast<PointerType>(LI->getPointerOperand()->getType());
19292 return needsCmpXchgNb(PTy->getElementType());
19295 bool X86TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
19296 unsigned NativeWidth = Subtarget->is64Bit() ? 64 : 32;
19297 const Type *MemType = AI->getType();
19299 // If the operand is too big, we must see if cmpxchg8/16b is available
19300 // and default to library calls otherwise.
19301 if (MemType->getPrimitiveSizeInBits() > NativeWidth)
19302 return needsCmpXchgNb(MemType);
19304 AtomicRMWInst::BinOp Op = AI->getOperation();
19307 llvm_unreachable("Unknown atomic operation");
19308 case AtomicRMWInst::Xchg:
19309 case AtomicRMWInst::Add:
19310 case AtomicRMWInst::Sub:
19311 // It's better to use xadd, xsub or xchg for these in all cases.
19313 case AtomicRMWInst::Or:
19314 case AtomicRMWInst::And:
19315 case AtomicRMWInst::Xor:
19316 // If the atomicrmw's result isn't actually used, we can just add a "lock"
19317 // prefix to a normal instruction for these operations.
19318 return !AI->use_empty();
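// For example, an 'atomicrmw or' whose result is unused can be selected as a
// single lock-prefixed OR to memory; if the old value is needed there is no
// plain instruction that returns it, so we must expand to a cmpxchg loop.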
19319 case AtomicRMWInst::Nand:
19320 case AtomicRMWInst::Max:
19321 case AtomicRMWInst::Min:
19322 case AtomicRMWInst::UMax:
19323 case AtomicRMWInst::UMin:
19324 // These always require a non-trivial set of data operations on x86. We must
19325 // use a cmpxchg loop.
19330 static bool hasMFENCE(const X86Subtarget& Subtarget) {
19331 // Use mfence if we have SSE2 or we're on x86-64 (even if we asked for
19332 // no-sse2). There isn't any reason to disable it if the target processor
// supports it.
19334 return Subtarget.hasSSE2() || Subtarget.is64Bit();
19338 X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
19339 unsigned NativeWidth = Subtarget->is64Bit() ? 64 : 32;
19340 const Type *MemType = AI->getType();
19341 // Accesses larger than the native width are turned into cmpxchg/libcalls, so
19342 // there is no benefit in turning such RMWs into loads, and it is actually
19343 // harmful as it introduces a mfence.
19344 if (MemType->getPrimitiveSizeInBits() > NativeWidth)
19347 auto Builder = IRBuilder<>(AI);
19348 Module *M = Builder.GetInsertBlock()->getParent()->getParent();
19349 auto SynchScope = AI->getSynchScope();
19350 // We must restrict the ordering to avoid generating loads with Release or
19351 // ReleaseAcquire orderings.
19352 auto Order = AtomicCmpXchgInst::getStrongestFailureOrdering(AI->getOrdering());
19353 auto Ptr = AI->getPointerOperand();
19355 // Before the load we need a fence. Here is an example lifted from
19356 // http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf showing why a fence
// is required:
// Thread 0:
19359 // x.store(1, relaxed);
19360 // r1 = y.fetch_add(0, release);
// Thread 1:
19362 // y.fetch_add(42, acquire);
19363 // r2 = x.load(relaxed);
19364 // r1 = r2 = 0 is impossible, but becomes possible if the idempotent rmw is
19365 // lowered to just a load without a fence. A mfence flushes the store buffer,
19366 // making the optimization clearly correct.
19367 // FIXME: it is required if isAtLeastRelease(Order) but it is not clear
19368 // otherwise, we might be able to be more aggressive on relaxed idempotent
19369 // rmw. In practice, they do not look useful, so we don't try to be
19370 // especially clever.
19371 if (SynchScope == SingleThread) {
19372 // FIXME: we could just insert an X86ISD::MEMBARRIER here, except we are at
19373 // the IR level, so we must wrap it in an intrinsic.
19375 } else if (hasMFENCE(*Subtarget)) {
19376 Function *MFence = llvm::Intrinsic::getDeclaration(M,
19377 Intrinsic::x86_sse2_mfence);
19378 Builder.CreateCall(MFence);
19380 // FIXME: it might make sense to use a locked operation here but on a
19381 // different cache-line to prevent cache-line bouncing. In practice it
19382 // is probably a small win, and x86 processors without mfence are rare
19383 // enough that we do not bother.
19387 // Finally we can emit the atomic load.
19388 LoadInst *Loaded = Builder.CreateAlignedLoad(Ptr,
19389 AI->getType()->getPrimitiveSizeInBits());
19390 Loaded->setAtomic(Order, SynchScope);
19391 AI->replaceAllUsesWith(Loaded);
19392 AI->eraseFromParent();
19396 static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget *Subtarget,
19397 SelectionDAG &DAG) {
19399 AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>(
19400 cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue());
19401 SynchronizationScope FenceScope = static_cast<SynchronizationScope>(
19402 cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());
19404 // The only fence that needs an instruction is a sequentially-consistent
19405 // cross-thread fence.
19406 if (FenceOrdering == SequentiallyConsistent && FenceScope == CrossThread) {
19407 if (hasMFENCE(*Subtarget))
19408 return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0));
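// Without MFENCE (pre-SSE2, 32-bit only), fall back to a LOCK'ed OR of zero
// into the top of the stack; any locked read-modify-write acts as a full
// memory barrier on x86.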
19410 SDValue Chain = Op.getOperand(0);
19411 SDValue Zero = DAG.getConstant(0, MVT::i32);
19413 DAG.getRegister(X86::ESP, MVT::i32), // Base
19414 DAG.getTargetConstant(1, MVT::i8), // Scale
19415 DAG.getRegister(0, MVT::i32), // Index
19416 DAG.getTargetConstant(0, MVT::i32), // Disp
19417 DAG.getRegister(0, MVT::i32), // Segment.
19421 SDNode *Res = DAG.getMachineNode(X86::OR32mrLocked, dl, MVT::Other, Ops);
19422 return SDValue(Res, 0);
19425 // MEMBARRIER is a compiler barrier; it codegens to a no-op.
19426 return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0));
19429 static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget *Subtarget,
19430 SelectionDAG &DAG) {
19431 MVT T = Op.getSimpleValueType();
19435 switch(T.SimpleTy) {
19436 default: llvm_unreachable("Invalid value type!");
19437 case MVT::i8: Reg = X86::AL; size = 1; break;
19438 case MVT::i16: Reg = X86::AX; size = 2; break;
19439 case MVT::i32: Reg = X86::EAX; size = 4; break;
19441 assert(Subtarget->is64Bit() && "Node not type legal!");
19442 Reg = X86::RAX; size = 8;
19445 SDValue cpIn = DAG.getCopyToReg(Op.getOperand(0), DL, Reg,
19446 Op.getOperand(2), SDValue());
19447 SDValue Ops[] = { cpIn.getValue(0),
19450 DAG.getTargetConstant(size, MVT::i8),
19451 cpIn.getValue(1) };
19452 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
19453 MachineMemOperand *MMO = cast<AtomicSDNode>(Op)->getMemOperand();
19454 SDValue Result = DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG_DAG, DL, Tys, Ops, T, MMO);
19458 SDValue cpOut = DAG.getCopyFromReg(Result.getValue(0), DL, Reg, T, Result.getValue(1));
19459 SDValue EFLAGS = DAG.getCopyFromReg(cpOut.getValue(1), DL, X86::EFLAGS,
19460 MVT::i32, cpOut.getValue(2));
19461 SDValue Success = DAG.getNode(X86ISD::SETCC, DL, Op->getValueType(1),
19462 DAG.getConstant(X86::COND_E, MVT::i8), EFLAGS);
19464 DAG.ReplaceAllUsesOfValueWith(Op.getValue(0), cpOut);
19465 DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), Success);
19466 DAG.ReplaceAllUsesOfValueWith(Op.getValue(2), EFLAGS.getValue(1));
19470 static SDValue LowerBITCAST(SDValue Op, const X86Subtarget *Subtarget,
19471 SelectionDAG &DAG) {
19472 MVT SrcVT = Op.getOperand(0).getSimpleValueType();
19473 MVT DstVT = Op.getSimpleValueType();
19475 if (SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8) {
19476 assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
19477 if (DstVT != MVT::f64)
19478 // This conversion needs to be expanded.
19481 SDValue InVec = Op->getOperand(0);
19483 unsigned NumElts = SrcVT.getVectorNumElements();
19484 EVT SVT = SrcVT.getVectorElementType();
19486 // Widen the vector in input in the case of MVT::v2i32.
19487 // Example: from MVT::v2i32 to MVT::v4i32.
19488 SmallVector<SDValue, 16> Elts;
19489 for (unsigned i = 0, e = NumElts; i != e; ++i)
19490 Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SVT, InVec,
19491 DAG.getIntPtrConstant(i)));
19493 // Explicitly mark the extra elements as Undef.
19494 SDValue Undef = DAG.getUNDEF(SVT);
19495 for (unsigned i = NumElts, e = NumElts * 2; i != e; ++i)
19496 Elts.push_back(Undef);
19498 EVT NewVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumElts * 2);
19499 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Elts);
19500 SDValue ToV2F64 = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, BV);
19501 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, ToV2F64,
19502 DAG.getIntPtrConstant(0));
19505 assert(Subtarget->is64Bit() && !Subtarget->hasSSE2() &&
19506 Subtarget->hasMMX() && "Unexpected custom BITCAST");
19507 assert((DstVT == MVT::i64 ||
19508 (DstVT.isVector() && DstVT.getSizeInBits()==64)) &&
19509 "Unexpected custom BITCAST");
19510 // i64 <=> MMX conversions are Legal.
19511 if (SrcVT==MVT::i64 && DstVT.isVector())
19513 if (DstVT==MVT::i64 && SrcVT.isVector())
19515 // MMX <=> MMX conversions are Legal.
19516 if (SrcVT.isVector() && DstVT.isVector())
19518 // All other conversions need to be expanded.
19522 static SDValue LowerCTPOP(SDValue Op, const X86Subtarget *Subtarget,
19523 SelectionDAG &DAG) {
19524 SDNode *Node = Op.getNode();
19527 Op = Op.getOperand(0);
19528 EVT VT = Op.getValueType();
19529 assert((VT.is128BitVector() || VT.is256BitVector()) &&
19530 "CTPOP lowering only implemented for 128/256-bit wide vector types");
19532 unsigned NumElts = VT.getVectorNumElements();
19533 EVT EltVT = VT.getVectorElementType();
19534 unsigned Len = EltVT.getSizeInBits();
19536 // This is the vectorized version of the "best" algorithm from
19537 // http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
19538 // with a minor tweak to use a series of adds + shifts instead of vector
19539 // multiplications. Implemented for the v2i64, v4i64, v4i32, v8i32 types:
19541 // v2i64, v4i64, v4i32 => Only profitable w/ popcnt disabled
19542 // v8i32 => Always profitable
19544 // FIXME: There are a couple of possible improvements:
19546 // 1) Support for i8 and i16 vectors (needs measurements if popcnt enabled).
19547 // 2) Use strategies from http://wm.ite.pl/articles/sse-popcount.html
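// Worked example of the three masking steps on a single byte v = 0xEF
// (popcount 7):
//   v - ((v >> 1) & 0x55)           -> 0x9A (per-2-bit counts 2,1,2,2)
//   (v & 0x33) + ((v >> 2) & 0x33)  -> 0x34 (per-nibble counts 3,4)
//   (v + (v >> 4)) & 0x0F           -> 0x07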
19549 assert(EltVT.isInteger() && (Len == 32 || Len == 64) && Len % 8 == 0 &&
19550 "CTPOP not implemented for this vector element type.");
19552 // X86 canonicalizes ANDs to vXi64; generate the appropriate bitcasts to avoid
19553 // extra legalization.
19554 bool NeedsBitcast = EltVT == MVT::i32;
19555 MVT BitcastVT = VT.is256BitVector() ? MVT::v4i64 : MVT::v2i64;
19557 SDValue Cst55 = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x55)), EltVT);
19558 SDValue Cst33 = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x33)), EltVT);
19559 SDValue Cst0F = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x0F)), EltVT);
19561 // v = v - ((v >> 1) & 0x55555555...)
19562 SmallVector<SDValue, 8> Ones(NumElts, DAG.getConstant(1, EltVT));
19563 SDValue OnesV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ones);
19564 SDValue Srl = DAG.getNode(ISD::SRL, dl, VT, Op, OnesV);
19566 Srl = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Srl);
19568 SmallVector<SDValue, 8> Mask55(NumElts, Cst55);
19569 SDValue M55 = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Mask55);
19571 M55 = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M55);
19573 SDValue And = DAG.getNode(ISD::AND, dl, Srl.getValueType(), Srl, M55);
19574 if (VT != And.getValueType())
19575 And = DAG.getNode(ISD::BITCAST, dl, VT, And);
19576 SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, Op, And);
19578 // v = (v & 0x33333333...) + ((v >> 2) & 0x33333333...)
19579 SmallVector<SDValue, 8> Mask33(NumElts, Cst33);
19580 SDValue M33 = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Mask33);
19581 SmallVector<SDValue, 8> Twos(NumElts, DAG.getConstant(2, EltVT));
19582 SDValue TwosV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Twos);
19584 Srl = DAG.getNode(ISD::SRL, dl, VT, Sub, TwosV);
19585 if (NeedsBitcast) {
19586 Srl = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Srl);
19587 M33 = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M33);
19588 Sub = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Sub);
19591 SDValue AndRHS = DAG.getNode(ISD::AND, dl, M33.getValueType(), Srl, M33);
19592 SDValue AndLHS = DAG.getNode(ISD::AND, dl, M33.getValueType(), Sub, M33);
19593 if (VT != AndRHS.getValueType()) {
19594 AndRHS = DAG.getNode(ISD::BITCAST, dl, VT, AndRHS);
19595 AndLHS = DAG.getNode(ISD::BITCAST, dl, VT, AndLHS);
19597 SDValue Add = DAG.getNode(ISD::ADD, dl, VT, AndLHS, AndRHS);
19599 // v = (v + (v >> 4)) & 0x0F0F0F0F...
19600 SmallVector<SDValue, 8> Fours(NumElts, DAG.getConstant(4, EltVT));
19601 SDValue FoursV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Fours);
19602 Srl = DAG.getNode(ISD::SRL, dl, VT, Add, FoursV);
19603 Add = DAG.getNode(ISD::ADD, dl, VT, Add, Srl);
19605 SmallVector<SDValue, 8> Mask0F(NumElts, Cst0F);
19606 SDValue M0F = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Mask0F);
19607 if (NeedsBitcast) {
19608 Add = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Add);
19609 M0F = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M0F);
19611 And = DAG.getNode(ISD::AND, dl, M0F.getValueType(), Add, M0F);
19612 if (VT != And.getValueType())
19613 And = DAG.getNode(ISD::BITCAST, dl, VT, And);
19615 // The algorithm mentioned above uses:
19616 // v = (v * 0x01010101...) >> (Len - 8)
19618 // Change it to use vector adds + vector shifts which yield faster results on
19619 // Haswell than using vector integer multiplication.
19621 // For i32 elements:
19622 // v = v + (v >> 8)
19623 // v = v + (v >> 16)
19625 // For i64 elements:
19626 // v = v + (v >> 8)
19627 // v = v + (v >> 16)
19628 // v = v + (v >> 32)
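// After the 0x0F mask each byte of an element holds its own popcount (at most
// 8); the shift+add ladder below accumulates all byte counts into the least
// significant byte, and the final 0x3F/0x7F mask discards the partial sums
// left in the upper bytes.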
19631 SmallVector<SDValue, 8> Csts;
19632 for (unsigned i = 8; i <= Len/2; i *= 2) {
19633 Csts.assign(NumElts, DAG.getConstant(i, EltVT));
19634 SDValue CstsV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Csts);
19635 Srl = DAG.getNode(ISD::SRL, dl, VT, Add, CstsV);
19636 Add = DAG.getNode(ISD::ADD, dl, VT, Add, Srl);
19640 // The result is in the least significant 6 bits (i32) or 7 bits (i64) of each element.
19641 SDValue Cst3F = DAG.getConstant(APInt(Len, Len == 32 ? 0x3F : 0x7F), EltVT);
19642 SmallVector<SDValue, 8> Cst3FV(NumElts, Cst3F);
19643 SDValue M3F = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Cst3FV);
19644 if (NeedsBitcast) {
19645 Add = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Add);
19646 M3F = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M3F);
19648 And = DAG.getNode(ISD::AND, dl, M3F.getValueType(), Add, M3F);
19649 if (VT != And.getValueType())
19650 And = DAG.getNode(ISD::BITCAST, dl, VT, And);
19655 static SDValue LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) {
19656 SDNode *Node = Op.getNode();
19658 EVT T = Node->getValueType(0);
19659 SDValue negOp = DAG.getNode(ISD::SUB, dl, T,
19660 DAG.getConstant(0, T), Node->getOperand(2));
19661 return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, dl,
19662 cast<AtomicSDNode>(Node)->getMemoryVT(),
19663 Node->getOperand(0),
19664 Node->getOperand(1), negOp,
19665 cast<AtomicSDNode>(Node)->getMemOperand(),
19666 cast<AtomicSDNode>(Node)->getOrdering(),
19667 cast<AtomicSDNode>(Node)->getSynchScope());
19670 static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) {
19671 SDNode *Node = Op.getNode();
19673 EVT VT = cast<AtomicSDNode>(Node)->getMemoryVT();
19675 // Convert seq_cst store -> xchg
19676 // Convert wide store -> swap (-> cmpxchg8b/cmpxchg16b)
19677 // FIXME: On 32-bit, store -> fist or movq would be more efficient
19678 // (The only way to get a 16-byte store is cmpxchg16b)
19679 // FIXME: 16-byte ATOMIC_SWAP isn't actually hooked up at the moment.
19680 if (cast<AtomicSDNode>(Node)->getOrdering() == SequentiallyConsistent ||
19681 !DAG.getTargetLoweringInfo().isTypeLegal(VT)) {
19682 SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl,
19683 cast<AtomicSDNode>(Node)->getMemoryVT(),
19684 Node->getOperand(0),
19685 Node->getOperand(1), Node->getOperand(2),
19686 cast<AtomicSDNode>(Node)->getMemOperand(),
19687 cast<AtomicSDNode>(Node)->getOrdering(),
19688 cast<AtomicSDNode>(Node)->getSynchScope());
19689 return Swap.getValue(1);
19691 // Other atomic stores have a simple pattern.
19695 static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {
19696 EVT VT = Op.getNode()->getSimpleValueType(0);
19698 // Let legalize expand this if it isn't a legal type yet.
19699 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
19702 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
19705 bool ExtraOp = false;
19706 switch (Op.getOpcode()) {
19707 default: llvm_unreachable("Invalid code");
19708 case ISD::ADDC: Opc = X86ISD::ADD; break;
19709 case ISD::ADDE: Opc = X86ISD::ADC; ExtraOp = true; break;
19710 case ISD::SUBC: Opc = X86ISD::SUB; break;
19711 case ISD::SUBE: Opc = X86ISD::SBB; ExtraOp = true; break;
19715 return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0),
19717 return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0),
19718 Op.getOperand(1), Op.getOperand(2));
19721 static SDValue LowerFSINCOS(SDValue Op, const X86Subtarget *Subtarget,
19722 SelectionDAG &DAG) {
19723 assert(Subtarget->isTargetDarwin() && Subtarget->is64Bit());
19725 // For MacOSX, we want to call an alternative entry point: __sincos_stret,
19726 // which returns the values as { float, float } (in XMM0) or
19727 // { double, double } (which is returned in XMM0, XMM1).
19729 SDValue Arg = Op.getOperand(0);
19730 EVT ArgVT = Arg.getValueType();
19731 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
19733 TargetLowering::ArgListTy Args;
19734 TargetLowering::ArgListEntry Entry;
19738 Entry.isSExt = false;
19739 Entry.isZExt = false;
19740 Args.push_back(Entry);
19742 bool isF64 = ArgVT == MVT::f64;
19743 // Only optimize x86_64 for now. i386 is a bit messy. For f32,
19744 // the small struct {f32, f32} is returned in (eax, edx). For f64,
19745 // the results are returned via SRet in memory.
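// On x86-64 the float variant returns the { float, float } pair packed into
// xmm0 (sin in lane 0, cos in lane 1), which is why it is modelled below as a
// call returning <4 x float> and the two lanes are extracted; the double
// variant returns sin/cos in xmm0/xmm1 and is used directly.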
19746 const char *LibcallName = isF64 ? "__sincos_stret" : "__sincosf_stret";
19747 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
19748 SDValue Callee = DAG.getExternalSymbol(LibcallName, TLI.getPointerTy());
19750 Type *RetTy = isF64
19751 ? (Type*)StructType::get(ArgTy, ArgTy, nullptr)
19752 : (Type*)VectorType::get(ArgTy, 4);
19754 TargetLowering::CallLoweringInfo CLI(DAG);
19755 CLI.setDebugLoc(dl).setChain(DAG.getEntryNode())
19756 .setCallee(CallingConv::C, RetTy, Callee, std::move(Args), 0);
19758 std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
19761 // Returned in xmm0 and xmm1.
19762 return CallResult.first;
19764 // Returned in bits 0:31 and 32:64 xmm0.
19765 SDValue SinVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
19766 CallResult.first, DAG.getIntPtrConstant(0));
19767 SDValue CosVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
19768 CallResult.first, DAG.getIntPtrConstant(1));
19769 SDVTList Tys = DAG.getVTList(ArgVT, ArgVT);
19770 return DAG.getNode(ISD::MERGE_VALUES, dl, Tys, SinVal, CosVal);
19773 /// LowerOperation - Provide custom lowering hooks for some operations.
19775 SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
19776 switch (Op.getOpcode()) {
19777 default: llvm_unreachable("Should not custom lower this!");
19778 case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op,DAG);
19779 case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, Subtarget, DAG);
19780 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
19781 return LowerCMP_SWAP(Op, Subtarget, DAG);
19782 case ISD::CTPOP: return LowerCTPOP(Op, Subtarget, DAG);
19783 case ISD::ATOMIC_LOAD_SUB: return LowerLOAD_SUB(Op,DAG);
19784 case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op,DAG);
19785 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
19786 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
19787 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
19788 case ISD::VSELECT: return LowerVSELECT(Op, DAG);
19789 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
19790 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
19791 case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op,Subtarget,DAG);
19792 case ISD::INSERT_SUBVECTOR: return LowerINSERT_SUBVECTOR(Op, Subtarget,DAG);
19793 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG);
19794 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
19795 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
19796 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
19797 case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG);
19798 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
19799 case ISD::SHL_PARTS:
19800 case ISD::SRA_PARTS:
19801 case ISD::SRL_PARTS: return LowerShiftParts(Op, DAG);
19802 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
19803 case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
19804 case ISD::TRUNCATE: return LowerTRUNCATE(Op, DAG);
19805 case ISD::ZERO_EXTEND: return LowerZERO_EXTEND(Op, Subtarget, DAG);
19806 case ISD::SIGN_EXTEND: return LowerSIGN_EXTEND(Op, Subtarget, DAG);
19807 case ISD::ANY_EXTEND: return LowerANY_EXTEND(Op, Subtarget, DAG);
19808 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG);
19809 case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG);
19810 case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
19811 case ISD::LOAD: return LowerExtendedLoad(Op, Subtarget, DAG);
19813 case ISD::FNEG: return LowerFABSorFNEG(Op, DAG);
19814 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG);
19815 case ISD::FGETSIGN: return LowerFGETSIGN(Op, DAG);
19816 case ISD::SETCC: return LowerSETCC(Op, DAG);
19817 case ISD::SELECT: return LowerSELECT(Op, DAG);
19818 case ISD::BRCOND: return LowerBRCOND(Op, DAG);
19819 case ISD::JumpTable: return LowerJumpTable(Op, DAG);
19820 case ISD::VASTART: return LowerVASTART(Op, DAG);
19821 case ISD::VAARG: return LowerVAARG(Op, DAG);
19822 case ISD::VACOPY: return LowerVACOPY(Op, Subtarget, DAG);
19823 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, Subtarget, DAG);
19824 case ISD::INTRINSIC_VOID:
19825 case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, Subtarget, DAG);
19826 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
19827 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
19828 case ISD::FRAME_TO_ARGS_OFFSET:
19829 return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
19830 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
19831 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
19832 case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG);
19833 case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG);
19834 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
19835 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
19836 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
19837 case ISD::CTLZ: return LowerCTLZ(Op, DAG);
19838 case ISD::CTLZ_ZERO_UNDEF: return LowerCTLZ_ZERO_UNDEF(Op, DAG);
19839 case ISD::CTTZ: return LowerCTTZ(Op, DAG);
19840 case ISD::MUL: return LowerMUL(Op, Subtarget, DAG);
19841 case ISD::UMUL_LOHI:
19842 case ISD::SMUL_LOHI: return LowerMUL_LOHI(Op, Subtarget, DAG);
19845 case ISD::SHL: return LowerShift(Op, Subtarget, DAG);
19851 case ISD::UMULO: return LowerXALUO(Op, DAG);
19852 case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, Subtarget,DAG);
19853 case ISD::BITCAST: return LowerBITCAST(Op, Subtarget, DAG);
19857 case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
19858 case ISD::ADD: return LowerADD(Op, DAG);
19859 case ISD::SUB: return LowerSUB(Op, DAG);
19860 case ISD::FSINCOS: return LowerFSINCOS(Op, Subtarget, DAG);
19864 /// ReplaceNodeResults - Replace a node with an illegal result type
19865 /// with a new node built out of custom code.
19866 void X86TargetLowering::ReplaceNodeResults(SDNode *N,
19867 SmallVectorImpl<SDValue>&Results,
19868 SelectionDAG &DAG) const {
19870 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
19871 switch (N->getOpcode()) {
19873 llvm_unreachable("Do not know how to custom type legalize this operation!");
19874 // We might have generated v2f32 FMIN/FMAX operations. Widen them to v4f32.
19875 case X86ISD::FMINC:
19877 case X86ISD::FMAXC:
19878 case X86ISD::FMAX: {
19879 EVT VT = N->getValueType(0);
19880 if (VT != MVT::v2f32)
19881 llvm_unreachable("Unexpected type (!= v2f32) on FMIN/FMAX.");
19882 SDValue UNDEF = DAG.getUNDEF(VT);
19883 SDValue LHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
19884 N->getOperand(0), UNDEF);
19885 SDValue RHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
19886 N->getOperand(1), UNDEF);
19887 Results.push_back(DAG.getNode(N->getOpcode(), dl, MVT::v4f32, LHS, RHS));
19890 case ISD::SIGN_EXTEND_INREG:
19895 // We don't want to expand or promote these.
19902 case ISD::UDIVREM: {
19903 SDValue V = LowerWin64_i128OP(SDValue(N,0), DAG);
19904 Results.push_back(V);
19907 case ISD::FP_TO_SINT:
19908 case ISD::FP_TO_UINT: {
19909 bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT;
19911 if (!IsSigned && !isIntegerTypeFTOL(SDValue(N, 0).getValueType()))
19914 std::pair<SDValue,SDValue> Vals =
19915 FP_TO_INTHelper(SDValue(N, 0), DAG, IsSigned, /*IsReplace=*/ true);
19916 SDValue FIST = Vals.first, StackSlot = Vals.second;
19917 if (FIST.getNode()) {
19918 EVT VT = N->getValueType(0);
19919 // Return a load from the stack slot.
19920 if (StackSlot.getNode())
19921 Results.push_back(DAG.getLoad(VT, dl, FIST, StackSlot,
19922 MachinePointerInfo(),
19923 false, false, false, 0));
19925 Results.push_back(FIST);
19929 case ISD::UINT_TO_FP: {
19930 assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
19931 if (N->getOperand(0).getValueType() != MVT::v2i32 ||
19932 N->getValueType(0) != MVT::v2f32)
19934 SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v2i64,
19936 SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL),
19938 SDValue VBias = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2f64, Bias, Bias);
19939 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64, ZExtIn,
19940 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, VBias));
19941 Or = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Or);
19942 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, Or, VBias);
19943 Results.push_back(DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, Sub));
19946 case ISD::FP_ROUND: {
19947 if (!TLI.isTypeLegal(N->getOperand(0).getValueType()))
19949 SDValue V = DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, N->getOperand(0));
19950 Results.push_back(V);
19953 case ISD::INTRINSIC_W_CHAIN: {
19954 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
19956 default : llvm_unreachable("Do not know how to custom type "
19957 "legalize this intrinsic operation!");
19958 case Intrinsic::x86_rdtsc:
19959 return getReadTimeStampCounter(N, dl, X86ISD::RDTSC_DAG, DAG, Subtarget,
19961 case Intrinsic::x86_rdtscp:
19962 return getReadTimeStampCounter(N, dl, X86ISD::RDTSCP_DAG, DAG, Subtarget,
19964 case Intrinsic::x86_rdpmc:
19965 return getReadPerformanceCounter(N, dl, DAG, Subtarget, Results);
19968 case ISD::READCYCLECOUNTER: {
19969 return getReadTimeStampCounter(N, dl, X86ISD::RDTSC_DAG, DAG, Subtarget,
19972 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: {
19973 EVT T = N->getValueType(0);
19974 assert((T == MVT::i64 || T == MVT::i128) && "can only expand cmpxchg pair");
19975 bool Regs64bit = T == MVT::i128;
19976 EVT HalfT = Regs64bit ? MVT::i64 : MVT::i32;
19977 SDValue cpInL, cpInH;
19978 cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
19979 DAG.getConstant(0, HalfT));
19980 cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
19981 DAG.getConstant(1, HalfT));
19982 cpInL = DAG.getCopyToReg(N->getOperand(0), dl,
19983 Regs64bit ? X86::RAX : X86::EAX,
19985 cpInH = DAG.getCopyToReg(cpInL.getValue(0), dl,
19986 Regs64bit ? X86::RDX : X86::EDX,
19987 cpInH, cpInL.getValue(1));
19988 SDValue swapInL, swapInH;
19989 swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
19990 DAG.getConstant(0, HalfT));
19991 swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
19992 DAG.getConstant(1, HalfT));
19993 swapInL = DAG.getCopyToReg(cpInH.getValue(0), dl,
19994 Regs64bit ? X86::RBX : X86::EBX,
19995 swapInL, cpInH.getValue(1));
19996 swapInH = DAG.getCopyToReg(swapInL.getValue(0), dl,
19997 Regs64bit ? X86::RCX : X86::ECX,
19998 swapInH, swapInL.getValue(1));
19999 SDValue Ops[] = { swapInH.getValue(0),
20001 swapInH.getValue(1) };
20002 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
20003 MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
20004 unsigned Opcode = Regs64bit ? X86ISD::LCMPXCHG16_DAG :
20005 X86ISD::LCMPXCHG8_DAG;
20006 SDValue Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, Ops, T, MMO);
20007 SDValue cpOutL = DAG.getCopyFromReg(Result.getValue(0), dl,
20008 Regs64bit ? X86::RAX : X86::EAX,
20009 HalfT, Result.getValue(1));
20010 SDValue cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), dl,
20011 Regs64bit ? X86::RDX : X86::EDX,
20012 HalfT, cpOutL.getValue(2));
20013 SDValue OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0)};
20015 SDValue EFLAGS = DAG.getCopyFromReg(cpOutH.getValue(1), dl, X86::EFLAGS,
20016 MVT::i32, cpOutH.getValue(2));
20018 SDValue Success = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
20019 DAG.getConstant(X86::COND_E, MVT::i8), EFLAGS);
20020 Success = DAG.getZExtOrTrunc(Success, dl, N->getValueType(1));
20022 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, T, OpsF));
20023 Results.push_back(Success);
20024 Results.push_back(EFLAGS.getValue(1));
20027 case ISD::ATOMIC_SWAP:
20028 case ISD::ATOMIC_LOAD_ADD:
20029 case ISD::ATOMIC_LOAD_SUB:
20030 case ISD::ATOMIC_LOAD_AND:
20031 case ISD::ATOMIC_LOAD_OR:
20032 case ISD::ATOMIC_LOAD_XOR:
20033 case ISD::ATOMIC_LOAD_NAND:
20034 case ISD::ATOMIC_LOAD_MIN:
20035 case ISD::ATOMIC_LOAD_MAX:
20036 case ISD::ATOMIC_LOAD_UMIN:
20037 case ISD::ATOMIC_LOAD_UMAX:
20038 case ISD::ATOMIC_LOAD: {
20039 // Delegate to generic TypeLegalization. Situations we can really handle
20040 // should have already been dealt with by AtomicExpandPass.cpp.
20043 case ISD::BITCAST: {
20044 assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
20045 EVT DstVT = N->getValueType(0);
20046 EVT SrcVT = N->getOperand(0)->getValueType(0);
20048 if (SrcVT != MVT::f64 ||
20049 (DstVT != MVT::v2i32 && DstVT != MVT::v4i16 && DstVT != MVT::v8i8))
20052 unsigned NumElts = DstVT.getVectorNumElements();
20053 EVT SVT = DstVT.getVectorElementType();
20054 EVT WiderVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumElts * 2);
20055 SDValue Expanded = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
20056 MVT::v2f64, N->getOperand(0));
20057 SDValue ToVecInt = DAG.getNode(ISD::BITCAST, dl, WiderVT, Expanded);
20059 if (ExperimentalVectorWideningLegalization) {
20060 // If we are legalizing vectors by widening, we already have the desired
20061 // legal vector type, just return it.
20062 Results.push_back(ToVecInt);
20066 SmallVector<SDValue, 8> Elts;
20067 for (unsigned i = 0, e = NumElts; i != e; ++i)
20068 Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SVT,
20069 ToVecInt, DAG.getIntPtrConstant(i)));
20071 Results.push_back(DAG.getNode(ISD::BUILD_VECTOR, dl, DstVT, Elts));
20076 const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
20078 default: return nullptr;
20079 case X86ISD::BSF: return "X86ISD::BSF";
20080 case X86ISD::BSR: return "X86ISD::BSR";
20081 case X86ISD::SHLD: return "X86ISD::SHLD";
20082 case X86ISD::SHRD: return "X86ISD::SHRD";
20083 case X86ISD::FAND: return "X86ISD::FAND";
20084 case X86ISD::FANDN: return "X86ISD::FANDN";
20085 case X86ISD::FOR: return "X86ISD::FOR";
20086 case X86ISD::FXOR: return "X86ISD::FXOR";
20087 case X86ISD::FSRL: return "X86ISD::FSRL";
20088 case X86ISD::FILD: return "X86ISD::FILD";
20089 case X86ISD::FILD_FLAG: return "X86ISD::FILD_FLAG";
20090 case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM";
20091 case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM";
20092 case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM";
20093 case X86ISD::FLD: return "X86ISD::FLD";
20094 case X86ISD::FST: return "X86ISD::FST";
20095 case X86ISD::CALL: return "X86ISD::CALL";
20096 case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG";
20097 case X86ISD::RDTSCP_DAG: return "X86ISD::RDTSCP_DAG";
20098 case X86ISD::RDPMC_DAG: return "X86ISD::RDPMC_DAG";
20099 case X86ISD::BT: return "X86ISD::BT";
20100 case X86ISD::CMP: return "X86ISD::CMP";
20101 case X86ISD::COMI: return "X86ISD::COMI";
20102 case X86ISD::UCOMI: return "X86ISD::UCOMI";
20103 case X86ISD::CMPM: return "X86ISD::CMPM";
20104 case X86ISD::CMPMU: return "X86ISD::CMPMU";
20105 case X86ISD::SETCC: return "X86ISD::SETCC";
20106 case X86ISD::SETCC_CARRY: return "X86ISD::SETCC_CARRY";
20107 case X86ISD::FSETCC: return "X86ISD::FSETCC";
20108 case X86ISD::CMOV: return "X86ISD::CMOV";
20109 case X86ISD::BRCOND: return "X86ISD::BRCOND";
20110 case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG";
20111 case X86ISD::REP_STOS: return "X86ISD::REP_STOS";
20112 case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS";
20113 case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg";
20114 case X86ISD::Wrapper: return "X86ISD::Wrapper";
20115 case X86ISD::WrapperRIP: return "X86ISD::WrapperRIP";
20116 case X86ISD::PEXTRB: return "X86ISD::PEXTRB";
20117 case X86ISD::PEXTRW: return "X86ISD::PEXTRW";
20118 case X86ISD::INSERTPS: return "X86ISD::INSERTPS";
20119 case X86ISD::PINSRB: return "X86ISD::PINSRB";
20120 case X86ISD::PINSRW: return "X86ISD::PINSRW";
20121 case X86ISD::PSHUFB: return "X86ISD::PSHUFB";
20122 case X86ISD::ANDNP: return "X86ISD::ANDNP";
20123 case X86ISD::PSIGN: return "X86ISD::PSIGN";
20124 case X86ISD::BLENDI: return "X86ISD::BLENDI";
20125 case X86ISD::SHRUNKBLEND: return "X86ISD::SHRUNKBLEND";
20126 case X86ISD::SUBUS: return "X86ISD::SUBUS";
20127 case X86ISD::HADD: return "X86ISD::HADD";
20128 case X86ISD::HSUB: return "X86ISD::HSUB";
20129 case X86ISD::FHADD: return "X86ISD::FHADD";
20130 case X86ISD::FHSUB: return "X86ISD::FHSUB";
20131 case X86ISD::UMAX: return "X86ISD::UMAX";
20132 case X86ISD::UMIN: return "X86ISD::UMIN";
20133 case X86ISD::SMAX: return "X86ISD::SMAX";
20134 case X86ISD::SMIN: return "X86ISD::SMIN";
20135 case X86ISD::FMAX: return "X86ISD::FMAX";
20136 case X86ISD::FMIN: return "X86ISD::FMIN";
20137 case X86ISD::FMAXC: return "X86ISD::FMAXC";
20138 case X86ISD::FMINC: return "X86ISD::FMINC";
20139 case X86ISD::FRSQRT: return "X86ISD::FRSQRT";
20140 case X86ISD::FRCP: return "X86ISD::FRCP";
20141 case X86ISD::TLSADDR: return "X86ISD::TLSADDR";
20142 case X86ISD::TLSBASEADDR: return "X86ISD::TLSBASEADDR";
20143 case X86ISD::TLSCALL: return "X86ISD::TLSCALL";
20144 case X86ISD::EH_SJLJ_SETJMP: return "X86ISD::EH_SJLJ_SETJMP";
20145 case X86ISD::EH_SJLJ_LONGJMP: return "X86ISD::EH_SJLJ_LONGJMP";
20146 case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN";
20147 case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN";
20148 case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m";
20149 case X86ISD::FNSTSW16r: return "X86ISD::FNSTSW16r";
20150 case X86ISD::LCMPXCHG_DAG: return "X86ISD::LCMPXCHG_DAG";
20151 case X86ISD::LCMPXCHG8_DAG: return "X86ISD::LCMPXCHG8_DAG";
20152 case X86ISD::LCMPXCHG16_DAG: return "X86ISD::LCMPXCHG16_DAG";
20153 case X86ISD::VZEXT_MOVL: return "X86ISD::VZEXT_MOVL";
20154 case X86ISD::VZEXT_LOAD: return "X86ISD::VZEXT_LOAD";
20155 case X86ISD::VZEXT: return "X86ISD::VZEXT";
20156 case X86ISD::VSEXT: return "X86ISD::VSEXT";
20157 case X86ISD::VTRUNC: return "X86ISD::VTRUNC";
20158 case X86ISD::VTRUNCM: return "X86ISD::VTRUNCM";
20159 case X86ISD::VINSERT: return "X86ISD::VINSERT";
20160 case X86ISD::VFPEXT: return "X86ISD::VFPEXT";
20161 case X86ISD::VFPROUND: return "X86ISD::VFPROUND";
20162 case X86ISD::VSHLDQ: return "X86ISD::VSHLDQ";
20163 case X86ISD::VSRLDQ: return "X86ISD::VSRLDQ";
20164 case X86ISD::VSHL: return "X86ISD::VSHL";
20165 case X86ISD::VSRL: return "X86ISD::VSRL";
20166 case X86ISD::VSRA: return "X86ISD::VSRA";
20167 case X86ISD::VSHLI: return "X86ISD::VSHLI";
20168 case X86ISD::VSRLI: return "X86ISD::VSRLI";
20169 case X86ISD::VSRAI: return "X86ISD::VSRAI";
20170 case X86ISD::CMPP: return "X86ISD::CMPP";
20171 case X86ISD::PCMPEQ: return "X86ISD::PCMPEQ";
20172 case X86ISD::PCMPGT: return "X86ISD::PCMPGT";
20173 case X86ISD::PCMPEQM: return "X86ISD::PCMPEQM";
20174 case X86ISD::PCMPGTM: return "X86ISD::PCMPGTM";
20175 case X86ISD::ADD: return "X86ISD::ADD";
20176 case X86ISD::SUB: return "X86ISD::SUB";
20177 case X86ISD::ADC: return "X86ISD::ADC";
20178 case X86ISD::SBB: return "X86ISD::SBB";
20179 case X86ISD::SMUL: return "X86ISD::SMUL";
20180 case X86ISD::UMUL: return "X86ISD::UMUL";
20181 case X86ISD::SMUL8: return "X86ISD::SMUL8";
20182 case X86ISD::UMUL8: return "X86ISD::UMUL8";
20183 case X86ISD::SDIVREM8_SEXT_HREG: return "X86ISD::SDIVREM8_SEXT_HREG";
20184 case X86ISD::UDIVREM8_ZEXT_HREG: return "X86ISD::UDIVREM8_ZEXT_HREG";
20185 case X86ISD::INC: return "X86ISD::INC";
20186 case X86ISD::DEC: return "X86ISD::DEC";
20187 case X86ISD::OR: return "X86ISD::OR";
20188 case X86ISD::XOR: return "X86ISD::XOR";
20189 case X86ISD::AND: return "X86ISD::AND";
20190 case X86ISD::BEXTR: return "X86ISD::BEXTR";
20191 case X86ISD::MUL_IMM: return "X86ISD::MUL_IMM";
20192 case X86ISD::PTEST: return "X86ISD::PTEST";
20193 case X86ISD::TESTP: return "X86ISD::TESTP";
20194 case X86ISD::TESTM: return "X86ISD::TESTM";
20195 case X86ISD::TESTNM: return "X86ISD::TESTNM";
20196 case X86ISD::KORTEST: return "X86ISD::KORTEST";
20197 case X86ISD::PACKSS: return "X86ISD::PACKSS";
20198 case X86ISD::PACKUS: return "X86ISD::PACKUS";
20199 case X86ISD::PALIGNR: return "X86ISD::PALIGNR";
20200 case X86ISD::VALIGN: return "X86ISD::VALIGN";
20201 case X86ISD::PSHUFD: return "X86ISD::PSHUFD";
20202 case X86ISD::PSHUFHW: return "X86ISD::PSHUFHW";
20203 case X86ISD::PSHUFLW: return "X86ISD::PSHUFLW";
20204 case X86ISD::SHUFP: return "X86ISD::SHUFP";
20205 case X86ISD::MOVLHPS: return "X86ISD::MOVLHPS";
20206 case X86ISD::MOVLHPD: return "X86ISD::MOVLHPD";
20207 case X86ISD::MOVHLPS: return "X86ISD::MOVHLPS";
20208 case X86ISD::MOVLPS: return "X86ISD::MOVLPS";
20209 case X86ISD::MOVLPD: return "X86ISD::MOVLPD";
20210 case X86ISD::MOVDDUP: return "X86ISD::MOVDDUP";
20211 case X86ISD::MOVSHDUP: return "X86ISD::MOVSHDUP";
20212 case X86ISD::MOVSLDUP: return "X86ISD::MOVSLDUP";
20213 case X86ISD::MOVSD: return "X86ISD::MOVSD";
20214 case X86ISD::MOVSS: return "X86ISD::MOVSS";
20215 case X86ISD::UNPCKL: return "X86ISD::UNPCKL";
20216 case X86ISD::UNPCKH: return "X86ISD::UNPCKH";
20217 case X86ISD::VBROADCAST: return "X86ISD::VBROADCAST";
20218 case X86ISD::VBROADCASTM: return "X86ISD::VBROADCASTM";
20219 case X86ISD::VEXTRACT: return "X86ISD::VEXTRACT";
20220 case X86ISD::VPERMILPI: return "X86ISD::VPERMILPI";
20221 case X86ISD::VPERM2X128: return "X86ISD::VPERM2X128";
20222 case X86ISD::VPERMV: return "X86ISD::VPERMV";
20223 case X86ISD::VPERMV3: return "X86ISD::VPERMV3";
20224 case X86ISD::VPERMIV3: return "X86ISD::VPERMIV3";
20225 case X86ISD::VPERMI: return "X86ISD::VPERMI";
20226 case X86ISD::PMULUDQ: return "X86ISD::PMULUDQ";
20227 case X86ISD::PMULDQ: return "X86ISD::PMULDQ";
20228 case X86ISD::VASTART_SAVE_XMM_REGS: return "X86ISD::VASTART_SAVE_XMM_REGS";
20229 case X86ISD::VAARG_64: return "X86ISD::VAARG_64";
20230 case X86ISD::WIN_ALLOCA: return "X86ISD::WIN_ALLOCA";
20231 case X86ISD::MEMBARRIER: return "X86ISD::MEMBARRIER";
20232 case X86ISD::SEG_ALLOCA: return "X86ISD::SEG_ALLOCA";
20233 case X86ISD::WIN_FTOL: return "X86ISD::WIN_FTOL";
20234 case X86ISD::SAHF: return "X86ISD::SAHF";
20235 case X86ISD::RDRAND: return "X86ISD::RDRAND";
20236 case X86ISD::RDSEED: return "X86ISD::RDSEED";
20237 case X86ISD::FMADD: return "X86ISD::FMADD";
20238 case X86ISD::FMSUB: return "X86ISD::FMSUB";
20239 case X86ISD::FNMADD: return "X86ISD::FNMADD";
20240 case X86ISD::FNMSUB: return "X86ISD::FNMSUB";
20241 case X86ISD::FMADDSUB: return "X86ISD::FMADDSUB";
20242 case X86ISD::FMSUBADD: return "X86ISD::FMSUBADD";
20243 case X86ISD::PCMPESTRI: return "X86ISD::PCMPESTRI";
20244 case X86ISD::PCMPISTRI: return "X86ISD::PCMPISTRI";
20245 case X86ISD::XTEST: return "X86ISD::XTEST";
20246 case X86ISD::COMPRESS: return "X86ISD::COMPRESS";
20247 case X86ISD::EXPAND: return "X86ISD::EXPAND";
20248 case X86ISD::SELECT: return "X86ISD::SELECT";
20249 case X86ISD::ADDSUB: return "X86ISD::ADDSUB";
20250 case X86ISD::RCP28: return "X86ISD::RCP28";
20251 case X86ISD::RSQRT28: return "X86ISD::RSQRT28";
20255 // isLegalAddressingMode - Return true if the addressing mode represented
20256 // by AM is legal for this target, for a load/store of the specified type.
20257 bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM,
20259 // X86 supports extremely general addressing modes.
20260 CodeModel::Model M = getTargetMachine().getCodeModel();
20261 Reloc::Model R = getTargetMachine().getRelocationModel();
20263 // X86 allows a sign-extended 32-bit immediate field as a displacement.
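// For reference (illustrative, not exhaustive): the most general form this
// hook may accept is BaseGV + BaseReg + Scale*IndexReg + Disp, e.g. a
// hypothetical access like arr[i] with 4-byte elements folding into a single
// [base + 4*index + disp32] memory operand, subject to the code model,
// relocation model and scale checks below.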
20264 if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != nullptr))
20269 Subtarget->ClassifyGlobalReference(AM.BaseGV, getTargetMachine());
20271 // If a reference to this global requires an extra load, we can't fold it.
20272 if (isGlobalStubReference(GVFlags))
20275 // If BaseGV requires a register for the PIC base, we cannot also have a
20276 // BaseReg specified.
20277 if (AM.HasBaseReg && isGlobalRelativeToPICBase(GVFlags))
20280 // If lower 4G is not available, then we must use rip-relative addressing.
20281 if ((M != CodeModel::Small || R != Reloc::Static) &&
20282 Subtarget->is64Bit() && (AM.BaseOffs || AM.Scale > 1))
20286 switch (AM.Scale) {
20292 // These scales always work.
20297 // These scales are formed with basereg+scalereg. Only accept if there is
20302 default: // Other stuff never works.
20309 bool X86TargetLowering::isVectorShiftByScalarCheap(Type *Ty) const {
20310 unsigned Bits = Ty->getScalarSizeInBits();
// 8-bit shifts are always expensive, and the versions with a scalar amount
// aren't much cheaper than those without one.
20317 // On AVX2 there are new vpsllv[dq] instructions (and other shifts), that make
20318 // variable shifts just as cheap as scalar ones.
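// Illustrative example: with AVX2, a uniform 32-bit vector shift such as
// "shl <8 x i32> %v, %amt.splat" can be emitted as a single VPSLLVD (or a
// VPSLLD with the count in an XMM register), so there is no penalty versus
// the scalar-amount form.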
20319 if (Subtarget->hasInt256() && (Bits == 32 || Bits == 64))
20322 // Otherwise, it's significantly cheaper to shift by a scalar amount than by a
20323 // fully general vector.
20327 bool X86TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
20328 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
20330 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
20331 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
20332 return NumBits1 > NumBits2;
20335 bool X86TargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
20336 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
20339 if (!isTypeLegal(EVT::getEVT(Ty1)))
20342 assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop");
20344 // Assuming the caller doesn't have a zeroext or signext return parameter,
20345 // truncation all the way down to i1 is valid.
20349 bool X86TargetLowering::isLegalICmpImmediate(int64_t Imm) const {
20350 return isInt<32>(Imm);
20353 bool X86TargetLowering::isLegalAddImmediate(int64_t Imm) const {
20354 // Can also use sub to handle negated immediates.
20355 return isInt<32>(Imm);
20358 bool X86TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
20359 if (!VT1.isInteger() || !VT2.isInteger())
20361 unsigned NumBits1 = VT1.getSizeInBits();
20362 unsigned NumBits2 = VT2.getSizeInBits();
20363 return NumBits1 > NumBits2;
20366 bool X86TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
20367 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
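// Illustrative sketch: a 32-bit def such as "movl %edi, %eax" already clears
// bits 63:32 of RAX, so a subsequent i32 -> i64 zero-extension needs no
// extra instruction.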
20368 return Ty1->isIntegerTy(32) && Ty2->isIntegerTy(64) && Subtarget->is64Bit();
20371 bool X86TargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
20372 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
20373 return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget->is64Bit();
20376 bool X86TargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
20377 EVT VT1 = Val.getValueType();
20378 if (isZExtFree(VT1, VT2))
20381 if (Val.getOpcode() != ISD::LOAD)
20384 if (!VT1.isSimple() || !VT1.isInteger() ||
20385 !VT2.isSimple() || !VT2.isInteger())
20388 switch (VT1.getSimpleVT().SimpleTy) {
20393 // X86 has 8, 16, and 32-bit zero-extending loads.
20401 X86TargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
20402 if (!(Subtarget->hasFMA() || Subtarget->hasFMA4()))
20405 VT = VT.getScalarType();
20407 if (!VT.isSimple())
20410 switch (VT.getSimpleVT().SimpleTy) {
20421 bool X86TargetLowering::isNarrowingProfitable(EVT VT1, EVT VT2) const {
20422 // i16 instructions are longer (0x66 prefix) and potentially slower.
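// Illustrative example: "addw $1, %ax" needs a 0x66 operand-size prefix
// (and, with a 16-bit immediate, can trigger length-changing-prefix stalls
// on some microarchitectures), whereas the i32 form "addl $1, %eax" does
// not.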
20423 return !(VT1 == MVT::i32 && VT2 == MVT::i16);
20426 /// isShuffleMaskLegal - Targets can use this to indicate that they only
20427 /// support *some* VECTOR_SHUFFLE operations, those with specific masks.
20428 /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
20429 /// are assumed to be legal.
20431 X86TargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M,
20433 if (!VT.isSimple())
20436 MVT SVT = VT.getSimpleVT();
20438 // Very little shuffling can be done for 64-bit vectors right now.
20439 if (VT.getSizeInBits() == 64)
20442 // This is an experimental legality test that is tailored to match the
20443 // legality test of the experimental lowering more closely. They are gated
20444 // separately to ease testing of performance differences.
20445 if (ExperimentalVectorShuffleLegality)
20446 // We only care that the types being shuffled are legal. The lowering can
20447 // handle any possible shuffle mask that results.
20448 return isTypeLegal(SVT);
// If this is a single-input shuffle with no 128-bit lane crossings, we can
// lower it into pshufb.
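// Illustrative example: a v16i8 shuffle such as <0,2,4,6,8,10,12,14,u,...>
// that reads only its first input and stays within a 128-bit lane can be
// lowered to one PSHUFB with a constant control vector.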
20452 if ((SVT.is128BitVector() && Subtarget->hasSSSE3()) ||
20453 (SVT.is256BitVector() && Subtarget->hasInt256())) {
20454 bool isLegal = true;
20455 for (unsigned I = 0, E = M.size(); I != E; ++I) {
20456 if (M[I] >= (int)SVT.getVectorNumElements() ||
20457 ShuffleCrosses128bitLane(SVT, I, M[I])) {
20466 // FIXME: blends, shifts.
20467 return (SVT.getVectorNumElements() == 2 ||
20468 ShuffleVectorSDNode::isSplatMask(&M[0], VT) ||
20469 isMOVLMask(M, SVT) ||
20470 isCommutedMOVLMask(M, SVT) ||
20471 isMOVHLPSMask(M, SVT) ||
20472 isSHUFPMask(M, SVT) ||
20473 isSHUFPMask(M, SVT, /* Commuted */ true) ||
20474 isPSHUFDMask(M, SVT) ||
20475 isPSHUFDMask(M, SVT, /* SecondOperand */ true) ||
20476 isPSHUFHWMask(M, SVT, Subtarget->hasInt256()) ||
20477 isPSHUFLWMask(M, SVT, Subtarget->hasInt256()) ||
20478 isPALIGNRMask(M, SVT, Subtarget) ||
20479 isUNPCKLMask(M, SVT, Subtarget->hasInt256()) ||
20480 isUNPCKHMask(M, SVT, Subtarget->hasInt256()) ||
20481 isUNPCKL_v_undef_Mask(M, SVT, Subtarget->hasInt256()) ||
20482 isUNPCKH_v_undef_Mask(M, SVT, Subtarget->hasInt256()) ||
20483 isBlendMask(M, SVT, Subtarget->hasSSE41(), Subtarget->hasInt256()) ||
20484 (Subtarget->hasSSE41() && isINSERTPSMask(M, SVT)));
20488 X86TargetLowering::isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask,
20490 if (!VT.isSimple())
20493 MVT SVT = VT.getSimpleVT();
20495 // This is an experimental legality test that is tailored to match the
20496 // legality test of the experimental lowering more closely. They are gated
20497 // separately to ease testing of performance differences.
20498 if (ExperimentalVectorShuffleLegality)
20499 // The new vector shuffle lowering is very good at managing zero-inputs.
20500 return isShuffleMaskLegal(Mask, VT);
20502 unsigned NumElts = SVT.getVectorNumElements();
20503 // FIXME: This collection of masks seems suspect.
20506 if (NumElts == 4 && SVT.is128BitVector()) {
20507 return (isMOVLMask(Mask, SVT) ||
20508 isCommutedMOVLMask(Mask, SVT, true) ||
20509 isSHUFPMask(Mask, SVT) ||
20510 isSHUFPMask(Mask, SVT, /* Commuted */ true) ||
20511 isBlendMask(Mask, SVT, Subtarget->hasSSE41(),
20512 Subtarget->hasInt256()));
20517 //===----------------------------------------------------------------------===//
20518 // X86 Scheduler Hooks
20519 //===----------------------------------------------------------------------===//
20521 /// Utility function to emit xbegin specifying the start of an RTM region.
20522 static MachineBasicBlock *EmitXBegin(MachineInstr *MI, MachineBasicBlock *MBB,
20523 const TargetInstrInfo *TII) {
20524 DebugLoc DL = MI->getDebugLoc();
20526 const BasicBlock *BB = MBB->getBasicBlock();
20527 MachineFunction::iterator I = MBB;
// For the v = xbegin(), we generate
//
// thisMBB:
//  xbegin sinkMBB
//
// mainMBB:
//  eax = -1
//
// sinkMBB:
//  v = eax
20541 MachineBasicBlock *thisMBB = MBB;
20542 MachineFunction *MF = MBB->getParent();
20543 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
20544 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
20545 MF->insert(I, mainMBB);
20546 MF->insert(I, sinkMBB);
20548 // Transfer the remainder of BB and its successor edges to sinkMBB.
20549 sinkMBB->splice(sinkMBB->begin(), MBB,
20550 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
20551 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
// thisMBB:
//  xbegin sinkMBB
//  # fallthrough to mainMBB
//  # abort jumps to sinkMBB
20557 BuildMI(thisMBB, DL, TII->get(X86::XBEGIN_4)).addMBB(sinkMBB);
20558 thisMBB->addSuccessor(mainMBB);
20559 thisMBB->addSuccessor(sinkMBB);
20563 BuildMI(mainMBB, DL, TII->get(X86::MOV32ri), X86::EAX).addImm(-1);
20564 mainMBB->addSuccessor(sinkMBB);
20567 // EAX is live into the sinkMBB
20568 sinkMBB->addLiveIn(X86::EAX);
20569 BuildMI(*sinkMBB, sinkMBB->begin(), DL,
20570 TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
20573 MI->eraseFromParent();
20577 // FIXME: When we get size specific XMM0 registers, i.e. XMM0_V16I8
20578 // or XMM0_V32I8 in AVX all of this code can be replaced with that
20579 // in the .td file.
20580 static MachineBasicBlock *EmitPCMPSTRM(MachineInstr *MI, MachineBasicBlock *BB,
20581 const TargetInstrInfo *TII) {
20583 switch (MI->getOpcode()) {
20584 default: llvm_unreachable("illegal opcode!");
20585 case X86::PCMPISTRM128REG: Opc = X86::PCMPISTRM128rr; break;
20586 case X86::VPCMPISTRM128REG: Opc = X86::VPCMPISTRM128rr; break;
20587 case X86::PCMPISTRM128MEM: Opc = X86::PCMPISTRM128rm; break;
20588 case X86::VPCMPISTRM128MEM: Opc = X86::VPCMPISTRM128rm; break;
20589 case X86::PCMPESTRM128REG: Opc = X86::PCMPESTRM128rr; break;
20590 case X86::VPCMPESTRM128REG: Opc = X86::VPCMPESTRM128rr; break;
20591 case X86::PCMPESTRM128MEM: Opc = X86::PCMPESTRM128rm; break;
20592 case X86::VPCMPESTRM128MEM: Opc = X86::VPCMPESTRM128rm; break;
20595 DebugLoc dl = MI->getDebugLoc();
20596 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc));
20598 unsigned NumArgs = MI->getNumOperands();
20599 for (unsigned i = 1; i < NumArgs; ++i) {
20600 MachineOperand &Op = MI->getOperand(i);
20601 if (!(Op.isReg() && Op.isImplicit()))
20602 MIB.addOperand(Op);
20604 if (MI->hasOneMemOperand())
20605 MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
20607 BuildMI(*BB, MI, dl,
20608 TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
20609 .addReg(X86::XMM0);
20611 MI->eraseFromParent();
20615 // FIXME: Custom handling because TableGen doesn't support multiple implicit
20616 // defs in an instruction pattern
20617 static MachineBasicBlock *EmitPCMPSTRI(MachineInstr *MI, MachineBasicBlock *BB,
20618 const TargetInstrInfo *TII) {
20620 switch (MI->getOpcode()) {
20621 default: llvm_unreachable("illegal opcode!");
20622 case X86::PCMPISTRIREG: Opc = X86::PCMPISTRIrr; break;
20623 case X86::VPCMPISTRIREG: Opc = X86::VPCMPISTRIrr; break;
20624 case X86::PCMPISTRIMEM: Opc = X86::PCMPISTRIrm; break;
20625 case X86::VPCMPISTRIMEM: Opc = X86::VPCMPISTRIrm; break;
20626 case X86::PCMPESTRIREG: Opc = X86::PCMPESTRIrr; break;
20627 case X86::VPCMPESTRIREG: Opc = X86::VPCMPESTRIrr; break;
20628 case X86::PCMPESTRIMEM: Opc = X86::PCMPESTRIrm; break;
20629 case X86::VPCMPESTRIMEM: Opc = X86::VPCMPESTRIrm; break;
20632 DebugLoc dl = MI->getDebugLoc();
20633 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc));
20635 unsigned NumArgs = MI->getNumOperands(); // remove the results
20636 for (unsigned i = 1; i < NumArgs; ++i) {
20637 MachineOperand &Op = MI->getOperand(i);
20638 if (!(Op.isReg() && Op.isImplicit()))
20639 MIB.addOperand(Op);
20641 if (MI->hasOneMemOperand())
20642 MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
20644 BuildMI(*BB, MI, dl,
20645 TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
20648 MI->eraseFromParent();
20652 static MachineBasicBlock *EmitMonitor(MachineInstr *MI, MachineBasicBlock *BB,
20653 const X86Subtarget *Subtarget) {
20654 DebugLoc dl = MI->getDebugLoc();
20655 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
20656 // Address into RAX/EAX, other two args into ECX, EDX.
20657 unsigned MemOpc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r;
20658 unsigned MemReg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
20659 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(MemOpc), MemReg);
20660 for (int i = 0; i < X86::AddrNumOperands; ++i)
20661 MIB.addOperand(MI->getOperand(i));
20663 unsigned ValOps = X86::AddrNumOperands;
20664 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::ECX)
20665 .addReg(MI->getOperand(ValOps).getReg());
20666 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::EDX)
20667 .addReg(MI->getOperand(ValOps+1).getReg());
// The MONITOR instruction itself takes no explicit operands; it reads the
// address and hints implicitly from the registers set up above.
20670 BuildMI(*BB, MI, dl, TII->get(X86::MONITORrrr));
20672 MI->eraseFromParent(); // The pseudo is gone now.
20676 MachineBasicBlock *
20677 X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr *MI,
20678 MachineBasicBlock *MBB) const {
20679 // Emit va_arg instruction on X86-64.
20681 // Operands to this pseudo-instruction:
20682 // 0 ) Output : destination address (reg)
20683 // 1-5) Input : va_list address (addr, i64mem)
20684 // 6 ) ArgSize : Size (in bytes) of vararg type
20685 // 7 ) ArgMode : 0=overflow only, 1=use gp_offset, 2=use fp_offset
20686 // 8 ) Align : Alignment of type
20687 // 9 ) EFLAGS (implicit-def)
20689 assert(MI->getNumOperands() == 10 && "VAARG_64 should have 10 operands!");
20690 assert(X86::AddrNumOperands == 5 && "VAARG_64 assumes 5 address operands");
20692 unsigned DestReg = MI->getOperand(0).getReg();
20693 MachineOperand &Base = MI->getOperand(1);
20694 MachineOperand &Scale = MI->getOperand(2);
20695 MachineOperand &Index = MI->getOperand(3);
20696 MachineOperand &Disp = MI->getOperand(4);
20697 MachineOperand &Segment = MI->getOperand(5);
20698 unsigned ArgSize = MI->getOperand(6).getImm();
20699 unsigned ArgMode = MI->getOperand(7).getImm();
20700 unsigned Align = MI->getOperand(8).getImm();
20702 // Memory Reference
20703 assert(MI->hasOneMemOperand() && "Expected VAARG_64 to have one memoperand");
20704 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
20705 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
20707 // Machine Information
20708 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
20709 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
20710 const TargetRegisterClass *AddrRegClass = getRegClassFor(MVT::i64);
20711 const TargetRegisterClass *OffsetRegClass = getRegClassFor(MVT::i32);
20712 DebugLoc DL = MI->getDebugLoc();
// struct va_list {
//   i32   gp_offset
//   i32   fp_offset
//   i64   overflow_area (address)
//   i64   reg_save_area (address)
// }
// sizeof(va_list) = 24
// alignment(va_list) = 8
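// Byte offsets used below (the standard SysV x86-64 va_list layout, shown
// here for reference): gp_offset at 0, fp_offset at 4, overflow_area at 8,
// reg_save_area at 16.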
20723 unsigned TotalNumIntRegs = 6;
20724 unsigned TotalNumXMMRegs = 8;
20725 bool UseGPOffset = (ArgMode == 1);
20726 bool UseFPOffset = (ArgMode == 2);
20727 unsigned MaxOffset = TotalNumIntRegs * 8 +
20728 (UseFPOffset ? TotalNumXMMRegs * 16 : 0);
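// Worked example: the register save area holds 6 GP registers (6 * 8 = 48
// bytes) optionally followed by 8 XMM registers (8 * 16 = 128 bytes), so
// MaxOffset is 48 when pulling from the GP portion and 176 when pulling
// from the FP portion.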
// Align ArgSize to a multiple of 8.
20731 unsigned ArgSizeA8 = (ArgSize + 7) & ~7;
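// e.g. ArgSize = 12 gives ArgSizeA8 = 16; ArgSize = 8 stays at 8.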
20732 bool NeedsAlign = (Align > 8);
20734 MachineBasicBlock *thisMBB = MBB;
20735 MachineBasicBlock *overflowMBB;
20736 MachineBasicBlock *offsetMBB;
20737 MachineBasicBlock *endMBB;
20739 unsigned OffsetDestReg = 0; // Argument address computed by offsetMBB
20740 unsigned OverflowDestReg = 0; // Argument address computed by overflowMBB
20741 unsigned OffsetReg = 0;
20743 if (!UseGPOffset && !UseFPOffset) {
20744 // If we only pull from the overflow region, we don't create a branch.
20745 // We don't need to alter control flow.
20746 OffsetDestReg = 0; // unused
20747 OverflowDestReg = DestReg;
20749 offsetMBB = nullptr;
20750 overflowMBB = thisMBB;
20753 // First emit code to check if gp_offset (or fp_offset) is below the bound.
20754 // If so, pull the argument from reg_save_area. (branch to offsetMBB)
20755 // If not, pull from overflow_area. (branch to overflowMBB)
// The control flow is:
//
//        thisMBB
//       /       \
//  offsetMBB   overflowMBB
//       \       /
//         endMBB
20765 // Registers for the PHI in endMBB
20766 OffsetDestReg = MRI.createVirtualRegister(AddrRegClass);
20767 OverflowDestReg = MRI.createVirtualRegister(AddrRegClass);
20769 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
20770 MachineFunction *MF = MBB->getParent();
20771 overflowMBB = MF->CreateMachineBasicBlock(LLVM_BB);
20772 offsetMBB = MF->CreateMachineBasicBlock(LLVM_BB);
20773 endMBB = MF->CreateMachineBasicBlock(LLVM_BB);
20775 MachineFunction::iterator MBBIter = MBB;
20778 // Insert the new basic blocks
20779 MF->insert(MBBIter, offsetMBB);
20780 MF->insert(MBBIter, overflowMBB);
20781 MF->insert(MBBIter, endMBB);
20783 // Transfer the remainder of MBB and its successor edges to endMBB.
20784 endMBB->splice(endMBB->begin(), thisMBB,
20785 std::next(MachineBasicBlock::iterator(MI)), thisMBB->end());
20786 endMBB->transferSuccessorsAndUpdatePHIs(thisMBB);
20788 // Make offsetMBB and overflowMBB successors of thisMBB
20789 thisMBB->addSuccessor(offsetMBB);
20790 thisMBB->addSuccessor(overflowMBB);
20792 // endMBB is a successor of both offsetMBB and overflowMBB
20793 offsetMBB->addSuccessor(endMBB);
20794 overflowMBB->addSuccessor(endMBB);
20796 // Load the offset value into a register
20797 OffsetReg = MRI.createVirtualRegister(OffsetRegClass);
20798 BuildMI(thisMBB, DL, TII->get(X86::MOV32rm), OffsetReg)
20802 .addDisp(Disp, UseFPOffset ? 4 : 0)
20803 .addOperand(Segment)
20804 .setMemRefs(MMOBegin, MMOEnd);
20806 // Check if there is enough room left to pull this argument.
20807 BuildMI(thisMBB, DL, TII->get(X86::CMP32ri))
20809 .addImm(MaxOffset + 8 - ArgSizeA8);
20811 // Branch to "overflowMBB" if offset >= max
20812 // Fall through to "offsetMBB" otherwise
20813 BuildMI(thisMBB, DL, TII->get(X86::GetCondBranchFromCond(X86::COND_AE)))
20814 .addMBB(overflowMBB);
20817 // In offsetMBB, emit code to use the reg_save_area.
20819 assert(OffsetReg != 0);
20821 // Read the reg_save_area address.
20822 unsigned RegSaveReg = MRI.createVirtualRegister(AddrRegClass);
20823 BuildMI(offsetMBB, DL, TII->get(X86::MOV64rm), RegSaveReg)
20828 .addOperand(Segment)
20829 .setMemRefs(MMOBegin, MMOEnd);
20831 // Zero-extend the offset
20832 unsigned OffsetReg64 = MRI.createVirtualRegister(AddrRegClass);
20833 BuildMI(offsetMBB, DL, TII->get(X86::SUBREG_TO_REG), OffsetReg64)
20836 .addImm(X86::sub_32bit);
20838 // Add the offset to the reg_save_area to get the final address.
20839 BuildMI(offsetMBB, DL, TII->get(X86::ADD64rr), OffsetDestReg)
20840 .addReg(OffsetReg64)
20841 .addReg(RegSaveReg);
20843 // Compute the offset for the next argument
20844 unsigned NextOffsetReg = MRI.createVirtualRegister(OffsetRegClass);
20845 BuildMI(offsetMBB, DL, TII->get(X86::ADD32ri), NextOffsetReg)
20847 .addImm(UseFPOffset ? 16 : 8);
20849 // Store it back into the va_list.
20850 BuildMI(offsetMBB, DL, TII->get(X86::MOV32mr))
20854 .addDisp(Disp, UseFPOffset ? 4 : 0)
20855 .addOperand(Segment)
20856 .addReg(NextOffsetReg)
20857 .setMemRefs(MMOBegin, MMOEnd);
20860 BuildMI(offsetMBB, DL, TII->get(X86::JMP_1))
20865 // Emit code to use overflow area
20868 // Load the overflow_area address into a register.
20869 unsigned OverflowAddrReg = MRI.createVirtualRegister(AddrRegClass);
20870 BuildMI(overflowMBB, DL, TII->get(X86::MOV64rm), OverflowAddrReg)
20875 .addOperand(Segment)
20876 .setMemRefs(MMOBegin, MMOEnd);
20878 // If we need to align it, do so. Otherwise, just copy the address
20879 // to OverflowDestReg.
20881 // Align the overflow address
20882 assert((Align & (Align-1)) == 0 && "Alignment must be a power of 2");
20883 unsigned TmpReg = MRI.createVirtualRegister(AddrRegClass);
20885 // aligned_addr = (addr + (align-1)) & ~(align-1)
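// e.g. with Align = 16 and addr ending in 0x28, this yields
// (0x...28 + 15) & ~15 = 0x...30.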
20886 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), TmpReg)
20887 .addReg(OverflowAddrReg)
20890 BuildMI(overflowMBB, DL, TII->get(X86::AND64ri32), OverflowDestReg)
20892 .addImm(~(uint64_t)(Align-1));
20894 BuildMI(overflowMBB, DL, TII->get(TargetOpcode::COPY), OverflowDestReg)
20895 .addReg(OverflowAddrReg);
20898 // Compute the next overflow address after this argument.
20899 // (the overflow address should be kept 8-byte aligned)
20900 unsigned NextAddrReg = MRI.createVirtualRegister(AddrRegClass);
20901 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), NextAddrReg)
20902 .addReg(OverflowDestReg)
20903 .addImm(ArgSizeA8);
20905 // Store the new overflow address.
20906 BuildMI(overflowMBB, DL, TII->get(X86::MOV64mr))
20911 .addOperand(Segment)
20912 .addReg(NextAddrReg)
20913 .setMemRefs(MMOBegin, MMOEnd);
20915 // If we branched, emit the PHI to the front of endMBB.
20917 BuildMI(*endMBB, endMBB->begin(), DL,
20918 TII->get(X86::PHI), DestReg)
20919 .addReg(OffsetDestReg).addMBB(offsetMBB)
20920 .addReg(OverflowDestReg).addMBB(overflowMBB);
20923 // Erase the pseudo instruction
20924 MI->eraseFromParent();
20929 MachineBasicBlock *
20930 X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter(
20932 MachineBasicBlock *MBB) const {
20933 // Emit code to save XMM registers to the stack. The ABI says that the
20934 // number of registers to save is given in %al, so it's theoretically
// possible to do an indirect jump trick to avoid saving all of them;
// however, this code takes a simpler approach and just executes all
20937 // of the stores if %al is non-zero. It's less code, and it's probably
20938 // easier on the hardware branch predictor, and stores aren't all that
20939 // expensive anyway.
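// A rough sketch of what gets emitted when not targeting Win64 (illustrative
// only; the registers, frame index and offsets depend on the call site):
//   testb %al, %al
//   je    <EndMBB>
//   movaps %xmm0, <RegSaveFrameIndex + VarArgsFPOffset>
//   movaps %xmm1, <RegSaveFrameIndex + VarArgsFPOffset + 16>
//   ...
// <EndMBB>: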
20941 // Create the new basic blocks. One block contains all the XMM stores,
20942 // and one block is the final destination regardless of whether any
20943 // stores were performed.
20944 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
20945 MachineFunction *F = MBB->getParent();
20946 MachineFunction::iterator MBBIter = MBB;
20948 MachineBasicBlock *XMMSaveMBB = F->CreateMachineBasicBlock(LLVM_BB);
20949 MachineBasicBlock *EndMBB = F->CreateMachineBasicBlock(LLVM_BB);
20950 F->insert(MBBIter, XMMSaveMBB);
20951 F->insert(MBBIter, EndMBB);
20953 // Transfer the remainder of MBB and its successor edges to EndMBB.
20954 EndMBB->splice(EndMBB->begin(), MBB,
20955 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
20956 EndMBB->transferSuccessorsAndUpdatePHIs(MBB);
20958 // The original block will now fall through to the XMM save block.
20959 MBB->addSuccessor(XMMSaveMBB);
20960 // The XMMSaveMBB will fall through to the end block.
20961 XMMSaveMBB->addSuccessor(EndMBB);
20963 // Now add the instructions.
20964 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
20965 DebugLoc DL = MI->getDebugLoc();
20967 unsigned CountReg = MI->getOperand(0).getReg();
20968 int64_t RegSaveFrameIndex = MI->getOperand(1).getImm();
20969 int64_t VarArgsFPOffset = MI->getOperand(2).getImm();
20971 if (!Subtarget->isTargetWin64()) {
20972 // If %al is 0, branch around the XMM save block.
20973 BuildMI(MBB, DL, TII->get(X86::TEST8rr)).addReg(CountReg).addReg(CountReg);
20974 BuildMI(MBB, DL, TII->get(X86::JE_1)).addMBB(EndMBB);
20975 MBB->addSuccessor(EndMBB);
20978 // Make sure the last operand is EFLAGS, which gets clobbered by the branch
20979 // that was just emitted, but clearly shouldn't be "saved".
20980 assert((MI->getNumOperands() <= 3 ||
20981 !MI->getOperand(MI->getNumOperands() - 1).isReg() ||
20982 MI->getOperand(MI->getNumOperands() - 1).getReg() == X86::EFLAGS)
20983 && "Expected last argument to be EFLAGS");
20984 unsigned MOVOpc = Subtarget->hasFp256() ? X86::VMOVAPSmr : X86::MOVAPSmr;
20985 // In the XMM save block, save all the XMM argument registers.
20986 for (int i = 3, e = MI->getNumOperands() - 1; i != e; ++i) {
20987 int64_t Offset = (i - 3) * 16 + VarArgsFPOffset;
20988 MachineMemOperand *MMO =
20989 F->getMachineMemOperand(
20990 MachinePointerInfo::getFixedStack(RegSaveFrameIndex, Offset),
20991 MachineMemOperand::MOStore,
20992 /*Size=*/16, /*Align=*/16);
20993 BuildMI(XMMSaveMBB, DL, TII->get(MOVOpc))
20994 .addFrameIndex(RegSaveFrameIndex)
20995 .addImm(/*Scale=*/1)
20996 .addReg(/*IndexReg=*/0)
20997 .addImm(/*Disp=*/Offset)
20998 .addReg(/*Segment=*/0)
20999 .addReg(MI->getOperand(i).getReg())
21000 .addMemOperand(MMO);
21003 MI->eraseFromParent(); // The pseudo instruction is gone now.
21008 // The EFLAGS operand of SelectItr might be missing a kill marker
21009 // because there were multiple uses of EFLAGS, and ISel didn't know
21010 // which to mark. Figure out whether SelectItr should have had a
// kill marker, and set it if it should. Returns the correct kill marker
// value.
21013 static bool checkAndUpdateEFLAGSKill(MachineBasicBlock::iterator SelectItr,
21014 MachineBasicBlock* BB,
21015 const TargetRegisterInfo* TRI) {
21016 // Scan forward through BB for a use/def of EFLAGS.
21017 MachineBasicBlock::iterator miI(std::next(SelectItr));
21018 for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) {
21019 const MachineInstr& mi = *miI;
21020 if (mi.readsRegister(X86::EFLAGS))
21022 if (mi.definesRegister(X86::EFLAGS))
21023 break; // Should have kill-flag - update below.
// If we hit the end of the block, check whether EFLAGS is live into a
// successor.
21028 if (miI == BB->end()) {
21029 for (MachineBasicBlock::succ_iterator sItr = BB->succ_begin(),
21030 sEnd = BB->succ_end();
21031 sItr != sEnd; ++sItr) {
21032 MachineBasicBlock* succ = *sItr;
21033 if (succ->isLiveIn(X86::EFLAGS))
21038 // We found a def, or hit the end of the basic block and EFLAGS wasn't live
21039 // out. SelectMI should have a kill flag on EFLAGS.
21040 SelectItr->addRegisterKilled(X86::EFLAGS, TRI);
21044 MachineBasicBlock *
21045 X86TargetLowering::EmitLoweredSelect(MachineInstr *MI,
21046 MachineBasicBlock *BB) const {
21047 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21048 DebugLoc DL = MI->getDebugLoc();
21050 // To "insert" a SELECT_CC instruction, we actually have to insert the
21051 // diamond control-flow pattern. The incoming instruction knows the
21052 // destination vreg to set, the condition code register to branch on, the
21053 // true/false values to select between, and a branch opcode to use.
21054 const BasicBlock *LLVM_BB = BB->getBasicBlock();
21055 MachineFunction::iterator It = BB;
//  thisMBB:
//   ...
//   cmpTY ccX, r1, r2
//   bCC sinkMBB
//   fallthrough --> copy0MBB
21064 MachineBasicBlock *thisMBB = BB;
21065 MachineFunction *F = BB->getParent();
21066 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
21067 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
21068 F->insert(It, copy0MBB);
21069 F->insert(It, sinkMBB);
21071 // If the EFLAGS register isn't dead in the terminator, then claim that it's
21072 // live into the sink and copy blocks.
21073 const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
21074 if (!MI->killsRegister(X86::EFLAGS) &&
21075 !checkAndUpdateEFLAGSKill(MI, BB, TRI)) {
21076 copy0MBB->addLiveIn(X86::EFLAGS);
21077 sinkMBB->addLiveIn(X86::EFLAGS);
21080 // Transfer the remainder of BB and its successor edges to sinkMBB.
21081 sinkMBB->splice(sinkMBB->begin(), BB,
21082 std::next(MachineBasicBlock::iterator(MI)), BB->end());
21083 sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
21085 // Add the true and fallthrough blocks as its successors.
21086 BB->addSuccessor(copy0MBB);
21087 BB->addSuccessor(sinkMBB);
21089 // Create the conditional branch instruction.
21091 X86::GetCondBranchFromCond((X86::CondCode)MI->getOperand(3).getImm());
21092 BuildMI(BB, DL, TII->get(Opc)).addMBB(sinkMBB);
//  copy0MBB:
//   %FalseValue = ...
21096 // # fallthrough to sinkMBB
21097 copy0MBB->addSuccessor(sinkMBB);
//  sinkMBB:
//   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
21102 BuildMI(*sinkMBB, sinkMBB->begin(), DL,
21103 TII->get(X86::PHI), MI->getOperand(0).getReg())
21104 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB)
21105 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
21107 MI->eraseFromParent(); // The pseudo instruction is gone now.
21111 MachineBasicBlock *
21112 X86TargetLowering::EmitLoweredSegAlloca(MachineInstr *MI,
21113 MachineBasicBlock *BB) const {
21114 MachineFunction *MF = BB->getParent();
21115 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21116 DebugLoc DL = MI->getDebugLoc();
21117 const BasicBlock *LLVM_BB = BB->getBasicBlock();
21119 assert(MF->shouldSplitStack());
21121 const bool Is64Bit = Subtarget->is64Bit();
21122 const bool IsLP64 = Subtarget->isTarget64BitLP64();
21124 const unsigned TlsReg = Is64Bit ? X86::FS : X86::GS;
21125 const unsigned TlsOffset = IsLP64 ? 0x70 : Is64Bit ? 0x40 : 0x30;
// BB:
//  ... [Till the alloca]
// If stacklet is not large enough, jump to mallocMBB
//
// bumpMBB:
//  Allocate by subtracting from RSP
//  Jump to continueMBB
//
// mallocMBB:
//  Allocate by call to runtime
//
// continueMBB:
//  ...
//  [rest of original BB]
21143 MachineBasicBlock *mallocMBB = MF->CreateMachineBasicBlock(LLVM_BB);
21144 MachineBasicBlock *bumpMBB = MF->CreateMachineBasicBlock(LLVM_BB);
21145 MachineBasicBlock *continueMBB = MF->CreateMachineBasicBlock(LLVM_BB);
21147 MachineRegisterInfo &MRI = MF->getRegInfo();
21148 const TargetRegisterClass *AddrRegClass =
21149 getRegClassFor(getPointerTy());
21151 unsigned mallocPtrVReg = MRI.createVirtualRegister(AddrRegClass),
21152 bumpSPPtrVReg = MRI.createVirtualRegister(AddrRegClass),
21153 tmpSPVReg = MRI.createVirtualRegister(AddrRegClass),
21154 SPLimitVReg = MRI.createVirtualRegister(AddrRegClass),
21155 sizeVReg = MI->getOperand(1).getReg(),
21156 physSPReg = IsLP64 || Subtarget->isTargetNaCl64() ? X86::RSP : X86::ESP;
21158 MachineFunction::iterator MBBIter = BB;
21161 MF->insert(MBBIter, bumpMBB);
21162 MF->insert(MBBIter, mallocMBB);
21163 MF->insert(MBBIter, continueMBB);
21165 continueMBB->splice(continueMBB->begin(), BB,
21166 std::next(MachineBasicBlock::iterator(MI)), BB->end());
21167 continueMBB->transferSuccessorsAndUpdatePHIs(BB);
21169 // Add code to the main basic block to check if the stack limit has been hit,
21170 // and if so, jump to mallocMBB otherwise to bumpMBB.
21171 BuildMI(BB, DL, TII->get(TargetOpcode::COPY), tmpSPVReg).addReg(physSPReg);
21172 BuildMI(BB, DL, TII->get(IsLP64 ? X86::SUB64rr:X86::SUB32rr), SPLimitVReg)
21173 .addReg(tmpSPVReg).addReg(sizeVReg);
21174 BuildMI(BB, DL, TII->get(IsLP64 ? X86::CMP64mr:X86::CMP32mr))
21175 .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg)
21176 .addReg(SPLimitVReg);
21177 BuildMI(BB, DL, TII->get(X86::JG_1)).addMBB(mallocMBB);
21179 // bumpMBB simply decreases the stack pointer, since we know the current
21180 // stacklet has enough space.
21181 BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), physSPReg)
21182 .addReg(SPLimitVReg);
21183 BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), bumpSPPtrVReg)
21184 .addReg(SPLimitVReg);
21185 BuildMI(bumpMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);
21187 // Calls into a routine in libgcc to allocate more space from the heap.
21188 const uint32_t *RegMask =
21189 Subtarget->getRegisterInfo()->getCallPreservedMask(CallingConv::C);
21191 BuildMI(mallocMBB, DL, TII->get(X86::MOV64rr), X86::RDI)
21193 BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
21194 .addExternalSymbol("__morestack_allocate_stack_space")
21195 .addRegMask(RegMask)
21196 .addReg(X86::RDI, RegState::Implicit)
21197 .addReg(X86::RAX, RegState::ImplicitDefine);
21198 } else if (Is64Bit) {
21199 BuildMI(mallocMBB, DL, TII->get(X86::MOV32rr), X86::EDI)
21201 BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
21202 .addExternalSymbol("__morestack_allocate_stack_space")
21203 .addRegMask(RegMask)
21204 .addReg(X86::EDI, RegState::Implicit)
21205 .addReg(X86::EAX, RegState::ImplicitDefine);
21207 BuildMI(mallocMBB, DL, TII->get(X86::SUB32ri), physSPReg).addReg(physSPReg)
21209 BuildMI(mallocMBB, DL, TII->get(X86::PUSH32r)).addReg(sizeVReg);
21210 BuildMI(mallocMBB, DL, TII->get(X86::CALLpcrel32))
21211 .addExternalSymbol("__morestack_allocate_stack_space")
21212 .addRegMask(RegMask)
21213 .addReg(X86::EAX, RegState::ImplicitDefine);
21217 BuildMI(mallocMBB, DL, TII->get(X86::ADD32ri), physSPReg).addReg(physSPReg)
21220 BuildMI(mallocMBB, DL, TII->get(TargetOpcode::COPY), mallocPtrVReg)
21221 .addReg(IsLP64 ? X86::RAX : X86::EAX);
21222 BuildMI(mallocMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);
21224 // Set up the CFG correctly.
21225 BB->addSuccessor(bumpMBB);
21226 BB->addSuccessor(mallocMBB);
21227 mallocMBB->addSuccessor(continueMBB);
21228 bumpMBB->addSuccessor(continueMBB);
21230 // Take care of the PHI nodes.
21231 BuildMI(*continueMBB, continueMBB->begin(), DL, TII->get(X86::PHI),
21232 MI->getOperand(0).getReg())
21233 .addReg(mallocPtrVReg).addMBB(mallocMBB)
21234 .addReg(bumpSPPtrVReg).addMBB(bumpMBB);
21236 // Delete the original pseudo instruction.
21237 MI->eraseFromParent();
21240 return continueMBB;
21243 MachineBasicBlock *
21244 X86TargetLowering::EmitLoweredWinAlloca(MachineInstr *MI,
21245 MachineBasicBlock *BB) const {
21246 DebugLoc DL = MI->getDebugLoc();
21248 assert(!Subtarget->isTargetMachO());
21250 X86FrameLowering::emitStackProbeCall(*BB->getParent(), *BB, MI, DL);
21252 MI->eraseFromParent(); // The pseudo instruction is gone now.
21256 MachineBasicBlock *
21257 X86TargetLowering::EmitLoweredTLSCall(MachineInstr *MI,
21258 MachineBasicBlock *BB) const {
21259 // This is pretty easy. We're taking the value that we received from
21260 // our load from the relocation, sticking it in either RDI (x86-64)
21261 // or EAX and doing an indirect call. The return value will then
21262 // be in the normal return register.
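// Illustrative 64-bit Darwin sequence (symbol name hypothetical):
//   movq _var@TLVP(%rip), %rdi
//   callq *(%rdi)
// The descriptor's handler returns the address of the variable in RAX.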
21263 MachineFunction *F = BB->getParent();
21264 const X86InstrInfo *TII = Subtarget->getInstrInfo();
21265 DebugLoc DL = MI->getDebugLoc();
21267 assert(Subtarget->isTargetDarwin() && "Darwin only instr emitted?");
21268 assert(MI->getOperand(3).isGlobal() && "This should be a global");
21270 // Get a register mask for the lowered call.
21271 // FIXME: The 32-bit calls have non-standard calling conventions. Use a
21272 // proper register mask.
21273 const uint32_t *RegMask =
21274 Subtarget->getRegisterInfo()->getCallPreservedMask(CallingConv::C);
21275 if (Subtarget->is64Bit()) {
21276 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
21277 TII->get(X86::MOV64rm), X86::RDI)
21279 .addImm(0).addReg(0)
21280 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
21281 MI->getOperand(3).getTargetFlags())
21283 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL64m));
21284 addDirectMem(MIB, X86::RDI);
21285 MIB.addReg(X86::RAX, RegState::ImplicitDefine).addRegMask(RegMask);
21286 } else if (F->getTarget().getRelocationModel() != Reloc::PIC_) {
21287 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
21288 TII->get(X86::MOV32rm), X86::EAX)
21290 .addImm(0).addReg(0)
21291 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
21292 MI->getOperand(3).getTargetFlags())
21294 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
21295 addDirectMem(MIB, X86::EAX);
21296 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
21298 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
21299 TII->get(X86::MOV32rm), X86::EAX)
21300 .addReg(TII->getGlobalBaseReg(F))
21301 .addImm(0).addReg(0)
21302 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
21303 MI->getOperand(3).getTargetFlags())
21305 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
21306 addDirectMem(MIB, X86::EAX);
21307 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
21310 MI->eraseFromParent(); // The pseudo instruction is gone now.
21314 MachineBasicBlock *
21315 X86TargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
21316 MachineBasicBlock *MBB) const {
21317 DebugLoc DL = MI->getDebugLoc();
21318 MachineFunction *MF = MBB->getParent();
21319 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21320 MachineRegisterInfo &MRI = MF->getRegInfo();
21322 const BasicBlock *BB = MBB->getBasicBlock();
21323 MachineFunction::iterator I = MBB;
21326 // Memory Reference
21327 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
21328 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
21331 unsigned MemOpndSlot = 0;
21333 unsigned CurOp = 0;
21335 DstReg = MI->getOperand(CurOp++).getReg();
21336 const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
21337 assert(RC->hasType(MVT::i32) && "Invalid destination!");
21338 unsigned mainDstReg = MRI.createVirtualRegister(RC);
21339 unsigned restoreDstReg = MRI.createVirtualRegister(RC);
21341 MemOpndSlot = CurOp;
21343 MVT PVT = getPointerTy();
21344 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
21345 "Invalid Pointer Size!");
// For v = setjmp(buf), we generate
//
// thisMBB:
//  buf[LabelOffset] = restoreMBB
//  SjLjSetup restoreMBB
//
// mainMBB:
//  v_main = 0
//
// sinkMBB:
//  v = phi(v_main from mainMBB, v_restore from restoreMBB)
//
// restoreMBB:
//  if base pointer being used, load it from frame
//  v_restore = 1
21363 MachineBasicBlock *thisMBB = MBB;
21364 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
21365 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
21366 MachineBasicBlock *restoreMBB = MF->CreateMachineBasicBlock(BB);
21367 MF->insert(I, mainMBB);
21368 MF->insert(I, sinkMBB);
21369 MF->push_back(restoreMBB);
21371 MachineInstrBuilder MIB;
21373 // Transfer the remainder of BB and its successor edges to sinkMBB.
21374 sinkMBB->splice(sinkMBB->begin(), MBB,
21375 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
21376 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
21379 unsigned PtrStoreOpc = 0;
21380 unsigned LabelReg = 0;
21381 const int64_t LabelOffset = 1 * PVT.getStoreSize();
21382 Reloc::Model RM = MF->getTarget().getRelocationModel();
21383 bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
21384 (RM == Reloc::Static || RM == Reloc::DynamicNoPIC);
21386 // Prepare IP either in reg or imm.
21387 if (!UseImmLabel) {
21388 PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
21389 const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
21390 LabelReg = MRI.createVirtualRegister(PtrRC);
21391 if (Subtarget->is64Bit()) {
21392 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA64r), LabelReg)
21396 .addMBB(restoreMBB)
21399 const X86InstrInfo *XII = static_cast<const X86InstrInfo*>(TII);
21400 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA32r), LabelReg)
21401 .addReg(XII->getGlobalBaseReg(MF))
21404 .addMBB(restoreMBB, Subtarget->ClassifyBlockAddressReference())
21408 PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
21410 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrStoreOpc));
21411 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
21412 if (i == X86::AddrDisp)
21413 MIB.addDisp(MI->getOperand(MemOpndSlot + i), LabelOffset);
21415 MIB.addOperand(MI->getOperand(MemOpndSlot + i));
21418 MIB.addReg(LabelReg);
21420 MIB.addMBB(restoreMBB);
21421 MIB.setMemRefs(MMOBegin, MMOEnd);
21423 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::EH_SjLj_Setup))
21424 .addMBB(restoreMBB);
21426 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
21427 MIB.addRegMask(RegInfo->getNoPreservedMask());
21428 thisMBB->addSuccessor(mainMBB);
21429 thisMBB->addSuccessor(restoreMBB);
21433 BuildMI(mainMBB, DL, TII->get(X86::MOV32r0), mainDstReg);
21434 mainMBB->addSuccessor(sinkMBB);
21437 BuildMI(*sinkMBB, sinkMBB->begin(), DL,
21438 TII->get(X86::PHI), DstReg)
21439 .addReg(mainDstReg).addMBB(mainMBB)
21440 .addReg(restoreDstReg).addMBB(restoreMBB);
21443 if (RegInfo->hasBasePointer(*MF)) {
21444 const bool Uses64BitFramePtr =
21445 Subtarget->isTarget64BitLP64() || Subtarget->isTargetNaCl64();
21446 X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
21447 X86FI->setRestoreBasePointer(MF);
21448 unsigned FramePtr = RegInfo->getFrameRegister(*MF);
21449 unsigned BasePtr = RegInfo->getBaseRegister();
21450 unsigned Opm = Uses64BitFramePtr ? X86::MOV64rm : X86::MOV32rm;
21451 addRegOffset(BuildMI(restoreMBB, DL, TII->get(Opm), BasePtr),
21452 FramePtr, true, X86FI->getRestoreBasePointerOffset())
21453 .setMIFlag(MachineInstr::FrameSetup);
21455 BuildMI(restoreMBB, DL, TII->get(X86::MOV32ri), restoreDstReg).addImm(1);
21456 BuildMI(restoreMBB, DL, TII->get(X86::JMP_1)).addMBB(sinkMBB);
21457 restoreMBB->addSuccessor(sinkMBB);
21459 MI->eraseFromParent();
21463 MachineBasicBlock *
21464 X86TargetLowering::emitEHSjLjLongJmp(MachineInstr *MI,
21465 MachineBasicBlock *MBB) const {
21466 DebugLoc DL = MI->getDebugLoc();
21467 MachineFunction *MF = MBB->getParent();
21468 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21469 MachineRegisterInfo &MRI = MF->getRegInfo();
21471 // Memory Reference
21472 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
21473 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
21475 MVT PVT = getPointerTy();
21476 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
21477 "Invalid Pointer Size!");
21479 const TargetRegisterClass *RC =
21480 (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
21481 unsigned Tmp = MRI.createVirtualRegister(RC);
21482 // Since FP is only updated here but NOT referenced, it's treated as GPR.
21483 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
21484 unsigned FP = (PVT == MVT::i64) ? X86::RBP : X86::EBP;
21485 unsigned SP = RegInfo->getStackRegister();
21487 MachineInstrBuilder MIB;
21489 const int64_t LabelOffset = 1 * PVT.getStoreSize();
21490 const int64_t SPOffset = 2 * PVT.getStoreSize();
21492 unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
21493 unsigned IJmpOpc = (PVT == MVT::i64) ? X86::JMP64r : X86::JMP32r;
21496 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), FP);
21497 for (unsigned i = 0; i < X86::AddrNumOperands; ++i)
21498 MIB.addOperand(MI->getOperand(i));
21499 MIB.setMemRefs(MMOBegin, MMOEnd);
21501 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), Tmp);
21502 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
21503 if (i == X86::AddrDisp)
21504 MIB.addDisp(MI->getOperand(i), LabelOffset);
21506 MIB.addOperand(MI->getOperand(i));
21508 MIB.setMemRefs(MMOBegin, MMOEnd);
21510 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), SP);
21511 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
21512 if (i == X86::AddrDisp)
21513 MIB.addDisp(MI->getOperand(i), SPOffset);
21515 MIB.addOperand(MI->getOperand(i));
21517 MIB.setMemRefs(MMOBegin, MMOEnd);
21519 BuildMI(*MBB, MI, DL, TII->get(IJmpOpc)).addReg(Tmp);
21521 MI->eraseFromParent();
21525 // Replace 213-type (isel default) FMA3 instructions with 231-type for
21526 // accumulator loops. Writing back to the accumulator allows the coalescer
21527 // to remove extra copies in the loop.
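// For reference: in the 213 form the destination is a multiplicand
// (dst = src2 * dst + src3), while in the 231 form the destination is the
// addend (dst = dst + src2 * src3). In an accumulator loop the 231 form
// keeps the PHI'd accumulator tied to the destination register, so no copy
// of it is needed on each iteration.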
21528 MachineBasicBlock *
21529 X86TargetLowering::emitFMA3Instr(MachineInstr *MI,
21530 MachineBasicBlock *MBB) const {
21531 MachineOperand &AddendOp = MI->getOperand(3);
21533 // Bail out early if the addend isn't a register - we can't switch these.
21534 if (!AddendOp.isReg())
21537 MachineFunction &MF = *MBB->getParent();
21538 MachineRegisterInfo &MRI = MF.getRegInfo();
21540 // Check whether the addend is defined by a PHI:
21541 assert(MRI.hasOneDef(AddendOp.getReg()) && "Multiple defs in SSA?");
21542 MachineInstr &AddendDef = *MRI.def_instr_begin(AddendOp.getReg());
21543 if (!AddendDef.isPHI())
// Look for the following pattern:
//
//   %addend = phi [%entry, 0], [%loop, %result]
//   ...
//   %result<tied1> = FMA213 %m2<tied0>, %m1, %addend
//
// and transform it into:
//
//   %addend = phi [%entry, 0], [%loop, %result]
//   ...
//   %result<tied1> = FMA231 %addend<tied0>, %m1, %m2
21558 for (unsigned i = 1, e = AddendDef.getNumOperands(); i < e; i += 2) {
21559 assert(AddendDef.getOperand(i).isReg());
21560 MachineOperand PHISrcOp = AddendDef.getOperand(i);
21561 MachineInstr &PHISrcInst = *MRI.def_instr_begin(PHISrcOp.getReg());
21562 if (&PHISrcInst == MI) {
21563 // Found a matching instruction.
21564 unsigned NewFMAOpc = 0;
21565 switch (MI->getOpcode()) {
21566 case X86::VFMADDPDr213r: NewFMAOpc = X86::VFMADDPDr231r; break;
21567 case X86::VFMADDPSr213r: NewFMAOpc = X86::VFMADDPSr231r; break;
21568 case X86::VFMADDSDr213r: NewFMAOpc = X86::VFMADDSDr231r; break;
21569 case X86::VFMADDSSr213r: NewFMAOpc = X86::VFMADDSSr231r; break;
21570 case X86::VFMSUBPDr213r: NewFMAOpc = X86::VFMSUBPDr231r; break;
21571 case X86::VFMSUBPSr213r: NewFMAOpc = X86::VFMSUBPSr231r; break;
21572 case X86::VFMSUBSDr213r: NewFMAOpc = X86::VFMSUBSDr231r; break;
21573 case X86::VFMSUBSSr213r: NewFMAOpc = X86::VFMSUBSSr231r; break;
21574 case X86::VFNMADDPDr213r: NewFMAOpc = X86::VFNMADDPDr231r; break;
21575 case X86::VFNMADDPSr213r: NewFMAOpc = X86::VFNMADDPSr231r; break;
21576 case X86::VFNMADDSDr213r: NewFMAOpc = X86::VFNMADDSDr231r; break;
21577 case X86::VFNMADDSSr213r: NewFMAOpc = X86::VFNMADDSSr231r; break;
21578 case X86::VFNMSUBPDr213r: NewFMAOpc = X86::VFNMSUBPDr231r; break;
21579 case X86::VFNMSUBPSr213r: NewFMAOpc = X86::VFNMSUBPSr231r; break;
21580 case X86::VFNMSUBSDr213r: NewFMAOpc = X86::VFNMSUBSDr231r; break;
21581 case X86::VFNMSUBSSr213r: NewFMAOpc = X86::VFNMSUBSSr231r; break;
21582 case X86::VFMADDSUBPDr213r: NewFMAOpc = X86::VFMADDSUBPDr231r; break;
21583 case X86::VFMADDSUBPSr213r: NewFMAOpc = X86::VFMADDSUBPSr231r; break;
21584 case X86::VFMSUBADDPDr213r: NewFMAOpc = X86::VFMSUBADDPDr231r; break;
21585 case X86::VFMSUBADDPSr213r: NewFMAOpc = X86::VFMSUBADDPSr231r; break;
21587 case X86::VFMADDPDr213rY: NewFMAOpc = X86::VFMADDPDr231rY; break;
21588 case X86::VFMADDPSr213rY: NewFMAOpc = X86::VFMADDPSr231rY; break;
21589 case X86::VFMSUBPDr213rY: NewFMAOpc = X86::VFMSUBPDr231rY; break;
21590 case X86::VFMSUBPSr213rY: NewFMAOpc = X86::VFMSUBPSr231rY; break;
21591 case X86::VFNMADDPDr213rY: NewFMAOpc = X86::VFNMADDPDr231rY; break;
21592 case X86::VFNMADDPSr213rY: NewFMAOpc = X86::VFNMADDPSr231rY; break;
21593 case X86::VFNMSUBPDr213rY: NewFMAOpc = X86::VFNMSUBPDr231rY; break;
21594 case X86::VFNMSUBPSr213rY: NewFMAOpc = X86::VFNMSUBPSr231rY; break;
21595 case X86::VFMADDSUBPDr213rY: NewFMAOpc = X86::VFMADDSUBPDr231rY; break;
21596 case X86::VFMADDSUBPSr213rY: NewFMAOpc = X86::VFMADDSUBPSr231rY; break;
21597 case X86::VFMSUBADDPDr213rY: NewFMAOpc = X86::VFMSUBADDPDr231rY; break;
21598 case X86::VFMSUBADDPSr213rY: NewFMAOpc = X86::VFMSUBADDPSr231rY; break;
21599 default: llvm_unreachable("Unrecognized FMA variant.");
21602 const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
21603 MachineInstrBuilder MIB =
21604 BuildMI(MF, MI->getDebugLoc(), TII.get(NewFMAOpc))
21605 .addOperand(MI->getOperand(0))
21606 .addOperand(MI->getOperand(3))
21607 .addOperand(MI->getOperand(2))
21608 .addOperand(MI->getOperand(1));
21609 MBB->insert(MachineBasicBlock::iterator(MI), MIB);
21610 MI->eraseFromParent();
21617 MachineBasicBlock *
21618 X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
21619 MachineBasicBlock *BB) const {
21620 switch (MI->getOpcode()) {
21621 default: llvm_unreachable("Unexpected instr type to insert");
21622 case X86::TAILJMPd64:
21623 case X86::TAILJMPr64:
21624 case X86::TAILJMPm64:
21625 case X86::TAILJMPd64_REX:
21626 case X86::TAILJMPr64_REX:
21627 case X86::TAILJMPm64_REX:
21628 llvm_unreachable("TAILJMP64 would not be touched here.");
21629 case X86::TCRETURNdi64:
21630 case X86::TCRETURNri64:
21631 case X86::TCRETURNmi64:
21633 case X86::WIN_ALLOCA:
21634 return EmitLoweredWinAlloca(MI, BB);
21635 case X86::SEG_ALLOCA_32:
21636 case X86::SEG_ALLOCA_64:
21637 return EmitLoweredSegAlloca(MI, BB);
21638 case X86::TLSCall_32:
21639 case X86::TLSCall_64:
21640 return EmitLoweredTLSCall(MI, BB);
21641 case X86::CMOV_GR8:
21642 case X86::CMOV_FR32:
21643 case X86::CMOV_FR64:
21644 case X86::CMOV_V4F32:
21645 case X86::CMOV_V2F64:
21646 case X86::CMOV_V2I64:
21647 case X86::CMOV_V8F32:
21648 case X86::CMOV_V4F64:
21649 case X86::CMOV_V4I64:
21650 case X86::CMOV_V16F32:
21651 case X86::CMOV_V8F64:
21652 case X86::CMOV_V8I64:
21653 case X86::CMOV_GR16:
21654 case X86::CMOV_GR32:
21655 case X86::CMOV_RFP32:
21656 case X86::CMOV_RFP64:
21657 case X86::CMOV_RFP80:
21658 return EmitLoweredSelect(MI, BB);
21660 case X86::FP32_TO_INT16_IN_MEM:
21661 case X86::FP32_TO_INT32_IN_MEM:
21662 case X86::FP32_TO_INT64_IN_MEM:
21663 case X86::FP64_TO_INT16_IN_MEM:
21664 case X86::FP64_TO_INT32_IN_MEM:
21665 case X86::FP64_TO_INT64_IN_MEM:
21666 case X86::FP80_TO_INT16_IN_MEM:
21667 case X86::FP80_TO_INT32_IN_MEM:
21668 case X86::FP80_TO_INT64_IN_MEM: {
21669 MachineFunction *F = BB->getParent();
21670 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21671 DebugLoc DL = MI->getDebugLoc();
21673 // Change the floating point control register to use "round towards zero"
21674 // mode when truncating to an integer value.
21675 int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2, false);
21676 addFrameReference(BuildMI(*BB, MI, DL,
21677 TII->get(X86::FNSTCW16m)), CWFrameIdx);
21679 // Load the old value of the high byte of the control word...
21681 F->getRegInfo().createVirtualRegister(&X86::GR16RegClass);
21682 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16rm), OldCW),
21685 // Set the high part to be round to zero...
21686 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mi)), CWFrameIdx)
21689 // Reload the modified control word now...
21690 addFrameReference(BuildMI(*BB, MI, DL,
21691 TII->get(X86::FLDCW16m)), CWFrameIdx);
21693 // Restore the memory image of control word to original value
21694 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mr)), CWFrameIdx)
21697 // Get the X86 opcode to use.
21699 switch (MI->getOpcode()) {
21700 default: llvm_unreachable("illegal opcode!");
21701 case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break;
21702 case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break;
21703 case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break;
21704 case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break;
21705 case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break;
21706 case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break;
21707 case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break;
21708 case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break;
21709 case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break;
21713 MachineOperand &Op = MI->getOperand(0);
21715 AM.BaseType = X86AddressMode::RegBase;
21716 AM.Base.Reg = Op.getReg();
21718 AM.BaseType = X86AddressMode::FrameIndexBase;
21719 AM.Base.FrameIndex = Op.getIndex();
21721 Op = MI->getOperand(1);
21723 AM.Scale = Op.getImm();
21724 Op = MI->getOperand(2);
21726 AM.IndexReg = Op.getImm();
21727 Op = MI->getOperand(3);
21728 if (Op.isGlobal()) {
21729 AM.GV = Op.getGlobal();
21731 AM.Disp = Op.getImm();
21733 addFullAddress(BuildMI(*BB, MI, DL, TII->get(Opc)), AM)
21734 .addReg(MI->getOperand(X86::AddrNumOperands).getReg());
21736 // Reload the original control word now.
21737 addFrameReference(BuildMI(*BB, MI, DL,
21738 TII->get(X86::FLDCW16m)), CWFrameIdx);
21740 MI->eraseFromParent(); // The pseudo instruction is gone now.
21743 // String/text processing lowering.
21744 case X86::PCMPISTRM128REG:
21745 case X86::VPCMPISTRM128REG:
21746 case X86::PCMPISTRM128MEM:
21747 case X86::VPCMPISTRM128MEM:
21748 case X86::PCMPESTRM128REG:
21749 case X86::VPCMPESTRM128REG:
21750 case X86::PCMPESTRM128MEM:
21751 case X86::VPCMPESTRM128MEM:
21752 assert(Subtarget->hasSSE42() &&
21753 "Target must have SSE4.2 or AVX features enabled");
21754 return EmitPCMPSTRM(MI, BB, Subtarget->getInstrInfo());
21756 // String/text processing lowering.
21757 case X86::PCMPISTRIREG:
21758 case X86::VPCMPISTRIREG:
21759 case X86::PCMPISTRIMEM:
21760 case X86::VPCMPISTRIMEM:
21761 case X86::PCMPESTRIREG:
21762 case X86::VPCMPESTRIREG:
21763 case X86::PCMPESTRIMEM:
21764 case X86::VPCMPESTRIMEM:
21765 assert(Subtarget->hasSSE42() &&
21766 "Target must have SSE4.2 or AVX features enabled");
21767 return EmitPCMPSTRI(MI, BB, Subtarget->getInstrInfo());
21769 // Thread synchronization.
21771 return EmitMonitor(MI, BB, Subtarget);
21775 return EmitXBegin(MI, BB, Subtarget->getInstrInfo());
21777 case X86::VASTART_SAVE_XMM_REGS:
21778 return EmitVAStartSaveXMMRegsWithCustomInserter(MI, BB);
21780 case X86::VAARG_64:
21781 return EmitVAARG64WithCustomInserter(MI, BB);
21783 case X86::EH_SjLj_SetJmp32:
21784 case X86::EH_SjLj_SetJmp64:
21785 return emitEHSjLjSetJmp(MI, BB);
21787 case X86::EH_SjLj_LongJmp32:
21788 case X86::EH_SjLj_LongJmp64:
21789 return emitEHSjLjLongJmp(MI, BB);
21791 case TargetOpcode::STATEPOINT:
21792 // As an implementation detail, STATEPOINT shares the STACKMAP format at
21793 // this point in the process. We diverge later.
21794 return emitPatchPoint(MI, BB);
21796 case TargetOpcode::STACKMAP:
21797 case TargetOpcode::PATCHPOINT:
21798 return emitPatchPoint(MI, BB);
21800 case X86::VFMADDPDr213r:
21801 case X86::VFMADDPSr213r:
21802 case X86::VFMADDSDr213r:
21803 case X86::VFMADDSSr213r:
21804 case X86::VFMSUBPDr213r:
21805 case X86::VFMSUBPSr213r:
21806 case X86::VFMSUBSDr213r:
21807 case X86::VFMSUBSSr213r:
21808 case X86::VFNMADDPDr213r:
21809 case X86::VFNMADDPSr213r:
21810 case X86::VFNMADDSDr213r:
21811 case X86::VFNMADDSSr213r:
21812 case X86::VFNMSUBPDr213r:
21813 case X86::VFNMSUBPSr213r:
21814 case X86::VFNMSUBSDr213r:
21815 case X86::VFNMSUBSSr213r:
21816 case X86::VFMADDSUBPDr213r:
21817 case X86::VFMADDSUBPSr213r:
21818 case X86::VFMSUBADDPDr213r:
21819 case X86::VFMSUBADDPSr213r:
21820 case X86::VFMADDPDr213rY:
21821 case X86::VFMADDPSr213rY:
21822 case X86::VFMSUBPDr213rY:
21823 case X86::VFMSUBPSr213rY:
21824 case X86::VFNMADDPDr213rY:
21825 case X86::VFNMADDPSr213rY:
21826 case X86::VFNMSUBPDr213rY:
21827 case X86::VFNMSUBPSr213rY:
21828 case X86::VFMADDSUBPDr213rY:
21829 case X86::VFMADDSUBPSr213rY:
21830 case X86::VFMSUBADDPDr213rY:
21831 case X86::VFMSUBADDPSr213rY:
21832 return emitFMA3Instr(MI, BB);
21836 //===----------------------------------------------------------------------===//
21837 // X86 Optimization Hooks
21838 //===----------------------------------------------------------------------===//
21840 void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
21843 const SelectionDAG &DAG,
21844 unsigned Depth) const {
21845 unsigned BitWidth = KnownZero.getBitWidth();
21846 unsigned Opc = Op.getOpcode();
21847 assert((Opc >= ISD::BUILTIN_OP_END ||
21848 Opc == ISD::INTRINSIC_WO_CHAIN ||
21849 Opc == ISD::INTRINSIC_W_CHAIN ||
21850 Opc == ISD::INTRINSIC_VOID) &&
21851 "Should use MaskedValueIsZero if you don't know whether Op"
21852 " is a target node!");
21854 KnownZero = KnownOne = APInt(BitWidth, 0); // Don't know anything.
21868 // These nodes' second result is a boolean.
21869 if (Op.getResNo() == 0)
21872 case X86ISD::SETCC:
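// X86ISD::SETCC (and the boolean results that fall through from above)
// produce only 0 or 1, so every bit above bit 0 is known to be zero.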
21873 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
21875 case ISD::INTRINSIC_WO_CHAIN: {
21876 unsigned IntId = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
21877 unsigned NumLoBits = 0;
21880 case Intrinsic::x86_sse_movmsk_ps:
21881 case Intrinsic::x86_avx_movmsk_ps_256:
21882 case Intrinsic::x86_sse2_movmsk_pd:
21883 case Intrinsic::x86_avx_movmsk_pd_256:
21884 case Intrinsic::x86_mmx_pmovmskb:
21885 case Intrinsic::x86_sse2_pmovmskb_128:
21886 case Intrinsic::x86_avx2_pmovmskb: {
21887 // High bits of movmskp{s|d}, pmovmskb are known zero.
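// For example, (v16i8) PMOVMSKB materializes a 16-bit mask in a 32-bit
// result, so bits 16 and above of that result are always zero.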
21889 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
21890 case Intrinsic::x86_sse_movmsk_ps: NumLoBits = 4; break;
21891 case Intrinsic::x86_avx_movmsk_ps_256: NumLoBits = 8; break;
21892 case Intrinsic::x86_sse2_movmsk_pd: NumLoBits = 2; break;
21893 case Intrinsic::x86_avx_movmsk_pd_256: NumLoBits = 4; break;
21894 case Intrinsic::x86_mmx_pmovmskb: NumLoBits = 8; break;
21895 case Intrinsic::x86_sse2_pmovmskb_128: NumLoBits = 16; break;
21896 case Intrinsic::x86_avx2_pmovmskb: NumLoBits = 32; break;
21898 KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - NumLoBits);
21907 unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(
21909 const SelectionDAG &,
21910 unsigned Depth) const {
21911 // SETCC_CARRY sets the dest to ~0 for true or 0 for false.
21912 if (Op.getOpcode() == X86ISD::SETCC_CARRY)
21913 return Op.getValueType().getScalarType().getSizeInBits();
21919 /// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
21920 /// node is a GlobalAddress + offset.
21921 bool X86TargetLowering::isGAPlusOffset(SDNode *N,
21922 const GlobalValue* &GA,
21923 int64_t &Offset) const {
21924 if (N->getOpcode() == X86ISD::Wrapper) {
21925 if (isa<GlobalAddressSDNode>(N->getOperand(0))) {
21926 GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal();
21927 Offset = cast<GlobalAddressSDNode>(N->getOperand(0))->getOffset();
21931 return TargetLowering::isGAPlusOffset(N, GA, Offset);
21934 /// isShuffleHigh128VectorInsertLow - Checks whether the shuffle node is the
21935 /// same as extracting the high 128-bit part of a 256-bit vector and then
21936 /// inserting the result into the low part of a new 256-bit vector.
21937 static bool isShuffleHigh128VectorInsertLow(ShuffleVectorSDNode *SVOp) {
21938 EVT VT = SVOp->getValueType(0);
21939 unsigned NumElems = VT.getVectorNumElements();
21941 // vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u>
21942 for (unsigned i = 0, j = NumElems/2; i != NumElems/2; ++i, ++j)
21943 if (!isUndefOrEqual(SVOp->getMaskElt(i), j) ||
21944 SVOp->getMaskElt(j) >= 0)
21950 /// isShuffleLow128VectorInsertHigh - Checks whether the shuffle node is the
21951 /// same as extracting the low 128-bit part of a 256-bit vector and then
21952 /// inserting the result into the high part of a new 256-bit vector.
21953 static bool isShuffleLow128VectorInsertHigh(ShuffleVectorSDNode *SVOp) {
21954 EVT VT = SVOp->getValueType(0);
21955 unsigned NumElems = VT.getVectorNumElements();
21957 // vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1>
21958 for (unsigned i = NumElems/2, j = 0; i != NumElems; ++i, ++j)
21959 if (!isUndefOrEqual(SVOp->getMaskElt(i), j) ||
21960 SVOp->getMaskElt(j) >= 0)
21966 /// PerformShuffleCombine256 - Performs shuffle combines for 256-bit vectors.
21967 static SDValue PerformShuffleCombine256(SDNode *N, SelectionDAG &DAG,
21968 TargetLowering::DAGCombinerInfo &DCI,
21969 const X86Subtarget* Subtarget) {
21971 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
21972 SDValue V1 = SVOp->getOperand(0);
21973 SDValue V2 = SVOp->getOperand(1);
21974 EVT VT = SVOp->getValueType(0);
21975 unsigned NumElems = VT.getVectorNumElements();
21977 if (V1.getOpcode() == ISD::CONCAT_VECTORS &&
21978 V2.getOpcode() == ISD::CONCAT_VECTORS) {
21982 // V UNDEF BUILD_VECTOR UNDEF
21984 // CONCAT_VECTOR CONCAT_VECTOR
21987 // RESULT: V + zero extended
21989 if (V2.getOperand(0).getOpcode() != ISD::BUILD_VECTOR ||
21990 V2.getOperand(1).getOpcode() != ISD::UNDEF ||
21991 V1.getOperand(1).getOpcode() != ISD::UNDEF)
21994 if (!ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode()))
21997 // To match the shuffle mask, the first half of the mask should
21998 // be exactly the first vector, and all the rest a splat with the
21999 // first element of the second one.
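// For a v8i32 shuffle this means the mask must look like
// <0, 1, 2, 3, 8, 8, 8, 8> (modulo undef elements).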
22000 for (unsigned i = 0; i != NumElems/2; ++i)
22001 if (!isUndefOrEqual(SVOp->getMaskElt(i), i) ||
22002 !isUndefOrEqual(SVOp->getMaskElt(i+NumElems/2), NumElems))
22005 // If V1 is coming from a vector load then just fold to a VZEXT_LOAD.
22006 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(V1.getOperand(0))) {
22007 if (Ld->hasNUsesOfValue(1, 0)) {
22008 SDVTList Tys = DAG.getVTList(MVT::v4i64, MVT::Other);
22009 SDValue Ops[] = { Ld->getChain(), Ld->getBasePtr() };
22011 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops,
22013 Ld->getPointerInfo(),
22014 Ld->getAlignment(),
22015 false/*isVolatile*/, true/*ReadMem*/,
22016 false/*WriteMem*/);
22018 // Make sure the newly-created LOAD is in the same position as Ld in
22019 // terms of dependency. We create a TokenFactor for Ld and ResNode,
22020 // and update uses of Ld's output chain to use the TokenFactor.
22021 if (Ld->hasAnyUseOfValue(1)) {
22022 SDValue NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
22023 SDValue(Ld, 1), SDValue(ResNode.getNode(), 1));
22024 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), NewChain);
22025 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(Ld, 1),
22026 SDValue(ResNode.getNode(), 1));
22029 return DAG.getNode(ISD::BITCAST, dl, VT, ResNode);
22033 // Emit a zeroed vector and insert the desired subvector on its
22035 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
22036 SDValue InsV = Insert128BitVector(Zeros, V1.getOperand(0), 0, DAG, dl);
22037 return DCI.CombineTo(N, InsV);
22040 //===--------------------------------------------------------------------===//
22041 // Combine some shuffles into subvector extracts and inserts:
22044 // vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u>
22045 if (isShuffleHigh128VectorInsertLow(SVOp)) {
22046 SDValue V = Extract128BitVector(V1, NumElems/2, DAG, dl);
22047 SDValue InsV = Insert128BitVector(DAG.getUNDEF(VT), V, 0, DAG, dl);
22048 return DCI.CombineTo(N, InsV);
22051 // vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1>
22052 if (isShuffleLow128VectorInsertHigh(SVOp)) {
22053 SDValue V = Extract128BitVector(V1, 0, DAG, dl);
22054 SDValue InsV = Insert128BitVector(DAG.getUNDEF(VT), V, NumElems/2, DAG, dl);
22055 return DCI.CombineTo(N, InsV);
22061 /// \brief Combine an arbitrary chain of shuffles into a single instruction if
22064 /// This is the leaf of the recursive combine below. When we have found some
22065 /// chain of single-use x86 shuffle instructions and accumulated the combined
22066 /// shuffle mask represented by them, this will try to pattern match that mask
22067 /// into either a single instruction if there is a special purpose instruction
22068 /// for this operation, or into a PSHUFB instruction which is a fully general
22069 /// instruction but should only be used to replace chains over a certain depth.
22070 static bool combineX86ShuffleChain(SDValue Op, SDValue Root, ArrayRef<int> Mask,
22071 int Depth, bool HasPSHUFB, SelectionDAG &DAG,
22072 TargetLowering::DAGCombinerInfo &DCI,
22073 const X86Subtarget *Subtarget) {
22074 assert(!Mask.empty() && "Cannot combine an empty shuffle mask!");
22076 // Find the operand that enters the chain. Note that multiple uses are OK
22077 // here; we're not going to remove the operand we find.
22078 SDValue Input = Op.getOperand(0);
22079 while (Input.getOpcode() == ISD::BITCAST)
22080 Input = Input.getOperand(0);
22082 MVT VT = Input.getSimpleValueType();
22083 MVT RootVT = Root.getSimpleValueType();
22086 // Just remove no-op shuffle masks.
22087 if (Mask.size() == 1) {
22088 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Input),
22093 // Use the float domain if the operand type is a floating point type.
22094 bool FloatDomain = VT.isFloatingPoint();
22096 // For floating point shuffles, we don't have free copies in the shuffle
22097 // instructions or the ability to load as part of the instruction, so
22098 // canonicalize their shuffles to UNPCK or MOV variants.
22100 // Note that even with AVX we prefer the PSHUFD form of shuffle for integer
22101 // vectors because it can have a load folded into it that UNPCK cannot. This
22102 // doesn't preclude something switching to the shorter encoding post-RA.
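// For example, the v2f64 mask <0, 0> becomes MOVDDUP (or MOVLHPS without
// SSE3) and the mask <1, 1> becomes MOVHLPS.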
22104 if (Mask.equals(0, 0) || Mask.equals(1, 1)) {
22105 bool Lo = Mask.equals(0, 0);
22108 // Check if we have SSE3 which will let us use MOVDDUP. That instruction
22109 // is no slower than UNPCKLPD but has the option to fold the input operand
22110 // into even an unaligned memory load.
22111 if (Lo && Subtarget->hasSSE3()) {
22112 Shuffle = X86ISD::MOVDDUP;
22113 ShuffleVT = MVT::v2f64;
22115 // We have MOVLHPS and MOVHLPS throughout SSE and they have smaller
22116 // encodings than the UNPCK variants.
22117 Shuffle = Lo ? X86ISD::MOVLHPS : X86ISD::MOVHLPS;
22118 ShuffleVT = MVT::v4f32;
22120 if (Depth == 1 && Root->getOpcode() == Shuffle)
22121 return false; // Nothing to do!
22122 Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
22123 DCI.AddToWorklist(Op.getNode());
22124 if (Shuffle == X86ISD::MOVDDUP)
22125 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op);
22127 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op, Op);
22128 DCI.AddToWorklist(Op.getNode());
22129 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
22133 if (Subtarget->hasSSE3() &&
22134 (Mask.equals(0, 0, 2, 2) || Mask.equals(1, 1, 3, 3))) {
22135 bool Lo = Mask.equals(0, 0, 2, 2);
22136 unsigned Shuffle = Lo ? X86ISD::MOVSLDUP : X86ISD::MOVSHDUP;
22137 MVT ShuffleVT = MVT::v4f32;
22138 if (Depth == 1 && Root->getOpcode() == Shuffle)
22139 return false; // Nothing to do!
22140 Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
22141 DCI.AddToWorklist(Op.getNode());
22142 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op);
22143 DCI.AddToWorklist(Op.getNode());
22144 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
22148 if (Mask.equals(0, 0, 1, 1) || Mask.equals(2, 2, 3, 3)) {
22149 bool Lo = Mask.equals(0, 0, 1, 1);
22150 unsigned Shuffle = Lo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
22151 MVT ShuffleVT = MVT::v4f32;
22152 if (Depth == 1 && Root->getOpcode() == Shuffle)
22153 return false; // Nothing to do!
22154 Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
22155 DCI.AddToWorklist(Op.getNode());
22156 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op, Op);
22157 DCI.AddToWorklist(Op.getNode());
22158 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
22164 // We always canonicalize the 8 x i16 and 16 x i8 shuffles into their UNPCK
22165 // variants as none of these have single-instruction variants that are
22166 // superior to the UNPCK formulation.
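// For example, the v8i16 mask <0, 0, 1, 1, 2, 2, 3, 3> is simply PUNPCKLWD of
// the input with itself.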
22167 if (!FloatDomain &&
22168 (Mask.equals(0, 0, 1, 1, 2, 2, 3, 3) ||
22169 Mask.equals(4, 4, 5, 5, 6, 6, 7, 7) ||
22170 Mask.equals(0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7) ||
22171 Mask.equals(8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15,
22173 bool Lo = Mask[0] == 0;
22174 unsigned Shuffle = Lo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
22175 if (Depth == 1 && Root->getOpcode() == Shuffle)
22176 return false; // Nothing to do!
22178 switch (Mask.size()) {
22180 ShuffleVT = MVT::v8i16;
22183 ShuffleVT = MVT::v16i8;
22186 llvm_unreachable("Impossible mask size!");
22188 Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
22189 DCI.AddToWorklist(Op.getNode());
22190 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op, Op);
22191 DCI.AddToWorklist(Op.getNode());
22192 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
22197 // Don't try to re-form single instruction chains under any circumstances now
22198 // that we've done encoding canonicalization for them.
22202 // If we have 3 or more shuffle instructions or a chain involving PSHUFB, we
22203 // can replace them with a single PSHUFB instruction profitably. Intel's
22204 // manuals suggest only using PSHUFB if doing so replaces 5 instructions, but
22205 // in practice PSHUFB tends to be *very* fast so we're more aggressive.
22206 if ((Depth >= 3 || HasPSHUFB) && Subtarget->hasSSSE3()) {
22207 SmallVector<SDValue, 16> PSHUFBMask;
22208 assert(Mask.size() <= 16 && "Can't shuffle elements smaller than bytes!");
22209 int Ratio = 16 / Mask.size();
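// Expand the mask to a byte-level PSHUFB control mask. For example, a
// 4-element mask <2, u, 0, 1> (Ratio == 4) becomes the byte mask
// <8,9,10,11, u,u,u,u, 0,1,2,3, 4,5,6,7>.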
22210 for (unsigned i = 0; i < 16; ++i) {
22211 if (Mask[i / Ratio] == SM_SentinelUndef) {
22212 PSHUFBMask.push_back(DAG.getUNDEF(MVT::i8));
22215 int M = Mask[i / Ratio] != SM_SentinelZero
22216 ? Ratio * Mask[i / Ratio] + i % Ratio
22218 PSHUFBMask.push_back(DAG.getConstant(M, MVT::i8));
22220 Op = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Input);
22221 DCI.AddToWorklist(Op.getNode());
22222 SDValue PSHUFBMaskOp =
22223 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, PSHUFBMask);
22224 DCI.AddToWorklist(PSHUFBMaskOp.getNode());
22225 Op = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, Op, PSHUFBMaskOp);
22226 DCI.AddToWorklist(Op.getNode());
22227 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
22232 // Failed to find any combines.
22236 /// \brief Fully generic combining of x86 shuffle instructions.
22238 /// This should be the last combine run over the x86 shuffle instructions. Once
22239 /// they have been fully optimized, this will recursively consider all chains
22240 /// of single-use shuffle instructions, build a generic model of the cumulative
22241 /// shuffle operation, and check for simpler instructions which implement this
22242 /// operation. We use this primarily for two purposes:
22244 /// 1) Collapse generic shuffles to specialized single instructions when
22245 /// equivalent. In most cases, this is just an encoding size win, but
22246 /// sometimes we will collapse multiple generic shuffles into a single
22247 /// special-purpose shuffle.
22248 /// 2) Look for sequences of shuffle instructions with 3 or more total
22249 /// instructions, and replace them with the slightly more expensive SSSE3
22250 /// PSHUFB instruction if available. We do this as the last combining step
22251 /// to ensure we avoid using PSHUFB if we can implement the shuffle with
22252 /// a suitable short sequence of other instructions. The PSHUFB will either
22253 /// use a register or have to read from memory and so is slightly (but only
22254 /// slightly) more expensive than the other shuffle instructions.
22256 /// Because this is inherently a quadratic operation (for each shuffle in
22257 /// a chain, we recurse up the chain), the depth is limited to 8 instructions.
22258 /// This should never be an issue in practice as the shuffle lowering doesn't
22259 /// produce sequences of more than 8 instructions.
22261 /// FIXME: We will currently miss some cases where the redundant shuffling
22262 /// would simplify under the threshold for PSHUFB formation because of
22263 /// combine-ordering. To fix this, we should do the redundant instruction
22264 /// combining in this recursive walk.
22265 static bool combineX86ShufflesRecursively(SDValue Op, SDValue Root,
22266 ArrayRef<int> RootMask,
22267 int Depth, bool HasPSHUFB,
22269 TargetLowering::DAGCombinerInfo &DCI,
22270 const X86Subtarget *Subtarget) {
22271 // Bound the depth of our recursive combine because this is ultimately
22272 // quadratic in nature.
22276 // Directly rip through bitcasts to find the underlying operand.
22277 while (Op.getOpcode() == ISD::BITCAST && Op.getOperand(0).hasOneUse())
22278 Op = Op.getOperand(0);
22280 MVT VT = Op.getSimpleValueType();
22281 if (!VT.isVector())
22282 return false; // Bail if we hit a non-vector.
22283 // FIXME: This routine should be taught about 256-bit shuffles, or a 256-bit
22284 // version should be added.
22285 if (VT.getSizeInBits() != 128)
22288 assert(Root.getSimpleValueType().isVector() &&
22289 "Shuffles operate on vector types!");
22290 assert(VT.getSizeInBits() == Root.getSimpleValueType().getSizeInBits() &&
22291 "Can only combine shuffles of the same vector register size.");
22293 if (!isTargetShuffle(Op.getOpcode()))
22295 SmallVector<int, 16> OpMask;
22297 bool HaveMask = getTargetShuffleMask(Op.getNode(), VT, OpMask, IsUnary);
22298 // We can only combine unary shuffles for which we can decode the mask.
22299 if (!HaveMask || !IsUnary)
22302 assert(VT.getVectorNumElements() == OpMask.size() &&
22303 "Different mask size from vector size!");
22304 assert(((RootMask.size() > OpMask.size() &&
22305 RootMask.size() % OpMask.size() == 0) ||
22306 (OpMask.size() > RootMask.size() &&
22307 OpMask.size() % RootMask.size() == 0) ||
22308 OpMask.size() == RootMask.size()) &&
22309 "The smaller number of elements must divide the larger.");
22310 int RootRatio = std::max<int>(1, OpMask.size() / RootMask.size());
22311 int OpRatio = std::max<int>(1, RootMask.size() / OpMask.size());
22312 assert(((RootRatio == 1 && OpRatio == 1) ||
22313 (RootRatio == 1) != (OpRatio == 1)) &&
22314 "Must not have a ratio for both incoming and op masks!");
22316 SmallVector<int, 16> Mask;
22317 Mask.reserve(std::max(OpMask.size(), RootMask.size()));
22319 // Merge this shuffle operation's mask into our accumulated mask. Note that
22320 // this shuffle's mask will be the first applied to the input, followed by the
22321 // root mask to get us all the way to the root value arrangement. The reason
22322 // for this order is that we are recursing up the operation chain.
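// When both masks have the same size (both ratios are 1) this is simply
// Mask[i] = OpMask[RootMask[i]], modulo the zero/undef sentinels handled
// below; the ratios generalize this to masks of different granularities.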
22323 for (int i = 0, e = std::max(OpMask.size(), RootMask.size()); i < e; ++i) {
22324 int RootIdx = i / RootRatio;
22325 if (RootMask[RootIdx] < 0) {
22326 // This is a zero or undef lane; we're done.
22327 Mask.push_back(RootMask[RootIdx]);
22331 int RootMaskedIdx = RootMask[RootIdx] * RootRatio + i % RootRatio;
22332 int OpIdx = RootMaskedIdx / OpRatio;
22333 if (OpMask[OpIdx] < 0) {
22334 // The incoming lanes are zero or undef, it doesn't matter which ones we
22336 Mask.push_back(OpMask[OpIdx]);
22340 // Ok, we have non-zero lanes, map them through.
22341 Mask.push_back(OpMask[OpIdx] * OpRatio +
22342 RootMaskedIdx % OpRatio);
22345 // See if we can recurse into the operand to combine more things.
22346 switch (Op.getOpcode()) {
22347 case X86ISD::PSHUFB:
22349 case X86ISD::PSHUFD:
22350 case X86ISD::PSHUFHW:
22351 case X86ISD::PSHUFLW:
22352 if (Op.getOperand(0).hasOneUse() &&
22353 combineX86ShufflesRecursively(Op.getOperand(0), Root, Mask, Depth + 1,
22354 HasPSHUFB, DAG, DCI, Subtarget))
22358 case X86ISD::UNPCKL:
22359 case X86ISD::UNPCKH:
22360 assert(Op.getOperand(0) == Op.getOperand(1) && "We only combine unary shuffles!");
22361 // We can't check for single use; we have to check that this shuffle is the only user.
22362 if (Op->isOnlyUserOf(Op.getOperand(0).getNode()) &&
22363 combineX86ShufflesRecursively(Op.getOperand(0), Root, Mask, Depth + 1,
22364 HasPSHUFB, DAG, DCI, Subtarget))
22369 // Minor canonicalization of the accumulated shuffle mask to make it easier
22370 // to match below. All this does is detect masks with sequential pairs of
22371 // elements, and shrink them to the half-width mask. It does this in a loop
22372 // so it will reduce the size of the mask to the minimal width mask which
22373 // performs an equivalent shuffle.
22374 SmallVector<int, 16> WidenedMask;
22375 while (Mask.size() > 1 && canWidenShuffleElements(Mask, WidenedMask)) {
22376 Mask = std::move(WidenedMask);
22377 WidenedMask.clear();
22380 return combineX86ShuffleChain(Op, Root, Mask, Depth, HasPSHUFB, DAG, DCI,
22384 /// \brief Get the PSHUF-style mask from a PSHUF node.
22386 /// This is a very minor wrapper around getTargetShuffleMask to ease forming v4
22387 /// PSHUF-style masks that can be reused with such instructions.
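/// For example, for a PSHUFHW node the full mask <0, 1, 2, 3, 5, 4, 7, 6>
/// should come back as the v4-style mask <1, 0, 3, 2>.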
22388 static SmallVector<int, 4> getPSHUFShuffleMask(SDValue N) {
22389 SmallVector<int, 4> Mask;
22391 bool HaveMask = getTargetShuffleMask(N.getNode(), N.getSimpleValueType(), Mask, IsUnary);
22395 switch (N.getOpcode()) {
22396 case X86ISD::PSHUFD:
22398 case X86ISD::PSHUFLW:
22401 case X86ISD::PSHUFHW:
22402 Mask.erase(Mask.begin(), Mask.begin() + 4);
22403 for (int &M : Mask)
22407 llvm_unreachable("No valid shuffle instruction found!");
22411 /// \brief Search for a combinable shuffle across a chain ending in pshufd.
22413 /// We walk up the chain and look for a combinable shuffle, skipping over
22414 /// shuffles that we could hoist this shuffle's transformation past without
22415 /// altering anything.
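/// For example, (PSHUFD (PSHUFD x, <2,3,0,1>), <1,0,3,2>) can be rewritten as
/// the single shuffle (PSHUFD x, <3,2,1,0>).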
22417 combineRedundantDWordShuffle(SDValue N, MutableArrayRef<int> Mask,
22419 TargetLowering::DAGCombinerInfo &DCI) {
22420 assert(N.getOpcode() == X86ISD::PSHUFD &&
22421 "Called with something other than an x86 128-bit half shuffle!");
22424 // Walk up a single-use chain looking for a combinable shuffle. Keep a stack
22425 // of the shuffles in the chain so that we can form a fresh chain to replace
22427 SmallVector<SDValue, 8> Chain;
22428 SDValue V = N.getOperand(0);
22429 for (; V.hasOneUse(); V = V.getOperand(0)) {
22430 switch (V.getOpcode()) {
22432 return SDValue(); // Nothing combined!
22435 // Skip bitcasts as we always know the type for the target specific
22439 case X86ISD::PSHUFD:
22440 // Found another dword shuffle.
22443 case X86ISD::PSHUFLW:
22444 // Check that the low words (being shuffled) are the identity in the
22445 // dword shuffle, and the high words are self-contained.
22446 if (Mask[0] != 0 || Mask[1] != 1 ||
22447 !(Mask[2] >= 2 && Mask[2] < 4 && Mask[3] >= 2 && Mask[3] < 4))
22450 Chain.push_back(V);
22453 case X86ISD::PSHUFHW:
22454 // Check that the high words (being shuffled) are the identity in the
22455 // dword shuffle, and the low words are self-contained.
22456 if (Mask[2] != 2 || Mask[3] != 3 ||
22457 !(Mask[0] >= 0 && Mask[0] < 2 && Mask[1] >= 0 && Mask[1] < 2))
22460 Chain.push_back(V);
22463 case X86ISD::UNPCKL:
22464 case X86ISD::UNPCKH:
22465 // For either i8 -> i16 or i16 -> i32 unpacks, we can combine a dword
22466 // shuffle into a preceding word shuffle.
22467 if (V.getValueType() != MVT::v16i8 && V.getValueType() != MVT::v8i16)
22470 // Search for a half-shuffle which we can combine with.
22471 unsigned CombineOp =
22472 V.getOpcode() == X86ISD::UNPCKL ? X86ISD::PSHUFLW : X86ISD::PSHUFHW;
22473 if (V.getOperand(0) != V.getOperand(1) ||
22474 !V->isOnlyUserOf(V.getOperand(0).getNode()))
22476 Chain.push_back(V);
22477 V = V.getOperand(0);
22479 switch (V.getOpcode()) {
22481 return SDValue(); // Nothing to combine.
22483 case X86ISD::PSHUFLW:
22484 case X86ISD::PSHUFHW:
22485 if (V.getOpcode() == CombineOp)
22488 Chain.push_back(V);
22492 V = V.getOperand(0);
22496 } while (V.hasOneUse());
22499 // Break out of the loop if we break out of the switch.
22503 if (!V.hasOneUse())
22504 // We fell out of the loop without finding a viable combining instruction.
22507 // Merge this node's mask and our incoming mask.
22508 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
22509 for (int &M : Mask)
22511 V = DAG.getNode(V.getOpcode(), DL, V.getValueType(), V.getOperand(0),
22512 getV4X86ShuffleImm8ForMask(Mask, DAG));
22514 // Rebuild the chain around this new shuffle.
22515 while (!Chain.empty()) {
22516 SDValue W = Chain.pop_back_val();
22518 if (V.getValueType() != W.getOperand(0).getValueType())
22519 V = DAG.getNode(ISD::BITCAST, DL, W.getOperand(0).getValueType(), V);
22521 switch (W.getOpcode()) {
22523 llvm_unreachable("Only PSHUF and UNPCK instructions get here!");
22525 case X86ISD::UNPCKL:
22526 case X86ISD::UNPCKH:
22527 V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, V);
22530 case X86ISD::PSHUFD:
22531 case X86ISD::PSHUFLW:
22532 case X86ISD::PSHUFHW:
22533 V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, W.getOperand(1));
22537 if (V.getValueType() != N.getValueType())
22538 V = DAG.getNode(ISD::BITCAST, DL, N.getValueType(), V);
22540 // Return the new chain to replace N.
22544 /// \brief Search for a combinable shuffle across a chain ending in pshuflw or pshufhw.
22546 /// We walk up the chain, skipping shuffles of the other half and looking
22547 /// through shuffles which switch halves trying to find a shuffle of the same
22548 /// pair of dwords.
22549 static bool combineRedundantHalfShuffle(SDValue N, MutableArrayRef<int> Mask,
22551 TargetLowering::DAGCombinerInfo &DCI) {
22553 (N.getOpcode() == X86ISD::PSHUFLW || N.getOpcode() == X86ISD::PSHUFHW) &&
22554 "Called with something other than an x86 128-bit half shuffle!");
22556 unsigned CombineOpcode = N.getOpcode();
22558 // Walk up a single-use chain looking for a combinable shuffle.
22559 SDValue V = N.getOperand(0);
22560 for (; V.hasOneUse(); V = V.getOperand(0)) {
22561 switch (V.getOpcode()) {
22563 return false; // Nothing combined!
22566 // Skip bitcasts as we always know the type for the target specific
22570 case X86ISD::PSHUFLW:
22571 case X86ISD::PSHUFHW:
22572 if (V.getOpcode() == CombineOpcode)
22575 // Other-half shuffles are no-ops.
22578 // Break out of the loop if we break out of the switch.
22582 if (!V.hasOneUse())
22583 // We fell out of the loop without finding a viable combining instruction.
22586 // Combine away the bottom node as its shuffle will be accumulated into
22587 // a preceding shuffle.
22588 DCI.CombineTo(N.getNode(), N.getOperand(0), /*AddTo*/ true);
22590 // Record the old value.
22593 // Merge this node's mask and our incoming mask (adjusted to account for all
22594 // the pshufd instructions encountered).
22595 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
22596 for (int &M : Mask)
22598 V = DAG.getNode(V.getOpcode(), DL, MVT::v8i16, V.getOperand(0),
22599 getV4X86ShuffleImm8ForMask(Mask, DAG));
22601 // Check that the shuffles didn't cancel each other out. If not, we need to
22602 // combine to the new one.
22604 // Replace the combinable shuffle with the combined one, updating all users
22605 // so that we re-evaluate the chain here.
22606 DCI.CombineTo(Old.getNode(), V, /*AddTo*/ true);
22611 /// \brief Try to combine x86 target specific shuffles.
22612 static SDValue PerformTargetShuffleCombine(SDValue N, SelectionDAG &DAG,
22613 TargetLowering::DAGCombinerInfo &DCI,
22614 const X86Subtarget *Subtarget) {
22616 MVT VT = N.getSimpleValueType();
22617 SmallVector<int, 4> Mask;
22619 switch (N.getOpcode()) {
22620 case X86ISD::PSHUFD:
22621 case X86ISD::PSHUFLW:
22622 case X86ISD::PSHUFHW:
22623 Mask = getPSHUFShuffleMask(N);
22624 assert(Mask.size() == 4);
22630 // Nuke no-op shuffles that show up after combining.
22631 if (isNoopShuffleMask(Mask))
22632 return DCI.CombineTo(N.getNode(), N.getOperand(0), /*AddTo*/ true);
22634 // Look for simplifications involving one or two shuffle instructions.
22635 SDValue V = N.getOperand(0);
22636 switch (N.getOpcode()) {
22639 case X86ISD::PSHUFLW:
22640 case X86ISD::PSHUFHW:
22641 assert(VT == MVT::v8i16);
22644 if (combineRedundantHalfShuffle(N, Mask, DAG, DCI))
22645 return SDValue(); // We combined away this shuffle, so we're done.
22647 // See if this reduces to a PSHUFD which is no more expensive and can
22648 // combine with more operations. Note that it has to at least flip the
22649 // dwords as otherwise it would have been removed as a no-op.
22650 if (Mask[0] == 2 && Mask[1] == 3 && Mask[2] == 0 && Mask[3] == 1) {
22651 int DMask[] = {0, 1, 2, 3};
22652 int DOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 2;
22653 DMask[DOffset + 0] = DOffset + 1;
22654 DMask[DOffset + 1] = DOffset + 0;
22655 V = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V);
22656 DCI.AddToWorklist(V.getNode());
22657 V = DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V,
22658 getV4X86ShuffleImm8ForMask(DMask, DAG));
22659 DCI.AddToWorklist(V.getNode());
22660 return DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V);
22663 // Look for shuffle patterns which can be implemented as a single unpack.
22664 // FIXME: This doesn't handle the location of the PSHUFD generically, and
22665 // only works when we have a PSHUFD followed by two half-shuffles.
22666 if (Mask[0] == Mask[1] && Mask[2] == Mask[3] &&
22667 (V.getOpcode() == X86ISD::PSHUFLW ||
22668 V.getOpcode() == X86ISD::PSHUFHW) &&
22669 V.getOpcode() != N.getOpcode() &&
22671 SDValue D = V.getOperand(0);
22672 while (D.getOpcode() == ISD::BITCAST && D.hasOneUse())
22673 D = D.getOperand(0);
22674 if (D.getOpcode() == X86ISD::PSHUFD && D.hasOneUse()) {
22675 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
22676 SmallVector<int, 4> DMask = getPSHUFShuffleMask(D);
22677 int NOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
22678 int VOffset = V.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
22680 for (int i = 0; i < 4; ++i) {
22681 WordMask[i + NOffset] = Mask[i] + NOffset;
22682 WordMask[i + VOffset] = VMask[i] + VOffset;
22684 // Map the word mask through the DWord mask.
22686 for (int i = 0; i < 8; ++i)
22687 MappedMask[i] = 2 * DMask[WordMask[i] / 2] + WordMask[i] % 2;
22688 const int UnpackLoMask[] = {0, 0, 1, 1, 2, 2, 3, 3};
22689 const int UnpackHiMask[] = {4, 4, 5, 5, 6, 6, 7, 7};
22690 if (std::equal(std::begin(MappedMask), std::end(MappedMask),
22691 std::begin(UnpackLoMask)) ||
22692 std::equal(std::begin(MappedMask), std::end(MappedMask),
22693 std::begin(UnpackHiMask))) {
22694 // We can replace all three shuffles with an unpack.
22695 V = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, D.getOperand(0));
22696 DCI.AddToWorklist(V.getNode());
22697 return DAG.getNode(MappedMask[0] == 0 ? X86ISD::UNPCKL
22699 DL, MVT::v8i16, V, V);
22706 case X86ISD::PSHUFD:
22707 if (SDValue NewN = combineRedundantDWordShuffle(N, Mask, DAG, DCI))
22716 /// \brief Try to combine a shuffle into a target-specific add-sub node.
22718 /// We combine this directly on the abstract vector shuffle nodes so it is
22719 /// easier to generically match. We also insert dummy vector shuffle nodes for
22720 /// the operands which explicitly discard the lanes that are unused by this
22721 /// operation, in order to flow the fact that they're unused through the rest
22722 /// of the combiner.
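///
/// For example, (shuffle (fsub A, B), (fadd A, B), <0, 5, 2, 7>) takes the
/// subtraction in the even lanes and the addition in the odd lanes, which is
/// exactly what v4f32 ADDSUBPS computes.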
22723 static SDValue combineShuffleToAddSub(SDNode *N, SelectionDAG &DAG) {
22725 EVT VT = N->getValueType(0);
22727 // We only handle target-independent shuffles.
22728 // FIXME: It would be easy and harmless to use the target shuffle mask
22729 // extraction tool to support more.
22730 if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
22733 auto *SVN = cast<ShuffleVectorSDNode>(N);
22734 ArrayRef<int> Mask = SVN->getMask();
22735 SDValue V1 = N->getOperand(0);
22736 SDValue V2 = N->getOperand(1);
22738 // We require the first shuffle operand to be the SUB node, and the second to
22739 // be the ADD node.
22740 // FIXME: We should support the commuted patterns.
22741 if (V1->getOpcode() != ISD::FSUB || V2->getOpcode() != ISD::FADD)
22744 // If there are other uses of these operations we can't fold them.
22745 if (!V1->hasOneUse() || !V2->hasOneUse())
22748 // Ensure that both operations have the same operands. Note that we can
22749 // commute the FADD operands.
22750 SDValue LHS = V1->getOperand(0), RHS = V1->getOperand(1);
22751 if ((V2->getOperand(0) != LHS || V2->getOperand(1) != RHS) &&
22752 (V2->getOperand(0) != RHS || V2->getOperand(1) != LHS))
22755 // We're looking for blends between FADD and FSUB nodes. We insist on these
22756 // nodes being lined up in a specific expected pattern.
22757 if (!(isShuffleEquivalent(Mask, 0, 3) ||
22758 isShuffleEquivalent(Mask, 0, 5, 2, 7) ||
22759 isShuffleEquivalent(Mask, 0, 9, 2, 11, 4, 13, 6, 15)))
22762 // Only specific types are legal at this point, assert so we notice if and
22763 // when these change.
22764 assert((VT == MVT::v4f32 || VT == MVT::v2f64 || VT == MVT::v8f32 ||
22765 VT == MVT::v4f64) &&
22766 "Unknown vector type encountered!");
22768 return DAG.getNode(X86ISD::ADDSUB, DL, VT, LHS, RHS);
22771 /// PerformShuffleCombine - Performs several different shuffle combines.
22772 static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
22773 TargetLowering::DAGCombinerInfo &DCI,
22774 const X86Subtarget *Subtarget) {
22776 SDValue N0 = N->getOperand(0);
22777 SDValue N1 = N->getOperand(1);
22778 EVT VT = N->getValueType(0);
22780 // Don't create instructions with illegal types after legalize types has run.
22781 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
22782 if (!DCI.isBeforeLegalize() && !TLI.isTypeLegal(VT.getVectorElementType()))
22785 // If we have legalized the vector types, look for blends of FADD and FSUB
22786 // nodes that we can fuse into an ADDSUB node.
22787 if (TLI.isTypeLegal(VT) && Subtarget->hasSSE3())
22788 if (SDValue AddSub = combineShuffleToAddSub(N, DAG))
22791 // Combine 256-bit vector shuffles. This is only profitable when in AVX mode
22792 if (Subtarget->hasFp256() && VT.is256BitVector() &&
22793 N->getOpcode() == ISD::VECTOR_SHUFFLE)
22794 return PerformShuffleCombine256(N, DAG, DCI, Subtarget);
22796 // During Type Legalization, when promoting illegal vector types,
22797 // the backend might introduce new shuffle dag nodes and bitcasts.
22799 // This code performs the following transformation:
22800 // fold: (shuffle (bitcast (BINOP A, B)), Undef, <Mask>) ->
22801 // (shuffle (BINOP (bitcast A), (bitcast B)), Undef, <Mask>)
22803 // We do this only if both the bitcast and the BINOP dag nodes have
22804 // one use. Also, perform this transformation only if the new binary
22805 // operation is legal. This is to avoid introducing dag nodes that
22806 // potentially need to be further expanded (or custom lowered) into a
22807 // less optimal sequence of dag nodes.
22808 if (!DCI.isBeforeLegalize() && DCI.isBeforeLegalizeOps() &&
22809 N1.getOpcode() == ISD::UNDEF && N0.hasOneUse() &&
22810 N0.getOpcode() == ISD::BITCAST) {
22811 SDValue BC0 = N0.getOperand(0);
22812 EVT SVT = BC0.getValueType();
22813 unsigned Opcode = BC0.getOpcode();
22814 unsigned NumElts = VT.getVectorNumElements();
22816 if (BC0.hasOneUse() && SVT.isVector() &&
22817 SVT.getVectorNumElements() * 2 == NumElts &&
22818 TLI.isOperationLegal(Opcode, VT)) {
22819 bool CanFold = false;
22831 unsigned SVTNumElts = SVT.getVectorNumElements();
22832 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
22833 for (unsigned i = 0, e = SVTNumElts; i != e && CanFold; ++i)
22834 CanFold = SVOp->getMaskElt(i) == (int)(i * 2);
22835 for (unsigned i = SVTNumElts, e = NumElts; i != e && CanFold; ++i)
22836 CanFold = SVOp->getMaskElt(i) < 0;
22839 SDValue BC00 = DAG.getNode(ISD::BITCAST, dl, VT, BC0.getOperand(0));
22840 SDValue BC01 = DAG.getNode(ISD::BITCAST, dl, VT, BC0.getOperand(1));
22841 SDValue NewBinOp = DAG.getNode(BC0.getOpcode(), dl, VT, BC00, BC01);
22842 return DAG.getVectorShuffle(VT, dl, NewBinOp, N1, &SVOp->getMask()[0]);
22848 // Only handle 128-bit wide vectors from here on.
22848 if (!VT.is128BitVector())
22851 // Combine a vector_shuffle that is equal to build_vector load1, load2, load3,
22852 // load4, <0, 1, 2, 3> into a 128-bit load if the load addresses are
22853 // consecutive, non-overlapping, and in the right order.
22854 SmallVector<SDValue, 16> Elts;
22855 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
22856 Elts.push_back(getShuffleScalarElt(N, i, DAG, 0));
22858 SDValue LD = EltsFromConsecutiveLoads(VT, Elts, dl, DAG, true);
22862 if (isTargetShuffle(N->getOpcode())) {
22864 PerformTargetShuffleCombine(SDValue(N, 0), DAG, DCI, Subtarget);
22865 if (Shuffle.getNode())
22868 // Try recursively combining arbitrary sequences of x86 shuffle
22869 // instructions into higher-order shuffles. We do this after combining
22870 // specific PSHUF instruction sequences into their minimal form so that we
22871 // can evaluate how many specialized shuffle instructions are involved in
22872 // a particular chain.
22873 SmallVector<int, 1> NonceMask; // Just a placeholder.
22874 NonceMask.push_back(0);
22875 if (combineX86ShufflesRecursively(SDValue(N, 0), SDValue(N, 0), NonceMask,
22876 /*Depth*/ 1, /*HasPSHUFB*/ false, DAG,
22878 return SDValue(); // This routine will use CombineTo to replace N.
22884 /// PerformTruncateCombine - Converts a truncate operation to
22885 /// a sequence of vector shuffle operations.
22886 /// This is possible when we truncate a 256-bit vector to a 128-bit vector.
22887 static SDValue PerformTruncateCombine(SDNode *N, SelectionDAG &DAG,
22888 TargetLowering::DAGCombinerInfo &DCI,
22889 const X86Subtarget *Subtarget) {
22893 /// XFormVExtractWithShuffleIntoLoad - Check if a vector extract from a target
22894 /// specific shuffle of a load can be folded into a single element load.
22895 /// Similar handling for VECTOR_SHUFFLE is performed by DAGCombiner, but
22896 /// shuffles have been custom lowered so we need to handle those here.
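/// For example, extracting element 0 of (PSHUFD (load p), <2, 3, 0, 1>) can be
/// re-formed as a generic shuffle plus extract so that the DAG combiner can
/// then fold it into a scalar load of element 2.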
22897 static SDValue XFormVExtractWithShuffleIntoLoad(SDNode *N, SelectionDAG &DAG,
22898 TargetLowering::DAGCombinerInfo &DCI) {
22899 if (DCI.isBeforeLegalizeOps())
22902 SDValue InVec = N->getOperand(0);
22903 SDValue EltNo = N->getOperand(1);
22905 if (!isa<ConstantSDNode>(EltNo))
22908 EVT OriginalVT = InVec.getValueType();
22910 if (InVec.getOpcode() == ISD::BITCAST) {
22911 // Don't duplicate a load with other uses.
22912 if (!InVec.hasOneUse())
22914 EVT BCVT = InVec.getOperand(0).getValueType();
22915 if (BCVT.getVectorNumElements() != OriginalVT.getVectorNumElements())
22917 InVec = InVec.getOperand(0);
22920 EVT CurrentVT = InVec.getValueType();
22922 if (!isTargetShuffle(InVec.getOpcode()))
22925 // Don't duplicate a load with other uses.
22926 if (!InVec.hasOneUse())
22929 SmallVector<int, 16> ShuffleMask;
22931 if (!getTargetShuffleMask(InVec.getNode(), CurrentVT.getSimpleVT(),
22932 ShuffleMask, UnaryShuffle))
22935 // Select the input vector, guarding against an out-of-range extract index.
22936 unsigned NumElems = CurrentVT.getVectorNumElements();
22937 int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
22938 int Idx = (Elt > (int)NumElems) ? -1 : ShuffleMask[Elt];
22939 SDValue LdNode = (Idx < (int)NumElems) ? InVec.getOperand(0)
22940 : InVec.getOperand(1);
22942 // If both operands of the shuffle are the same node, then allow 2 uses.
22943 unsigned AllowedUses = InVec.getNumOperands() > 1 &&
22944 InVec.getOperand(0) == InVec.getOperand(1) ? 2 : 1;
22946 if (LdNode.getOpcode() == ISD::BITCAST) {
22947 // Don't duplicate a load with other uses.
22948 if (!LdNode.getNode()->hasNUsesOfValue(AllowedUses, 0))
22951 AllowedUses = 1; // only allow 1 load use if we have a bitcast
22952 LdNode = LdNode.getOperand(0);
22955 if (!ISD::isNormalLoad(LdNode.getNode()))
22958 LoadSDNode *LN0 = cast<LoadSDNode>(LdNode);
22960 if (!LN0 || !LN0->hasNUsesOfValue(AllowedUses, 0) || LN0->isVolatile())
22963 EVT EltVT = N->getValueType(0);
22964 // If there's a bitcast before the shuffle, check if the load type and
22965 // alignment are valid.
22966 unsigned Align = LN0->getAlignment();
22967 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
22968 unsigned NewAlign = TLI.getDataLayout()->getABITypeAlignment(
22969 EltVT.getTypeForEVT(*DAG.getContext()));
22971 if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, EltVT))
22974 // All checks match, so transform back to vector_shuffle so that the DAG
22975 // combiner can finish the job.
22978 // Create a shuffle node, taking into account the case that it's a unary shuffle.
22979 SDValue Shuffle = (UnaryShuffle) ? DAG.getUNDEF(CurrentVT)
22980 : InVec.getOperand(1);
22981 Shuffle = DAG.getVectorShuffle(CurrentVT, dl,
22982 InVec.getOperand(0), Shuffle,
22984 Shuffle = DAG.getNode(ISD::BITCAST, dl, OriginalVT, Shuffle);
22985 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, N->getValueType(0), Shuffle,
22989 /// PerformEXTRACT_VECTOR_ELTCombine - Detect vector gather/scatter index
22990 /// generation and convert it from being a bunch of shuffles and extracts
22991 /// into a somewhat faster sequence. For i686, the best sequence is apparently
22992 /// storing the value and loading scalars back, while for x64 we should
22993 /// use 64-bit extracts and shifts.
22994 static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG,
22995 TargetLowering::DAGCombinerInfo &DCI) {
22996 SDValue NewOp = XFormVExtractWithShuffleIntoLoad(N, DAG, DCI);
22997 if (NewOp.getNode())
23000 SDValue InputVector = N->getOperand(0);
23002 // Detect mmx to i32 conversion through a v2i32 elt extract.
23003 if (InputVector.getOpcode() == ISD::BITCAST && InputVector.hasOneUse() &&
23004 N->getValueType(0) == MVT::i32 &&
23005 InputVector.getValueType() == MVT::v2i32) {
23007 // The bitcast source is a direct mmx result.
23008 SDValue MMXSrc = InputVector.getNode()->getOperand(0);
23009 if (MMXSrc.getValueType() == MVT::x86mmx)
23010 return DAG.getNode(X86ISD::MMX_MOVD2W, SDLoc(InputVector),
23011 N->getValueType(0),
23012 InputVector.getNode()->getOperand(0));
23014 // The mmx is indirect: (i64 extract_elt (v1i64 bitcast (x86mmx ...))).
23015 SDValue MMXSrcOp = MMXSrc.getOperand(0);
23016 if (MMXSrc.getOpcode() == ISD::EXTRACT_VECTOR_ELT && MMXSrc.hasOneUse() &&
23017 MMXSrc.getValueType() == MVT::i64 && MMXSrcOp.hasOneUse() &&
23018 MMXSrcOp.getOpcode() == ISD::BITCAST &&
23019 MMXSrcOp.getValueType() == MVT::v1i64 &&
23020 MMXSrcOp.getOperand(0).getValueType() == MVT::x86mmx)
23021 return DAG.getNode(X86ISD::MMX_MOVD2W, SDLoc(InputVector),
23022 N->getValueType(0),
23023 MMXSrcOp.getOperand(0));
23026 // Only operate on vectors of 4 elements, where the alternative shuffling
23027 // gets to be more expensive.
23028 if (InputVector.getValueType() != MVT::v4i32)
23031 // Check whether every use of InputVector is an EXTRACT_VECTOR_ELT with a
23032 // single use which is a sign-extend or zero-extend, and all elements are
23034 SmallVector<SDNode *, 4> Uses;
23035 unsigned ExtractedElements = 0;
23036 for (SDNode::use_iterator UI = InputVector.getNode()->use_begin(),
23037 UE = InputVector.getNode()->use_end(); UI != UE; ++UI) {
23038 if (UI.getUse().getResNo() != InputVector.getResNo())
23041 SDNode *Extract = *UI;
23042 if (Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
23045 if (Extract->getValueType(0) != MVT::i32)
23047 if (!Extract->hasOneUse())
23049 if (Extract->use_begin()->getOpcode() != ISD::SIGN_EXTEND &&
23050 Extract->use_begin()->getOpcode() != ISD::ZERO_EXTEND)
23052 if (!isa<ConstantSDNode>(Extract->getOperand(1)))
23055 // Record which element was extracted.
23056 ExtractedElements |=
23057 1 << cast<ConstantSDNode>(Extract->getOperand(1))->getZExtValue();
23059 Uses.push_back(Extract);
23062 // If not all the elements were used, this may not be worthwhile.
23063 if (ExtractedElements != 15)
23066 // Ok, we've now decided to do the transformation.
23067 // If 64-bit shifts are legal, use the extract-shift sequence,
23068 // otherwise bounce the vector off the cache.
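// For example, with legal 64-bit shifts the v4i32 input is bitcast to v2i64;
// elements 0 and 2 are truncations of the two halves, while elements 1 and 3
// are obtained by shifting each half right by 32 before truncating.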
23069 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23071 SDLoc dl(InputVector);
23073 if (TLI.isOperationLegal(ISD::SRA, MVT::i64)) {
23074 SDValue Cst = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, InputVector);
23075 EVT VecIdxTy = DAG.getTargetLoweringInfo().getVectorIdxTy();
23076 SDValue BottomHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Cst,
23077 DAG.getConstant(0, VecIdxTy));
23078 SDValue TopHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Cst,
23079 DAG.getConstant(1, VecIdxTy));
23081 SDValue ShAmt = DAG.getConstant(32,
23082 DAG.getTargetLoweringInfo().getShiftAmountTy(MVT::i64));
23083 Vals[0] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BottomHalf);
23084 Vals[1] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
23085 DAG.getNode(ISD::SRA, dl, MVT::i64, BottomHalf, ShAmt));
23086 Vals[2] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, TopHalf);
23087 Vals[3] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
23088 DAG.getNode(ISD::SRA, dl, MVT::i64, TopHalf, ShAmt));
23090 // Store the value to a temporary stack slot.
23091 SDValue StackPtr = DAG.CreateStackTemporary(InputVector.getValueType());
23092 SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, InputVector, StackPtr,
23093 MachinePointerInfo(), false, false, 0);
23095 EVT ElementType = InputVector.getValueType().getVectorElementType();
23096 unsigned EltSize = ElementType.getSizeInBits() / 8;
23098 // Replace each use (extract) with a load of the appropriate element.
23099 for (unsigned i = 0; i < 4; ++i) {
23100 uint64_t Offset = EltSize * i;
23101 SDValue OffsetVal = DAG.getConstant(Offset, TLI.getPointerTy());
23103 SDValue ScalarAddr = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(),
23104 StackPtr, OffsetVal);
23106 // Load the scalar.
23107 Vals[i] = DAG.getLoad(ElementType, dl, Ch,
23108 ScalarAddr, MachinePointerInfo(),
23109 false, false, false, 0);
23114 // Replace the extracts
23115 for (SmallVectorImpl<SDNode *>::iterator UI = Uses.begin(),
23116 UE = Uses.end(); UI != UE; ++UI) {
23117 SDNode *Extract = *UI;
23119 SDValue Idx = Extract->getOperand(1);
23120 uint64_t IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
23121 DAG.ReplaceAllUsesOfValueWith(SDValue(Extract, 0), Vals[IdxVal]);
23124 // The replacement was made in place; don't return anything.
23128 /// \brief Matches a VSELECT onto min/max, or returns 0 if the node doesn't match.
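/// For example, (vselect (setcc x, y, ult), x, y) on v16i8 can map to
/// X86ISD::UMIN (PMINUB).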
23129 static std::pair<unsigned, bool>
23130 matchIntegerMINMAX(SDValue Cond, EVT VT, SDValue LHS, SDValue RHS,
23131 SelectionDAG &DAG, const X86Subtarget *Subtarget) {
23132 if (!VT.isVector())
23133 return std::make_pair(0, false);
23135 bool NeedSplit = false;
23136 switch (VT.getSimpleVT().SimpleTy) {
23137 default: return std::make_pair(0, false);
23140 if (!Subtarget->hasVLX())
23141 return std::make_pair(0, false);
23145 if (!Subtarget->hasBWI())
23146 return std::make_pair(0, false);
23150 if (!Subtarget->hasAVX512())
23151 return std::make_pair(0, false);
23156 if (!Subtarget->hasAVX2())
23158 if (!Subtarget->hasAVX())
23159 return std::make_pair(0, false);
23164 if (!Subtarget->hasSSE2())
23165 return std::make_pair(0, false);
23168 // SSE2 has only a small subset of the operations.
23169 bool hasUnsigned = Subtarget->hasSSE41() ||
23170 (Subtarget->hasSSE2() && VT == MVT::v16i8);
23171 bool hasSigned = Subtarget->hasSSE41() ||
23172 (Subtarget->hasSSE2() && VT == MVT::v8i16);
23174 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
23177 // Check for x CC y ? x : y.
23178 if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
23179 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
23184 Opc = hasUnsigned ? X86ISD::UMIN : 0; break;
23187 Opc = hasUnsigned ? X86ISD::UMAX : 0; break;
23190 Opc = hasSigned ? X86ISD::SMIN : 0; break;
23193 Opc = hasSigned ? X86ISD::SMAX : 0; break;
23195 // Check for x CC y ? y : x -- a min/max with reversed arms.
23196 } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) &&
23197 DAG.isEqualTo(RHS, Cond.getOperand(0))) {
23202 Opc = hasUnsigned ? X86ISD::UMAX : 0; break;
23205 Opc = hasUnsigned ? X86ISD::UMIN : 0; break;
23208 Opc = hasSigned ? X86ISD::SMAX : 0; break;
23211 Opc = hasSigned ? X86ISD::SMIN : 0; break;
23215 return std::make_pair(Opc, NeedSplit);
23219 transformVSELECTtoBlendVECTOR_SHUFFLE(SDNode *N, SelectionDAG &DAG,
23220 const X86Subtarget *Subtarget) {
23222 SDValue Cond = N->getOperand(0);
23223 SDValue LHS = N->getOperand(1);
23224 SDValue RHS = N->getOperand(2);
23226 if (Cond.getOpcode() == ISD::SIGN_EXTEND) {
23227 SDValue CondSrc = Cond->getOperand(0);
23228 if (CondSrc->getOpcode() == ISD::SIGN_EXTEND_INREG)
23229 Cond = CondSrc->getOperand(0);
23232 if (!ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
23235 // A vselect where all conditions and data are constants can be optimized into
23236 // a single vector load by SelectionDAGLegalize::ExpandBUILD_VECTOR().
23237 if (ISD::isBuildVectorOfConstantSDNodes(LHS.getNode()) &&
23238 ISD::isBuildVectorOfConstantSDNodes(RHS.getNode()))
23241 unsigned MaskValue = 0;
23242 if (!BUILD_VECTORtoBlendMask(cast<BuildVectorSDNode>(Cond), MaskValue))
23245 MVT VT = N->getSimpleValueType(0);
23246 unsigned NumElems = VT.getVectorNumElements();
23247 SmallVector<int, 8> ShuffleMask(NumElems, -1);
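// Translate the blend mask into a shuffle mask: a set bit i selects element i
// of RHS (index i + NumElems). For example, with v4i32 and MaskValue 0b0101
// the resulting mask is <4, 1, 6, 3>.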
23248 for (unsigned i = 0; i < NumElems; ++i) {
23249 // Be sure we emit undef where we can.
23250 if (Cond.getOperand(i)->getOpcode() == ISD::UNDEF)
23251 ShuffleMask[i] = -1;
23253 ShuffleMask[i] = i + NumElems * ((MaskValue >> i) & 1);
23256 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23257 if (!TLI.isShuffleMaskLegal(ShuffleMask, VT))
23259 return DAG.getVectorShuffle(VT, dl, LHS, RHS, &ShuffleMask[0]);
23262 /// PerformSELECTCombine - Do target-specific dag combines on SELECT and VSELECT
23264 static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
23265 TargetLowering::DAGCombinerInfo &DCI,
23266 const X86Subtarget *Subtarget) {
23268 SDValue Cond = N->getOperand(0);
23269 // Get the LHS/RHS of the select.
23270 SDValue LHS = N->getOperand(1);
23271 SDValue RHS = N->getOperand(2);
23272 EVT VT = LHS.getValueType();
23273 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23275 // If we have SSE[12] support, try to form min/max nodes. SSE min/max
23276 // instructions match the semantics of the common C idiom x<y?x:y but not
23277 // x<=y?x:y, because of how they handle negative zero (which can be
23278 // ignored in unsafe-math mode).
23279 // We also try to create v2f32 min/max nodes, which we later widen to v4f32.
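// For example, (select (setcc x, y, olt), x, y) on f32 can become
// X86ISD::FMIN (MINSS) once the NaN and signed-zero caveats below are
// satisfied.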
23280 if (Cond.getOpcode() == ISD::SETCC && VT.isFloatingPoint() &&
23281 VT != MVT::f80 && (TLI.isTypeLegal(VT) || VT == MVT::v2f32) &&
23282 (Subtarget->hasSSE2() ||
23283 (Subtarget->hasSSE1() && VT.getScalarType() == MVT::f32))) {
23284 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
23286 unsigned Opcode = 0;
23287 // Check for x CC y ? x : y.
23288 if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
23289 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
23293 // Converting this to a min would handle NaNs incorrectly, and swapping
23294 // the operands would cause it to handle comparisons between positive
23295 // and negative zero incorrectly.
23296 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
23297 if (!DAG.getTarget().Options.UnsafeFPMath &&
23298 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
23300 std::swap(LHS, RHS);
23302 Opcode = X86ISD::FMIN;
23305 // Converting this to a min would handle comparisons between positive
23306 // and negative zero incorrectly.
23307 if (!DAG.getTarget().Options.UnsafeFPMath &&
23308 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS))
23310 Opcode = X86ISD::FMIN;
23313 // Converting this to a min would handle both negative zeros and NaNs
23314 // incorrectly, but we can swap the operands to fix both.
23315 std::swap(LHS, RHS);
23319 Opcode = X86ISD::FMIN;
23323 // Converting this to a max would handle comparisons between positive
23324 // and negative zero incorrectly.
23325 if (!DAG.getTarget().Options.UnsafeFPMath &&
23326 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS))
23328 Opcode = X86ISD::FMAX;
23331 // Converting this to a max would handle NaNs incorrectly, and swapping
23332 // the operands would cause it to handle comparisons between positive
23333 // and negative zero incorrectly.
23334 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
23335 if (!DAG.getTarget().Options.UnsafeFPMath &&
23336 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
23338 std::swap(LHS, RHS);
23340 Opcode = X86ISD::FMAX;
23343 // Converting this to a max would handle both negative zeros and NaNs
23344 // incorrectly, but we can swap the operands to fix both.
23345 std::swap(LHS, RHS);
23349 Opcode = X86ISD::FMAX;
23352 // Check for x CC y ? y : x -- a min/max with reversed arms.
23353 } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) &&
23354 DAG.isEqualTo(RHS, Cond.getOperand(0))) {
23358 // Converting this to a min would handle comparisons between positive
23359 // and negative zero incorrectly, and swapping the operands would
23360 // cause it to handle NaNs incorrectly.
23361 if (!DAG.getTarget().Options.UnsafeFPMath &&
23362 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) {
23363 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
23365 std::swap(LHS, RHS);
23367 Opcode = X86ISD::FMIN;
23370 // Converting this to a min would handle NaNs incorrectly.
23371 if (!DAG.getTarget().Options.UnsafeFPMath &&
23372 (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)))
23374 Opcode = X86ISD::FMIN;
23377 // Converting this to a min would handle both negative zeros and NaNs
23378 // incorrectly, but we can swap the operands to fix both.
23379 std::swap(LHS, RHS);
23383 Opcode = X86ISD::FMIN;
23387 // Converting this to a max would handle NaNs incorrectly.
23388 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
23390 Opcode = X86ISD::FMAX;
23393 // Converting this to a max would handle comparisons between positive
23394 // and negative zero incorrectly, and swapping the operands would
23395 // cause it to handle NaNs incorrectly.
23396 if (!DAG.getTarget().Options.UnsafeFPMath &&
23397 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS)) {
23398 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
23400 std::swap(LHS, RHS);
23402 Opcode = X86ISD::FMAX;
23405 // Converting this to a max would handle both negative zeros and NaNs
23406 // incorrectly, but we can swap the operands to fix both.
23407 std::swap(LHS, RHS);
23411 Opcode = X86ISD::FMAX;
23417 return DAG.getNode(Opcode, DL, N->getValueType(0), LHS, RHS);
23420 EVT CondVT = Cond.getValueType();
23421 if (Subtarget->hasAVX512() && VT.isVector() && CondVT.isVector() &&
23422 CondVT.getVectorElementType() == MVT::i1) {
23423 // v16i8 (select v16i1, v16i8, v16i8) does not have a proper
23424 // lowering on KNL. In this case we convert it to
23425 // v16i8 (select v16i8, v16i8, v16i8) and use an AVX instruction.
23426 // The same applies to all 128-bit and 256-bit vectors of i8 and i16.
23427 // Starting with SKX these selects have a proper lowering.
23428 EVT OpVT = LHS.getValueType();
23429 if ((OpVT.is128BitVector() || OpVT.is256BitVector()) &&
23430 (OpVT.getVectorElementType() == MVT::i8 ||
23431 OpVT.getVectorElementType() == MVT::i16) &&
23432 !(Subtarget->hasBWI() && Subtarget->hasVLX())) {
23433 Cond = DAG.getNode(ISD::SIGN_EXTEND, DL, OpVT, Cond);
23434 DCI.AddToWorklist(Cond.getNode());
23435 return DAG.getNode(N->getOpcode(), DL, OpVT, Cond, LHS, RHS);
23438 // If this is a select between two integer constants, try to do some
23440 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(LHS)) {
23441 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(RHS))
23442 // Don't do this for crazy integer types.
23443 if (DAG.getTargetLoweringInfo().isTypeLegal(LHS.getValueType())) {
23444 // If this is efficiently invertible, canonicalize the LHSC/RHSC values
23445 // so that TrueC (the true value) is larger than FalseC.
23446 bool NeedsCondInvert = false;
23448 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue()) &&
23449 // Efficiently invertible.
23450 (Cond.getOpcode() == ISD::SETCC || // setcc -> invertible.
23451 (Cond.getOpcode() == ISD::XOR && // xor(X, C) -> invertible.
23452 isa<ConstantSDNode>(Cond.getOperand(1))))) {
23453 NeedsCondInvert = true;
23454 std::swap(TrueC, FalseC);
23457 // Optimize C ? 8 : 0 -> zext(C) << 3. Likewise for any pow2/0.
23458 if (FalseC->getAPIntValue() == 0 &&
23459 TrueC->getAPIntValue().isPowerOf2()) {
23460 if (NeedsCondInvert) // Invert the condition if needed.
23461 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
23462 DAG.getConstant(1, Cond.getValueType()));
23464 // Zero extend the condition if needed.
23465 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, LHS.getValueType(), Cond);
23467 unsigned ShAmt = TrueC->getAPIntValue().logBase2();
23468 return DAG.getNode(ISD::SHL, DL, LHS.getValueType(), Cond,
23469 DAG.getConstant(ShAmt, MVT::i8));
23472 // Optimize Cond ? cst+1 : cst -> zext(setcc(C)) + cst.
23473 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
23474 if (NeedsCondInvert) // Invert the condition if needed.
23475 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
23476 DAG.getConstant(1, Cond.getValueType()));
23478 // Zero extend the condition if needed.
23479 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
23480 FalseC->getValueType(0), Cond);
23481 return DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
23482 SDValue(FalseC, 0));
23485 // Optimize cases that will turn into an LEA instruction. This requires
23486 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
23487 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
23488 uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue();
23489 if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff;
23491 bool isFastMultiplier = false;
23493 switch ((unsigned char)Diff) {
23495 case 1: // result = add base, cond
23496 case 2: // result = lea base( , cond*2)
23497 case 3: // result = lea base(cond, cond*2)
23498 case 4: // result = lea base( , cond*4)
23499 case 5: // result = lea base(cond, cond*4)
23500 case 8: // result = lea base( , cond*8)
23501 case 9: // result = lea base(cond, cond*8)
23502 isFastMultiplier = true;
23507 if (isFastMultiplier) {
23508 APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue();
23509 if (NeedsCondInvert) // Invert the condition if needed.
23510 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
23511 DAG.getConstant(1, Cond.getValueType()));
23513 // Zero extend the condition if needed.
23514 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
23516 // Scale the condition by the difference.
23518 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
23519 DAG.getConstant(Diff, Cond.getValueType()));
23521 // Add the base if non-zero.
23522 if (FalseC->getAPIntValue() != 0)
23523 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
23524 SDValue(FalseC, 0));
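// For illustration (assumed constants, not from the original source):
// TrueC = 11 and FalseC = 2 give Diff = 9, one of the "fast" multipliers, so
// the select is rewritten roughly as zext(cond) * 9 + 2, which the
// addressing-mode matcher can typically fold into a single
//   leal 2(%cond,%cond,8), %result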
23531 // Canonicalize max and min:
23532 // (x > y) ? x : y -> (x >= y) ? x : y
23533 // (x < y) ? x : y -> (x <= y) ? x : y
23534 // This allows use of COND_S / COND_NS (see TranslateX86CC) which eliminates
23535 // the need for an extra compare
23536 // against zero. e.g.
23537 // (x - y) > 0 ? (x - y) : 0 -> (x - y) >= 0 ? (x - y) : 0
23539 // testl %edi, %edi
23541 // cmovgl %edi, %eax
23545 // cmovsl %eax, %edi
23546 if (N->getOpcode() == ISD::SELECT && Cond.getOpcode() == ISD::SETCC &&
23547 DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
23548 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
23549 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
23554 ISD::CondCode NewCC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGE;
23555 Cond = DAG.getSetCC(SDLoc(Cond), Cond.getValueType(),
23556 Cond.getOperand(0), Cond.getOperand(1), NewCC);
23557 return DAG.getNode(ISD::SELECT, DL, VT, Cond, LHS, RHS);
23562 // Early exit check
23563 if (!TLI.isTypeLegal(VT))
23566 // Match VSELECTs into subs with unsigned saturation.
23567 if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC &&
23568 // psubus is available in SSE2 and AVX2 for i8 and i16 vectors.
23569 ((Subtarget->hasSSE2() && (VT == MVT::v16i8 || VT == MVT::v8i16)) ||
23570 (Subtarget->hasAVX2() && (VT == MVT::v32i8 || VT == MVT::v16i16)))) {
23571 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
23573 // Check if one of the arms of the VSELECT is a zero vector. If it's on the
23574 // left side, invert the predicate to simplify the logic below.
23576 if (ISD::isBuildVectorAllZeros(LHS.getNode())) {
23578 CC = ISD::getSetCCInverse(CC, true);
23579 } else if (ISD::isBuildVectorAllZeros(RHS.getNode())) {
23583 if (Other.getNode() && Other->getNumOperands() == 2 &&
23584 DAG.isEqualTo(Other->getOperand(0), Cond.getOperand(0))) {
23585 SDValue OpLHS = Other->getOperand(0), OpRHS = Other->getOperand(1);
23586 SDValue CondRHS = Cond->getOperand(1);
23588 // Look for a general sub with unsigned saturation first.
23589 // x >= y ? x-y : 0 --> subus x, y
23590 // x > y ? x-y : 0 --> subus x, y
23591 if ((CC == ISD::SETUGE || CC == ISD::SETUGT) &&
23592 Other->getOpcode() == ISD::SUB && DAG.isEqualTo(OpRHS, CondRHS))
23593 return DAG.getNode(X86ISD::SUBUS, DL, VT, OpLHS, OpRHS);
23595 if (auto *OpRHSBV = dyn_cast<BuildVectorSDNode>(OpRHS))
23596 if (auto *OpRHSConst = OpRHSBV->getConstantSplatNode()) {
23597 if (auto *CondRHSBV = dyn_cast<BuildVectorSDNode>(CondRHS))
23598 if (auto *CondRHSConst = CondRHSBV->getConstantSplatNode())
23599 // If the RHS is a constant we have to reverse the const
23600 // canonicalization.
23601 // x > C-1 ? x + (-C) : 0 --> subus x, C
23602 if (CC == ISD::SETUGT && Other->getOpcode() == ISD::ADD &&
23603 CondRHSConst->getAPIntValue() ==
23604 (-OpRHSConst->getAPIntValue() - 1))
23605 return DAG.getNode(
23606 X86ISD::SUBUS, DL, VT, OpLHS,
23607 DAG.getConstant(-OpRHSConst->getAPIntValue(), VT));
23609 // Another special case: If C was a sign bit, the sub has been
23610 // canonicalized into a xor.
23611 // FIXME: Would it be better to use computeKnownBits to determine
23612 // whether it's safe to decanonicalize the xor?
23613 // x s< 0 ? x^C : 0 --> subus x, C
23614 if (CC == ISD::SETLT && Other->getOpcode() == ISD::XOR &&
23615 ISD::isBuildVectorAllZeros(CondRHS.getNode()) &&
23616 OpRHSConst->getAPIntValue().isSignBit())
23617 // Note that we have to rebuild the RHS constant here to ensure we
23618 // don't rely on particular values of undef lanes.
23619 return DAG.getNode(
23620 X86ISD::SUBUS, DL, VT, OpLHS,
23621 DAG.getConstant(OpRHSConst->getAPIntValue(), VT));
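// For illustration (assumed constant C = 42 and v8i16 operands): the
// canonicalized pattern
//   (vselect (setugt x, 41), (add x, -42), 0)
// matches the "x > C-1 ? x + (-C) : 0" case above and is emitted as a single
// unsigned saturating subtract, i.e. a psubusw of x by a splat of 42.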
23626 // Try to match a min/max vector operation.
23627 if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC) {
23628 std::pair<unsigned, bool> ret = matchIntegerMINMAX(Cond, VT, LHS, RHS, DAG, Subtarget);
23629 unsigned Opc = ret.first;
23630 bool NeedSplit = ret.second;
23632 if (Opc && NeedSplit) {
23633 unsigned NumElems = VT.getVectorNumElements();
23634 // Extract the LHS vectors
23635 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, DL);
23636 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, DL);
23638 // Extract the RHS vectors
23639 SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, DL);
23640 SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, DL);
23642 // Create min/max for each subvector
23643 LHS = DAG.getNode(Opc, DL, LHS1.getValueType(), LHS1, RHS1);
23644 RHS = DAG.getNode(Opc, DL, LHS2.getValueType(), LHS2, RHS2);
23646 // Merge the result
23647 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LHS, RHS);
23649 return DAG.getNode(Opc, DL, VT, LHS, RHS);
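// For illustration (assumed target with AVX but not AVX2): a v8i32
// signed-min vselect takes the NeedSplit path above, roughly
//   lo  = min(extract128(LHS, 0), extract128(RHS, 0))   ; pminsd
//   hi  = min(extract128(LHS, 4), extract128(RHS, 4))   ; pminsd
//   res = concat_vectors(lo, hi)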
23652 // Simplify vector selection if condition value type matches vselect
23654 if (N->getOpcode() == ISD::VSELECT && CondVT == VT) {
23655 assert(Cond.getValueType().isVector() &&
23656 "vector select expects a vector selector!");
23658 bool TValIsAllOnes = ISD::isBuildVectorAllOnes(LHS.getNode());
23659 bool FValIsAllZeros = ISD::isBuildVectorAllZeros(RHS.getNode());
23661 // Try to invert the condition if the true value is not all 1s and the false value is not all 0s.
23663 if (!TValIsAllOnes && !FValIsAllZeros &&
23664 // Check if the selector will be produced by CMPP*/PCMP*
23665 Cond.getOpcode() == ISD::SETCC &&
23666 // Check if SETCC has already been promoted
23667 TLI.getSetCCResultType(*DAG.getContext(), VT) == CondVT) {
23668 bool TValIsAllZeros = ISD::isBuildVectorAllZeros(LHS.getNode());
23669 bool FValIsAllOnes = ISD::isBuildVectorAllOnes(RHS.getNode());
23671 if (TValIsAllZeros || FValIsAllOnes) {
23672 SDValue CC = Cond.getOperand(2);
23673 ISD::CondCode NewCC =
23674 ISD::getSetCCInverse(cast<CondCodeSDNode>(CC)->get(),
23675 Cond.getOperand(0).getValueType().isInteger());
23676 Cond = DAG.getSetCC(DL, CondVT, Cond.getOperand(0), Cond.getOperand(1), NewCC);
23677 std::swap(LHS, RHS);
23678 TValIsAllOnes = FValIsAllOnes;
23679 FValIsAllZeros = TValIsAllZeros;
23683 if (TValIsAllOnes || FValIsAllZeros) {
23686 if (TValIsAllOnes && FValIsAllZeros)
23688 else if (TValIsAllOnes)
23689 Ret = DAG.getNode(ISD::OR, DL, CondVT, Cond,
23690 DAG.getNode(ISD::BITCAST, DL, CondVT, RHS));
23691 else if (FValIsAllZeros)
23692 Ret = DAG.getNode(ISD::AND, DL, CondVT, Cond,
23693 DAG.getNode(ISD::BITCAST, DL, CondVT, LHS));
23695 return DAG.getNode(ISD::BITCAST, DL, VT, Ret);
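// Sketch of the fold above, restating the code with assumed operands: with
// an all-ones true operand
//   (vselect M, all-ones, X)  ->  (or M, bitcast X)
// and with an all-zeros false operand
//   (vselect M, X, all-zeros) ->  (and M, bitcast X)
// so no blend instruction is needed at all.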
23699 // If we know that this node is legal then we know that it is going to be
23700 // matched by one of the SSE/AVX BLEND instructions. These instructions only
23701 // depend on the highest bit in each word. Try to use SimplifyDemandedBits
23702 // to simplify previous instructions.
23703 if (N->getOpcode() == ISD::VSELECT && DCI.isBeforeLegalizeOps() &&
23704 !DCI.isBeforeLegalize() &&
23705 // We explicitly check against v8i16 and v16i16 because, although
23706 // they're marked as Custom, they might only be legal when Cond is a
23707 // build_vector of constants. This will be taken care of in a later phase.
23709 (TLI.isOperationLegalOrCustom(ISD::VSELECT, VT) && VT != MVT::v16i16 &&
23710 VT != MVT::v8i16) &&
23711 // Don't optimize vector of constants. Those are handled by
23712 // the generic code and all the bits must be properly set for
23713 // the generic optimizer.
23714 !ISD::isBuildVectorOfConstantSDNodes(Cond.getNode())) {
23715 unsigned BitWidth = Cond.getValueType().getScalarType().getSizeInBits();
23717 // Don't optimize vector selects that map to mask-registers.
23721 assert(BitWidth >= 8 && BitWidth <= 64 && "Invalid mask size");
23722 APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 1);
23724 APInt KnownZero, KnownOne;
23725 TargetLowering::TargetLoweringOpt TLO(DAG, DCI.isBeforeLegalize(),
23726 DCI.isBeforeLegalizeOps());
23727 if (TLO.ShrinkDemandedConstant(Cond, DemandedMask) ||
23728 TLI.SimplifyDemandedBits(Cond, DemandedMask, KnownZero, KnownOne,
23730 // If we changed the computation somewhere in the DAG, this change
23731 // will affect all users of Cond.
23732 // Make sure it is fine and update all the nodes so that we do not
23733 // use the generic VSELECT anymore. Otherwise, we may perform
23734 // wrong optimizations as we messed up with the actual expectation
23735 // for the vector boolean values.
23736 if (Cond != TLO.Old) {
23737 // Check all uses of that condition operand to check whether it will be
23738 // consumed by non-BLEND instructions, which may depend on all of the bits being set correctly.
23740 for (SDNode::use_iterator I = Cond->use_begin(), E = Cond->use_end();
23742 if (I->getOpcode() != ISD::VSELECT)
23743 // TODO: Add other opcodes eventually lowered into BLEND.
23746 // Update all the users of the condition, before committing the change,
23747 // so that the VSELECT optimizations that expect the correct vector
23748 // boolean value will not be triggered.
23749 for (SDNode::use_iterator I = Cond->use_begin(), E = Cond->use_end();
23751 DAG.ReplaceAllUsesOfValueWith(
23753 DAG.getNode(X86ISD::SHRUNKBLEND, SDLoc(*I), I->getValueType(0),
23754 Cond, I->getOperand(1), I->getOperand(2)));
23755 DCI.CommitTargetLoweringOpt(TLO);
23758 // At this point, only Cond is changed. Change the condition
23759 // just for N to keep the opportunity to optimize all other
23760 // users in their own way.
23761 DAG.ReplaceAllUsesOfValueWith(
23763 DAG.getNode(X86ISD::SHRUNKBLEND, SDLoc(N), N->getValueType(0),
23764 TLO.New, N->getOperand(1), N->getOperand(2)));
23769 // We should generate an X86ISD::BLENDI from a vselect if its argument
23770 // is a sign_extend_inreg of an any_extend of a BUILD_VECTOR of
23771 // constants. This specific pattern gets generated when we split a
23772 // selector for a 512 bit vector in a machine without AVX512 (but with
23773 // 256-bit vectors), during legalization:
23775 // (vselect (sign_extend (any_extend (BUILD_VECTOR)) i1) LHS RHS)
23777 // Iff we find this pattern and the build_vectors are built from
23778 // constants, we translate the vselect into a shuffle_vector that we
23779 // know will be matched by LowerVECTOR_SHUFFLEtoBlend.
23780 if ((N->getOpcode() == ISD::VSELECT ||
23781 N->getOpcode() == X86ISD::SHRUNKBLEND) &&
23782 !DCI.isBeforeLegalize()) {
23783 SDValue Shuffle = transformVSELECTtoBlendVECTOR_SHUFFLE(N, DAG, Subtarget);
23784 if (Shuffle.getNode())
23791 // Check whether a boolean test is testing a boolean value generated by
23792 // X86ISD::SETCC. If so, return the operand of that SETCC and proper condition
23795 // Simplify the following patterns:
23796 // (Op (CMP (SETCC Cond EFLAGS) 1) EQ) or
23797 // (Op (CMP (SETCC Cond EFLAGS) 0) NEQ)
23798 // to (Op EFLAGS Cond)
23800 // (Op (CMP (SETCC Cond EFLAGS) 0) EQ) or
23801 // (Op (CMP (SETCC Cond EFLAGS) 1) NEQ)
23802 // to (Op EFLAGS !Cond)
23804 // where Op could be BRCOND or CMOV.
23806 static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) {
23807 // Quit unless this is a CMP, or a SUB whose value result is unused.
23808 if (Cmp.getOpcode() != X86ISD::CMP &&
23809 (Cmp.getOpcode() != X86ISD::SUB || Cmp.getNode()->hasAnyUseOfValue(0)))
23812 // Quit if not used as a boolean value.
23813 if (CC != X86::COND_E && CC != X86::COND_NE)
23816 // Check CMP operands. One of them should be 0 or 1 and the other should be
23817 // a SetCC or extended from it.
23818 SDValue Op1 = Cmp.getOperand(0);
23819 SDValue Op2 = Cmp.getOperand(1);
23822 const ConstantSDNode* C = nullptr;
23823 bool needOppositeCond = (CC == X86::COND_E);
23824 bool checkAgainstTrue = false; // Is it a comparison against 1?
23826 if ((C = dyn_cast<ConstantSDNode>(Op1)))
23828 else if ((C = dyn_cast<ConstantSDNode>(Op2)))
23830 else // Quit if all operands are not constants.
23833 if (C->getZExtValue() == 1) {
23834 needOppositeCond = !needOppositeCond;
23835 checkAgainstTrue = true;
23836 } else if (C->getZExtValue() != 0)
23837 // Quit if the constant is neither 0 nor 1.
23840 bool truncatedToBoolWithAnd = false;
23841 // Skip (zext $x), (trunc $x), or (and $x, 1) node.
23842 while (SetCC.getOpcode() == ISD::ZERO_EXTEND ||
23843 SetCC.getOpcode() == ISD::TRUNCATE ||
23844 SetCC.getOpcode() == ISD::AND) {
23845 if (SetCC.getOpcode() == ISD::AND) {
23847 ConstantSDNode *CS;
23848 if ((CS = dyn_cast<ConstantSDNode>(SetCC.getOperand(0))) &&
23849 CS->getZExtValue() == 1)
23851 if ((CS = dyn_cast<ConstantSDNode>(SetCC.getOperand(1))) &&
23852 CS->getZExtValue() == 1)
23856 SetCC = SetCC.getOperand(OpIdx);
23857 truncatedToBoolWithAnd = true;
23859 SetCC = SetCC.getOperand(0);
23862 switch (SetCC.getOpcode()) {
23863 case X86ISD::SETCC_CARRY:
23864 // Since SETCC_CARRY gives output based on R = CF ? ~0 : 0, it's unsafe to
23865 // simplify it if the result of SETCC_CARRY is not canonicalized to 0 or 1,
23866 // i.e. it's a comparison against true but the result of SETCC_CARRY is not
23867 // truncated to i1 using 'and'.
23868 if (checkAgainstTrue && !truncatedToBoolWithAnd)
23870 assert(X86::CondCode(SetCC.getConstantOperandVal(0)) == X86::COND_B &&
23871 "Invalid use of SETCC_CARRY!");
23873 case X86ISD::SETCC:
23874 // Set the condition code or opposite one if necessary.
23875 CC = X86::CondCode(SetCC.getConstantOperandVal(0));
23876 if (needOppositeCond)
23877 CC = X86::GetOppositeBranchCondition(CC);
23878 return SetCC.getOperand(1);
23879 case X86ISD::CMOV: {
23880 // Check whether the false/true values are canonical, i.e. 0 or 1.
23881 ConstantSDNode *FVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(0));
23882 ConstantSDNode *TVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(1));
23883 // Quit if true value is not a constant.
23886 // Quit if false value is not a constant.
23888 SDValue Op = SetCC.getOperand(0);
23889 // Skip 'zext' or 'trunc' node.
23890 if (Op.getOpcode() == ISD::ZERO_EXTEND ||
23891 Op.getOpcode() == ISD::TRUNCATE)
23892 Op = Op.getOperand(0);
23893 // A special case for rdrand/rdseed, where 0 is set if the false cond is found.
23895 if ((Op.getOpcode() != X86ISD::RDRAND &&
23896 Op.getOpcode() != X86ISD::RDSEED) || Op.getResNo() != 0)
23899 // Quit if false value is not the constant 0 or 1.
23900 bool FValIsFalse = true;
23901 if (FVal && FVal->getZExtValue() != 0) {
23902 if (FVal->getZExtValue() != 1)
23904 // If FVal is 1, opposite cond is needed.
23905 needOppositeCond = !needOppositeCond;
23906 FValIsFalse = false;
23908 // Quit if TVal is not the constant opposite of FVal.
23909 if (FValIsFalse && TVal->getZExtValue() != 1)
23911 if (!FValIsFalse && TVal->getZExtValue() != 0)
23913 CC = X86::CondCode(SetCC.getConstantOperandVal(2));
23914 if (needOppositeCond)
23915 CC = X86::GetOppositeBranchCondition(CC);
23916 return SetCC.getOperand(3);
23923 /// Optimize X86ISD::CMOV [LHS, RHS, CONDCODE (e.g. X86::COND_NE), CONDVAL]
23924 static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG,
23925 TargetLowering::DAGCombinerInfo &DCI,
23926 const X86Subtarget *Subtarget) {
23929 // If the flag operand isn't dead, don't touch this CMOV.
23930 if (N->getNumValues() == 2 && !SDValue(N, 1).use_empty())
23933 SDValue FalseOp = N->getOperand(0);
23934 SDValue TrueOp = N->getOperand(1);
23935 X86::CondCode CC = (X86::CondCode)N->getConstantOperandVal(2);
23936 SDValue Cond = N->getOperand(3);
23938 if (CC == X86::COND_E || CC == X86::COND_NE) {
23939 switch (Cond.getOpcode()) {
23943 // If operand of BSR / BSF are proven never zero, then ZF cannot be set.
23944 if (DAG.isKnownNeverZero(Cond.getOperand(0)))
23945 return (CC == X86::COND_E) ? FalseOp : TrueOp;
23951 Flags = checkBoolTestSetCCCombine(Cond, CC);
23952 if (Flags.getNode() &&
23953 // Extra check as FCMOV only supports a subset of X86 cond.
23954 (FalseOp.getValueType() != MVT::f80 || hasFPCMov(CC))) {
23955 SDValue Ops[] = { FalseOp, TrueOp,
23956 DAG.getConstant(CC, MVT::i8), Flags };
23957 return DAG.getNode(X86ISD::CMOV, DL, N->getVTList(), Ops);
23960 // If this is a select between two integer constants, try to do some
23961 // optimizations. Note that the operands are ordered the opposite of SELECT
23963 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(TrueOp)) {
23964 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(FalseOp)) {
23965 // Canonicalize the TrueC/FalseC values so that TrueC (the true value) is
23966 // larger than FalseC (the false value).
23967 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue())) {
23968 CC = X86::GetOppositeBranchCondition(CC);
23969 std::swap(TrueC, FalseC);
23970 std::swap(TrueOp, FalseOp);
23973 // Optimize C ? 8 : 0 -> zext(setcc(C)) << 3. Likewise for any pow2/0.
23974 // This is efficient for any integer data type (including i8/i16) and
23976 if (FalseC->getAPIntValue() == 0 && TrueC->getAPIntValue().isPowerOf2()) {
23977 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
23978 DAG.getConstant(CC, MVT::i8), Cond);
23980 // Zero extend the condition if needed.
23981 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, TrueC->getValueType(0), Cond);
23983 unsigned ShAmt = TrueC->getAPIntValue().logBase2();
23984 Cond = DAG.getNode(ISD::SHL, DL, Cond.getValueType(), Cond,
23985 DAG.getConstant(ShAmt, MVT::i8));
23986 if (N->getNumValues() == 2) // Dead flag value?
23987 return DCI.CombineTo(N, Cond, SDValue());
23991 // Optimize Cond ? cst+1 : cst -> zext(setcc(C)) + cst. This is efficient
23992 // for any integer data type, including i8/i16.
23993 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
23994 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
23995 DAG.getConstant(CC, MVT::i8), Cond);
23997 // Zero extend the condition if needed.
23998 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
23999 FalseC->getValueType(0), Cond);
24000 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
24001 SDValue(FalseC, 0));
24003 if (N->getNumValues() == 2) // Dead flag value?
24004 return DCI.CombineTo(N, Cond, SDValue());
24008 // Optimize cases that will turn into an LEA instruction. This requires
24009 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
24010 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
24011 uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue();
24012 if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff;
24014 bool isFastMultiplier = false;
24016 switch ((unsigned char)Diff) {
24018 case 1: // result = add base, cond
24019 case 2: // result = lea base( , cond*2)
24020 case 3: // result = lea base(cond, cond*2)
24021 case 4: // result = lea base( , cond*4)
24022 case 5: // result = lea base(cond, cond*4)
24023 case 8: // result = lea base( , cond*8)
24024 case 9: // result = lea base(cond, cond*8)
24025 isFastMultiplier = true;
24030 if (isFastMultiplier) {
24031 APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue();
24032 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
24033 DAG.getConstant(CC, MVT::i8), Cond);
24034 // Zero extend the condition if needed.
24035 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
24037 // Scale the condition by the difference.
24039 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
24040 DAG.getConstant(Diff, Cond.getValueType()));
24042 // Add the base if non-zero.
24043 if (FalseC->getAPIntValue() != 0)
24044 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
24045 SDValue(FalseC, 0));
24046 if (N->getNumValues() == 2) // Dead flag value?
24047 return DCI.CombineTo(N, Cond, SDValue());
24054 // Handle these cases:
24055 // (select (x != c), e, c) -> (select (x != c), e, x),
24056 // (select (x == c), c, e) -> (select (x == c), x, e)
24057 // where the c is an integer constant, and the "select" is the combination
24058 // of CMOV and CMP.
24060 // The rationale for this change is that the conditional-move from a constant
24061 // needs two instructions, whereas a conditional-move from a register needs
24062 // only one instruction.
24064 // CAVEAT: By replacing a constant with a symbolic value, it may obscure
24065 // some instruction-combining opportunities. This opt needs to be
24066 // postponed as late as possible.
24068 if (!DCI.isBeforeLegalize() && !DCI.isBeforeLegalizeOps()) {
24069 // The DCI.xxxx conditions are provided to postpone the optimization as
24070 // late as possible.
24072 ConstantSDNode *CmpAgainst = nullptr;
24073 if ((Cond.getOpcode() == X86ISD::CMP || Cond.getOpcode() == X86ISD::SUB) &&
24074 (CmpAgainst = dyn_cast<ConstantSDNode>(Cond.getOperand(1))) &&
24075 !isa<ConstantSDNode>(Cond.getOperand(0))) {
24077 if (CC == X86::COND_NE &&
24078 CmpAgainst == dyn_cast<ConstantSDNode>(FalseOp)) {
24079 CC = X86::GetOppositeBranchCondition(CC);
24080 std::swap(TrueOp, FalseOp);
24083 if (CC == X86::COND_E &&
24084 CmpAgainst == dyn_cast<ConstantSDNode>(TrueOp)) {
24085 SDValue Ops[] = { FalseOp, Cond.getOperand(0),
24086 DAG.getConstant(CC, MVT::i8), Cond };
24087 return DAG.getNode(X86ISD::CMOV, DL, N->getVTList(), Ops);
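// For illustration (assumed constants): for
//   (select (x == 7), 7, e)
// the constant true operand is replaced by x itself, so a single cmove picks
// x when the compare is equal and e otherwise, instead of first
// materializing the constant 7 in a register.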
24095 static SDValue PerformINTRINSIC_WO_CHAINCombine(SDNode *N, SelectionDAG &DAG,
24096 const X86Subtarget *Subtarget) {
24097 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
24099 default: return SDValue();
24100 // SSE/AVX/AVX2 blend intrinsics.
24101 case Intrinsic::x86_avx2_pblendvb:
24102 case Intrinsic::x86_avx2_pblendw:
24103 case Intrinsic::x86_avx2_pblendd_128:
24104 case Intrinsic::x86_avx2_pblendd_256:
24105 // Don't try to simplify this intrinsic if we don't have AVX2.
24106 if (!Subtarget->hasAVX2())
24109 case Intrinsic::x86_avx_blend_pd_256:
24110 case Intrinsic::x86_avx_blend_ps_256:
24111 case Intrinsic::x86_avx_blendv_pd_256:
24112 case Intrinsic::x86_avx_blendv_ps_256:
24113 // Don't try to simplify this intrinsic if we don't have AVX.
24114 if (!Subtarget->hasAVX())
24117 case Intrinsic::x86_sse41_pblendw:
24118 case Intrinsic::x86_sse41_blendpd:
24119 case Intrinsic::x86_sse41_blendps:
24120 case Intrinsic::x86_sse41_blendvps:
24121 case Intrinsic::x86_sse41_blendvpd:
24122 case Intrinsic::x86_sse41_pblendvb: {
24123 SDValue Op0 = N->getOperand(1);
24124 SDValue Op1 = N->getOperand(2);
24125 SDValue Mask = N->getOperand(3);
24127 // Don't try to simplify this intrinsic if we don't have SSE4.1.
24128 if (!Subtarget->hasSSE41())
24131 // fold (blend A, A, Mask) -> A
24134 // fold (blend A, B, allZeros) -> A
24135 if (ISD::isBuildVectorAllZeros(Mask.getNode()))
24137 // fold (blend A, B, allOnes) -> B
24138 if (ISD::isBuildVectorAllOnes(Mask.getNode()))
24141 // Simplify the case where the mask is a constant i32 value.
24142 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Mask)) {
24143 if (C->isNullValue())
24145 if (C->isAllOnesValue())
24152 // Packed SSE2/AVX2 arithmetic shift immediate intrinsics.
24153 case Intrinsic::x86_sse2_psrai_w:
24154 case Intrinsic::x86_sse2_psrai_d:
24155 case Intrinsic::x86_avx2_psrai_w:
24156 case Intrinsic::x86_avx2_psrai_d:
24157 case Intrinsic::x86_sse2_psra_w:
24158 case Intrinsic::x86_sse2_psra_d:
24159 case Intrinsic::x86_avx2_psra_w:
24160 case Intrinsic::x86_avx2_psra_d: {
24161 SDValue Op0 = N->getOperand(1);
24162 SDValue Op1 = N->getOperand(2);
24163 EVT VT = Op0.getValueType();
24164 assert(VT.isVector() && "Expected a vector type!");
24166 if (isa<BuildVectorSDNode>(Op1))
24167 Op1 = Op1.getOperand(0);
24169 if (!isa<ConstantSDNode>(Op1))
24172 EVT SVT = VT.getVectorElementType();
24173 unsigned SVTBits = SVT.getSizeInBits();
24175 ConstantSDNode *CND = cast<ConstantSDNode>(Op1);
24176 const APInt &C = APInt(SVTBits, CND->getAPIntValue().getZExtValue());
24177 uint64_t ShAmt = C.getZExtValue();
24179 // Don't try to convert this shift into an ISD::SRA if the shift
24180 // count is bigger than or equal to the element size.
24181 if (ShAmt >= SVTBits)
24184 // Trivial case: if the shift count is zero, then fold this
24185 // into the first operand.
24189 // Replace this packed shift intrinsic with a target-independent
24191 SDValue Splat = DAG.getConstant(C, VT);
24192 return DAG.getNode(ISD::SRA, SDLoc(N), VT, Op0, Splat);
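// For illustration (assumed in-range immediate): a call such as
//   @llvm.x86.sse2.psrai.d(<4 x i32> %v, i32 3)
// is rewritten to the generic node (sra %v, <3,3,3,3>), which the normal
// shift combines and lowering can then handle.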
24197 /// PerformMulCombine - Optimize a single multiply with a constant into two
24198 /// multiplies in order to implement it with two cheaper instructions, e.g.
24199 /// LEA + SHL, LEA + LEA.
24200 static SDValue PerformMulCombine(SDNode *N, SelectionDAG &DAG,
24201 TargetLowering::DAGCombinerInfo &DCI) {
24202 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
24205 EVT VT = N->getValueType(0);
24206 if (VT != MVT::i64 && VT != MVT::i32)
24209 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
24212 uint64_t MulAmt = C->getZExtValue();
24213 if (isPowerOf2_64(MulAmt) || MulAmt == 3 || MulAmt == 5 || MulAmt == 9)
24216 uint64_t MulAmt1 = 0;
24217 uint64_t MulAmt2 = 0;
24218 if ((MulAmt % 9) == 0) {
24220 MulAmt2 = MulAmt / 9;
24221 } else if ((MulAmt % 5) == 0) {
24223 MulAmt2 = MulAmt / 5;
24224 } else if ((MulAmt % 3) == 0) {
24226 MulAmt2 = MulAmt / 3;
24229 (isPowerOf2_64(MulAmt2) || MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9)){
24232 if (isPowerOf2_64(MulAmt2) &&
24233 !(N->hasOneUse() && N->use_begin()->getOpcode() == ISD::ADD))
24234 // If the second multiplier is a power of 2, issue it first. We want the
24235 // multiply by 3, 5, or 9 to be folded into the addressing mode unless the lone use is an add.
24237 std::swap(MulAmt1, MulAmt2);
24240 if (isPowerOf2_64(MulAmt1))
24241 NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
24242 DAG.getConstant(Log2_64(MulAmt1), MVT::i8));
24244 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
24245 DAG.getConstant(MulAmt1, VT));
24247 if (isPowerOf2_64(MulAmt2))
24248 NewMul = DAG.getNode(ISD::SHL, DL, VT, NewMul,
24249 DAG.getConstant(Log2_64(MulAmt2), MVT::i8));
24251 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, NewMul,
24252 DAG.getConstant(MulAmt2, VT));
24254 // Do not add new nodes to DAG combiner worklist.
24255 DCI.CombineTo(N, NewMul, false);
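// For illustration (assumed constant): MulAmt = 45 decomposes as 9 * 5, so
// x * 45 is emitted roughly as
//   t = x * 9      ; lea (x,x,8)
//   r = t * 5      ; lea (t,t,4)
// i.e. two LEAs instead of one imul.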
24260 static SDValue PerformSHLCombine(SDNode *N, SelectionDAG &DAG) {
24261 SDValue N0 = N->getOperand(0);
24262 SDValue N1 = N->getOperand(1);
24263 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
24264 EVT VT = N0.getValueType();
24266 // fold (shl (and (setcc_c), c1), c2) -> (and setcc_c, (c1 << c2))
24267 // since the result of setcc_c is all zero's or all ones.
24268 if (VT.isInteger() && !VT.isVector() &&
24269 N1C && N0.getOpcode() == ISD::AND &&
24270 N0.getOperand(1).getOpcode() == ISD::Constant) {
24271 SDValue N00 = N0.getOperand(0);
24272 if (N00.getOpcode() == X86ISD::SETCC_CARRY ||
24273 ((N00.getOpcode() == ISD::ANY_EXTEND ||
24274 N00.getOpcode() == ISD::ZERO_EXTEND) &&
24275 N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY)) {
24276 APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
24277 APInt ShAmt = N1C->getAPIntValue();
24278 Mask = Mask.shl(ShAmt);
24280 return DAG.getNode(ISD::AND, SDLoc(N), VT,
24281 N00, DAG.getConstant(Mask, VT));
24285 // Hardware support for vector shifts is sparse, which makes us scalarize the
24286 // vector operations in many cases. Also, on Sandy Bridge ADD is faster than SHL:
24288 // (shl V, 1) -> add V,V
24289 if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
24290 if (auto *N1SplatC = N1BV->getConstantSplatNode()) {
24291 assert(N0.getValueType().isVector() && "Invalid vector shift type");
24292 // We shift all of the values by one. In many cases we do not have
24293 // hardware support for this operation. This is better expressed as an ADD
24295 if (N1SplatC->getZExtValue() == 1)
24296 return DAG.getNode(ISD::ADD, SDLoc(N), VT, N0, N0);
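// For illustration (assumed splat amount): (shl <4 x i32> %v, <1,1,1,1>)
// becomes (add %v, %v), sidestepping the sparse vector-shift support
// mentioned above.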
24302 /// \brief Returns a vector of 0s if the input node is a vector logical
24303 /// shift by a constant amount which is known to be bigger than or equal
24304 /// to the vector element size in bits.
24305 static SDValue performShiftToAllZeros(SDNode *N, SelectionDAG &DAG,
24306 const X86Subtarget *Subtarget) {
24307 EVT VT = N->getValueType(0);
24309 if (VT != MVT::v2i64 && VT != MVT::v4i32 && VT != MVT::v8i16 &&
24310 (!Subtarget->hasInt256() ||
24311 (VT != MVT::v4i64 && VT != MVT::v8i32 && VT != MVT::v16i16)))
24314 SDValue Amt = N->getOperand(1);
24316 if (auto *AmtBV = dyn_cast<BuildVectorSDNode>(Amt))
24317 if (auto *AmtSplat = AmtBV->getConstantSplatNode()) {
24318 APInt ShiftAmt = AmtSplat->getAPIntValue();
24319 unsigned MaxAmount = VT.getVectorElementType().getSizeInBits();
24321 // SSE2/AVX2 logical shifts always return a vector of 0s
24322 // if the shift amount is bigger than or equal to
24323 // the element size. The constant shift amount will be
24324 // encoded as an 8-bit immediate.
24325 if (ShiftAmt.trunc(8).uge(MaxAmount))
24326 return getZeroVector(VT, Subtarget, DAG, DL);
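// For illustration (assumed types): (srl <8 x i16> %v, splat 16) shifts by
// at least the element width, so it folds directly to the all-zeros vector
// here.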
24332 /// PerformShiftCombine - Combine shifts.
24333 static SDValue PerformShiftCombine(SDNode* N, SelectionDAG &DAG,
24334 TargetLowering::DAGCombinerInfo &DCI,
24335 const X86Subtarget *Subtarget) {
24336 if (N->getOpcode() == ISD::SHL) {
24337 SDValue V = PerformSHLCombine(N, DAG);
24338 if (V.getNode()) return V;
24341 if (N->getOpcode() != ISD::SRA) {
24342 // Try to fold this logical shift into a zero vector.
24343 SDValue V = performShiftToAllZeros(N, DAG, Subtarget);
24344 if (V.getNode()) return V;
24350 // CMPEQCombine - Recognize the distinctive (AND (setcc ...) (setcc ..))
24351 // where both setccs reference the same FP CMP, and rewrite for CMPEQSS
24352 // and friends. Likewise for OR -> CMPNEQSS.
24353 static SDValue CMPEQCombine(SDNode *N, SelectionDAG &DAG,
24354 TargetLowering::DAGCombinerInfo &DCI,
24355 const X86Subtarget *Subtarget) {
24358 // SSE1 supports CMP{eq|ne}SS, and SSE2 added CMP{eq|ne}SD, but
24359 // we're requiring SSE2 for both.
24360 if (Subtarget->hasSSE2() && isAndOrOfSetCCs(SDValue(N, 0U), opcode)) {
24361 SDValue N0 = N->getOperand(0);
24362 SDValue N1 = N->getOperand(1);
24363 SDValue CMP0 = N0->getOperand(1);
24364 SDValue CMP1 = N1->getOperand(1);
24367 // The SETCCs should both refer to the same CMP.
24368 if (CMP0.getOpcode() != X86ISD::CMP || CMP0 != CMP1)
24371 SDValue CMP00 = CMP0->getOperand(0);
24372 SDValue CMP01 = CMP0->getOperand(1);
24373 EVT VT = CMP00.getValueType();
24375 if (VT == MVT::f32 || VT == MVT::f64) {
24376 bool ExpectingFlags = false;
24377 // Check for any users that want flags:
24378 for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
24379 !ExpectingFlags && UI != UE; ++UI)
24380 switch (UI->getOpcode()) {
24385 ExpectingFlags = true;
24387 case ISD::CopyToReg:
24388 case ISD::SIGN_EXTEND:
24389 case ISD::ZERO_EXTEND:
24390 case ISD::ANY_EXTEND:
24394 if (!ExpectingFlags) {
24395 enum X86::CondCode cc0 = (enum X86::CondCode)N0.getConstantOperandVal(0);
24396 enum X86::CondCode cc1 = (enum X86::CondCode)N1.getConstantOperandVal(0);
24398 if (cc1 == X86::COND_E || cc1 == X86::COND_NE) {
24399 X86::CondCode tmp = cc0;
24404 if ((cc0 == X86::COND_E && cc1 == X86::COND_NP) ||
24405 (cc0 == X86::COND_NE && cc1 == X86::COND_P)) {
24406 // FIXME: need symbolic constants for these magic numbers.
24407 // See X86ATTInstPrinter.cpp:printSSECC().
24408 unsigned x86cc = (cc0 == X86::COND_E) ? 0 : 4;
24409 if (Subtarget->hasAVX512()) {
24410 SDValue FSetCC = DAG.getNode(X86ISD::FSETCC, DL, MVT::i1, CMP00,
24411 CMP01, DAG.getConstant(x86cc, MVT::i8));
24412 if (N->getValueType(0) != MVT::i1)
24413 return DAG.getNode(ISD::ZERO_EXTEND, DL, N->getValueType(0),
24417 SDValue OnesOrZeroesF = DAG.getNode(X86ISD::FSETCC, DL,
24418 CMP00.getValueType(), CMP00, CMP01,
24419 DAG.getConstant(x86cc, MVT::i8));
24421 bool is64BitFP = (CMP00.getValueType() == MVT::f64);
24422 MVT IntVT = is64BitFP ? MVT::i64 : MVT::i32;
24424 if (is64BitFP && !Subtarget->is64Bit()) {
24425 // On a 32-bit target, we cannot bitcast the 64-bit float to a
24426 // 64-bit integer, since that's not a legal type. Since
24427 // OnesOrZeroesF is all ones or all zeroes, we don't need all the
24428 // bits, but can do this little dance to extract the lowest 32 bits
24429 // and work with those going forward.
24430 SDValue Vector64 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64,
24432 SDValue Vector32 = DAG.getNode(ISD::BITCAST, DL, MVT::v4f32,
24434 OnesOrZeroesF = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32,
24435 Vector32, DAG.getIntPtrConstant(0));
24439 SDValue OnesOrZeroesI = DAG.getNode(ISD::BITCAST, DL, IntVT, OnesOrZeroesF);
24440 SDValue ANDed = DAG.getNode(ISD::AND, DL, IntVT, OnesOrZeroesI,
24441 DAG.getConstant(1, IntVT));
24442 SDValue OneBitOfTruth = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, ANDed);
24443 return OneBitOfTruth;
24451 /// CanFoldXORWithAllOnes - Test whether the XOR operand is an AllOnes vector
24452 /// so it can be folded inside ANDNP.
24453 static bool CanFoldXORWithAllOnes(const SDNode *N) {
24454 EVT VT = N->getValueType(0);
24456 // Match direct AllOnes for 128 and 256-bit vectors
24457 if (ISD::isBuildVectorAllOnes(N))
24460 // Look through a bit convert.
24461 if (N->getOpcode() == ISD::BITCAST)
24462 N = N->getOperand(0).getNode();
24464 // Sometimes the operand may come from an insert_subvector building a 256-bit
24466 if (VT.is256BitVector() &&
24467 N->getOpcode() == ISD::INSERT_SUBVECTOR) {
24468 SDValue V1 = N->getOperand(0);
24469 SDValue V2 = N->getOperand(1);
24471 if (V1.getOpcode() == ISD::INSERT_SUBVECTOR &&
24472 V1.getOperand(0).getOpcode() == ISD::UNDEF &&
24473 ISD::isBuildVectorAllOnes(V1.getOperand(1).getNode()) &&
24474 ISD::isBuildVectorAllOnes(V2.getNode()))
24481 // On AVX/AVX2 the type v8i1 is legalized to v8i16, which is an XMM-sized
24482 // register. In most cases we actually compare or select YMM-sized registers,
24483 // and mixing the two types creates horrible code. This method optimizes
24484 // some of the transition sequences.
24485 static SDValue WidenMaskArithmetic(SDNode *N, SelectionDAG &DAG,
24486 TargetLowering::DAGCombinerInfo &DCI,
24487 const X86Subtarget *Subtarget) {
24488 EVT VT = N->getValueType(0);
24489 if (!VT.is256BitVector())
24492 assert((N->getOpcode() == ISD::ANY_EXTEND ||
24493 N->getOpcode() == ISD::ZERO_EXTEND ||
24494 N->getOpcode() == ISD::SIGN_EXTEND) && "Invalid Node");
24496 SDValue Narrow = N->getOperand(0);
24497 EVT NarrowVT = Narrow->getValueType(0);
24498 if (!NarrowVT.is128BitVector())
24501 if (Narrow->getOpcode() != ISD::XOR &&
24502 Narrow->getOpcode() != ISD::AND &&
24503 Narrow->getOpcode() != ISD::OR)
24506 SDValue N0 = Narrow->getOperand(0);
24507 SDValue N1 = Narrow->getOperand(1);
24510 // The left side has to be a trunc.
24511 if (N0.getOpcode() != ISD::TRUNCATE)
24514 // The type of the truncated inputs.
24515 EVT WideVT = N0->getOperand(0)->getValueType(0);
24519 // The right side has to be a 'trunc' or a constant vector.
24520 bool RHSTrunc = N1.getOpcode() == ISD::TRUNCATE;
24521 ConstantSDNode *RHSConstSplat = nullptr;
24522 if (auto *RHSBV = dyn_cast<BuildVectorSDNode>(N1))
24523 RHSConstSplat = RHSBV->getConstantSplatNode();
24524 if (!RHSTrunc && !RHSConstSplat)
24527 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
24529 if (!TLI.isOperationLegalOrPromote(Narrow->getOpcode(), WideVT))
24532 // Set N0 and N1 to hold the inputs to the new wide operation.
24533 N0 = N0->getOperand(0);
24534 if (RHSConstSplat) {
24535 N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT.getScalarType(),
24536 SDValue(RHSConstSplat, 0));
24537 SmallVector<SDValue, 8> C(WideVT.getVectorNumElements(), N1);
24538 N1 = DAG.getNode(ISD::BUILD_VECTOR, DL, WideVT, C);
24539 } else if (RHSTrunc) {
24540 N1 = N1->getOperand(0);
24543 // Generate the wide operation.
24544 SDValue Op = DAG.getNode(Narrow->getOpcode(), DL, WideVT, N0, N1);
24545 unsigned Opcode = N->getOpcode();
24547 case ISD::ANY_EXTEND:
24549 case ISD::ZERO_EXTEND: {
24550 unsigned InBits = NarrowVT.getScalarType().getSizeInBits();
24551 APInt Mask = APInt::getAllOnesValue(InBits);
24552 Mask = Mask.zext(VT.getScalarType().getSizeInBits());
24553 return DAG.getNode(ISD::AND, DL, VT,
24554 Op, DAG.getConstant(Mask, VT));
24556 case ISD::SIGN_EXTEND:
24557 return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT,
24558 Op, DAG.getValueType(NarrowVT));
24560 llvm_unreachable("Unexpected opcode");
24564 static SDValue PerformAndCombine(SDNode *N, SelectionDAG &DAG,
24565 TargetLowering::DAGCombinerInfo &DCI,
24566 const X86Subtarget *Subtarget) {
24567 EVT VT = N->getValueType(0);
24568 if (DCI.isBeforeLegalizeOps())
24571 SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget);
24575 // Create BEXTR instructions
24576 // BEXTR is ((X >> imm) & (2**size-1))
24577 if (VT == MVT::i32 || VT == MVT::i64) {
24578 SDValue N0 = N->getOperand(0);
24579 SDValue N1 = N->getOperand(1);
24582 // Check for BEXTR.
24583 if ((Subtarget->hasBMI() || Subtarget->hasTBM()) &&
24584 (N0.getOpcode() == ISD::SRA || N0.getOpcode() == ISD::SRL)) {
24585 ConstantSDNode *MaskNode = dyn_cast<ConstantSDNode>(N1);
24586 ConstantSDNode *ShiftNode = dyn_cast<ConstantSDNode>(N0.getOperand(1));
24587 if (MaskNode && ShiftNode) {
24588 uint64_t Mask = MaskNode->getZExtValue();
24589 uint64_t Shift = ShiftNode->getZExtValue();
24590 if (isMask_64(Mask)) {
24591 uint64_t MaskSize = CountPopulation_64(Mask);
24592 if (Shift + MaskSize <= VT.getSizeInBits())
24593 return DAG.getNode(X86ISD::BEXTR, DL, VT, N0.getOperand(0),
24594 DAG.getConstant(Shift | (MaskSize << 8), VT));
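// For illustration (assumed constants on a BMI-capable target): for i32
//   (and (srl x, 4), 0xFFF)
// the mask has 12 contiguous bits, so the control immediate is
// 4 | (12 << 8) = 0xC04 and the whole expression becomes one BEXTR.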
24602 // Want to form ANDNP nodes:
24603 // 1) In the hopes of then easily combining them with OR and AND nodes
24604 // to form PBLEND/PSIGN.
24605 // 2) To match ANDN packed intrinsics
24606 if (VT != MVT::v2i64 && VT != MVT::v4i64)
24609 SDValue N0 = N->getOperand(0);
24610 SDValue N1 = N->getOperand(1);
24613 // Check LHS for vnot
24614 if (N0.getOpcode() == ISD::XOR &&
24615 //ISD::isBuildVectorAllOnes(N0.getOperand(1).getNode()))
24616 CanFoldXORWithAllOnes(N0.getOperand(1).getNode()))
24617 return DAG.getNode(X86ISD::ANDNP, DL, VT, N0.getOperand(0), N1);
24619 // Check RHS for vnot
24620 if (N1.getOpcode() == ISD::XOR &&
24621 //ISD::isBuildVectorAllOnes(N1.getOperand(1).getNode()))
24622 CanFoldXORWithAllOnes(N1.getOperand(1).getNode()))
24623 return DAG.getNode(X86ISD::ANDNP, DL, VT, N1.getOperand(0), N0);
24628 static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG,
24629 TargetLowering::DAGCombinerInfo &DCI,
24630 const X86Subtarget *Subtarget) {
24631 if (DCI.isBeforeLegalizeOps())
24634 SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget);
24638 SDValue N0 = N->getOperand(0);
24639 SDValue N1 = N->getOperand(1);
24640 EVT VT = N->getValueType(0);
24642 // look for psign/blend
24643 if (VT == MVT::v2i64 || VT == MVT::v4i64) {
24644 if (!Subtarget->hasSSSE3() ||
24645 (VT == MVT::v4i64 && !Subtarget->hasInt256()))
24648 // Canonicalize pandn to RHS
24649 if (N0.getOpcode() == X86ISD::ANDNP)
24651 // or (and (m, y), (pandn m, x))
24652 if (N0.getOpcode() == ISD::AND && N1.getOpcode() == X86ISD::ANDNP) {
24653 SDValue Mask = N1.getOperand(0);
24654 SDValue X = N1.getOperand(1);
24656 if (N0.getOperand(0) == Mask)
24657 Y = N0.getOperand(1);
24658 if (N0.getOperand(1) == Mask)
24659 Y = N0.getOperand(0);
24661 // Check to see if the mask appeared in both the AND and ANDNP and
24665 // Validate that X, Y, and Mask are BIT_CONVERTS, and see through them.
24666 // Look through mask bitcast.
24667 if (Mask.getOpcode() == ISD::BITCAST)
24668 Mask = Mask.getOperand(0);
24669 if (X.getOpcode() == ISD::BITCAST)
24670 X = X.getOperand(0);
24671 if (Y.getOpcode() == ISD::BITCAST)
24672 Y = Y.getOperand(0);
24674 EVT MaskVT = Mask.getValueType();
24676 // Validate that the Mask operand is a vector sra node.
24677 // FIXME: what to do for bytes, since there is a psignb/pblendvb, but
24678 // there is no psrai.b
24679 unsigned EltBits = MaskVT.getVectorElementType().getSizeInBits();
24680 unsigned SraAmt = ~0;
24681 if (Mask.getOpcode() == ISD::SRA) {
24682 if (auto *AmtBV = dyn_cast<BuildVectorSDNode>(Mask.getOperand(1)))
24683 if (auto *AmtConst = AmtBV->getConstantSplatNode())
24684 SraAmt = AmtConst->getZExtValue();
24685 } else if (Mask.getOpcode() == X86ISD::VSRAI) {
24686 SDValue SraC = Mask.getOperand(1);
24687 SraAmt = cast<ConstantSDNode>(SraC)->getZExtValue();
24689 if ((SraAmt + 1) != EltBits)
24694 // Now we know we at least have a pblendvb with the mask value. See if
24695 // we can form a psignb/w/d.
24696 // psign = x.type == y.type == mask.type && y = sub(0, x);
24697 if (Y.getOpcode() == ISD::SUB && Y.getOperand(1) == X &&
24698 ISD::isBuildVectorAllZeros(Y.getOperand(0).getNode()) &&
24699 X.getValueType() == MaskVT && Y.getValueType() == MaskVT) {
24700 assert((EltBits == 8 || EltBits == 16 || EltBits == 32) &&
24701 "Unsupported VT for PSIGN");
24702 Mask = DAG.getNode(X86ISD::PSIGN, DL, MaskVT, X, Mask.getOperand(0));
24703 return DAG.getNode(ISD::BITCAST, DL, VT, Mask);
24705 // PBLENDVB is only available on SSE4.1.
24706 if (!Subtarget->hasSSE41())
24709 EVT BlendVT = (VT == MVT::v4i64) ? MVT::v32i8 : MVT::v16i8;
24711 X = DAG.getNode(ISD::BITCAST, DL, BlendVT, X);
24712 Y = DAG.getNode(ISD::BITCAST, DL, BlendVT, Y);
24713 Mask = DAG.getNode(ISD::BITCAST, DL, BlendVT, Mask);
24714 Mask = DAG.getNode(ISD::VSELECT, DL, BlendVT, Mask, Y, X);
24715 return DAG.getNode(ISD::BITCAST, DL, VT, Mask);
24719 if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64)
24722 // fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
24723 MachineFunction &MF = DAG.getMachineFunction();
24724 bool OptForSize = MF.getFunction()->getAttributes().
24725 hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
24727 // SHLD/SHRD instructions have lower register pressure, but on some
24728 // platforms they have higher latency than the equivalent
24729 // series of shifts/or that would otherwise be generated.
24730 // Don't fold (or (x << c) | (y >> (64 - c))) if SHLD/SHRD instructions
24731 // have higher latencies and we are not optimizing for size.
24732 if (!OptForSize && Subtarget->isSHLDSlow())
24735 if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL)
24737 if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL)
24739 if (!N0.hasOneUse() || !N1.hasOneUse())
24742 SDValue ShAmt0 = N0.getOperand(1);
24743 if (ShAmt0.getValueType() != MVT::i8)
24745 SDValue ShAmt1 = N1.getOperand(1);
24746 if (ShAmt1.getValueType() != MVT::i8)
24748 if (ShAmt0.getOpcode() == ISD::TRUNCATE)
24749 ShAmt0 = ShAmt0.getOperand(0);
24750 if (ShAmt1.getOpcode() == ISD::TRUNCATE)
24751 ShAmt1 = ShAmt1.getOperand(0);
24754 unsigned Opc = X86ISD::SHLD;
24755 SDValue Op0 = N0.getOperand(0);
24756 SDValue Op1 = N1.getOperand(0);
24757 if (ShAmt0.getOpcode() == ISD::SUB) {
24758 Opc = X86ISD::SHRD;
24759 std::swap(Op0, Op1);
24760 std::swap(ShAmt0, ShAmt1);
24763 unsigned Bits = VT.getSizeInBits();
24764 if (ShAmt1.getOpcode() == ISD::SUB) {
24765 SDValue Sum = ShAmt1.getOperand(0);
24766 if (ConstantSDNode *SumC = dyn_cast<ConstantSDNode>(Sum)) {
24767 SDValue ShAmt1Op1 = ShAmt1.getOperand(1);
24768 if (ShAmt1Op1.getNode()->getOpcode() == ISD::TRUNCATE)
24769 ShAmt1Op1 = ShAmt1Op1.getOperand(0);
24770 if (SumC->getSExtValue() == Bits && ShAmt1Op1 == ShAmt0)
24771 return DAG.getNode(Opc, DL, VT,
24773 DAG.getNode(ISD::TRUNCATE, DL,
24776 } else if (ConstantSDNode *ShAmt1C = dyn_cast<ConstantSDNode>(ShAmt1)) {
24777 ConstantSDNode *ShAmt0C = dyn_cast<ConstantSDNode>(ShAmt0);
24779 ShAmt0C->getSExtValue() + ShAmt1C->getSExtValue() == Bits)
24780 return DAG.getNode(Opc, DL, VT,
24781 N0.getOperand(0), N1.getOperand(0),
24782 DAG.getNode(ISD::TRUNCATE, DL,
24789 // Generate NEG and CMOV for integer abs.
24790 static SDValue performIntegerAbsCombine(SDNode *N, SelectionDAG &DAG) {
24791 EVT VT = N->getValueType(0);
24793 // Since X86 does not have CMOV for 8-bit integer, we don't convert
24794 // 8-bit integer abs to NEG and CMOV.
24795 if (VT.isInteger() && VT.getSizeInBits() == 8)
24798 SDValue N0 = N->getOperand(0);
24799 SDValue N1 = N->getOperand(1);
24802 // Check pattern of XOR(ADD(X,Y), Y) where Y is SRA(X, size(X)-1)
24803 // and change it to SUB and CMOV.
24804 if (VT.isInteger() && N->getOpcode() == ISD::XOR &&
24805 N0.getOpcode() == ISD::ADD &&
24806 N0.getOperand(1) == N1 &&
24807 N1.getOpcode() == ISD::SRA &&
24808 N1.getOperand(0) == N0.getOperand(0))
24809 if (ConstantSDNode *Y1C = dyn_cast<ConstantSDNode>(N1.getOperand(1)))
24810 if (Y1C->getAPIntValue() == VT.getSizeInBits()-1) {
24811 // Generate SUB & CMOV.
24812 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, DAG.getVTList(VT, MVT::i32),
24813 DAG.getConstant(0, VT), N0.getOperand(0));
24815 SDValue Ops[] = { N0.getOperand(0), Neg,
24816 DAG.getConstant(X86::COND_GE, MVT::i8),
24817 SDValue(Neg.getNode(), 1) };
24818 return DAG.getNode(X86ISD::CMOV, DL, DAG.getVTList(VT, MVT::Glue), Ops);
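// For illustration (i32 assumed): the canonical abs pattern
//   y = sra x, 31;  abs = xor (add x, y), y
// is matched above and rebuilt as a SUB computing 0 - x (which also sets
// EFLAGS) feeding a CMOV that keeps 0 - x when that result is >= 0 and x
// otherwise, i.e. NEG + CMOV instead of the shift/add/xor sequence.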
24823 // PerformXorCombine - Attempts to turn XOR nodes into BLSMSK nodes
24824 static SDValue PerformXorCombine(SDNode *N, SelectionDAG &DAG,
24825 TargetLowering::DAGCombinerInfo &DCI,
24826 const X86Subtarget *Subtarget) {
24827 if (DCI.isBeforeLegalizeOps())
24830 if (Subtarget->hasCMov()) {
24831 SDValue RV = performIntegerAbsCombine(N, DAG);
24839 /// PerformLOADCombine - Do target-specific dag combines on LOAD nodes.
24840 static SDValue PerformLOADCombine(SDNode *N, SelectionDAG &DAG,
24841 TargetLowering::DAGCombinerInfo &DCI,
24842 const X86Subtarget *Subtarget) {
24843 LoadSDNode *Ld = cast<LoadSDNode>(N);
24844 EVT RegVT = Ld->getValueType(0);
24845 EVT MemVT = Ld->getMemoryVT();
24847 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
24849 // For chips with slow 32-byte unaligned loads, break the 32-byte operation
24850 // into two 16-byte operations.
24851 ISD::LoadExtType Ext = Ld->getExtensionType();
24852 unsigned Alignment = Ld->getAlignment();
24853 bool IsAligned = Alignment == 0 || Alignment >= MemVT.getSizeInBits()/8;
24854 if (RegVT.is256BitVector() && Subtarget->isUnalignedMem32Slow() &&
24855 !DCI.isBeforeLegalizeOps() && !IsAligned && Ext == ISD::NON_EXTLOAD) {
24856 unsigned NumElems = RegVT.getVectorNumElements();
24860 SDValue Ptr = Ld->getBasePtr();
24861 SDValue Increment = DAG.getConstant(16, TLI.getPointerTy());
24863 EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
24865 SDValue Load1 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr,
24866 Ld->getPointerInfo(), Ld->isVolatile(),
24867 Ld->isNonTemporal(), Ld->isInvariant(),
24869 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
24870 SDValue Load2 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr,
24871 Ld->getPointerInfo(), Ld->isVolatile(),
24872 Ld->isNonTemporal(), Ld->isInvariant(),
24873 std::min(16U, Alignment));
24874 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
24876 Load2.getValue(1));
24878 SDValue NewVec = DAG.getUNDEF(RegVT);
24879 NewVec = Insert128BitVector(NewVec, Load1, 0, DAG, dl);
24880 NewVec = Insert128BitVector(NewVec, Load2, NumElems/2, DAG, dl);
24881 return DCI.CombineTo(N, NewVec, TF, true);
24887 /// PerformMLOADCombine - Resolve extending loads
24888 static SDValue PerformMLOADCombine(SDNode *N, SelectionDAG &DAG,
24889 TargetLowering::DAGCombinerInfo &DCI,
24890 const X86Subtarget *Subtarget) {
24891 MaskedLoadSDNode *Mld = cast<MaskedLoadSDNode>(N);
24892 if (Mld->getExtensionType() != ISD::SEXTLOAD)
24895 EVT VT = Mld->getValueType(0);
24896 unsigned NumElems = VT.getVectorNumElements();
24897 EVT LdVT = Mld->getMemoryVT();
24900 assert(LdVT != VT && "Cannot extend to the same type");
24901 unsigned ToSz = VT.getVectorElementType().getSizeInBits();
24902 unsigned FromSz = LdVT.getVectorElementType().getSizeInBits();
24903 // The From and To sizes and the element count must all be powers of two.
24904 assert (isPowerOf2_32(NumElems * FromSz * ToSz) &&
24905 "Unexpected size for extending masked load");
24907 unsigned SizeRatio = ToSz / FromSz;
24908 assert(SizeRatio * NumElems * FromSz == VT.getSizeInBits());
24910 // Create a type on which we perform the shuffle
24911 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
24912 LdVT.getScalarType(), NumElems*SizeRatio);
24913 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
24915 // Convert Src0 value
24916 SDValue WideSrc0 = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mld->getSrc0());
24917 if (Mld->getSrc0().getOpcode() != ISD::UNDEF) {
24918 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
24919 for (unsigned i = 0; i != NumElems; ++i)
24920 ShuffleVec[i] = i * SizeRatio;
24922 // Can't shuffle using an illegal type.
24923 assert (DAG.getTargetLoweringInfo().isTypeLegal(WideVecVT)
24924 && "WideVecVT should be legal");
24925 WideSrc0 = DAG.getVectorShuffle(WideVecVT, dl, WideSrc0,
24926 DAG.getUNDEF(WideVecVT), &ShuffleVec[0]);
24928 // Prepare the new mask
24930 SDValue Mask = Mld->getMask();
24931 if (Mask.getValueType() == VT) {
24932 // Mask and original value have the same type
24933 NewMask = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mask);
24934 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
24935 for (unsigned i = 0; i != NumElems; ++i)
24936 ShuffleVec[i] = i * SizeRatio;
24937 for (unsigned i = NumElems; i != NumElems*SizeRatio; ++i)
24938 ShuffleVec[i] = NumElems*SizeRatio;
24939 NewMask = DAG.getVectorShuffle(WideVecVT, dl, NewMask,
24940 DAG.getConstant(0, WideVecVT),
24944 assert(Mask.getValueType().getVectorElementType() == MVT::i1);
24945 unsigned WidenNumElts = NumElems*SizeRatio;
24946 unsigned MaskNumElts = VT.getVectorNumElements();
24947 EVT NewMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
24950 unsigned NumConcat = WidenNumElts / MaskNumElts;
24951 SmallVector<SDValue, 16> Ops(NumConcat);
24952 SDValue ZeroVal = DAG.getConstant(0, Mask.getValueType());
24954 for (unsigned i = 1; i != NumConcat; ++i)
24957 NewMask = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewMaskVT, Ops);
24960 SDValue WideLd = DAG.getMaskedLoad(WideVecVT, dl, Mld->getChain(),
24961 Mld->getBasePtr(), NewMask, WideSrc0,
24962 Mld->getMemoryVT(), Mld->getMemOperand(),
24964 SDValue NewVec = DAG.getNode(X86ISD::VSEXT, dl, VT, WideLd);
24965 return DCI.CombineTo(N, NewVec, WideLd.getValue(1), true);
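// For illustration (assumed types): a masked sign-extending load of v8i16
// that produces v8i32 is rewritten as a v16i16 masked load whose widened
// mask is zero in the extra lanes, followed by an X86ISD::VSEXT of the low
// half back up to v8i32.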
24968 /// PerformMSTORECombine - Resolve truncating stores
24969 static SDValue PerformMSTORECombine(SDNode *N, SelectionDAG &DAG,
24970 const X86Subtarget *Subtarget) {
24971 MaskedStoreSDNode *Mst = cast<MaskedStoreSDNode>(N);
24972 if (!Mst->isTruncatingStore())
24975 EVT VT = Mst->getValue().getValueType();
24976 unsigned NumElems = VT.getVectorNumElements();
24977 EVT StVT = Mst->getMemoryVT();
24980 assert(StVT != VT && "Cannot truncate to the same type");
24981 unsigned FromSz = VT.getVectorElementType().getSizeInBits();
24982 unsigned ToSz = StVT.getVectorElementType().getSizeInBits();
24984 // The From and To sizes and the element count must all be powers of two.
24985 assert (isPowerOf2_32(NumElems * FromSz * ToSz) &&
24986 "Unexpected size for truncating masked store");
24987 // We are going to use the original vector elt for storing.
24988 // Accumulated smaller vector elements must be a multiple of the store size.
24989 assert (((NumElems * FromSz) % ToSz) == 0 &&
24990 "Unexpected ratio for truncating masked store");
24992 unsigned SizeRatio = FromSz / ToSz;
24993 assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits());
24995 // Create a type on which we perform the shuffle
24996 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
24997 StVT.getScalarType(), NumElems*SizeRatio);
24999 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
25001 SDValue WideVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mst->getValue());
25002 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
25003 for (unsigned i = 0; i != NumElems; ++i)
25004 ShuffleVec[i] = i * SizeRatio;
25006 // Can't shuffle using an illegal type.
25007 assert (DAG.getTargetLoweringInfo().isTypeLegal(WideVecVT)
25008 && "WideVecVT should be legal");
25010 SDValue TruncatedVal = DAG.getVectorShuffle(WideVecVT, dl, WideVec,
25011 DAG.getUNDEF(WideVecVT),
25015 SDValue Mask = Mst->getMask();
25016 if (Mask.getValueType() == VT) {
25017 // Mask and original value have the same type
25018 NewMask = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mask);
25019 for (unsigned i = 0; i != NumElems; ++i)
25020 ShuffleVec[i] = i * SizeRatio;
25021 for (unsigned i = NumElems; i != NumElems*SizeRatio; ++i)
25022 ShuffleVec[i] = NumElems*SizeRatio;
25023 NewMask = DAG.getVectorShuffle(WideVecVT, dl, NewMask,
25024 DAG.getConstant(0, WideVecVT),
25028 assert(Mask.getValueType().getVectorElementType() == MVT::i1);
25029 unsigned WidenNumElts = NumElems*SizeRatio;
25030 unsigned MaskNumElts = VT.getVectorNumElements();
25031 EVT NewMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
25034 unsigned NumConcat = WidenNumElts / MaskNumElts;
25035 SmallVector<SDValue, 16> Ops(NumConcat);
25036 SDValue ZeroVal = DAG.getConstant(0, Mask.getValueType());
25038 for (unsigned i = 1; i != NumConcat; ++i)
25041 NewMask = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewMaskVT, Ops);
25044 return DAG.getMaskedStore(Mst->getChain(), dl, TruncatedVal, Mst->getBasePtr(),
25045 NewMask, StVT, Mst->getMemOperand(), false);
25047 /// PerformSTORECombine - Do target-specific dag combines on STORE nodes.
25048 static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
25049 const X86Subtarget *Subtarget) {
25050 StoreSDNode *St = cast<StoreSDNode>(N);
25051 EVT VT = St->getValue().getValueType();
25052 EVT StVT = St->getMemoryVT();
25054 SDValue StoredVal = St->getOperand(1);
25055 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25057 // If we are saving a concatenation of two XMM registers and 32-byte stores
25058 // are slow, such as on Sandy Bridge, perform two 16-byte stores.
25059 unsigned Alignment = St->getAlignment();
25060 bool IsAligned = Alignment == 0 || Alignment >= VT.getSizeInBits()/8;
25061 if (VT.is256BitVector() && Subtarget->isUnalignedMem32Slow() &&
25062 StVT == VT && !IsAligned) {
25063 unsigned NumElems = VT.getVectorNumElements();
25067 SDValue Value0 = Extract128BitVector(StoredVal, 0, DAG, dl);
25068 SDValue Value1 = Extract128BitVector(StoredVal, NumElems/2, DAG, dl);
25070 SDValue Stride = DAG.getConstant(16, TLI.getPointerTy());
25071 SDValue Ptr0 = St->getBasePtr();
25072 SDValue Ptr1 = DAG.getNode(ISD::ADD, dl, Ptr0.getValueType(), Ptr0, Stride);
25074 SDValue Ch0 = DAG.getStore(St->getChain(), dl, Value0, Ptr0,
25075 St->getPointerInfo(), St->isVolatile(),
25076 St->isNonTemporal(), Alignment);
25077 SDValue Ch1 = DAG.getStore(St->getChain(), dl, Value1, Ptr1,
25078 St->getPointerInfo(), St->isVolatile(),
25079 St->isNonTemporal(),
25080 std::min(16U, Alignment));
25081 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ch0, Ch1);
25084 // Optimize trunc store (of multiple scalars) to shuffle and store.
25085 // First, pack all of the elements in one place. Next, store to memory
25086 // in fewer chunks.
25087 if (St->isTruncatingStore() && VT.isVector()) {
25088 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25089 unsigned NumElems = VT.getVectorNumElements();
25090 assert(StVT != VT && "Cannot truncate to the same type");
25091 unsigned FromSz = VT.getVectorElementType().getSizeInBits();
25092 unsigned ToSz = StVT.getVectorElementType().getSizeInBits();
25094 // The From and To sizes and the element count must all be powers of two.
25095 if (!isPowerOf2_32(NumElems * FromSz * ToSz)) return SDValue();
25096 // We are going to use the original vector elt for storing.
25097 // Accumulated smaller vector elements must be a multiple of the store size.
25098 if (0 != (NumElems * FromSz) % ToSz) return SDValue();
25100 unsigned SizeRatio = FromSz / ToSz;
25102 assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits());
25104 // Create a type on which we perform the shuffle
25105 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
25106 StVT.getScalarType(), NumElems*SizeRatio);
25108 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
25110 SDValue WideVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, St->getValue());
25111 SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1);
25112 for (unsigned i = 0; i != NumElems; ++i)
25113 ShuffleVec[i] = i * SizeRatio;
25115 // Can't shuffle using an illegal type.
25116 if (!TLI.isTypeLegal(WideVecVT))
25119 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, WideVec,
25120 DAG.getUNDEF(WideVecVT),
25122 // At this point all of the data is stored at the bottom of the
25123 // register. We now need to save it to mem.
25125 // Find the largest store unit
25126 MVT StoreType = MVT::i8;
25127 for (MVT Tp : MVT::integer_valuetypes()) {
25128 if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToSz)
25132 // On 32-bit systems, we can't store 64-bit integers directly. Try bitcasting to f64.
25133 if (TLI.isTypeLegal(MVT::f64) && StoreType.getSizeInBits() < 64 &&
25134 (64 <= NumElems * ToSz))
25135 StoreType = MVT::f64;
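// E.g. (illustrative) for the v8i32 -> v8i8 case above, the 64 bits of
// packed data are written with one i64 store on x86-64, or with one
// f64 (movq/movsd) store on a 32-bit target where i64 is not legal.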
25137 // Bitcast the original vector into a vector of store-size units
25138 EVT StoreVecVT = EVT::getVectorVT(*DAG.getContext(),
25139 StoreType, VT.getSizeInBits()/StoreType.getSizeInBits());
25140 assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits());
25141 SDValue ShuffWide = DAG.getNode(ISD::BITCAST, dl, StoreVecVT, Shuff);
25142 SmallVector<SDValue, 8> Chains;
25143 SDValue Increment = DAG.getConstant(StoreType.getSizeInBits()/8,
25144 TLI.getPointerTy());
25145 SDValue Ptr = St->getBasePtr();
25147 // Perform one or more big stores into memory.
25148 for (unsigned i=0, e=(ToSz*NumElems)/StoreType.getSizeInBits(); i!=e; ++i) {
25149 SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
25150 StoreType, ShuffWide,
25151 DAG.getIntPtrConstant(i));
25152 SDValue Ch = DAG.getStore(St->getChain(), dl, SubVec, Ptr,
25153 St->getPointerInfo(), St->isVolatile(),
25154 St->isNonTemporal(), St->getAlignment());
25155 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
25156 Chains.push_back(Ch);
25159 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
25162 // Turn load->store of MMX types into GPR load/stores. This avoids clobbering
25163 // the FP state in cases where an emms may be missing.
25164 // A preferable solution to the general problem is to figure out the right
25165 // places to insert EMMS. This qualifies as a quick hack.
25167 // Similarly, turn load->store of i64 into double load/stores in 32-bit mode.
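// Illustrative example (assuming SSE2 and a single-use load): a plain
// 64-bit copy such as
//   %v = load i64, i64* %src
//   store i64 %v, i64* %dst
// on a 32-bit target becomes one f64 (movsd) load/store pair instead of
// two 32-bit integer load/store pairs.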
25168 if (VT.getSizeInBits() != 64)
25171 const Function *F = DAG.getMachineFunction().getFunction();
25172 bool NoImplicitFloatOps = F->getAttributes().
25173 hasAttribute(AttributeSet::FunctionIndex, Attribute::NoImplicitFloat);
25174 bool F64IsLegal = !DAG.getTarget().Options.UseSoftFloat && !NoImplicitFloatOps
25175 && Subtarget->hasSSE2();
25176 if ((VT.isVector() ||
25177 (VT == MVT::i64 && F64IsLegal && !Subtarget->is64Bit())) &&
25178 isa<LoadSDNode>(St->getValue()) &&
25179 !cast<LoadSDNode>(St->getValue())->isVolatile() &&
25180 St->getChain().hasOneUse() && !St->isVolatile()) {
25181 SDNode* LdVal = St->getValue().getNode();
25182 LoadSDNode *Ld = nullptr;
25183 int TokenFactorIndex = -1;
25184 SmallVector<SDValue, 8> Ops;
25185 SDNode* ChainVal = St->getChain().getNode();
25186 // Must be a store of a load. We currently handle two cases: the load
25187 // is a direct child, and it's under an intervening TokenFactor. It is
25188 // possible to dig deeper under nested TokenFactors.
25189 if (ChainVal == LdVal)
25190 Ld = cast<LoadSDNode>(St->getChain());
25191 else if (St->getValue().hasOneUse() &&
25192 ChainVal->getOpcode() == ISD::TokenFactor) {
25193 for (unsigned i = 0, e = ChainVal->getNumOperands(); i != e; ++i) {
25194 if (ChainVal->getOperand(i).getNode() == LdVal) {
25195 TokenFactorIndex = i;
25196 Ld = cast<LoadSDNode>(St->getValue());
25198 Ops.push_back(ChainVal->getOperand(i));
25202 if (!Ld || !ISD::isNormalLoad(Ld))
25205 // If this is not the MMX case, i.e. we are just turning i64 load/store
25206 // into f64 load/store, avoid the transformation if there are multiple
25207 // uses of the loaded value.
25208 if (!VT.isVector() && !Ld->hasNUsesOfValue(1, 0))
25213 // If we are a 64-bit capable x86, lower to a single movq load/store pair.
25214 // Otherwise, if it's legal to use f64 SSE instructions, use an f64 load/store pair instead.
25216 if (Subtarget->is64Bit() || F64IsLegal) {
25217 EVT LdVT = Subtarget->is64Bit() ? MVT::i64 : MVT::f64;
25218 SDValue NewLd = DAG.getLoad(LdVT, LdDL, Ld->getChain(), Ld->getBasePtr(),
25219 Ld->getPointerInfo(), Ld->isVolatile(),
25220 Ld->isNonTemporal(), Ld->isInvariant(),
25221 Ld->getAlignment());
25222 SDValue NewChain = NewLd.getValue(1);
25223 if (TokenFactorIndex != -1) {
25224 Ops.push_back(NewChain);
25225 NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, Ops);
25227 return DAG.getStore(NewChain, StDL, NewLd, St->getBasePtr(),
25228 St->getPointerInfo(),
25229 St->isVolatile(), St->isNonTemporal(),
25230 St->getAlignment());
25233 // Otherwise, lower to two pairs of 32-bit loads / stores.
25234 SDValue LoAddr = Ld->getBasePtr();
25235 SDValue HiAddr = DAG.getNode(ISD::ADD, LdDL, MVT::i32, LoAddr,
25236 DAG.getConstant(4, MVT::i32));
25238 SDValue LoLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), LoAddr,
25239 Ld->getPointerInfo(),
25240 Ld->isVolatile(), Ld->isNonTemporal(),
25241 Ld->isInvariant(), Ld->getAlignment());
25242 SDValue HiLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), HiAddr,
25243 Ld->getPointerInfo().getWithOffset(4),
25244 Ld->isVolatile(), Ld->isNonTemporal(),
25246 MinAlign(Ld->getAlignment(), 4));
25248 SDValue NewChain = LoLd.getValue(1);
25249 if (TokenFactorIndex != -1) {
25250 Ops.push_back(LoLd);
25251 Ops.push_back(HiLd);
25252 NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, Ops);
25255 LoAddr = St->getBasePtr();
25256 HiAddr = DAG.getNode(ISD::ADD, StDL, MVT::i32, LoAddr,
25257 DAG.getConstant(4, MVT::i32));
25259 SDValue LoSt = DAG.getStore(NewChain, StDL, LoLd, LoAddr,
25260 St->getPointerInfo(),
25261 St->isVolatile(), St->isNonTemporal(),
25262 St->getAlignment());
25263 SDValue HiSt = DAG.getStore(NewChain, StDL, HiLd, HiAddr,
25264 St->getPointerInfo().getWithOffset(4),
25266 St->isNonTemporal(),
25267 MinAlign(St->getAlignment(), 4));
25268 return DAG.getNode(ISD::TokenFactor, StDL, MVT::Other, LoSt, HiSt);
25273 /// Return 'true' if this vector operation is "horizontal"
25274 /// and return the operands for the horizontal operation in LHS and RHS. A
25275 /// horizontal operation performs the binary operation on successive elements
25276 /// of its first operand, then on successive elements of its second operand,
25277 /// returning the resulting values in a vector. For example, if
25278 /// A = < float a0, float a1, float a2, float a3 >
25280 /// B = < float b0, float b1, float b2, float b3 >
25281 /// then the result of doing a horizontal operation on A and B is
25282 /// A horizontal-op B = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >.
25283 /// In short, LHS and RHS are inspected to see if LHS op RHS is of the form
25284 /// A horizontal-op B, for some already available A and B, and if so then LHS is
25285 /// set to A, RHS to B, and the routine returns 'true'.
25286 /// Note that the binary operation should have the property that if one of the
25287 /// operands is UNDEF then the result is UNDEF.
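/// On X86 this pattern feeds the horizontal add/sub combines below: for
/// example, a matching v4f32 FADD is rewritten to X86ISD::FHADD, which is
/// typically emitted as the SSE3 HADDPS instruction.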
25288 static bool isHorizontalBinOp(SDValue &LHS, SDValue &RHS, bool IsCommutative) {
25289 // Look for the following pattern: if
25290 // A = < float a0, float a1, float a2, float a3 >
25291 // B = < float b0, float b1, float b2, float b3 >
25293 // LHS = VECTOR_SHUFFLE A, B, <0, 2, 4, 6>
25294 // RHS = VECTOR_SHUFFLE A, B, <1, 3, 5, 7>
25295 // then LHS op RHS = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >
25296 // which is A horizontal-op B.
25298 // At least one of the operands should be a vector shuffle.
25299 if (LHS.getOpcode() != ISD::VECTOR_SHUFFLE &&
25300 RHS.getOpcode() != ISD::VECTOR_SHUFFLE)
25303 MVT VT = LHS.getSimpleValueType();
25305 assert((VT.is128BitVector() || VT.is256BitVector()) &&
25306 "Unsupported vector type for horizontal add/sub");
25308 // Handle 128 and 256-bit vector lengths. AVX defines horizontal add/sub to
25309 // operate independently on 128-bit lanes.
25310 unsigned NumElts = VT.getVectorNumElements();
25311 unsigned NumLanes = VT.getSizeInBits()/128;
25312 unsigned NumLaneElts = NumElts / NumLanes;
25313 assert((NumLaneElts % 2 == 0) &&
25314 "Vector type should have an even number of elements in each lane");
25315 unsigned HalfLaneElts = NumLaneElts/2;
25317 // View LHS in the form
25318 // LHS = VECTOR_SHUFFLE A, B, LMask
25319 // If LHS is not a shuffle then pretend it is the shuffle
25320 // LHS = VECTOR_SHUFFLE LHS, undef, <0, 1, ..., N-1>
25321 // NOTE: in what follows a default initialized SDValue represents an UNDEF of type VT.
25324 SmallVector<int, 16> LMask(NumElts);
25325 if (LHS.getOpcode() == ISD::VECTOR_SHUFFLE) {
25326 if (LHS.getOperand(0).getOpcode() != ISD::UNDEF)
25327 A = LHS.getOperand(0);
25328 if (LHS.getOperand(1).getOpcode() != ISD::UNDEF)
25329 B = LHS.getOperand(1);
25330 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(LHS.getNode())->getMask();
25331 std::copy(Mask.begin(), Mask.end(), LMask.begin());
25333 if (LHS.getOpcode() != ISD::UNDEF)
25335 for (unsigned i = 0; i != NumElts; ++i)
25339 // Likewise, view RHS in the form
25340 // RHS = VECTOR_SHUFFLE C, D, RMask
25342 SmallVector<int, 16> RMask(NumElts);
25343 if (RHS.getOpcode() == ISD::VECTOR_SHUFFLE) {
25344 if (RHS.getOperand(0).getOpcode() != ISD::UNDEF)
25345 C = RHS.getOperand(0);
25346 if (RHS.getOperand(1).getOpcode() != ISD::UNDEF)
25347 D = RHS.getOperand(1);
25348 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(RHS.getNode())->getMask();
25349 std::copy(Mask.begin(), Mask.end(), RMask.begin());
25351 if (RHS.getOpcode() != ISD::UNDEF)
25353 for (unsigned i = 0; i != NumElts; ++i)
25357 // Check that the shuffles are both shuffling the same vectors.
25358 if (!(A == C && B == D) && !(A == D && B == C))
25361 // If everything is UNDEF then bail out: it would be better to fold to UNDEF.
25362 if (!A.getNode() && !B.getNode())
25365 // If A and B occur in reverse order in RHS, then "swap" them (which means
25366 // rewriting the mask).
25368 CommuteVectorShuffleMask(RMask, NumElts);
25370 // At this point LHS and RHS are equivalent to
25371 // LHS = VECTOR_SHUFFLE A, B, LMask
25372 // RHS = VECTOR_SHUFFLE A, B, RMask
25373 // Check that the masks correspond to performing a horizontal operation.
25374 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
25375 for (unsigned i = 0; i != NumLaneElts; ++i) {
25376 int LIdx = LMask[i+l], RIdx = RMask[i+l];
25378 // Ignore any UNDEF components.
25379 if (LIdx < 0 || RIdx < 0 ||
25380 (!A.getNode() && (LIdx < (int)NumElts || RIdx < (int)NumElts)) ||
25381 (!B.getNode() && (LIdx >= (int)NumElts || RIdx >= (int)NumElts)))
25384 // Check that successive elements are being operated on. If not, this is
25385 // not a horizontal operation.
25386 unsigned Src = (i/HalfLaneElts); // each lane is split between srcs
25387 int Index = 2*(i%HalfLaneElts) + NumElts*Src + l;
25388 if (!(LIdx == Index && RIdx == Index + 1) &&
25389 !(IsCommutative && LIdx == Index + 1 && RIdx == Index))
25394 LHS = A.getNode() ? A : B; // If A is 'UNDEF', use B for it.
25395 RHS = B.getNode() ? B : A; // If B is 'UNDEF', use A for it.
25399 /// Do target-specific dag combines on floating point adds.
25400 static SDValue PerformFADDCombine(SDNode *N, SelectionDAG &DAG,
25401 const X86Subtarget *Subtarget) {
25402 EVT VT = N->getValueType(0);
25403 SDValue LHS = N->getOperand(0);
25404 SDValue RHS = N->getOperand(1);
25406 // Try to synthesize horizontal adds from adds of shuffles.
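// E.g. (fadd (shuffle A, B, <0,2,4,6>), (shuffle A, B, <1,3,5,7>))
// becomes (X86ISD::FHADD A, B) when the checks below succeed.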
25407 if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
25408 (Subtarget->hasFp256() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
25409 isHorizontalBinOp(LHS, RHS, true))
25410 return DAG.getNode(X86ISD::FHADD, SDLoc(N), VT, LHS, RHS);
25414 /// Do target-specific dag combines on floating point subs.
25415 static SDValue PerformFSUBCombine(SDNode *N, SelectionDAG &DAG,
25416 const X86Subtarget *Subtarget) {
25417 EVT VT = N->getValueType(0);
25418 SDValue LHS = N->getOperand(0);
25419 SDValue RHS = N->getOperand(1);
25421 // Try to synthesize horizontal subs from subs of shuffles.
25422 if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
25423 (Subtarget->hasFp256() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
25424 isHorizontalBinOp(LHS, RHS, false))
25425 return DAG.getNode(X86ISD::FHSUB, SDLoc(N), VT, LHS, RHS);
25429 /// Do target-specific dag combines on X86ISD::FOR and X86ISD::FXOR nodes.
25430 static SDValue PerformFORCombine(SDNode *N, SelectionDAG &DAG) {
25431 assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR);
25432 // F[X]OR(0.0, x) -> x
25433 // F[X]OR(x, 0.0) -> x
25434 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
25435 if (C->getValueAPF().isPosZero())
25436 return N->getOperand(1);
25437 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
25438 if (C->getValueAPF().isPosZero())
25439 return N->getOperand(0);
25443 /// Do target-specific dag combines on X86ISD::FMIN and X86ISD::FMAX nodes.
25444 static SDValue PerformFMinFMaxCombine(SDNode *N, SelectionDAG &DAG) {
25445 assert(N->getOpcode() == X86ISD::FMIN || N->getOpcode() == X86ISD::FMAX);
25447 // Only perform optimizations if UnsafeMath is used.
25448 if (!DAG.getTarget().Options.UnsafeFPMath)
25451 // If we run in unsafe-math mode, then convert the FMAX and FMIN nodes
25452 // into FMINC and FMAXC, which are commutative operations.
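// (The non-commutative FMIN/FMAX nodes mirror the MINPS/MAXPS semantics,
// which return the second operand when either input is NaN or when
// comparing +0.0 with -0.0; under unsafe-math that distinction can be
// ignored.)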
25453 unsigned NewOp = 0;
25454 switch (N->getOpcode()) {
25455 default: llvm_unreachable("unknown opcode");
25456 case X86ISD::FMIN: NewOp = X86ISD::FMINC; break;
25457 case X86ISD::FMAX: NewOp = X86ISD::FMAXC; break;
25460 return DAG.getNode(NewOp, SDLoc(N), N->getValueType(0),
25461 N->getOperand(0), N->getOperand(1));
25464 /// Do target-specific dag combines on X86ISD::FAND nodes.
25465 static SDValue PerformFANDCombine(SDNode *N, SelectionDAG &DAG) {
25466 // FAND(0.0, x) -> 0.0
25467 // FAND(x, 0.0) -> 0.0
25468 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
25469 if (C->getValueAPF().isPosZero())
25470 return N->getOperand(0);
25471 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
25472 if (C->getValueAPF().isPosZero())
25473 return N->getOperand(1);
25477 /// Do target-specific dag combines on X86ISD::FANDN nodes
25478 static SDValue PerformFANDNCombine(SDNode *N, SelectionDAG &DAG) {
25479 // FANDN(x, 0.0) -> 0.0
25480 // FANDN(0.0, x) -> x
25481 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
25482 if (C->getValueAPF().isPosZero())
25483 return N->getOperand(1);
25484 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
25485 if (C->getValueAPF().isPosZero())
25486 return N->getOperand(1);
25490 static SDValue PerformBTCombine(SDNode *N,
25492 TargetLowering::DAGCombinerInfo &DCI) {
25493 // BT ignores high bits in the bit index operand.
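// E.g. for a 32-bit BT only the low 5 bits of the index are demanded,
// so a preceding (and %idx, 31) can be simplified away here.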
25494 SDValue Op1 = N->getOperand(1);
25495 if (Op1.hasOneUse()) {
25496 unsigned BitWidth = Op1.getValueSizeInBits();
25497 APInt DemandedMask = APInt::getLowBitsSet(BitWidth, Log2_32(BitWidth));
25498 APInt KnownZero, KnownOne;
25499 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
25500 !DCI.isBeforeLegalizeOps());
25501 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25502 if (TLO.ShrinkDemandedConstant(Op1, DemandedMask) ||
25503 TLI.SimplifyDemandedBits(Op1, DemandedMask, KnownZero, KnownOne, TLO))
25504 DCI.CommitTargetLoweringOpt(TLO);
25509 static SDValue PerformVZEXT_MOVLCombine(SDNode *N, SelectionDAG &DAG) {
25510 SDValue Op = N->getOperand(0);
25511 if (Op.getOpcode() == ISD::BITCAST)
25512 Op = Op.getOperand(0);
25513 EVT VT = N->getValueType(0), OpVT = Op.getValueType();
25514 if (Op.getOpcode() == X86ISD::VZEXT_LOAD &&
25515 VT.getVectorElementType().getSizeInBits() ==
25516 OpVT.getVectorElementType().getSizeInBits()) {
25517 return DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op);
25522 static SDValue PerformSIGN_EXTEND_INREGCombine(SDNode *N, SelectionDAG &DAG,
25523 const X86Subtarget *Subtarget) {
25524 EVT VT = N->getValueType(0);
25525 if (!VT.isVector())
25528 SDValue N0 = N->getOperand(0);
25529 SDValue N1 = N->getOperand(1);
25530 EVT ExtraVT = cast<VTSDNode>(N1)->getVT();
25533 // SIGN_EXTEND_INREG to v4i64 is an expensive operation on both
25534 // SSE and AVX2, since there is no sign-extended shift right
25535 // operation on a vector with 64-bit elements.
25536 //(sext_in_reg (v4i64 anyext (v4i32 x )), ExtraVT) ->
25537 // (v4i64 sext (v4i32 sext_in_reg (v4i32 x , ExtraVT)))
25538 if (VT == MVT::v4i64 && (N0.getOpcode() == ISD::ANY_EXTEND ||
25539 N0.getOpcode() == ISD::SIGN_EXTEND)) {
25540 SDValue N00 = N0.getOperand(0);
25542 // EXTLOAD has a better solution on AVX2, where
25543 // it may be replaced with an X86ISD::VSEXT node.
25544 if (N00.getOpcode() == ISD::LOAD && Subtarget->hasInt256())
25545 if (!ISD::isNormalLoad(N00.getNode()))
25548 if (N00.getValueType() == MVT::v4i32 && ExtraVT.getSizeInBits() < 128) {
25549 SDValue Tmp = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32,
25551 return DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i64, Tmp);
25557 static SDValue PerformSExtCombine(SDNode *N, SelectionDAG &DAG,
25558 TargetLowering::DAGCombinerInfo &DCI,
25559 const X86Subtarget *Subtarget) {
25560 SDValue N0 = N->getOperand(0);
25561 EVT VT = N->getValueType(0);
25563 // (i8,i32) sext (sdivrem (i8 x, i8 y)) ->
25564 // (i8,i32) (sdivrem_sext_hreg (i8 x, i8 y))
25565 // This exposes the sext to the sdivrem lowering, so that it directly extends
25566 // from AH (which we otherwise need to do contortions to access).
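// E.g. for "(int)((signed char)x % (signed char)y)" the 8-bit IDIV
// leaves the remainder in AH; this combined node lets the remainder be
// sign-extended directly out of AH instead of being shifted out of AX
// first.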
25567 if (N0.getOpcode() == ISD::SDIVREM && N0.getResNo() == 1 &&
25568 N0.getValueType() == MVT::i8 && VT == MVT::i32) {
25570 SDVTList NodeTys = DAG.getVTList(MVT::i8, VT);
25571 SDValue R = DAG.getNode(X86ISD::SDIVREM8_SEXT_HREG, dl, NodeTys,
25572 N0.getOperand(0), N0.getOperand(1));
25573 DAG.ReplaceAllUsesOfValueWith(N0.getValue(0), R.getValue(0));
25574 return R.getValue(1);
25577 if (!DCI.isBeforeLegalizeOps())
25580 if (!Subtarget->hasFp256())
25583 if (VT.isVector() && VT.getSizeInBits() == 256) {
25584 SDValue R = WidenMaskArithmetic(N, DAG, DCI, Subtarget);
25592 static SDValue PerformFMACombine(SDNode *N, SelectionDAG &DAG,
25593 const X86Subtarget* Subtarget) {
25595 EVT VT = N->getValueType(0);
25597 // Let legalize expand this if it isn't a legal type yet.
25598 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
25601 EVT ScalarVT = VT.getScalarType();
25602 if ((ScalarVT != MVT::f32 && ScalarVT != MVT::f64) ||
25603 (!Subtarget->hasFMA() && !Subtarget->hasFMA4()))
25606 SDValue A = N->getOperand(0);
25607 SDValue B = N->getOperand(1);
25608 SDValue C = N->getOperand(2);
25610 bool NegA = (A.getOpcode() == ISD::FNEG);
25611 bool NegB = (B.getOpcode() == ISD::FNEG);
25612 bool NegC = (C.getOpcode() == ISD::FNEG);
25614 // Negative multiplication when NegA xor NegB
25615 bool NegMul = (NegA != NegB);
25617 A = A.getOperand(0);
25619 B = B.getOperand(0);
25621 C = C.getOperand(0);
25625 Opcode = (!NegC) ? X86ISD::FMADD : X86ISD::FMSUB;
25627 Opcode = (!NegC) ? X86ISD::FNMADD : X86ISD::FNMSUB;
25629 return DAG.getNode(Opcode, dl, VT, A, B, C);
25632 static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG,
25633 TargetLowering::DAGCombinerInfo &DCI,
25634 const X86Subtarget *Subtarget) {
25635 // (i32 zext (and (i8 x86isd::setcc_carry), 1)) ->
25636 // (and (i32 x86isd::setcc_carry), 1)
25637 // This eliminates the zext. This transformation is necessary because
25638 // ISD::SETCC is always legalized to i8.
25640 SDValue N0 = N->getOperand(0);
25641 EVT VT = N->getValueType(0);
25643 if (N0.getOpcode() == ISD::AND &&
25645 N0.getOperand(0).hasOneUse()) {
25646 SDValue N00 = N0.getOperand(0);
25647 if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
25648 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
25649 if (!C || C->getZExtValue() != 1)
25651 return DAG.getNode(ISD::AND, dl, VT,
25652 DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
25653 N00.getOperand(0), N00.getOperand(1)),
25654 DAG.getConstant(1, VT));
25658 if (N0.getOpcode() == ISD::TRUNCATE &&
25660 N0.getOperand(0).hasOneUse()) {
25661 SDValue N00 = N0.getOperand(0);
25662 if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
25663 return DAG.getNode(ISD::AND, dl, VT,
25664 DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
25665 N00.getOperand(0), N00.getOperand(1)),
25666 DAG.getConstant(1, VT));
25669 if (VT.is256BitVector()) {
25670 SDValue R = WidenMaskArithmetic(N, DAG, DCI, Subtarget);
25675 // (i8,i32) zext (udivrem (i8 x, i8 y)) ->
25676 // (i8,i32) (udivrem_zext_hreg (i8 x, i8 y))
25677 // This exposes the zext to the udivrem lowering, so that it directly extends
25678 // from AH (which we otherwise need to do contortions to access).
25679 if (N0.getOpcode() == ISD::UDIVREM &&
25680 N0.getResNo() == 1 && N0.getValueType() == MVT::i8 &&
25681 (VT == MVT::i32 || VT == MVT::i64)) {
25682 SDVTList NodeTys = DAG.getVTList(MVT::i8, VT);
25683 SDValue R = DAG.getNode(X86ISD::UDIVREM8_ZEXT_HREG, dl, NodeTys,
25684 N0.getOperand(0), N0.getOperand(1));
25685 DAG.ReplaceAllUsesOfValueWith(N0.getValue(0), R.getValue(0));
25686 return R.getValue(1);
25692 // Optimize x == -y --> x+y == 0
25693 // x != -y --> x+y != 0
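// E.g. (seteq X, (sub 0, Y)) becomes (seteq (add X, Y), 0), which avoids
// materializing the negation and lets the ADD set EFLAGS directly.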
25694 static SDValue PerformISDSETCCCombine(SDNode *N, SelectionDAG &DAG,
25695 const X86Subtarget* Subtarget) {
25696 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
25697 SDValue LHS = N->getOperand(0);
25698 SDValue RHS = N->getOperand(1);
25699 EVT VT = N->getValueType(0);
25702 if ((CC == ISD::SETNE || CC == ISD::SETEQ) && LHS.getOpcode() == ISD::SUB)
25703 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(LHS.getOperand(0)))
25704 if (C->getAPIntValue() == 0 && LHS.hasOneUse()) {
25705 SDValue addV = DAG.getNode(ISD::ADD, SDLoc(N),
25706 LHS.getValueType(), RHS, LHS.getOperand(1));
25707 return DAG.getSetCC(SDLoc(N), N->getValueType(0),
25708 addV, DAG.getConstant(0, addV.getValueType()), CC);
25710 if ((CC == ISD::SETNE || CC == ISD::SETEQ) && RHS.getOpcode() == ISD::SUB)
25711 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS.getOperand(0)))
25712 if (C->getAPIntValue() == 0 && RHS.hasOneUse()) {
25713 SDValue addV = DAG.getNode(ISD::ADD, SDLoc(N),
25714 RHS.getValueType(), LHS, RHS.getOperand(1));
25715 return DAG.getSetCC(SDLoc(N), N->getValueType(0),
25716 addV, DAG.getConstant(0, addV.getValueType()), CC);
25719 if (VT.getScalarType() == MVT::i1) {
25720 bool IsSEXT0 = (LHS.getOpcode() == ISD::SIGN_EXTEND) &&
25721 (LHS.getOperand(0).getValueType().getScalarType() == MVT::i1);
25722 bool IsVZero0 = ISD::isBuildVectorAllZeros(LHS.getNode());
25723 if (!IsSEXT0 && !IsVZero0)
25725 bool IsSEXT1 = (RHS.getOpcode() == ISD::SIGN_EXTEND) &&
25726 (RHS.getOperand(0).getValueType().getScalarType() == MVT::i1);
25727 bool IsVZero1 = ISD::isBuildVectorAllZeros(RHS.getNode());
25729 if (!IsSEXT1 && !IsVZero1)
25732 if (IsSEXT0 && IsVZero1) {
25733 assert(VT == LHS.getOperand(0).getValueType() && "Unexpected operand type");
25734 if (CC == ISD::SETEQ)
25735 return DAG.getNOT(DL, LHS.getOperand(0), VT);
25736 return LHS.getOperand(0);
25738 if (IsSEXT1 && IsVZero0) {
25739 assert(VT == RHS.getOperand(0).getValueType() && "Unexpected operand type");
25740 if (CC == ISD::SETEQ)
25741 return DAG.getNOT(DL, RHS.getOperand(0), VT);
25742 return RHS.getOperand(0);
25749 static SDValue PerformINSERTPSCombine(SDNode *N, SelectionDAG &DAG,
25750 const X86Subtarget *Subtarget) {
25752 MVT VT = N->getOperand(1)->getSimpleValueType(0);
25753 assert((VT == MVT::v4f32 || VT == MVT::v4i32) &&
25754 "X86insertps is only defined for v4x32");
25756 SDValue Ld = N->getOperand(1);
25757 if (MayFoldLoad(Ld)) {
25758 // Extract the countS bits from the immediate so we can get the proper
25759 // address when narrowing the vector load to a specific element.
25760 // When the second source op is a memory address, insertps doesn't use
25761 // countS and just gets an f32 from that address.
25762 unsigned DestIndex =
25763 cast<ConstantSDNode>(N->getOperand(2))->getZExtValue() >> 6;
25764 Ld = NarrowVectorLoadToElement(cast<LoadSDNode>(Ld), DestIndex, DAG);
25768 // Create this as a scalar to vector to match the instruction pattern.
25769 SDValue LoadScalarToVector = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Ld);
25770 // countS bits are ignored when loading from memory on insertps, which
25771 // means we don't need to explicitly set them to 0.
25772 return DAG.getNode(X86ISD::INSERTPS, dl, VT, N->getOperand(0),
25773 LoadScalarToVector, N->getOperand(2));
25776 // Helper function of PerformSETCCCombine. It materializes "setb reg"
25777 // as "sbb reg,reg", since it can be extended without zext and produces
25778 // an all-ones bit which is more useful than 0/1 in some cases.
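// Roughly, instead of "setb %al; movzbl %al, %eax" this produces
// "sbb %eax, %eax; and $1, %eax"; the intermediate all-ones value can
// often be reused directly as a mask.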
25779 static SDValue MaterializeSETB(SDLoc DL, SDValue EFLAGS, SelectionDAG &DAG,
25782 return DAG.getNode(ISD::AND, DL, VT,
25783 DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8,
25784 DAG.getConstant(X86::COND_B, MVT::i8), EFLAGS),
25785 DAG.getConstant(1, VT));
25786 assert(VT == MVT::i1 && "Unexpected type for SETCC node");
25787 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1,
25788 DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8,
25789 DAG.getConstant(X86::COND_B, MVT::i8), EFLAGS));
25792 // Optimize RES = X86ISD::SETCC CONDCODE, EFLAG_INPUT
25793 static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG &DAG,
25794 TargetLowering::DAGCombinerInfo &DCI,
25795 const X86Subtarget *Subtarget) {
25797 X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(0));
25798 SDValue EFLAGS = N->getOperand(1);
25800 if (CC == X86::COND_A) {
25801 // Try to convert COND_A into COND_B in an attempt to facilitate
25802 // materializing "setb reg".
25804 // Do not flip "e > c", where "c" is a constant, because the Cmp instruction
25805 // cannot take an immediate as its first operand.
25807 if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.hasOneUse() &&
25808 EFLAGS.getValueType().isInteger() &&
25809 !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
25810 SDValue NewSub = DAG.getNode(X86ISD::SUB, SDLoc(EFLAGS),
25811 EFLAGS.getNode()->getVTList(),
25812 EFLAGS.getOperand(1), EFLAGS.getOperand(0));
25813 SDValue NewEFLAGS = SDValue(NewSub.getNode(), EFLAGS.getResNo());
25814 return MaterializeSETB(DL, NewEFLAGS, DAG, N->getSimpleValueType(0));
25818 // Materialize "setb reg" as "sbb reg,reg", since it can be extended without
25819 // a zext and produces an all-ones bit which is more useful than 0/1 in some cases.
25821 if (CC == X86::COND_B)
25822 return MaterializeSETB(DL, EFLAGS, DAG, N->getSimpleValueType(0));
25826 Flags = checkBoolTestSetCCCombine(EFLAGS, CC);
25827 if (Flags.getNode()) {
25828 SDValue Cond = DAG.getConstant(CC, MVT::i8);
25829 return DAG.getNode(X86ISD::SETCC, DL, N->getVTList(), Cond, Flags);
25835 // Optimize branch condition evaluation.
25837 static SDValue PerformBrCondCombine(SDNode *N, SelectionDAG &DAG,
25838 TargetLowering::DAGCombinerInfo &DCI,
25839 const X86Subtarget *Subtarget) {
25841 SDValue Chain = N->getOperand(0);
25842 SDValue Dest = N->getOperand(1);
25843 SDValue EFLAGS = N->getOperand(3);
25844 X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(2));
25848 Flags = checkBoolTestSetCCCombine(EFLAGS, CC);
25849 if (Flags.getNode()) {
25850 SDValue Cond = DAG.getConstant(CC, MVT::i8);
25851 return DAG.getNode(X86ISD::BRCOND, DL, N->getVTList(), Chain, Dest, Cond,
25858 static SDValue performVectorCompareAndMaskUnaryOpCombine(SDNode *N,
25859 SelectionDAG &DAG) {
25860 // Take advantage of vector comparisons producing 0 or -1 in each lane to
25861 // optimize away the operation when its input comes from a constant.
25863 // The general transformation is:
25864 // UNARYOP(AND(VECTOR_CMP(x,y), constant)) -->
25865 // AND(VECTOR_CMP(x,y), constant2)
25866 // constant2 = UNARYOP(constant)
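// E.g. (sint_to_fp (and (setcc ...), <i32 1,1,1,1>)) becomes an AND of
// the compare mask with bitcast(<float 1.0,1.0,1.0,1.0>): each lane of
// the compare is 0 or -1, so the AND yields either the constant or zero,
// and the unary op can be applied to the constant ahead of time.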
25868 // Early exit if this isn't a vector operation, the operand of the
25869 // unary operation isn't a bitwise AND, or if the sizes of the operations
25870 // aren't the same.
25871 EVT VT = N->getValueType(0);
25872 if (!VT.isVector() || N->getOperand(0)->getOpcode() != ISD::AND ||
25873 N->getOperand(0)->getOperand(0)->getOpcode() != ISD::SETCC ||
25874 VT.getSizeInBits() != N->getOperand(0)->getValueType(0).getSizeInBits())
25877 // Now check that the other operand of the AND is a constant. We could
25878 // make the transformation for non-constant splats as well, but it's unclear
25879 // that would be a benefit as it would not eliminate any operations, just
25880 // perform one more step in scalar code before moving to the vector unit.
25881 if (BuildVectorSDNode *BV =
25882 dyn_cast<BuildVectorSDNode>(N->getOperand(0)->getOperand(1))) {
25883 // Bail out if the vector isn't a constant.
25884 if (!BV->isConstant())
25887 // Everything checks out. Build up the new and improved node.
25889 EVT IntVT = BV->getValueType(0);
25890 // Create a new constant of the appropriate type for the transformed node.
25892 SDValue SourceConst = DAG.getNode(N->getOpcode(), DL, VT, SDValue(BV, 0));
25893 // The AND node needs bitcasts to/from an integer vector type around it.
25894 SDValue MaskConst = DAG.getNode(ISD::BITCAST, DL, IntVT, SourceConst);
25895 SDValue NewAnd = DAG.getNode(ISD::AND, DL, IntVT,
25896 N->getOperand(0)->getOperand(0), MaskConst);
25897 SDValue Res = DAG.getNode(ISD::BITCAST, DL, VT, NewAnd);
25904 static SDValue PerformSINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG,
25905 const X86Subtarget *Subtarget) {
25906 // First try to optimize away the conversion entirely when it's
25907 // conditionally from a constant. Vectors only.
25908 SDValue Res = performVectorCompareAndMaskUnaryOpCombine(N, DAG);
25909 if (Res != SDValue())
25912 // Now move on to more general possibilities.
25913 SDValue Op0 = N->getOperand(0);
25914 EVT InVT = Op0->getValueType(0);
25916 // SINT_TO_FP(v4i8) -> SINT_TO_FP(SEXT(v4i8 to v4i32))
25917 if (InVT == MVT::v8i8 || InVT == MVT::v4i8) {
25919 MVT DstVT = InVT == MVT::v4i8 ? MVT::v4i32 : MVT::v8i32;
25920 SDValue P = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Op0);
25921 return DAG.getNode(ISD::SINT_TO_FP, dl, N->getValueType(0), P);
25924 // Transform (SINT_TO_FP (i64 ...)) into an x87 operation if we have
25925 // a 32-bit target where SSE doesn't support i64->FP operations.
25926 if (Op0.getOpcode() == ISD::LOAD) {
25927 LoadSDNode *Ld = cast<LoadSDNode>(Op0.getNode());
25928 EVT VT = Ld->getValueType(0);
25929 if (!Ld->isVolatile() && !N->getValueType(0).isVector() &&
25930 ISD::isNON_EXTLoad(Op0.getNode()) && Op0.hasOneUse() &&
25931 !Subtarget->is64Bit() && VT == MVT::i64) {
25932 SDValue FILDChain = Subtarget->getTargetLowering()->BuildFILD(
25933 SDValue(N, 0), Ld->getValueType(0), Ld->getChain(), Op0, DAG);
25934 DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), FILDChain.getValue(1));
25941 // Optimize RES, EFLAGS = X86ISD::ADC LHS, RHS, EFLAGS
25942 static SDValue PerformADCCombine(SDNode *N, SelectionDAG &DAG,
25943 X86TargetLowering::DAGCombinerInfo &DCI) {
25944 // If the LHS and RHS of the ADC node are zero, then it can't overflow and
25945 // the result is either zero or one (depending on the input carry bit).
25946 // Strength reduce this down to a "set on carry" aka SETCC_CARRY&1.
25947 if (X86::isZeroNode(N->getOperand(0)) &&
25948 X86::isZeroNode(N->getOperand(1)) &&
25949 // We don't have a good way to replace an EFLAGS use, so only do this when
25951 SDValue(N, 1).use_empty()) {
25953 EVT VT = N->getValueType(0);
25954 SDValue CarryOut = DAG.getConstant(0, N->getValueType(1));
25955 SDValue Res1 = DAG.getNode(ISD::AND, DL, VT,
25956 DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
25957 DAG.getConstant(X86::COND_B,MVT::i8),
25959 DAG.getConstant(1, VT));
25960 return DCI.CombineTo(N, Res1, CarryOut);
25966 // fold (add Y, (sete X, 0)) -> adc 0, Y
25967 // (add Y, (setne X, 0)) -> sbb -1, Y
25968 // (sub (sete X, 0), Y) -> sbb 0, Y
25969 // (sub (setne X, 0), Y) -> adc -1, Y
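// E.g. "Y + (X == 0)" is lowered as "cmp $1, X; adc $0, Y": the compare
// sets the carry flag exactly when X is zero, so the ADC folds the 0/1
// result in without a separate SETCC and zero extension.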
25970 static SDValue OptimizeConditionalInDecrement(SDNode *N, SelectionDAG &DAG) {
25973 // Look through ZExts.
25974 SDValue Ext = N->getOperand(N->getOpcode() == ISD::SUB ? 1 : 0);
25975 if (Ext.getOpcode() != ISD::ZERO_EXTEND || !Ext.hasOneUse())
25978 SDValue SetCC = Ext.getOperand(0);
25979 if (SetCC.getOpcode() != X86ISD::SETCC || !SetCC.hasOneUse())
25982 X86::CondCode CC = (X86::CondCode)SetCC.getConstantOperandVal(0);
25983 if (CC != X86::COND_E && CC != X86::COND_NE)
25986 SDValue Cmp = SetCC.getOperand(1);
25987 if (Cmp.getOpcode() != X86ISD::CMP || !Cmp.hasOneUse() ||
25988 !X86::isZeroNode(Cmp.getOperand(1)) ||
25989 !Cmp.getOperand(0).getValueType().isInteger())
25992 SDValue CmpOp0 = Cmp.getOperand(0);
25993 SDValue NewCmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32, CmpOp0,
25994 DAG.getConstant(1, CmpOp0.getValueType()));
25996 SDValue OtherVal = N->getOperand(N->getOpcode() == ISD::SUB ? 0 : 1);
25997 if (CC == X86::COND_NE)
25998 return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::ADC : X86ISD::SBB,
25999 DL, OtherVal.getValueType(), OtherVal,
26000 DAG.getConstant(-1ULL, OtherVal.getValueType()), NewCmp);
26001 return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::SBB : X86ISD::ADC,
26002 DL, OtherVal.getValueType(), OtherVal,
26003 DAG.getConstant(0, OtherVal.getValueType()), NewCmp);
26006 /// PerformADDCombine - Do target-specific dag combines on integer adds.
26007 static SDValue PerformAddCombine(SDNode *N, SelectionDAG &DAG,
26008 const X86Subtarget *Subtarget) {
26009 EVT VT = N->getValueType(0);
26010 SDValue Op0 = N->getOperand(0);
26011 SDValue Op1 = N->getOperand(1);
26013 // Try to synthesize horizontal adds from adds of shuffles.
26014 if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) ||
26015 (Subtarget->hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) &&
26016 isHorizontalBinOp(Op0, Op1, true))
26017 return DAG.getNode(X86ISD::HADD, SDLoc(N), VT, Op0, Op1);
26019 return OptimizeConditionalInDecrement(N, DAG);
26022 static SDValue PerformSubCombine(SDNode *N, SelectionDAG &DAG,
26023 const X86Subtarget *Subtarget) {
26024 SDValue Op0 = N->getOperand(0);
26025 SDValue Op1 = N->getOperand(1);
26027 // X86 can't encode an immediate LHS of a sub. See if we can push the
26028 // negation into a preceding instruction.
26029 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op0)) {
26030 // If the RHS of the sub is a XOR with one use and a constant, invert the
26031 // immediate. Then add one to the LHS of the sub so we can turn
26032 // X-Y -> X+~Y+1, saving one register.
26033 if (Op1->hasOneUse() && Op1.getOpcode() == ISD::XOR &&
26034 isa<ConstantSDNode>(Op1.getOperand(1))) {
26035 APInt XorC = cast<ConstantSDNode>(Op1.getOperand(1))->getAPIntValue();
26036 EVT VT = Op0.getValueType();
26037 SDValue NewXor = DAG.getNode(ISD::XOR, SDLoc(Op1), VT,
26039 DAG.getConstant(~XorC, VT));
26040 return DAG.getNode(ISD::ADD, SDLoc(N), VT, NewXor,
26041 DAG.getConstant(C->getAPIntValue()+1, VT));
26045 // Try to synthesize horizontal subs from subs of shuffles.
26046 EVT VT = N->getValueType(0);
26047 if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) ||
26048 (Subtarget->hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) &&
26049 isHorizontalBinOp(Op0, Op1, true))
26050 return DAG.getNode(X86ISD::HSUB, SDLoc(N), VT, Op0, Op1);
26052 return OptimizeConditionalInDecrement(N, DAG);
26055 /// performVZEXTCombine - Do target-specific dag combines on X86ISD::VZEXT nodes.
26056 static SDValue performVZEXTCombine(SDNode *N, SelectionDAG &DAG,
26057 TargetLowering::DAGCombinerInfo &DCI,
26058 const X86Subtarget *Subtarget) {
26060 MVT VT = N->getSimpleValueType(0);
26061 SDValue Op = N->getOperand(0);
26062 MVT OpVT = Op.getSimpleValueType();
26063 MVT OpEltVT = OpVT.getVectorElementType();
26064 unsigned InputBits = OpEltVT.getSizeInBits() * VT.getVectorNumElements();
26066 // (vzext (bitcast (vzext (x)) -> (vzext x)
26068 while (V.getOpcode() == ISD::BITCAST)
26069 V = V.getOperand(0);
26071 if (V != Op && V.getOpcode() == X86ISD::VZEXT) {
26072 MVT InnerVT = V.getSimpleValueType();
26073 MVT InnerEltVT = InnerVT.getVectorElementType();
26075 // If the element sizes match exactly, we can just do one larger vzext. This
26076 // is always an exact type match as vzext operates on integer types.
26077 if (OpEltVT == InnerEltVT) {
26078 assert(OpVT == InnerVT && "Types must match for vzext!");
26079 return DAG.getNode(X86ISD::VZEXT, DL, VT, V.getOperand(0));
26082 // The only other way we can combine them is if only a single element of the
26083 // inner vzext is used in the input to the outer vzext.
26084 if (InnerEltVT.getSizeInBits() < InputBits)
26087 // In this case, the inner vzext is completely dead because we're going to
26088 // only look at bits inside of the low element. Just do the outer vzext on
26089 // a bitcast of the input to the inner.
26090 return DAG.getNode(X86ISD::VZEXT, DL, VT,
26091 DAG.getNode(ISD::BITCAST, DL, OpVT, V));
26094 // Check if we can bypass extracting and re-inserting an element of an input
26095 // vector. Essentially:
26096 // (bitcast (sclr2vec (ext_vec_elt x))) -> (bitcast x)
26097 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR &&
26098 V.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
26099 V.getOperand(0).getSimpleValueType().getSizeInBits() == InputBits) {
26100 SDValue ExtractedV = V.getOperand(0);
26101 SDValue OrigV = ExtractedV.getOperand(0);
26102 if (auto *ExtractIdx = dyn_cast<ConstantSDNode>(ExtractedV.getOperand(1)))
26103 if (ExtractIdx->getZExtValue() == 0) {
26104 MVT OrigVT = OrigV.getSimpleValueType();
26105 // Extract a subvector if necessary...
26106 if (OrigVT.getSizeInBits() > OpVT.getSizeInBits()) {
26107 int Ratio = OrigVT.getSizeInBits() / OpVT.getSizeInBits();
26108 OrigVT = MVT::getVectorVT(OrigVT.getVectorElementType(),
26109 OrigVT.getVectorNumElements() / Ratio);
26110 OrigV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigVT, OrigV,
26111 DAG.getIntPtrConstant(0));
26113 Op = DAG.getNode(ISD::BITCAST, DL, OpVT, OrigV);
26114 return DAG.getNode(X86ISD::VZEXT, DL, VT, Op);
26121 SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
26122 DAGCombinerInfo &DCI) const {
26123 SelectionDAG &DAG = DCI.DAG;
26124 switch (N->getOpcode()) {
26126 case ISD::EXTRACT_VECTOR_ELT:
26127 return PerformEXTRACT_VECTOR_ELTCombine(N, DAG, DCI);
26130 case X86ISD::SHRUNKBLEND:
26131 return PerformSELECTCombine(N, DAG, DCI, Subtarget);
26132 case X86ISD::CMOV: return PerformCMOVCombine(N, DAG, DCI, Subtarget);
26133 case ISD::ADD: return PerformAddCombine(N, DAG, Subtarget);
26134 case ISD::SUB: return PerformSubCombine(N, DAG, Subtarget);
26135 case X86ISD::ADC: return PerformADCCombine(N, DAG, DCI);
26136 case ISD::MUL: return PerformMulCombine(N, DAG, DCI);
26139 case ISD::SRL: return PerformShiftCombine(N, DAG, DCI, Subtarget);
26140 case ISD::AND: return PerformAndCombine(N, DAG, DCI, Subtarget);
26141 case ISD::OR: return PerformOrCombine(N, DAG, DCI, Subtarget);
26142 case ISD::XOR: return PerformXorCombine(N, DAG, DCI, Subtarget);
26143 case ISD::LOAD: return PerformLOADCombine(N, DAG, DCI, Subtarget);
26144 case ISD::MLOAD: return PerformMLOADCombine(N, DAG, DCI, Subtarget);
26145 case ISD::STORE: return PerformSTORECombine(N, DAG, Subtarget);
26146 case ISD::MSTORE: return PerformMSTORECombine(N, DAG, Subtarget);
26147 case ISD::SINT_TO_FP: return PerformSINT_TO_FPCombine(N, DAG, Subtarget);
26148 case ISD::FADD: return PerformFADDCombine(N, DAG, Subtarget);
26149 case ISD::FSUB: return PerformFSUBCombine(N, DAG, Subtarget);
26151 case X86ISD::FOR: return PerformFORCombine(N, DAG);
26153 case X86ISD::FMAX: return PerformFMinFMaxCombine(N, DAG);
26154 case X86ISD::FAND: return PerformFANDCombine(N, DAG);
26155 case X86ISD::FANDN: return PerformFANDNCombine(N, DAG);
26156 case X86ISD::BT: return PerformBTCombine(N, DAG, DCI);
26157 case X86ISD::VZEXT_MOVL: return PerformVZEXT_MOVLCombine(N, DAG);
26158 case ISD::ANY_EXTEND:
26159 case ISD::ZERO_EXTEND: return PerformZExtCombine(N, DAG, DCI, Subtarget);
26160 case ISD::SIGN_EXTEND: return PerformSExtCombine(N, DAG, DCI, Subtarget);
26161 case ISD::SIGN_EXTEND_INREG:
26162 return PerformSIGN_EXTEND_INREGCombine(N, DAG, Subtarget);
26163 case ISD::TRUNCATE: return PerformTruncateCombine(N, DAG,DCI,Subtarget);
26164 case ISD::SETCC: return PerformISDSETCCCombine(N, DAG, Subtarget);
26165 case X86ISD::SETCC: return PerformSETCCCombine(N, DAG, DCI, Subtarget);
26166 case X86ISD::BRCOND: return PerformBrCondCombine(N, DAG, DCI, Subtarget);
26167 case X86ISD::VZEXT: return performVZEXTCombine(N, DAG, DCI, Subtarget);
26168 case X86ISD::SHUFP: // Handle all target specific shuffles
26169 case X86ISD::PALIGNR:
26170 case X86ISD::UNPCKH:
26171 case X86ISD::UNPCKL:
26172 case X86ISD::MOVHLPS:
26173 case X86ISD::MOVLHPS:
26174 case X86ISD::PSHUFB:
26175 case X86ISD::PSHUFD:
26176 case X86ISD::PSHUFHW:
26177 case X86ISD::PSHUFLW:
26178 case X86ISD::MOVSS:
26179 case X86ISD::MOVSD:
26180 case X86ISD::VPERMILPI:
26181 case X86ISD::VPERM2X128:
26182 case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, DCI,Subtarget);
26183 case ISD::FMA: return PerformFMACombine(N, DAG, Subtarget);
26184 case ISD::INTRINSIC_WO_CHAIN:
26185 return PerformINTRINSIC_WO_CHAINCombine(N, DAG, Subtarget);
26186 case X86ISD::INSERTPS: {
26187 if (getTargetMachine().getOptLevel() > CodeGenOpt::None)
26188 return PerformINSERTPSCombine(N, DAG, Subtarget);
26191 case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DAG, Subtarget);
26197 /// isTypeDesirableForOp - Return true if the target has native support for
26198 /// the specified value type and it is 'desirable' to use the type for the
26199 /// given node type. e.g. On x86 i16 is legal, but undesirable since i16
26200 /// instruction encodings are longer and some i16 instructions are slow.
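/// E.g. 16-bit arithmetic such as "addw %bx, %ax" needs the 0x66
/// operand-size prefix, and writing only the low 16 bits of a register
/// can cause partial-register stalls, so promoting such operations to
/// i32 is usually preferable.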
26201 bool X86TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const {
26202 if (!isTypeLegal(VT))
26204 if (VT != MVT::i16)
26211 case ISD::SIGN_EXTEND:
26212 case ISD::ZERO_EXTEND:
26213 case ISD::ANY_EXTEND:
26226 /// IsDesirableToPromoteOp - This method queries the target whether it is
26227 /// beneficial for dag combiner to promote the specified node. If true, it
26228 /// should return the desired promotion type by reference.
26229 bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
26230 EVT VT = Op.getValueType();
26231 if (VT != MVT::i16)
26234 bool Promote = false;
26235 bool Commute = false;
26236 switch (Op.getOpcode()) {
26239 LoadSDNode *LD = cast<LoadSDNode>(Op);
26240 // If the non-extending load has a single use and it's not live out, then it
26241 // might be folded.
26242 if (LD->getExtensionType() == ISD::NON_EXTLOAD /*&&
26243 Op.hasOneUse()*/) {
26244 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
26245 UE = Op.getNode()->use_end(); UI != UE; ++UI) {
26246 // The only case where we'd want to promote LOAD (rather than it being
26247 // promoted as an operand) is when its only use is liveout.
26248 if (UI->getOpcode() != ISD::CopyToReg)
26255 case ISD::SIGN_EXTEND:
26256 case ISD::ZERO_EXTEND:
26257 case ISD::ANY_EXTEND:
26262 SDValue N0 = Op.getOperand(0);
26263 // Look out for (store (shl (load), x)).
26264 if (MayFoldLoad(N0) && MayFoldIntoStore(Op))
26277 SDValue N0 = Op.getOperand(0);
26278 SDValue N1 = Op.getOperand(1);
26279 if (!Commute && MayFoldLoad(N1))
26281 // Avoid disabling potential load folding opportunities.
26282 if (MayFoldLoad(N0) && (!isa<ConstantSDNode>(N1) || MayFoldIntoStore(Op)))
26284 if (MayFoldLoad(N1) && (!isa<ConstantSDNode>(N0) || MayFoldIntoStore(Op)))
26294 //===----------------------------------------------------------------------===//
26295 // X86 Inline Assembly Support
26296 //===----------------------------------------------------------------------===//
26299 // Helper to match a string separated by whitespace.
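// E.g. matchAsm(Piece, "bswap", "$0") is expected to accept " bswap  $0"
// (arbitrary whitespace between pieces) and to reject "bswapl $0".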
26300 bool matchAsmImpl(StringRef s, ArrayRef<const StringRef *> args) {
26301 s = s.substr(s.find_first_not_of(" \t")); // Skip leading whitespace.
26303 for (unsigned i = 0, e = args.size(); i != e; ++i) {
26304 StringRef piece(*args[i]);
26305 if (!s.startswith(piece)) // Check if the piece matches.
26308 s = s.substr(piece.size());
26309 StringRef::size_type pos = s.find_first_not_of(" \t");
26310 if (pos == 0) // We matched a prefix.
26318 const VariadicFunction1<bool, StringRef, StringRef, matchAsmImpl> matchAsm={};
26321 static bool clobbersFlagRegisters(const SmallVector<StringRef, 4> &AsmPieces) {
26323 if (AsmPieces.size() == 3 || AsmPieces.size() == 4) {
26324 if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{cc}") &&
26325 std::count(AsmPieces.begin(), AsmPieces.end(), "~{flags}") &&
26326 std::count(AsmPieces.begin(), AsmPieces.end(), "~{fpsr}")) {
26328 if (AsmPieces.size() == 3)
26330 else if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{dirflag}"))
26337 bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
26338 InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue());
26340 std::string AsmStr = IA->getAsmString();
26342 IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
26343 if (!Ty || Ty->getBitWidth() % 16 != 0)
26346 // TODO: should remove alternatives from the asmstring: "foo {a|b}" -> "foo a"
26347 SmallVector<StringRef, 4> AsmPieces;
26348 SplitString(AsmStr, AsmPieces, ";\n");
26350 switch (AsmPieces.size()) {
26351 default: return false;
26353 // FIXME: this should verify that we are targeting a 486 or better. If not,
26354 // we will turn this bswap into something that will be lowered to logical
26355 // ops instead of emitting the bswap asm. For now, we don't support 486 or
26356 // lower so don't worry about this.
26358 if (matchAsm(AsmPieces[0], "bswap", "$0") ||
26359 matchAsm(AsmPieces[0], "bswapl", "$0") ||
26360 matchAsm(AsmPieces[0], "bswapq", "$0") ||
26361 matchAsm(AsmPieces[0], "bswap", "${0:q}") ||
26362 matchAsm(AsmPieces[0], "bswapl", "${0:q}") ||
26363 matchAsm(AsmPieces[0], "bswapq", "${0:q}")) {
26364 // No need to check constraints, nothing other than the equivalent of
26365 // "=r,0" would be valid here.
26366 return IntrinsicLowering::LowerToByteSwap(CI);
26369 // rorw $$8, ${0:w} --> llvm.bswap.i16
26370 if (CI->getType()->isIntegerTy(16) &&
26371 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
26372 (matchAsm(AsmPieces[0], "rorw", "$$8,", "${0:w}") ||
26373 matchAsm(AsmPieces[0], "rolw", "$$8,", "${0:w}"))) {
26375 const std::string &ConstraintsStr = IA->getConstraintString();
26376 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
26377 array_pod_sort(AsmPieces.begin(), AsmPieces.end());
26378 if (clobbersFlagRegisters(AsmPieces))
26379 return IntrinsicLowering::LowerToByteSwap(CI);
26383 if (CI->getType()->isIntegerTy(32) &&
26384 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
26385 matchAsm(AsmPieces[0], "rorw", "$$8,", "${0:w}") &&
26386 matchAsm(AsmPieces[1], "rorl", "$$16,", "$0") &&
26387 matchAsm(AsmPieces[2], "rorw", "$$8,", "${0:w}")) {
26389 const std::string &ConstraintsStr = IA->getConstraintString();
26390 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
26391 array_pod_sort(AsmPieces.begin(), AsmPieces.end());
26392 if (clobbersFlagRegisters(AsmPieces))
26393 return IntrinsicLowering::LowerToByteSwap(CI);
26396 if (CI->getType()->isIntegerTy(64)) {
26397 InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints();
26398 if (Constraints.size() >= 2 &&
26399 Constraints[0].Codes.size() == 1 && Constraints[0].Codes[0] == "A" &&
26400 Constraints[1].Codes.size() == 1 && Constraints[1].Codes[0] == "0") {
26401 // bswap %eax / bswap %edx / xchgl %eax, %edx -> llvm.bswap.i64
26402 if (matchAsm(AsmPieces[0], "bswap", "%eax") &&
26403 matchAsm(AsmPieces[1], "bswap", "%edx") &&
26404 matchAsm(AsmPieces[2], "xchgl", "%eax,", "%edx"))
26405 return IntrinsicLowering::LowerToByteSwap(CI);
26413 /// getConstraintType - Given a constraint letter, return the type of
26414 /// constraint it is for this target.
26415 X86TargetLowering::ConstraintType
26416 X86TargetLowering::getConstraintType(const std::string &Constraint) const {
26417 if (Constraint.size() == 1) {
26418 switch (Constraint[0]) {
26429 return C_RegisterClass;
26453 return TargetLowering::getConstraintType(Constraint);
26456 /// Examine constraint type and operand type and determine a weight value.
26457 /// This object must already have been set up with the operand type
26458 /// and the current alternative constraint selected.
26459 TargetLowering::ConstraintWeight
26460 X86TargetLowering::getSingleConstraintMatchWeight(
26461 AsmOperandInfo &info, const char *constraint) const {
26462 ConstraintWeight weight = CW_Invalid;
26463 Value *CallOperandVal = info.CallOperandVal;
26464 // If we don't have a value, we can't do a match,
26465 // but allow it at the lowest weight.
26466 if (!CallOperandVal)
26468 Type *type = CallOperandVal->getType();
26469 // Look at the constraint type.
26470 switch (*constraint) {
26472 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
26483 if (CallOperandVal->getType()->isIntegerTy())
26484 weight = CW_SpecificReg;
26489 if (type->isFloatingPointTy())
26490 weight = CW_SpecificReg;
26493 if (type->isX86_MMXTy() && Subtarget->hasMMX())
26494 weight = CW_SpecificReg;
26498 if (((type->getPrimitiveSizeInBits() == 128) && Subtarget->hasSSE1()) ||
26499 ((type->getPrimitiveSizeInBits() == 256) && Subtarget->hasFp256()))
26500 weight = CW_Register;
26503 if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
26504 if (C->getZExtValue() <= 31)
26505 weight = CW_Constant;
26509 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26510 if (C->getZExtValue() <= 63)
26511 weight = CW_Constant;
26515 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26516 if ((C->getSExtValue() >= -0x80) && (C->getSExtValue() <= 0x7f))
26517 weight = CW_Constant;
26521 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26522 if ((C->getZExtValue() == 0xff) || (C->getZExtValue() == 0xffff))
26523 weight = CW_Constant;
26527 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26528 if (C->getZExtValue() <= 3)
26529 weight = CW_Constant;
26533 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26534 if (C->getZExtValue() <= 0xff)
26535 weight = CW_Constant;
26540 if (dyn_cast<ConstantFP>(CallOperandVal)) {
26541 weight = CW_Constant;
26545 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26546 if ((C->getSExtValue() >= -0x80000000LL) &&
26547 (C->getSExtValue() <= 0x7fffffffLL))
26548 weight = CW_Constant;
26552 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26553 if (C->getZExtValue() <= 0xffffffff)
26554 weight = CW_Constant;
26561 /// LowerXConstraint - try to replace an X constraint, which matches anything,
26562 /// with another that has more specific requirements based on the type of the
26563 /// corresponding operand.
26564 const char *X86TargetLowering::
26565 LowerXConstraint(EVT ConstraintVT) const {
26566 // FP X constraints get lowered to SSE1/2 registers if available, otherwise
26567 // 'f' like normal targets.
26568 if (ConstraintVT.isFloatingPoint()) {
26569 if (Subtarget->hasSSE2())
26571 if (Subtarget->hasSSE1())
26575 return TargetLowering::LowerXConstraint(ConstraintVT);
26578 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
26579 /// vector. If it is invalid, don't add anything to Ops.
26580 void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
26581 std::string &Constraint,
26582 std::vector<SDValue>&Ops,
26583 SelectionDAG &DAG) const {
26586 // Only support length 1 constraints for now.
26587 if (Constraint.length() > 1) return;
26589 char ConstraintLetter = Constraint[0];
26590 switch (ConstraintLetter) {
26593 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26594 if (C->getZExtValue() <= 31) {
26595 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
26601 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26602 if (C->getZExtValue() <= 63) {
26603 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
26609 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26610 if (isInt<8>(C->getSExtValue())) {
26611 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
26617 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26618 if (C->getZExtValue() == 0xff || C->getZExtValue() == 0xffff ||
26619 (Subtarget->is64Bit() && C->getZExtValue() == 0xffffffff)) {
26620 Result = DAG.getTargetConstant(C->getSExtValue(), Op.getValueType());
26626 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26627 if (C->getZExtValue() <= 3) {
26628 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
26634 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26635 if (C->getZExtValue() <= 255) {
26636 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
26642 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26643 if (C->getZExtValue() <= 127) {
26644 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
26650 // 32-bit signed value
26651 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26652 if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
26653 C->getSExtValue())) {
26654 // Widen to 64 bits here to get it sign extended.
26655 Result = DAG.getTargetConstant(C->getSExtValue(), MVT::i64);
26658 // FIXME gcc accepts some relocatable values here too, but only in certain
26659 // memory models; it's complicated.
26664 // 32-bit unsigned value
26665 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26666 if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
26667 C->getZExtValue())) {
26668 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
26672 // FIXME gcc accepts some relocatable values here too, but only in certain
26673 // memory models; it's complicated.
26677 // Literal immediates are always ok.
26678 if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) {
26679 // Widen to 64 bits here to get it sign extended.
26680 Result = DAG.getTargetConstant(CST->getSExtValue(), MVT::i64);
26684 // In any sort of PIC mode addresses need to be computed at runtime by
26685 // adding in a register or some sort of table lookup. These can't
26686 // be used as immediates.
26687 if (Subtarget->isPICStyleGOT() || Subtarget->isPICStyleStubPIC())
26690 // If we are in non-pic codegen mode, we allow the address of a global (with
26691 // an optional displacement) to be used with 'i'.
26692 GlobalAddressSDNode *GA = nullptr;
26693 int64_t Offset = 0;
26695 // Match either (GA), (GA+C), (GA+C1+C2), etc.
26697 if ((GA = dyn_cast<GlobalAddressSDNode>(Op))) {
26698 Offset += GA->getOffset();
26700 } else if (Op.getOpcode() == ISD::ADD) {
26701 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
26702 Offset += C->getZExtValue();
26703 Op = Op.getOperand(0);
26706 } else if (Op.getOpcode() == ISD::SUB) {
26707 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
26708 Offset += -C->getZExtValue();
26709 Op = Op.getOperand(0);
26714 // Otherwise, this isn't something we can handle, reject it.
26718 const GlobalValue *GV = GA->getGlobal();
26719 // If we require an extra load to get this address, as in PIC mode, we
26720 // can't accept it.
26721 if (isGlobalStubReference(
26722 Subtarget->ClassifyGlobalReference(GV, DAG.getTarget())))
26725 Result = DAG.getTargetGlobalAddress(GV, SDLoc(Op),
26726 GA->getValueType(0), Offset);
26731 if (Result.getNode()) {
26732 Ops.push_back(Result);
26735 return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
26738 std::pair<unsigned, const TargetRegisterClass*>
26739 X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
26741 // First, see if this is a constraint that directly corresponds to an LLVM register class.
26743 if (Constraint.size() == 1) {
26744 // GCC Constraint Letters
26745 switch (Constraint[0]) {
    // TODO: Slight differences here in allocation order and leaving
    // RIP in the class. Do they matter any more here than they do
    // in the normal allocation?
    case 'q':   // GENERAL_REGS in 64-bit mode, Q_REGS in 32-bit mode.
      if (Subtarget->is64Bit()) {
        if (VT == MVT::i32 || VT == MVT::f32)
          return std::make_pair(0U, &X86::GR32RegClass);
        if (VT == MVT::i16)
          return std::make_pair(0U, &X86::GR16RegClass);
        if (VT == MVT::i8 || VT == MVT::i1)
          return std::make_pair(0U, &X86::GR8RegClass);
        if (VT == MVT::i64 || VT == MVT::f64)
          return std::make_pair(0U, &X86::GR64RegClass);
        break;
      }
      // 32-bit fallthrough
    case 'Q':   // Q_REGS
      if (VT == MVT::i32 || VT == MVT::f32)
        return std::make_pair(0U, &X86::GR32_ABCDRegClass);
      if (VT == MVT::i16)
        return std::make_pair(0U, &X86::GR16_ABCDRegClass);
      if (VT == MVT::i8 || VT == MVT::i1)
        return std::make_pair(0U, &X86::GR8_ABCD_LRegClass);
      if (VT == MVT::i64)
        return std::make_pair(0U, &X86::GR64_ABCDRegClass);
      break;
    case 'r':   // GENERAL_REGS
    case 'l':   // INDEX_REGS
      if (VT == MVT::i8 || VT == MVT::i1)
        return std::make_pair(0U, &X86::GR8RegClass);
      if (VT == MVT::i16)
        return std::make_pair(0U, &X86::GR16RegClass);
      if (VT == MVT::i32 || VT == MVT::f32 || !Subtarget->is64Bit())
        return std::make_pair(0U, &X86::GR32RegClass);
      return std::make_pair(0U, &X86::GR64RegClass);
    case 'R':   // LEGACY_REGS
      if (VT == MVT::i8 || VT == MVT::i1)
        return std::make_pair(0U, &X86::GR8_NOREXRegClass);
      if (VT == MVT::i16)
        return std::make_pair(0U, &X86::GR16_NOREXRegClass);
      if (VT == MVT::i32 || !Subtarget->is64Bit())
        return std::make_pair(0U, &X86::GR32_NOREXRegClass);
      return std::make_pair(0U, &X86::GR64_NOREXRegClass);
    case 'f':   // FP Stack registers.
      // If SSE is enabled for this VT, use f80 to ensure the isel moves the
      // value to the correct fpstack register class.
      if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT))
        return std::make_pair(0U, &X86::RFP32RegClass);
      if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT))
        return std::make_pair(0U, &X86::RFP64RegClass);
      return std::make_pair(0U, &X86::RFP80RegClass);
    case 'y':   // MMX_REGS if MMX allowed.
      if (!Subtarget->hasMMX()) break;
      return std::make_pair(0U, &X86::VR64RegClass);
    case 'Y':   // SSE_REGS if SSE2 allowed
      if (!Subtarget->hasSSE2()) break;
      // FALL THROUGH.
    case 'x':   // SSE_REGS if SSE1 allowed or AVX_REGS if AVX allowed
      if (!Subtarget->hasSSE1()) break;
      switch (VT.SimpleTy) {
      default: break;
      // Scalar SSE types.
      case MVT::f32:
      case MVT::i32:
        return std::make_pair(0U, &X86::FR32RegClass);
      case MVT::f64:
      case MVT::i64:
        return std::make_pair(0U, &X86::FR64RegClass);
      // Vector types.
      case MVT::v16i8:
      case MVT::v8i16:
      case MVT::v4i32:
      case MVT::v2i64:
      case MVT::v4f32:
      case MVT::v2f64:
        return std::make_pair(0U, &X86::VR128RegClass);
      // AVX types.
      case MVT::v32i8:
      case MVT::v16i16:
      case MVT::v8i32:
      case MVT::v4i64:
      case MVT::v8f32:
      case MVT::v4f64:
        return std::make_pair(0U, &X86::VR256RegClass);
      // AVX-512 types.
      case MVT::v8f64:
      case MVT::v16f32:
      case MVT::v16i32:
      case MVT::v8i64:
        return std::make_pair(0U, &X86::VR512RegClass);
      }
      break;
    }
  }
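  // Illustrative example (user-level view): with an int operand the constraint
  // "q" or "r" reaches the switch above with an i32 type and lands in GR32, so
  //   asm ("addl %1, %0" : "+r"(total) : "q"(delta));
  // allocates both operands to 32-bit GPRs; with SSE enabled, "x"(a_float)
  // would instead resolve to FR32 via the type switch.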
  // Use the default implementation in TargetLowering to convert the register
  // constraint into a member of a register class.
  std::pair<unsigned, const TargetRegisterClass*> Res;
  Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
  // Not found as a standard register?
  if (!Res.second) {
    // Map st(0) -> st(7) -> ST0
    if (Constraint.size() == 7 && Constraint[0] == '{' &&
        tolower(Constraint[1]) == 's' &&
        tolower(Constraint[2]) == 't' &&
        Constraint[3] == '(' &&
        (Constraint[4] >= '0' && Constraint[4] <= '7') &&
        Constraint[5] == ')' &&
        Constraint[6] == '}') {
      Res.first = X86::FP0 + Constraint[4] - '0';
      Res.second = &X86::RFP80RegClass;
      return Res;
    }
    // GCC allows "st(0)" to be called just plain "st".
    if (StringRef("{st}").equals_lower(Constraint)) {
      Res.first = X86::FP0;
      Res.second = &X86::RFP80RegClass;
      return Res;
    }
    // flags -> EFLAGS
    if (StringRef("{flags}").equals_lower(Constraint)) {
      Res.first = X86::EFLAGS;
      Res.second = &X86::CCRRegClass;
      return Res;
    }
    // 'A' means EAX + EDX.
    if (Constraint == "A") {
      Res.first = X86::EAX;
      Res.second = &X86::GR32_ADRegClass;
      return Res;
    }
    return Res;
  }
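  // Illustrative note (constraint strings as seen at the LLVM level; exact
  // spellings are assumptions): "{st}" and "{st(1)}" name x87 stack slots, and
  // "A" requests the EDX:EAX pair, as in the classic 32-bit
  //   asm ("rdtsc" : "=A"(tsc));
  // where the 64-bit result comes back split across the two registers.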
  // Otherwise, check to see if this is a register class of the wrong value
  // type. For example, we want to map "{ax},i32" -> {eax}; we don't want it to
  // turn into the {ax},{dx} pair.
  if (Res.second->hasType(VT))
    return Res;   // Correct type already, nothing to do.
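  // Illustrative example: the constraint "{ax}" with an i32 operand first
  // resolves to (X86::AX, GR16RegClass); the remapping below rewrites it to
  // (X86::EAX, GR32RegClass) so the full 32-bit value stays in one register.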
  // All of the single-register GCC register classes map their values onto
  // 16-bit register pieces "ax","dx","cx","bx","si","di","bp","sp". If we
  // really want an 8-bit or 32-bit register, map to the appropriate register
  // class and return the appropriate register.
  if (Res.second == &X86::GR16RegClass) {
    if (VT == MVT::i8 || VT == MVT::i1) {
      unsigned DestReg = 0;
      switch (Res.first) {
      default: break;
      case X86::AX: DestReg = X86::AL; break;
      case X86::DX: DestReg = X86::DL; break;
      case X86::CX: DestReg = X86::CL; break;
      case X86::BX: DestReg = X86::BL; break;
      }
      if (DestReg) {
        Res.first = DestReg;
        Res.second = &X86::GR8RegClass;
      }
    } else if (VT == MVT::i32 || VT == MVT::f32) {
      unsigned DestReg = 0;
      switch (Res.first) {
      default: break;
      case X86::AX: DestReg = X86::EAX; break;
      case X86::DX: DestReg = X86::EDX; break;
      case X86::CX: DestReg = X86::ECX; break;
      case X86::BX: DestReg = X86::EBX; break;
      case X86::SI: DestReg = X86::ESI; break;
      case X86::DI: DestReg = X86::EDI; break;
      case X86::BP: DestReg = X86::EBP; break;
      case X86::SP: DestReg = X86::ESP; break;
      }
      if (DestReg) {
        Res.first = DestReg;
        Res.second = &X86::GR32RegClass;
      }
    } else if (VT == MVT::i64 || VT == MVT::f64) {
      unsigned DestReg = 0;
      switch (Res.first) {
      default: break;
      case X86::AX: DestReg = X86::RAX; break;
      case X86::DX: DestReg = X86::RDX; break;
      case X86::CX: DestReg = X86::RCX; break;
      case X86::BX: DestReg = X86::RBX; break;
      case X86::SI: DestReg = X86::RSI; break;
      case X86::DI: DestReg = X86::RDI; break;
      case X86::BP: DestReg = X86::RBP; break;
      case X86::SP: DestReg = X86::RSP; break;
      }
      if (DestReg) {
        Res.first = DestReg;
        Res.second = &X86::GR64RegClass;
      }
    }
  } else if (Res.second == &X86::FR32RegClass ||
             Res.second == &X86::FR64RegClass ||
             Res.second == &X86::VR128RegClass ||
             Res.second == &X86::VR256RegClass ||
             Res.second == &X86::FR32XRegClass ||
             Res.second == &X86::FR64XRegClass ||
             Res.second == &X86::VR128XRegClass ||
             Res.second == &X86::VR256XRegClass ||
             Res.second == &X86::VR512RegClass) {
    // Handle references to XMM physical registers that got mapped into the
    // wrong class. This can happen with constraints like {xmm0} where the
    // target independent register mapper will just pick the first match it can
    // find, ignoring the required type.
    if (VT == MVT::f32 || VT == MVT::i32)
      Res.second = &X86::FR32RegClass;
    else if (VT == MVT::f64 || VT == MVT::i64)
      Res.second = &X86::FR64RegClass;
    else if (X86::VR128RegClass.hasType(VT))
      Res.second = &X86::VR128RegClass;
    else if (X86::VR256RegClass.hasType(VT))
      Res.second = &X86::VR256RegClass;
    else if (X86::VR512RegClass.hasType(VT))
      Res.second = &X86::VR512RegClass;
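    // Illustrative example (LLVM-IR level, details are an assumption): a call
    // such as
    //   %v = call <8 x float> asm "", "={xmm0}"()
    // names the register by its XMM alias, so the generic mapper first picks a
    // 128-bit class; the checks above then widen the class to VR256 (or VR512)
    // to match the operand type.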
  }

  return Res;
}

int X86TargetLowering::getScalingFactorCost(const AddrMode &AM,
                                            Type *Ty) const {
  // Scaling factors are not free at all.
  // An indexed folded instruction, i.e., inst (reg1, reg2, scale),
  // will take 2 allocations in the out-of-order engine instead of 1
  // for plain addressing mode, i.e., inst (reg1).
  // E.g.,
  //   vaddps (%rsi,%rdx), %ymm0, %ymm1
  // requires two allocations (one for the load, one for the computation),
  // whereas
  //   vaddps (%rsi), %ymm0, %ymm1
  // requires just one, freeing up allocations for other operations and leaving
  // fewer micro-operations to execute.
  //
  // For some X86 architectures this is even worse because, for instance for
  // stores, the complex addressing mode forces the instruction to use the
  // "load" ports instead of the dedicated "store" port.
  // E.g., on Haswell:
  //   vmovaps %ymm1, (%r8, %rdi) can use port 2 or 3.
  //   vmovaps %ymm1, (%r8) can use port 2, 3, or 7.
  if (isLegalAddressingMode(AM, Ty))
    // Scale represents reg2 * scale, thus account for 1
    // as soon as we use a second register.
    return AM.Scale != 0;
  return -1;
}
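// Worked example (illustrative): for a mode like [reg1 + 2*reg2 + 16] the mode
// is legal and AM.Scale == 2, so the hook reports an extra cost of 1; for
// [reg1 + 16] AM.Scale == 0 and the cost is 0; a mode that is not legal at all
// returns -1.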
bool X86TargetLowering::isTargetFTOL() const {
  return Subtarget->isTargetKnownWindowsMSVC() && !Subtarget->is64Bit();
}